Columns: content (string, lengths 0 to 1.05M), origin (2 classes), type (2 classes)
import os
import json
import pkgutil
import datetime
import ast

import yaml
from yaml import Loader

# Get version without importing the module
mod = ast.parse(pkgutil.get_data(__name__, "__init__.py").decode())
assignments = [node for node in mod.body if isinstance(node, ast.Assign)]
__version__ = [
    node.value.s for node in assignments if node.targets[0].id == '__version__'
][0]


def write_viz(vega_taxonomy, outname="viz.html"):
    """
    Use the current taxonomy to visualize the tree with d3.

    >>> import tdtax
    >>> tdtax.write_viz(tdtax.vega_taxonomy)
    """
    text = pkgutil.get_data(__name__, "viz_template.html").decode()
    text = text.replace("%%JSON%%", json.dumps(vega_taxonomy))
    text = text.replace("%%VERSION%%", __version__)
    text = text.replace("%%DATE%%", str(datetime.datetime.utcnow()))

    with open(outname, "w") as f:
        f.write(text)

    print(f"wrote {outname}")


def walk_and_replace(d, path="./", verbose=False):
    """Recursively replace references to other YAML files."""
    if not isinstance(d, dict):
        return
    for key, value in d.items():
        if isinstance(value, dict):
            walk_and_replace(value, path=path, verbose=verbose)
        elif isinstance(value, list):
            for i in range(len(value)):
                if isinstance(value[i], dict):
                    if value[i].get("ref") is not None:
                        ref = path + value[i].get("ref")
                        if os.path.exists(ref):
                            replacement = yaml.load(open(ref), Loader=Loader)
                            value[i] = replacement
                        else:
                            if verbose:
                                print(
                                    f"Did not find file {ref}."
                                    " Adding placeholder."
                                )
                            basename = os.path.basename(ref).split(".")[0]
                            value[i] = {"class": basename + "-placeholder"}
                    walk_and_replace(value[i], path=path, verbose=verbose)


def merge_yamls(fname):
    """Load a taxonomy YAML and expand any file references it contains."""
    taxonomy = yaml.load(open(fname), Loader=Loader)
    path = os.path.dirname(fname) + "/"
    walk_and_replace(taxonomy, path)
    return taxonomy
nilq/baby-python
python
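# Usage sketch for the taxonomy helpers defined in the snippet above.
# Hedged example: "taxonomy.yaml" and the output file name are hypothetical
# placeholders, and `tdtax` is assumed to be the installed package name
# (as suggested by the `write_viz` docstring).
import tdtax

tree = tdtax.merge_yamls("taxonomy.yaml")            # expands any `ref:` entries in place
tdtax.write_viz(tree, outname="taxonomy_viz.html")   # writes the d3 visualization page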
import os
import json
import base64
from urllib import request
from io import BytesIO


def _download_file(url, local_file_path):
    response = request.urlopen(url)
    with open(local_file_path, 'wb') as local_file:
        local_file.write(BytesIO(response.read()).read())


def _upload_to_s3(s3_interface, local_software_dir):
    s3_interface.upload_compressed(
        key_name_prefix="cbm3_aws/instance_prep",
        document_name="instance_software",
        local_path=local_software_dir)


def _load_software_list():
    software_list_path = os.path.join(
        get_local_dir(), "instance_prep_software.json")
    with open(software_list_path) as software_list_file:
        return json.load(software_list_file)["software_list"]


def upload_software(s3_interface, local_software_dir):
    """Downloads software for instance installation using the links in the
    packaged ./instance_prep_software.json file and uploads them to s3 using
    the specified s3_interface object.

    Args:
        s3_interface (cbm3_aws.s3_interface.S3Interface): object for
            uploading the software to s3
        local_software_dir (str): directory to store the downloaded software
    """
    for software in _load_software_list():
        _download_file(
            url=software["url"],
            local_file_path=os.path.join(
                local_software_dir, software["file_name"]))
    _upload_to_s3(s3_interface, local_software_dir)


def get_local_dir():
    """Gets the directory containing this script

    Returns:
        str: full path to the script's directory
    """
    return os.path.dirname(os.path.realpath(__file__))


def get_userdata(bucket_name, base64_encode=False):
    """Returns a string, optionally base64 encoded, to be run in the
    user-data field of an EC2 instance in order to prepare the OS for
    running CBM3 and a cbm3_aws worker script.

    Args:
        bucket_name (str): name of bucket from which the instance can
            download the required software.
        base64_encode (bool, optional): If set to True the returned string
            is base64 encoded. Defaults to False.

    Returns:
        str: the user data script
    """
    ps1_script_path = os.path.join(get_local_dir(), "instance_prep.ps1")
    ps1_variables = [
        f'Set-Variable "s3bucket" -Value "{bucket_name}"'
    ]
    with open(ps1_script_path) as ps1_script_file:
        ps1_script = ps1_script_file.read()
    user_data_script = '\n'.join([
        '<powershell>',
        '\n'.join(ps1_variables),
        ps1_script,
        '</powershell>'
    ])
    if base64_encode:
        return base64.b64encode(user_data_script.encode()).decode("ascii")
    return user_data_script
nilq/baby-python
python
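# Usage sketch for `get_userdata` from the snippet above, wiring the generated
# PowerShell user-data into an EC2 launch. Hedged example: the bucket name, AMI id
# and instance type are hypothetical placeholders, and using boto3 here is an
# assumption, not part of the module above.
import boto3

user_data = get_userdata(bucket_name="my-cbm3-software-bucket")

ec2 = boto3.client("ec2")
ec2.run_instances(
    ImageId="ami-0123456789abcdef0",  # placeholder Windows AMI
    InstanceType="t3.large",          # placeholder instance size
    MinCount=1,
    MaxCount=1,
    UserData=user_data,               # boto3 base64-encodes user data itself
)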
from runtime import *
"""list.pop(n)"""

def main():
    a = list(range(10))
    print a
    b = a.pop()
    print b
    print a
    assert( b==9 )
    c = a.pop(0)
    assert( c==0 )

    d = ['A', 'B', 'C']
    assert( d.pop(1)=='B' )
    assert( len(d)==2 )

main()
nilq/baby-python
python
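# The test above targets a Python-2-style transpiler runtime (`from runtime import *`,
# `print a`). The same `list.pop` behaviour it checks, restated in standard Python 3:
a = list(range(10))
assert a.pop() == 9     # pop() with no argument removes and returns the last item
assert a.pop(0) == 0    # pop(0) removes and returns the first item

d = ['A', 'B', 'C']
assert d.pop(1) == 'B'  # pop(i) removes and returns the item at index i
assert len(d) == 2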
# -*- coding: utf-8 -*-
import json
import re
from datetime import timedelta, datetime

from DictObject import DictObject
from luckydonaldUtils.logger import logging
from luckydonaldUtils.encoding import unicode_type, to_unicode as u, to_native as n
from luckydonaldUtils.functions import deprecated
from luckydonaldUtils.exceptions import assert_type_or_raise

from ..exceptions import TgApiServerException, TgApiParseException
from ..exceptions import TgApiTypeError, TgApiResponseException
from ..exceptions import TgApiException
from ..api_types.sendable.inline import InlineQueryResult
from ..api_types import from_array_list
from .base import BotBase
from ..api_types.sendable.files import InputFile

# sync imports
from time import sleep
import requests.exceptions
import requests

__author__ = 'luckydonald'
__all__ = ["SyncBot", "Bot"]

logger = logging.getLogger(__name__)


class SyncBot(BotBase):
    def _load_info(self):
        """
        This function stores the id and the username of the bot.
        Called by the `.username` and `.id` properties.

        This function is synchronous. In fact, `AsyncBot` uses `SyncBot` to load those.

        :return:
        """
        myself = self.get_me()
        if self.return_python_objects:
            self._me = myself
        else:
            from ..api_types.receivable.peer import User
            self._me = User.from_array(myself["result"])
        # end if
    # end def

    def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query):
        """
        Send a request to the api.

        If the bot is set to return the json objects, it will look like this:

        ```json
        {
            "ok": bool,
            "result": {...},
            # optionally present:
            "description": "human-readable description of the result",
            "error_code": int
        }
        ```

        :param command: The Url command parameter
        :type  command: str

        :param request_timeout: When the request should time out. Default: `self._default_timeout`
        :type  request_timeout: int

        :param files: if it needs to send files.

        :param use_long_polling: if it should use long polling. Default: `False`
                                 (see http://docs.python-requests.org/en/latest/api/#requests.Response.iter_content)
        :type  use_long_polling: bool

        :param query: all the other `**kwargs` will get json encoded.

        :return: The json response from the server, or, if `self.return_python_objects` is `True`, a parsed return type.
        :rtype: DictObject.DictObject | pytgbot.api_types.receivable.Receivable
        """
        request_timeout = self._default_timeout if request_timeout is None else request_timeout
        url, params, files = self._prepare_request(command, query)
        r = requests.post(
            url, params=params, files=files, stream=use_long_polling,
            verify=True,  # No self signed certificates. Telegram should be trustworthy anyway...
            timeout=request_timeout,
        )
        json = r.json()
        return self._postprocess_request(r.request, response=r, json=json)
    # end def do

    def _do_fileupload(self, file_param_name, value, _command=None, **kwargs):
        """
        :param file_param_name: For what field the file should be uploaded.
        :type  file_param_name: str

        :param value: File to send. You can either pass a file_id as String to resend a file
                      that is already on the Telegram servers, or upload a new file,
                      specifying the file path as :class:`pytgbot.api_types.sendable.files.InputFile`.
        :type  value: pytgbot.api_types.sendable.files.InputFile | str

        :param _command: Overwrite the sent command.
                         Default is to convert `file_param_name` to camel case (`"voice_note"` -> `"sendVoiceNote"`).

        :param kwargs: will get json encoded.

        :return: The json response from the server, or, if `self.return_python_objects` is `True`, a parsed return type.
        :rtype: DictObject.DictObject | pytgbot.api_types.receivable.Receivable

        :raises TgApiTypeError, TgApiParseException, TgApiServerException: Everything from :meth:`Bot.do`, and :class:`TgApiTypeError`
        """
        if isinstance(value, str):
            kwargs[file_param_name] = str(value)
        elif isinstance(value, unicode_type):
            kwargs[file_param_name] = n(value)
        elif isinstance(value, InputFile):
            kwargs["files"] = value.get_request_files(file_param_name)
        else:
            raise TgApiTypeError(
                "Parameter {key} is not type (str, {text_type}, {input_file_type}), but type {type}".format(
                    key=file_param_name, type=type(value), input_file_type=InputFile, text_type=unicode_type
                )
            )
        # end if
        if not _command:
            # command as camelCase: "voice_note" -> "sendVoiceNote"
            # https://stackoverflow.com/a/10984923/3423324
            command = re.sub(r'(?!^)_([a-zA-Z])', lambda m: m.group(1).upper(), "send_" + file_param_name)
        else:
            command = _command
        # end if
        return self.do(command, **kwargs)
    # end def _do_fileupload

    # compatibility

    @deprecated("The function `bot.send_msg(…)` is now named `bot.send_message(…)`.")
    def send_msg(self, *args, **kwargs):
        """ alias to the newer :func:`send_message` """
        return self.send_message(*args, **kwargs)
    # end def send_msg

    @deprecated("The function `bot.kick_chat_member(…)` is now named `bot.ban_chat_member(…)`.")
    def kick_chat_member(self, *args, **kwargs):
        """ alias to the newer :func:`ban_chat_member` """
        return self.ban_chat_member(*args, **kwargs)
    # end def kick_chat_member

    @deprecated("The function `bot.get_chat_members_count(…)` is now named `bot.get_chat_member_count(…)`.")
    def get_chat_members_count(self, *args, **kwargs):
        """ alias to the newer :func:`get_chat_member_count` """
        return self.get_chat_member_count(*args, **kwargs)
    # end def get_chat_members_count

    # start of generated functions

    def get_updates(self, offset=None, limit=100, poll_timeout=None, allowed_updates=None, request_timeout=None,
                    delta=timedelta(milliseconds=100), error_as_empty=False):
        """
        Use this method to receive incoming updates using long polling
        (see https://en.wikipedia.org/wiki/Push_technology#Long_polling).
        An Array of Update objects is returned.

        You can choose to set `error_as_empty` to `True` or `False`.
        If `error_as_empty` is set to `True`, such an exception is logged as a warning and an empty result is faked,
        intended for use in for loops. In that case (and only in that case) the result contains an "exception" field.
        It will look like this: `{"result": [], "exception": e}`.
        This is useful if you want to use a for loop but ignore network-related burps.
        If `error_as_empty` is set to `False` however, all `requests.RequestException` exceptions are raised as usual.

        :keyword offset: (Optional) Identifier of the first update to be returned.
                         Must be greater by one than the highest among the identifiers of previously received updates.
                         By default, updates starting with the earliest unconfirmed update are returned.
                         An update is considered confirmed as soon as :func:`get_updates` is called
                         with an offset higher than its `update_id`.
        :type  offset: int

        :param limit: Limits the number of updates to be retrieved. Values between 1-100 are accepted. Defaults to 100.
        :type  limit: int

        :param poll_timeout: Timeout in seconds for long polling, i.e. how long we want to wait at most.
                             Defaults to 0, i.e. usual short polling.
        :type  poll_timeout: int

        :param allowed_updates: List the types of updates you want your bot to receive.
                                For example, specify ["message", "edited_channel_post", "callback_query"]
                                to only receive updates of these types.
                                See Update for a complete list of available update types.
                                Specify an empty list to receive all updates regardless of type (default).
                                If not specified, the previous setting will be used.
                                Please note that this parameter doesn't affect updates created before the call to
                                get_updates, so unwanted updates may be received for a short period of time.
        :type  allowed_updates: list of str

        :param request_timeout: Timeout of the request. Not the long polling server side timeout.
                                If not specified, it is set to `poll_timeout` + 2.
        :type  request_timeout: int

        :param delta: Wait at least 'delta' between requests. Useful in a loop.
        :type  delta: datetime.timedelta

        :param error_as_empty: If errors which are subclasses of `requests.RequestException` should be logged
                               but not raised. Instead the returned DictObject will contain an "exception" field
                               holding the exception that occurred, and the "result" field will be an empty
                               list `[]`. Defaults to `False`.
        :type  error_as_empty: bool

        Returns:

        :return: An Array of Update objects is returned, or an empty array if there was
                 a requests.RequestException and `error_as_empty` is set to `True`.
        :rtype: list of pytgbot.api_types.receivable.updates.Update
        """
        assert(offset is None or isinstance(offset, int))
        assert(limit is None or isinstance(limit, int))
        assert(poll_timeout is None or isinstance(poll_timeout, int))
        assert(allowed_updates is None or isinstance(allowed_updates, list))
        if poll_timeout and request_timeout is None:
            # default the request timeout to the long-polling timeout plus a small grace period
            request_timeout = poll_timeout + 2
        # end if

        if delta.total_seconds() > poll_timeout:
            now = datetime.now()
            if self._last_update - now < delta:
                wait = ((now - self._last_update) - delta).total_seconds()  # can be 0.2
                wait = 0 if wait < 0 else wait
                if wait != 0:
                    logger.debug("Sleeping {i} seconds.".format(i=wait))
                # end if
                sleep(wait)
            # end if
        # end if
        self._last_update = datetime.now()

        use_long_polling = poll_timeout != 0
        try:
            result = self.do(
                "getUpdates", offset=offset, limit=limit, timeout=poll_timeout, allowed_updates=allowed_updates,
                use_long_polling=use_long_polling, request_timeout=request_timeout
            )
            return self._get_updates__process_result(result)
        except (requests.exceptions.RequestException, TgApiException) as e:
            if error_as_empty:
                if not isinstance(e, requests.exceptions.Timeout) or not use_long_polling:
                    logger.warning(
                        "Network related error happened in get_updates(), but will be ignored: " + str(e),
                        exc_info=True,
                    )
                # end if
                self._last_update = datetime.now()
                return DictObject(result=[], exception=e)
            else:
                raise
            # end if
        # end try
    # end def get_updates

    def set_webhook(self, url, certificate=None, ip_address=None, max_connections=None, allowed_updates=None,
                    drop_pending_updates=None):
        """
        Use this method to specify a url and receive incoming updates via an outgoing webhook.
        Whenever there is an update for the bot, we will send an HTTPS POST request to the specified url,
        containing a JSON-serialized Update.
        In case of an unsuccessful request, we will give up after a reasonable amount of attempts.
        Returns True on success.

        If you'd like to make sure that the webhook request comes from Telegram, we recommend using a secret path
        in the URL, e.g. https://www.example.com/<token>. Since nobody else knows your bot's token,
        you can be pretty sure it's us.

        Notes
        1. You will not be able to receive updates using getUpdates for as long as an outgoing webhook is set up.
        2. To use a self-signed certificate, you need to upload your public key certificate using the
           certificate parameter. Please upload as InputFile, sending a String will not work.
        3.
Ports currently supported for Webhooks: 443, 80, 88, 8443. NEW! If you're having any trouble setting up webhooks, please check out this amazing guide to Webhooks. https://core.telegram.org/bots/api#setwebhook Parameters: :param url: HTTPS url to send updates to. Use an empty string to remove webhook integration :type url: str|unicode Optional keyword parameters: :param certificate: Upload your public key certificate so that the root certificate in use can be checked. See our self-signed guide for details. :type certificate: pytgbot.api_types.sendable.files.InputFile :param ip_address: The fixed IP address which will be used to send webhook requests instead of the IP address resolved through DNS :type ip_address: str|unicode :param max_connections: Maximum allowed number of simultaneous HTTPS connections to the webhook for update delivery, 1-100. Defaults to 40. Use lower values to limit the load on your bot's server, and higher values to increase your bot's throughput. :type max_connections: int :param allowed_updates: A JSON-serialized list of the update types you want your bot to receive. For example, specify ["message", "edited_channel_post", "callback_query"] to only receive updates of these types. See Update for a complete list of available update types. Specify an empty list to receive all update types except chat_member (default). If not specified, the previous setting will be used.Please note that this parameter doesn't affect updates created before the call to the setWebhook, so unwanted updates may be received for a short period of time. :type allowed_updates: list of str|unicode :param drop_pending_updates: Pass True to drop all pending updates :type drop_pending_updates: bool Returns: :return: Returns True on success :rtype: bool """ result = self._set_webhook__make_request(url=url, certificate=certificate, ip_address=ip_address, max_connections=max_connections, allowed_updates=allowed_updates, drop_pending_updates=drop_pending_updates) return self._set_webhook__process_result(result) # end def set_webhook def delete_webhook(self, drop_pending_updates=None): """ Use this method to remove webhook integration if you decide to switch back to getUpdates. Returns True on success. https://core.telegram.org/bots/api#deletewebhook Optional keyword parameters: :param drop_pending_updates: Pass True to drop all pending updates :type drop_pending_updates: bool Returns: :return: Returns True on success :rtype: bool """ result = self._delete_webhook__make_request(drop_pending_updates=drop_pending_updates) return self._delete_webhook__process_result(result) # end def delete_webhook def get_webhook_info(self): """ Use this method to get current webhook status. Requires no parameters. On success, returns a WebhookInfo object. If the bot is using getUpdates, will return an object with the url field empty. https://core.telegram.org/bots/api#getwebhookinfo Returns: :return: On success, returns a WebhookInfo object :rtype: pytgbot.api_types.receivable.updates.WebhookInfo """ result = self._get_webhook_info__make_request() return self._get_webhook_info__process_result(result) # end def get_webhook_info def get_me(self): """ A simple method for testing your bot's authentication token. Requires no parameters. Returns basic information about the bot in form of a User object. 
https://core.telegram.org/bots/api#getme Returns: :return: Returns basic information about the bot in form of a User object :rtype: pytgbot.api_types.receivable.peer.User """ result = self._get_me__make_request() return self._get_me__process_result(result) # end def get_me def log_out(self): """ Use this method to log out from the cloud Bot API server before launching the bot locally. You must log out the bot before running it locally, otherwise there is no guarantee that the bot will receive updates. After a successful call, you can immediately log in on a local server, but will not be able to log in back to the cloud Bot API server for 10 minutes. Returns True on success. Requires no parameters. https://core.telegram.org/bots/api#logout Returns: :return: Returns True on success :rtype: bool """ result = self._log_out__make_request() return self._log_out__process_result(result) # end def log_out def send_message(self, chat_id, text, parse_mode=None, entities=None, disable_web_page_preview=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send text messages. On success, the sent Message is returned. https://core.telegram.org/bots/api#sendmessage Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param text: Text of the message to be sent, 1-4096 characters after entities parsing :type text: str|unicode Optional keyword parameters: :param parse_mode: Mode for parsing entities in the message text. See formatting options for more details. :type parse_mode: str|unicode :param entities: A JSON-serialized list of special entities that appear in message text, which can be specified instead of parse_mode :type entities: list of pytgbot.api_types.receivable.media.MessageEntity :param disable_web_page_preview: Disables link previews for links in this message :type disable_web_page_preview: bool :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. 
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_message__make_request(chat_id=chat_id, text=text, parse_mode=parse_mode, entities=entities, disable_web_page_preview=disable_web_page_preview, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_message__process_result(result) # end def send_message def forward_message(self, chat_id, from_chat_id, message_id, disable_notification=None, protect_content=None): """ Use this method to forward messages of any kind. Service messages can't be forwarded. On success, the sent Message is returned. https://core.telegram.org/bots/api#forwardmessage Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param from_chat_id: Unique identifier for the chat where the original message was sent (or channel username in the format @channelusername) :type from_chat_id: int | str|unicode :param message_id: Message identifier in the chat specified in from_chat_id :type message_id: int Optional keyword parameters: :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the forwarded message from forwarding and saving :type protect_content: bool Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._forward_message__make_request(chat_id=chat_id, from_chat_id=from_chat_id, message_id=message_id, disable_notification=disable_notification, protect_content=protect_content) return self._forward_message__process_result(result) # end def forward_message def copy_message(self, chat_id, from_chat_id, message_id, caption=None, parse_mode=None, caption_entities=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to copy messages of any kind. Service messages and invoice messages can't be copied. The method is analogous to the method forwardMessage, but the copied message doesn't have a link to the original message. Returns the MessageId of the sent message on success. https://core.telegram.org/bots/api#copymessage Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param from_chat_id: Unique identifier for the chat where the original message was sent (or channel username in the format @channelusername) :type from_chat_id: int | str|unicode :param message_id: Message identifier in the chat specified in from_chat_id :type message_id: int Optional keyword parameters: :param caption: New caption for media, 0-1024 characters after entities parsing. If not specified, the original caption is kept :type caption: str|unicode :param parse_mode: Mode for parsing entities in the new caption. See formatting options for more details. 
:type parse_mode: str|unicode :param caption_entities: A JSON-serialized list of special entities that appear in the new caption, which can be specified instead of parse_mode :type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: Returns the MessageId of the sent message on success :rtype: pytgbot.api_types.receivable.responses.MessageId """ result = self._copy_message__make_request(chat_id=chat_id, from_chat_id=from_chat_id, message_id=message_id, caption=caption, parse_mode=parse_mode, caption_entities=caption_entities, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._copy_message__process_result(result) # end def copy_message def send_photo(self, chat_id, photo, caption=None, parse_mode=None, caption_entities=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send photos. On success, the sent Message is returned. https://core.telegram.org/bots/api#sendphoto Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param photo: Photo to send. Pass a file_id as String to send a photo that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a photo from the Internet, or upload a new photo using multipart/form-data. The photo must be at most 10 MB in size. The photo's width and height must not exceed 10000 in total. Width and height ratio must be at most 20. More info on Sending Files » :type photo: pytgbot.api_types.sendable.files.InputFile | str|unicode Optional keyword parameters: :param caption: Photo caption (may also be used when resending photos by file_id), 0-1024 characters after entities parsing :type caption: str|unicode :param parse_mode: Mode for parsing entities in the photo caption. See formatting options for more details. :type parse_mode: str|unicode :param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode :type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param disable_notification: Sends the message silently. Users will receive a notification with no sound. 
:type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_photo__make_request(chat_id=chat_id, photo=photo, caption=caption, parse_mode=parse_mode, caption_entities=caption_entities, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_photo__process_result(result) # end def send_photo def send_audio(self, chat_id, audio, caption=None, parse_mode=None, caption_entities=None, duration=None, performer=None, title=None, thumb=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .MP3 or .M4A format. On success, the sent Message is returned. Bots can currently send audio files of up to 50 MB in size, this limit may be changed in the future. For sending voice messages, use the sendVoice method instead. https://core.telegram.org/bots/api#sendaudio Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param audio: Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files » :type audio: pytgbot.api_types.sendable.files.InputFile | str|unicode Optional keyword parameters: :param caption: Audio caption, 0-1024 characters after entities parsing :type caption: str|unicode :param parse_mode: Mode for parsing entities in the audio caption. See formatting options for more details. :type parse_mode: str|unicode :param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode :type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param duration: Duration of the audio in seconds :type duration: int :param performer: Performer :type performer: str|unicode :param title: Track name :type title: str|unicode :param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. 
Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files » :type thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_audio__make_request(chat_id=chat_id, audio=audio, caption=caption, parse_mode=parse_mode, caption_entities=caption_entities, duration=duration, performer=performer, title=title, thumb=thumb, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_audio__process_result(result) # end def send_audio def send_document(self, chat_id, document, thumb=None, caption=None, parse_mode=None, caption_entities=None, disable_content_type_detection=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send general files. On success, the sent Message is returned. Bots can currently send files of any type of up to 50 MB in size, this limit may be changed in the future. https://core.telegram.org/bots/api#senddocument Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param document: File to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files » :type document: pytgbot.api_types.sendable.files.InputFile | str|unicode Optional keyword parameters: :param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. 
More info on Sending Files » :type thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode :param caption: Document caption (may also be used when resending documents by file_id), 0-1024 characters after entities parsing :type caption: str|unicode :param parse_mode: Mode for parsing entities in the document caption. See formatting options for more details. :type parse_mode: str|unicode :param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode :type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param disable_content_type_detection: Disables automatic server-side content type detection for files uploaded using multipart/form-data :type disable_content_type_detection: bool :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_document__make_request(chat_id=chat_id, document=document, thumb=thumb, caption=caption, parse_mode=parse_mode, caption_entities=caption_entities, disable_content_type_detection=disable_content_type_detection, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_document__process_result(result) # end def send_document def send_video(self, chat_id, video, duration=None, width=None, height=None, thumb=None, caption=None, parse_mode=None, caption_entities=None, supports_streaming=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send video files, Telegram clients support mp4 videos (other formats may be sent as Document). On success, the sent Message is returned. Bots can currently send video files of up to 50 MB in size, this limit may be changed in the future. https://core.telegram.org/bots/api#sendvideo Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param video: Video to send. Pass a file_id as String to send a video that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a video from the Internet, or upload a new video using multipart/form-data. 
More info on Sending Files » :type video: pytgbot.api_types.sendable.files.InputFile | str|unicode Optional keyword parameters: :param duration: Duration of sent video in seconds :type duration: int :param width: Video width :type width: int :param height: Video height :type height: int :param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files » :type thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode :param caption: Video caption (may also be used when resending videos by file_id), 0-1024 characters after entities parsing :type caption: str|unicode :param parse_mode: Mode for parsing entities in the video caption. See formatting options for more details. :type parse_mode: str|unicode :param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode :type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param supports_streaming: Pass True, if the uploaded video is suitable for streaming :type supports_streaming: bool :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_video__make_request(chat_id=chat_id, video=video, duration=duration, width=width, height=height, thumb=thumb, caption=caption, parse_mode=parse_mode, caption_entities=caption_entities, supports_streaming=supports_streaming, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_video__process_result(result) # end def send_video def send_animation(self, chat_id, animation, duration=None, width=None, height=None, thumb=None, caption=None, parse_mode=None, caption_entities=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound). 
On success, the sent Message is returned. Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future. https://core.telegram.org/bots/api#sendanimation Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param animation: Animation to send. Pass a file_id as String to send an animation that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an animation from the Internet, or upload a new animation using multipart/form-data. More info on Sending Files » :type animation: pytgbot.api_types.sendable.files.InputFile | str|unicode Optional keyword parameters: :param duration: Duration of sent animation in seconds :type duration: int :param width: Animation width :type width: int :param height: Animation height :type height: int :param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files » :type thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode :param caption: Animation caption (may also be used when resending animation by file_id), 0-1024 characters after entities parsing :type caption: str|unicode :param parse_mode: Mode for parsing entities in the animation caption. See formatting options for more details. :type parse_mode: str|unicode :param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode :type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. 
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_animation__make_request(chat_id=chat_id, animation=animation, duration=duration, width=width, height=height, thumb=thumb, caption=caption, parse_mode=parse_mode, caption_entities=caption_entities, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_animation__process_result(result) # end def send_animation def send_voice(self, chat_id, voice, caption=None, parse_mode=None, caption_entities=None, duration=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send audio files, if you want Telegram clients to display the file as a playable voice message. For this to work, your audio must be in an .OGG file encoded with OPUS (other formats may be sent as Audio or Document). On success, the sent Message is returned. Bots can currently send voice messages of up to 50 MB in size, this limit may be changed in the future. https://core.telegram.org/bots/api#sendvoice Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param voice: Audio file to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files » :type voice: pytgbot.api_types.sendable.files.InputFile | str|unicode Optional keyword parameters: :param caption: Voice message caption, 0-1024 characters after entities parsing :type caption: str|unicode :param parse_mode: Mode for parsing entities in the voice message caption. See formatting options for more details. :type parse_mode: str|unicode :param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode :type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param duration: Duration of the voice message in seconds :type duration: int :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. 
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_voice__make_request(chat_id=chat_id, voice=voice, caption=caption, parse_mode=parse_mode, caption_entities=caption_entities, duration=duration, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_voice__process_result(result) # end def send_voice def send_video_note(self, chat_id, video_note, duration=None, length=None, thumb=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ As of v.4.0, Telegram clients support rounded square mp4 videos of up to 1 minute long. Use this method to send video messages. On success, the sent Message is returned. https://core.telegram.org/bots/api#sendvideonote Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param video_note: Video note to send. Pass a file_id as String to send a video note that exists on the Telegram servers (recommended) or upload a new video using multipart/form-data. More info on Sending Files ». Sending video notes by a URL is currently unsupported :type video_note: pytgbot.api_types.sendable.files.InputFile | str|unicode Optional keyword parameters: :param duration: Duration of sent video in seconds :type duration: int :param length: Video width and height, i.e. diameter of the video message :type length: int :param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files » :type thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. 
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_video_note__make_request(chat_id=chat_id, video_note=video_note, duration=duration, length=length, thumb=thumb, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_video_note__process_result(result) # end def send_video_note def send_media_group(self, chat_id, media, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None): """ Use this method to send a group of photos, videos, documents or audios as an album. Documents and audio files can be only grouped in an album with messages of the same type. On success, an array of Messages that were sent is returned. https://core.telegram.org/bots/api#sendmediagroup Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param media: A JSON-serialized array describing messages to be sent, must include 2-10 items :type media: list of pytgbot.api_types.sendable.input_media.InputMediaAudio | list of pytgbot.api_types.sendable.input_media.InputMediaDocument | list of pytgbot.api_types.sendable.input_media.InputMediaPhoto | list of pytgbot.api_types.sendable.input_media.InputMediaVideo Optional keyword parameters: :param disable_notification: Sends messages silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent messages from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the messages are a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool Returns: :return: On success, an array of Messages that were sent is returned :rtype: list of pytgbot.api_types.receivable.updates.Message """ result = self._send_media_group__make_request(chat_id=chat_id, media=media, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply) return self._send_media_group__process_result(result) # end def send_media_group def send_location(self, chat_id, latitude, longitude, horizontal_accuracy=None, live_period=None, heading=None, proximity_alert_radius=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send point on the map. On success, the sent Message is returned. 
https://core.telegram.org/bots/api#sendlocation Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param latitude: Latitude of the location :type latitude: float :param longitude: Longitude of the location :type longitude: float Optional keyword parameters: :param horizontal_accuracy: The radius of uncertainty for the location, measured in meters; 0-1500 :type horizontal_accuracy: float :param live_period: Period in seconds for which the location will be updated (see Live Locations, should be between 60 and 86400. :type live_period: int :param heading: For live locations, a direction in which the user is moving, in degrees. Must be between 1 and 360 if specified. :type heading: int :param proximity_alert_radius: For live locations, a maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified. :type proximity_alert_radius: int :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_location__make_request(chat_id=chat_id, latitude=latitude, longitude=longitude, horizontal_accuracy=horizontal_accuracy, live_period=live_period, heading=heading, proximity_alert_radius=proximity_alert_radius, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_location__process_result(result) # end def send_location def edit_message_live_location(self, latitude, longitude, chat_id=None, message_id=None, inline_message_id=None, horizontal_accuracy=None, heading=None, proximity_alert_radius=None, reply_markup=None): """ Use this method to edit live location messages. A location can be edited until its live_period expires or editing is explicitly disabled by a call to stopMessageLiveLocation. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. https://core.telegram.org/bots/api#editmessagelivelocation Parameters: :param latitude: Latitude of new location :type latitude: float :param longitude: Longitude of new location :type longitude: float Optional keyword parameters: :param chat_id: Required if inline_message_id is not specified. 
Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param message_id: Required if inline_message_id is not specified. Identifier of the message to edit :type message_id: int :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :type inline_message_id: str|unicode :param horizontal_accuracy: The radius of uncertainty for the location, measured in meters; 0-1500 :type horizontal_accuracy: float :param heading: Direction in which the user is moving, in degrees. Must be between 1 and 360 if specified. :type heading: int :param proximity_alert_radius: Maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified. :type proximity_alert_radius: int :param reply_markup: A JSON-serialized object for a new inline keyboard. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup Returns: :return: On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned :rtype: pytgbot.api_types.receivable.updates.Message | bool """ result = self._edit_message_live_location__make_request(latitude=latitude, longitude=longitude, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, horizontal_accuracy=horizontal_accuracy, heading=heading, proximity_alert_radius=proximity_alert_radius, reply_markup=reply_markup) return self._edit_message_live_location__process_result(result) # end def edit_message_live_location def stop_message_live_location(self, chat_id=None, message_id=None, inline_message_id=None, reply_markup=None): """ Use this method to stop updating a live location message before live_period expires. On success, if the message is not an inline message, the edited Message is returned, otherwise True is returned. https://core.telegram.org/bots/api#stopmessagelivelocation Optional keyword parameters: :param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param message_id: Required if inline_message_id is not specified. Identifier of the message with live location to stop :type message_id: int :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :type inline_message_id: str|unicode :param reply_markup: A JSON-serialized object for a new inline keyboard. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup Returns: :return: On success, if the message is not an inline message, the edited Message is returned, otherwise True is returned :rtype: pytgbot.api_types.receivable.updates.Message | bool """ result = self._stop_message_live_location__make_request(chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, reply_markup=reply_markup) return self._stop_message_live_location__process_result(result) # end def stop_message_live_location def send_venue(self, chat_id, latitude, longitude, title, address, foursquare_id=None, foursquare_type=None, google_place_id=None, google_place_type=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send information about a venue. On success, the sent Message is returned. 
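Example (illustrative sketch, not from the upstream API documentation; assumes an already initialized bot instance named `bot` and placeholder values):

    # chat id, coordinates and venue details below are placeholders
    sent = bot.send_venue(chat_id=123456789, latitude=48.8584, longitude=2.2945, title="Eiffel Tower", address="Champ de Mars, 5 Avenue Anatole France, Paris")
    print(sent)  # the returned Message object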
https://core.telegram.org/bots/api#sendvenue Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param latitude: Latitude of the venue :type latitude: float :param longitude: Longitude of the venue :type longitude: float :param title: Name of the venue :type title: str|unicode :param address: Address of the venue :type address: str|unicode Optional keyword parameters: :param foursquare_id: Foursquare identifier of the venue :type foursquare_id: str|unicode :param foursquare_type: Foursquare type of the venue, if known. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".) :type foursquare_type: str|unicode :param google_place_id: Google Places identifier of the venue :type google_place_id: str|unicode :param google_place_type: Google Places type of the venue. (See supported types.) :type google_place_type: str|unicode :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_venue__make_request(chat_id=chat_id, latitude=latitude, longitude=longitude, title=title, address=address, foursquare_id=foursquare_id, foursquare_type=foursquare_type, google_place_id=google_place_id, google_place_type=google_place_type, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_venue__process_result(result) # end def send_venue def send_contact(self, chat_id, phone_number, first_name, last_name=None, vcard=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send phone contacts. On success, the sent Message is returned. https://core.telegram.org/bots/api#sendcontact Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param phone_number: Contact's phone number :type phone_number: str|unicode :param first_name: Contact's first name :type first_name: str|unicode Optional keyword parameters: :param last_name: Contact's last name :type last_name: str|unicode :param vcard: Additional data about the contact in the form of a vCard, 0-2048 bytes :type vcard: str|unicode :param disable_notification: Sends the message silently. 
Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_contact__make_request(chat_id=chat_id, phone_number=phone_number, first_name=first_name, last_name=last_name, vcard=vcard, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_contact__process_result(result) # end def send_contact def send_poll(self, chat_id, question, options, is_anonymous=None, type=None, allows_multiple_answers=None, correct_option_id=None, explanation=None, explanation_parse_mode=None, explanation_entities=None, open_period=None, close_date=None, is_closed=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send a native poll. On success, the sent Message is returned. https://core.telegram.org/bots/api#sendpoll Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param question: Poll question, 1-300 characters :type question: str|unicode :param options: A JSON-serialized list of answer options, 2-10 strings 1-100 characters each :type options: list of str|unicode Optional keyword parameters: :param is_anonymous: True, if the poll needs to be anonymous, defaults to True :type is_anonymous: bool :param type: Poll type, "quiz" or "regular", defaults to "regular" :type type: str|unicode :param allows_multiple_answers: True, if the poll allows multiple answers, ignored for polls in quiz mode, defaults to False :type allows_multiple_answers: bool :param correct_option_id: 0-based identifier of the correct answer option, required for polls in quiz mode :type correct_option_id: int :param explanation: Text that is shown when a user chooses an incorrect answer or taps on the lamp icon in a quiz-style poll, 0-200 characters with at most 2 line feeds after entities parsing :type explanation: str|unicode :param explanation_parse_mode: Mode for parsing entities in the explanation. See formatting options for more details. 
:type explanation_parse_mode: str|unicode :param explanation_entities: A JSON-serialized list of special entities that appear in the poll explanation, which can be specified instead of parse_mode :type explanation_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param open_period: Amount of time in seconds the poll will be active after creation, 5-600. Can't be used together with close_date. :type open_period: int :param close_date: Point in time (Unix timestamp) when the poll will be automatically closed. Must be at least 5 and no more than 600 seconds in the future. Can't be used together with open_period. :type close_date: int :param is_closed: Pass True, if the poll needs to be immediately closed. This can be useful for poll preview. :type is_closed: bool :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_poll__make_request(chat_id=chat_id, question=question, options=options, is_anonymous=is_anonymous, type=type, allows_multiple_answers=allows_multiple_answers, correct_option_id=correct_option_id, explanation=explanation, explanation_parse_mode=explanation_parse_mode, explanation_entities=explanation_entities, open_period=open_period, close_date=close_date, is_closed=is_closed, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_poll__process_result(result) # end def send_poll def send_dice(self, chat_id, emoji=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send an animated emoji that will display a random value. On success, the sent Message is returned. https://core.telegram.org/bots/api#senddice Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode Optional keyword parameters: :param emoji: Emoji on which the dice throw animation is based. Currently, must be one of "🎲", "🎯", "🏀", "⚽", "🎳", or "🎰". Dice can have values 1-6 for "🎲", "🎯" and "🎳", values 1-5 for "🏀" and "⚽", and values 1-64 for "🎰". Defaults to "🎲" :type emoji: str|unicode :param disable_notification: Sends the message silently. Users will receive a notification with no sound. 
:type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_dice__make_request(chat_id=chat_id, emoji=emoji, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_dice__process_result(result) # end def send_dice def send_chat_action(self, chat_id, action): """ Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status). Returns True on success. Example: The ImageBot needs some time to process a request and upload the image. Instead of sending a text message along the lines of "Retrieving image, please wait…", the bot may use sendChatAction with action = upload_photo. The user will see a "sending photo" status for the bot. We only recommend using this method when a response from the bot will take a noticeable amount of time to arrive. https://core.telegram.org/bots/api#sendchataction Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param action: Type of action to broadcast. Choose one, depending on what the user is about to receive: typing for text messages, upload_photo for photos, record_video or upload_video for videos, record_voice or upload_voice for voice notes, upload_document for general files, choose_sticker for stickers, find_location for location data, record_video_note or upload_video_note for video notes. :type action: str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._send_chat_action__make_request(chat_id=chat_id, action=action) return self._send_chat_action__process_result(result) # end def send_chat_action def get_user_profile_photos(self, user_id, offset=None, limit=None): """ Use this method to get a list of profile pictures for a user. Returns a UserProfilePhotos object. https://core.telegram.org/bots/api#getuserprofilephotos Parameters: :param user_id: Unique identifier of the target user :type user_id: int Optional keyword parameters: :param offset: Sequential number of the first photo to be returned. By default, all photos are returned. :type offset: int :param limit: Limits the number of photos to be retrieved. Values between 1-100 are accepted. Defaults to 100. 
:type limit: int Returns: :return: Returns a UserProfilePhotos object :rtype: pytgbot.api_types.receivable.media.UserProfilePhotos """ result = self._get_user_profile_photos__make_request(user_id=user_id, offset=offset, limit=limit) return self._get_user_profile_photos__process_result(result) # end def get_user_profile_photos def get_file(self, file_id): """ Use this method to get basic info about a file and prepare it for downloading. For the moment, bots can download files of up to 20MB in size. On success, a File object is returned. The file can then be downloaded via the link https://api.telegram.org/file/bot<token>/<file_path>, where <file_path> is taken from the response. It is guaranteed that the link will be valid for at least 1 hour. When the link expires, a new one can be requested by calling getFile again. Note: This function may not preserve the original file name and MIME type. You should save the file's MIME type and name (if available) when the File object is received. https://core.telegram.org/bots/api#getfile Parameters: :param file_id: File identifier to get info about :type file_id: str|unicode Returns: :return: On success, a File object is returned :rtype: pytgbot.api_types.receivable.media.File """ result = self._get_file__make_request(file_id=file_id) return self._get_file__process_result(result) # end def get_file def ban_chat_member(self, chat_id, user_id, until_date=None, revoke_messages=None): """ Use this method to ban a user in a group, a supergroup or a channel. In the case of supergroups and channels, the user will not be able to return to the chat on their own using invite links, etc., unless unbanned first. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. https://core.telegram.org/bots/api#banchatmember Parameters: :param chat_id: Unique identifier for the target group or username of the target supergroup or channel (in the format @channelusername) :type chat_id: int | str|unicode :param user_id: Unique identifier of the target user :type user_id: int Optional keyword parameters: :param until_date: Date when the user will be unbanned, unix time. If user is banned for more than 366 days or less than 30 seconds from the current time they are considered to be banned forever. Applied for supergroups and channels only. :type until_date: int :param revoke_messages: Pass True to delete all messages from the chat for the user that is being removed. If False, the user will be able to see messages in the group that were sent before the user was removed. Always True for supergroups and channels. :type revoke_messages: bool Returns: :return: Returns True on success :rtype: bool """ result = self._ban_chat_member__make_request(chat_id=chat_id, user_id=user_id, until_date=until_date, revoke_messages=revoke_messages) return self._ban_chat_member__process_result(result) # end def ban_chat_member def unban_chat_member(self, chat_id, user_id, only_if_banned=None): """ Use this method to unban a previously banned user in a supergroup or channel. The user will not return to the group or channel automatically, but will be able to join via link, etc. The bot must be an administrator for this to work. By default, this method guarantees that after the call the user is not a member of the chat, but will be able to join it. So if the user is a member of the chat they will also be removed from the chat. If you don't want this, use the parameter only_if_banned. Returns True on success. 
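Example (illustrative sketch, not from the upstream API documentation; assumes an already initialized bot instance named `bot` and placeholder ids):

    # only_if_banned=True avoids removing a user who is currently a member
    ok = bot.unban_chat_member(chat_id=-1001234567890, user_id=987654321, only_if_banned=True)
    print(ok)  # True on success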
https://core.telegram.org/bots/api#unbanchatmember Parameters: :param chat_id: Unique identifier for the target group or username of the target supergroup or channel (in the format @channelusername) :type chat_id: int | str|unicode :param user_id: Unique identifier of the target user :type user_id: int Optional keyword parameters: :param only_if_banned: Do nothing if the user is not banned :type only_if_banned: bool Returns: :return: Returns True on success :rtype: bool """ result = self._unban_chat_member__make_request(chat_id=chat_id, user_id=user_id, only_if_banned=only_if_banned) return self._unban_chat_member__process_result(result) # end def unban_chat_member def restrict_chat_member(self, chat_id, user_id, permissions, until_date=None): """ Use this method to restrict a user in a supergroup. The bot must be an administrator in the supergroup for this to work and must have the appropriate administrator rights. Pass True for all permissions to lift restrictions from a user. Returns True on success. https://core.telegram.org/bots/api#restrictchatmember Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) :type chat_id: int | str|unicode :param user_id: Unique identifier of the target user :type user_id: int :param permissions: A JSON-serialized object for new user permissions :type permissions: pytgbot.api_types.receivable.peer.ChatPermissions Optional keyword parameters: :param until_date: Date when restrictions will be lifted for the user, unix time. If user is restricted for more than 366 days or less than 30 seconds from the current time, they are considered to be restricted forever :type until_date: int Returns: :return: Returns True on success :rtype: bool """ result = self._restrict_chat_member__make_request(chat_id=chat_id, user_id=user_id, permissions=permissions, until_date=until_date) return self._restrict_chat_member__process_result(result) # end def restrict_chat_member def promote_chat_member(self, chat_id, user_id, is_anonymous=None, can_manage_chat=None, can_post_messages=None, can_edit_messages=None, can_delete_messages=None, can_manage_voice_chats=None, can_restrict_members=None, can_promote_members=None, can_change_info=None, can_invite_users=None, can_pin_messages=None): """ Use this method to promote or demote a user in a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Pass False for all boolean parameters to demote a user. Returns True on success. https://core.telegram.org/bots/api#promotechatmember Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param user_id: Unique identifier of the target user :type user_id: int Optional keyword parameters: :param is_anonymous: Pass True, if the administrator's presence in the chat is hidden :type is_anonymous: bool :param can_manage_chat: Pass True, if the administrator can access the chat event log, chat statistics, message statistics in channels, see channel members, see anonymous administrators in supergroups and ignore slow mode. 
Implied by any other administrator privilege :type can_manage_chat: bool :param can_post_messages: Pass True, if the administrator can create channel posts, channels only :type can_post_messages: bool :param can_edit_messages: Pass True, if the administrator can edit messages of other users and can pin messages, channels only :type can_edit_messages: bool :param can_delete_messages: Pass True, if the administrator can delete messages of other users :type can_delete_messages: bool :param can_manage_voice_chats: Pass True, if the administrator can manage voice chats :type can_manage_voice_chats: bool :param can_restrict_members: Pass True, if the administrator can restrict, ban or unban chat members :type can_restrict_members: bool :param can_promote_members: Pass True, if the administrator can add new administrators with a subset of their own privileges or demote administrators that he has promoted, directly or indirectly (promoted by administrators that were appointed by him) :type can_promote_members: bool :param can_change_info: Pass True, if the administrator can change chat title, photo and other settings :type can_change_info: bool :param can_invite_users: Pass True, if the administrator can invite new users to the chat :type can_invite_users: bool :param can_pin_messages: Pass True, if the administrator can pin messages, supergroups only :type can_pin_messages: bool Returns: :return: Returns True on success :rtype: bool """ result = self._promote_chat_member__make_request(chat_id=chat_id, user_id=user_id, is_anonymous=is_anonymous, can_manage_chat=can_manage_chat, can_post_messages=can_post_messages, can_edit_messages=can_edit_messages, can_delete_messages=can_delete_messages, can_manage_voice_chats=can_manage_voice_chats, can_restrict_members=can_restrict_members, can_promote_members=can_promote_members, can_change_info=can_change_info, can_invite_users=can_invite_users, can_pin_messages=can_pin_messages) return self._promote_chat_member__process_result(result) # end def promote_chat_member def set_chat_administrator_custom_title(self, chat_id, user_id, custom_title): """ Use this method to set a custom title for an administrator in a supergroup promoted by the bot. Returns True on success. https://core.telegram.org/bots/api#setchatadministratorcustomtitle Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) :type chat_id: int | str|unicode :param user_id: Unique identifier of the target user :type user_id: int :param custom_title: New custom title for the administrator; 0-16 characters, emoji are not allowed :type custom_title: str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._set_chat_administrator_custom_title__make_request(chat_id=chat_id, user_id=user_id, custom_title=custom_title) return self._set_chat_administrator_custom_title__process_result(result) # end def set_chat_administrator_custom_title def ban_chat_sender_chat(self, chat_id, sender_chat_id): """ Use this method to ban a channel chat in a supergroup or a channel. Until the chat is unbanned, the owner of the banned chat won't be able to send messages on behalf of any of their channels. The bot must be an administrator in the supergroup or channel for this to work and must have the appropriate administrator rights. Returns True on success. 
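Example (illustrative sketch, not from the upstream API documentation; assumes an already initialized bot instance named `bot` and placeholder ids):

    # both ids below are placeholders
    ok = bot.ban_chat_sender_chat(chat_id=-1001234567890, sender_chat_id=-1009876543210)
    print(ok)  # True on success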
https://core.telegram.org/bots/api#banchatsenderchat Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param sender_chat_id: Unique identifier of the target sender chat :type sender_chat_id: int Returns: :return: Returns True on success :rtype: bool """ result = self._ban_chat_sender_chat__make_request(chat_id=chat_id, sender_chat_id=sender_chat_id) return self._ban_chat_sender_chat__process_result(result) # end def ban_chat_sender_chat def unban_chat_sender_chat(self, chat_id, sender_chat_id): """ Use this method to unban a previously banned channel chat in a supergroup or channel. The bot must be an administrator for this to work and must have the appropriate administrator rights. Returns True on success. https://core.telegram.org/bots/api#unbanchatsenderchat Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param sender_chat_id: Unique identifier of the target sender chat :type sender_chat_id: int Returns: :return: Returns True on success :rtype: bool """ result = self._unban_chat_sender_chat__make_request(chat_id=chat_id, sender_chat_id=sender_chat_id) return self._unban_chat_sender_chat__process_result(result) # end def unban_chat_sender_chat def set_chat_permissions(self, chat_id, permissions): """ Use this method to set default chat permissions for all members. The bot must be an administrator in the group or a supergroup for this to work and must have the can_restrict_members administrator rights. Returns True on success. https://core.telegram.org/bots/api#setchatpermissions Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) :type chat_id: int | str|unicode :param permissions: A JSON-serialized object for new default chat permissions :type permissions: pytgbot.api_types.receivable.peer.ChatPermissions Returns: :return: Returns True on success :rtype: bool """ result = self._set_chat_permissions__make_request(chat_id=chat_id, permissions=permissions) return self._set_chat_permissions__process_result(result) # end def set_chat_permissions def export_chat_invite_link(self, chat_id): """ Use this method to generate a new primary invite link for a chat; any previously generated primary link is revoked. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the new invite link as String on success. Note: Each administrator in a chat generates their own invite links. Bots can't use invite links generated by other administrators. If you want your bot to work with invite links, it will need to generate its own link using exportChatInviteLink or by calling the getChat method. If your bot needs to generate a new primary invite link replacing its previous one, use exportChatInviteLink again. 
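Example (illustrative sketch, not from the upstream API documentation; assumes an already initialized bot instance named `bot` and a placeholder chat id):

    # revokes any previously generated primary link and returns the new one
    link = bot.export_chat_invite_link(chat_id=-1001234567890)
    print(link)  # the new primary invite link as a string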
https://core.telegram.org/bots/api#exportchatinvitelink Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode Returns: :return: Returns the new invite link as String on success :rtype: str|unicode """ result = self._export_chat_invite_link__make_request(chat_id=chat_id) return self._export_chat_invite_link__process_result(result) # end def export_chat_invite_link def create_chat_invite_link(self, chat_id, name=None, expire_date=None, member_limit=None, creates_join_request=None): """ Use this method to create an additional invite link for a chat. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. The link can be revoked using the method revokeChatInviteLink. Returns the new invite link as ChatInviteLink object. https://core.telegram.org/bots/api#createchatinvitelink Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode Optional keyword parameters: :param name: Invite link name; 0-32 characters :type name: str|unicode :param expire_date: Point in time (Unix timestamp) when the link will expire :type expire_date: int :param member_limit: Maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999 :type member_limit: int :param creates_join_request: True, if users joining the chat via the link need to be approved by chat administrators. If True, member_limit can't be specified :type creates_join_request: bool Returns: :return: Returns the new invite link as ChatInviteLink object :rtype: pytgbot.api_types.receivable.peer.ChatInviteLink """ result = self._create_chat_invite_link__make_request(chat_id=chat_id, name=name, expire_date=expire_date, member_limit=member_limit, creates_join_request=creates_join_request) return self._create_chat_invite_link__process_result(result) # end def create_chat_invite_link def edit_chat_invite_link(self, chat_id, invite_link, name=None, expire_date=None, member_limit=None, creates_join_request=None): """ Use this method to edit a non-primary invite link created by the bot. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the edited invite link as a ChatInviteLink object. https://core.telegram.org/bots/api#editchatinvitelink Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param invite_link: The invite link to edit :type invite_link: str|unicode Optional keyword parameters: :param name: Invite link name; 0-32 characters :type name: str|unicode :param expire_date: Point in time (Unix timestamp) when the link will expire :type expire_date: int :param member_limit: Maximum number of users that can be members of the chat simultaneously after joining the chat via this invite link; 1-99999 :type member_limit: int :param creates_join_request: True, if users joining the chat via the link need to be approved by chat administrators. 
If True, member_limit can't be specified :type creates_join_request: bool Returns: :return: Returns the edited invite link as a ChatInviteLink object :rtype: pytgbot.api_types.receivable.peer.ChatInviteLink """ result = self._edit_chat_invite_link__make_request(chat_id=chat_id, invite_link=invite_link, name=name, expire_date=expire_date, member_limit=member_limit, creates_join_request=creates_join_request) return self._edit_chat_invite_link__process_result(result) # end def edit_chat_invite_link def revoke_chat_invite_link(self, chat_id, invite_link): """ Use this method to revoke an invite link created by the bot. If the primary link is revoked, a new link is automatically generated. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns the revoked invite link as ChatInviteLink object. https://core.telegram.org/bots/api#revokechatinvitelink Parameters: :param chat_id: Unique identifier of the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param invite_link: The invite link to revoke :type invite_link: str|unicode Returns: :return: Returns the revoked invite link as ChatInviteLink object :rtype: pytgbot.api_types.receivable.peer.ChatInviteLink """ result = self._revoke_chat_invite_link__make_request(chat_id=chat_id, invite_link=invite_link) return self._revoke_chat_invite_link__process_result(result) # end def revoke_chat_invite_link def approve_chat_join_request(self, chat_id, user_id): """ Use this method to approve a chat join request. The bot must be an administrator in the chat for this to work and must have the can_invite_users administrator right. Returns True on success. https://core.telegram.org/bots/api#approvechatjoinrequest Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param user_id: Unique identifier of the target user :type user_id: int Returns: :return: Returns True on success :rtype: bool """ result = self._approve_chat_join_request__make_request(chat_id=chat_id, user_id=user_id) return self._approve_chat_join_request__process_result(result) # end def approve_chat_join_request def decline_chat_join_request(self, chat_id, user_id): """ Use this method to decline a chat join request. The bot must be an administrator in the chat for this to work and must have the can_invite_users administrator right. Returns True on success. https://core.telegram.org/bots/api#declinechatjoinrequest Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param user_id: Unique identifier of the target user :type user_id: int Returns: :return: Returns True on success :rtype: bool """ result = self._decline_chat_join_request__make_request(chat_id=chat_id, user_id=user_id) return self._decline_chat_join_request__process_result(result) # end def decline_chat_join_request def set_chat_photo(self, chat_id, photo): """ Use this method to set a new profile photo for the chat. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. 
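Example (illustrative sketch, not from the upstream API documentation; assumes an already initialized bot instance named `bot`, a placeholder chat id and local file path, and that an InputFile helper such as InputFileFromDisk is available in pytgbot.api_types.sendable.files — check the installed version for the exact class and signature):

    from pytgbot.api_types.sendable.files import InputFileFromDisk  # helper name assumed
    # chat id and file path below are placeholders
    ok = bot.set_chat_photo(chat_id=-1001234567890, photo=InputFileFromDisk("./new_chat_photo.jpg"))
    print(ok)  # True on success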
https://core.telegram.org/bots/api#setchatphoto Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param photo: New chat photo, uploaded using multipart/form-data :type photo: pytgbot.api_types.sendable.files.InputFile Returns: :return: Returns True on success :rtype: bool """ result = self._set_chat_photo__make_request(chat_id=chat_id, photo=photo) return self._set_chat_photo__process_result(result) # end def set_chat_photo def delete_chat_photo(self, chat_id): """ Use this method to delete a chat photo. Photos can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. https://core.telegram.org/bots/api#deletechatphoto Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._delete_chat_photo__make_request(chat_id=chat_id) return self._delete_chat_photo__process_result(result) # end def delete_chat_photo def set_chat_title(self, chat_id, title): """ Use this method to change the title of a chat. Titles can't be changed for private chats. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. https://core.telegram.org/bots/api#setchattitle Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param title: New chat title, 1-255 characters :type title: str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._set_chat_title__make_request(chat_id=chat_id, title=title) return self._set_chat_title__process_result(result) # end def set_chat_title def set_chat_description(self, chat_id, description=None): """ Use this method to change the description of a group, a supergroup or a channel. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Returns True on success. https://core.telegram.org/bots/api#setchatdescription Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode Optional keyword parameters: :param description: New chat description, 0-255 characters :type description: str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._set_chat_description__make_request(chat_id=chat_id, description=description) return self._set_chat_description__process_result(result) # end def set_chat_description def pin_chat_message(self, chat_id, message_id, disable_notification=None): """ Use this method to add a message to the list of pinned messages in a chat. If the chat is not a private chat, the bot must be an administrator in the chat for this to work and must have the 'can_pin_messages' administrator right in a supergroup or 'can_edit_messages' administrator right in a channel. Returns True on success. 
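Example (illustrative sketch, not from the upstream API documentation; assumes an already initialized bot instance named `bot` and placeholder ids):

    # disable_notification=True pins silently where notifications apply
    ok = bot.pin_chat_message(chat_id=-1001234567890, message_id=42, disable_notification=True)
    print(ok)  # True on success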
https://core.telegram.org/bots/api#pinchatmessage Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param message_id: Identifier of a message to pin :type message_id: int Optional keyword parameters: :param disable_notification: Pass True, if it is not necessary to send a notification to all chat members about the new pinned message. Notifications are always disabled in channels and private chats. :type disable_notification: bool Returns: :return: Returns True on success :rtype: bool """ result = self._pin_chat_message__make_request(chat_id=chat_id, message_id=message_id, disable_notification=disable_notification) return self._pin_chat_message__process_result(result) # end def pin_chat_message def unpin_chat_message(self, chat_id, message_id=None): """ Use this method to remove a message from the list of pinned messages in a chat. If the chat is not a private chat, the bot must be an administrator in the chat for this to work and must have the 'can_pin_messages' administrator right in a supergroup or 'can_edit_messages' administrator right in a channel. Returns True on success. https://core.telegram.org/bots/api#unpinchatmessage Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode Optional keyword parameters: :param message_id: Identifier of a message to unpin. If not specified, the most recent pinned message (by sending date) will be unpinned. :type message_id: int Returns: :return: Returns True on success :rtype: bool """ result = self._unpin_chat_message__make_request(chat_id=chat_id, message_id=message_id) return self._unpin_chat_message__process_result(result) # end def unpin_chat_message def unpin_all_chat_messages(self, chat_id): """ Use this method to clear the list of pinned messages in a chat. If the chat is not a private chat, the bot must be an administrator in the chat for this to work and must have the 'can_pin_messages' administrator right in a supergroup or 'can_edit_messages' administrator right in a channel. Returns True on success. https://core.telegram.org/bots/api#unpinallchatmessages Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._unpin_all_chat_messages__make_request(chat_id=chat_id) return self._unpin_all_chat_messages__process_result(result) # end def unpin_all_chat_messages def leave_chat(self, chat_id): """ Use this method for your bot to leave a group, supergroup or channel. Returns True on success. https://core.telegram.org/bots/api#leavechat Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) :type chat_id: int | str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._leave_chat__make_request(chat_id=chat_id) return self._leave_chat__process_result(result) # end def leave_chat def get_chat(self, chat_id): """ Use this method to get up to date information about the chat (current name of the user for one-on-one conversations, current username of a user, group or channel, etc.). Returns a Chat object on success. 
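Example (illustrative sketch, not from the upstream API documentation; assumes an already initialized bot instance named `bot` and a placeholder chat id):

    # chat id below is a placeholder
    chat = bot.get_chat(chat_id=-1001234567890)
    print(chat)  # the returned Chat object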
https://core.telegram.org/bots/api#getchat Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) :type chat_id: int | str|unicode Returns: :return: Returns a Chat object on success :rtype: pytgbot.api_types.receivable.peer.Chat """ result = self._get_chat__make_request(chat_id=chat_id) return self._get_chat__process_result(result) # end def get_chat def get_chat_administrators(self, chat_id): """ Use this method to get a list of administrators in a chat. On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots. If the chat is a group or a supergroup and no administrators were appointed, only the creator will be returned. https://core.telegram.org/bots/api#getchatadministrators Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) :type chat_id: int | str|unicode Returns: :return: On success, returns an Array of ChatMember objects that contains information about all chat administrators except other bots :rtype: list of pytgbot.api_types.receivable.peer.ChatMember """ result = self._get_chat_administrators__make_request(chat_id=chat_id) return self._get_chat_administrators__process_result(result) # end def get_chat_administrators def get_chat_member_count(self, chat_id): """ Use this method to get the number of members in a chat. Returns Int on success. https://core.telegram.org/bots/api#getchatmembercount Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) :type chat_id: int | str|unicode Returns: :return: Returns Int on success :rtype: int """ result = self._get_chat_member_count__make_request(chat_id=chat_id) return self._get_chat_member_count__process_result(result) # end def get_chat_member_count def get_chat_member(self, chat_id, user_id): """ Use this method to get information about a member of a chat. Returns a ChatMember object on success. https://core.telegram.org/bots/api#getchatmember Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup or channel (in the format @channelusername) :type chat_id: int | str|unicode :param user_id: Unique identifier of the target user :type user_id: int Returns: :return: Returns a ChatMember object on success :rtype: pytgbot.api_types.receivable.peer.ChatMember """ result = self._get_chat_member__make_request(chat_id=chat_id, user_id=user_id) return self._get_chat_member__process_result(result) # end def get_chat_member def set_chat_sticker_set(self, chat_id, sticker_set_name): """ Use this method to set a new group sticker set for a supergroup. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Use the field can_set_sticker_set optionally returned in getChat requests to check if the bot can use this method. Returns True on success. 
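Example (illustrative sketch, not from the upstream API documentation; assumes an already initialized bot instance named `bot`, a placeholder supergroup id and sticker set name):

    # values below are placeholders; check can_set_sticker_set via get_chat first
    ok = bot.set_chat_sticker_set(chat_id=-1001234567890, sticker_set_name="SomeStickerPackName")
    print(ok)  # True on success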
https://core.telegram.org/bots/api#setchatstickerset Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) :type chat_id: int | str|unicode :param sticker_set_name: Name of the sticker set to be set as the group sticker set :type sticker_set_name: str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._set_chat_sticker_set__make_request(chat_id=chat_id, sticker_set_name=sticker_set_name) return self._set_chat_sticker_set__process_result(result) # end def set_chat_sticker_set def delete_chat_sticker_set(self, chat_id): """ Use this method to delete a group sticker set from a supergroup. The bot must be an administrator in the chat for this to work and must have the appropriate administrator rights. Use the field can_set_sticker_set optionally returned in getChat requests to check if the bot can use this method. Returns True on success. https://core.telegram.org/bots/api#deletechatstickerset Parameters: :param chat_id: Unique identifier for the target chat or username of the target supergroup (in the format @supergroupusername) :type chat_id: int | str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._delete_chat_sticker_set__make_request(chat_id=chat_id) return self._delete_chat_sticker_set__process_result(result) # end def delete_chat_sticker_set def answer_callback_query(self, callback_query_id, text=None, show_alert=None, url=None, cache_time=None): """ Use this method to send answers to callback queries sent from inline keyboards. The answer will be displayed to the user as a notification at the top of the chat screen or as an alert. On success, True is returned. Alternatively, the user can be redirected to the specified Game URL. For this option to work, you must first create a game for your bot via @Botfather and accept the terms. Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter. https://core.telegram.org/bots/api#answercallbackquery Parameters: :param callback_query_id: Unique identifier for the query to be answered :type callback_query_id: str|unicode Optional keyword parameters: :param text: Text of the notification. If not specified, nothing will be shown to the user, 0-200 characters :type text: str|unicode :param show_alert: If True, an alert will be shown by the client instead of a notification at the top of the chat screen. Defaults to false. :type show_alert: bool :param url: URL that will be opened by the user's client. If you have created a Game and accepted the conditions via @Botfather, specify the URL that opens your game — note that this will only work if the query comes from a callback_game button.Otherwise, you may use links like t.me/your_bot?start=XXXX that open your bot with a parameter. :type url: str|unicode :param cache_time: The maximum amount of time in seconds that the result of the callback query may be cached client-side. Telegram apps will support caching starting in version 3.14. Defaults to 0. :type cache_time: int Returns: :return: On success, True is returned :rtype: bool """ result = self._answer_callback_query__make_request(callback_query_id=callback_query_id, text=text, show_alert=show_alert, url=url, cache_time=cache_time) return self._answer_callback_query__process_result(result) # end def answer_callback_query def set_my_commands(self, commands, scope=None, language_code=None): """ Use this method to change the list of the bot's commands. 
See https://core.telegram.org/bots#commands for more details about bot commands. Returns True on success. https://core.telegram.org/bots/api#setmycommands Parameters: :param commands: A JSON-serialized list of bot commands to be set as the list of the bot's commands. At most 100 commands can be specified. :type commands: list of pytgbot.api_types.sendable.command.BotCommand Optional keyword parameters: :param scope: A JSON-serialized object, describing scope of users for which the commands are relevant. Defaults to BotCommandScopeDefault. :type scope: pytgbot.api_types.sendable.command.BotCommandScope :param language_code: A two-letter ISO 639-1 language code. If empty, commands will be applied to all users from the given scope, for whose language there are no dedicated commands :type language_code: str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._set_my_commands__make_request(commands=commands, scope=scope, language_code=language_code) return self._set_my_commands__process_result(result) # end def set_my_commands def delete_my_commands(self, scope=None, language_code=None): """ Use this method to delete the list of the bot's commands for the given scope and user language. After deletion, higher level commands will be shown to affected users. Returns True on success. https://core.telegram.org/bots/api#deletemycommands Optional keyword parameters: :param scope: A JSON-serialized object, describing scope of users for which the commands are relevant. Defaults to BotCommandScopeDefault. :type scope: pytgbot.api_types.sendable.command.BotCommandScope :param language_code: A two-letter ISO 639-1 language code. If empty, commands will be applied to all users from the given scope, for whose language there are no dedicated commands :type language_code: str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._delete_my_commands__make_request(scope=scope, language_code=language_code) return self._delete_my_commands__process_result(result) # end def delete_my_commands def get_my_commands(self, scope=None, language_code=None): """ Use this method to get the current list of the bot's commands for the given scope and user language. Returns Array of BotCommand on success. If commands aren't set, an empty list is returned. https://core.telegram.org/bots/api#getmycommands Optional keyword parameters: :param scope: A JSON-serialized object, describing scope of users. Defaults to BotCommandScopeDefault. :type scope: pytgbot.api_types.sendable.command.BotCommandScope :param language_code: A two-letter ISO 639-1 language code or an empty string :type language_code: str|unicode Returns: :return: On success, an array of the commands is returned. If commands aren't set, an empty list is returned :rtype: list of pytgbot.api_types.sendable.command.BotCommand """ result = self._get_my_commands__make_request(scope=scope, language_code=language_code) return self._get_my_commands__process_result(result) # end def get_my_commands def edit_message_text(self, text, chat_id=None, message_id=None, inline_message_id=None, parse_mode=None, entities=None, disable_web_page_preview=None, reply_markup=None): """ Use this method to edit text and game messages. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. 
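Example (illustrative sketch, not from the upstream API documentation; assumes an already initialized bot instance named `bot` and placeholder ids of a previously sent message):

    # chat id and message id below are placeholders
    edited = bot.edit_message_text(text="Updated text", chat_id=123456789, message_id=42)
    print(edited)  # the edited Message, or True for inline messages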
https://core.telegram.org/bots/api#editmessagetext Parameters: :param text: New text of the message, 1-4096 characters after entities parsing :type text: str|unicode Optional keyword parameters: :param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param message_id: Required if inline_message_id is not specified. Identifier of the message to edit :type message_id: int :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :type inline_message_id: str|unicode :param parse_mode: Mode for parsing entities in the message text. See formatting options for more details. :type parse_mode: str|unicode :param entities: A JSON-serialized list of special entities that appear in message text, which can be specified instead of parse_mode :type entities: list of pytgbot.api_types.receivable.media.MessageEntity :param disable_web_page_preview: Disables link previews for links in this message :type disable_web_page_preview: bool :param reply_markup: A JSON-serialized object for an inline keyboard. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup Returns: :return: On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned :rtype: pytgbot.api_types.receivable.updates.Message | bool """ result = self._edit_message_text__make_request(text=text, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, parse_mode=parse_mode, entities=entities, disable_web_page_preview=disable_web_page_preview, reply_markup=reply_markup) return self._edit_message_text__process_result(result) # end def edit_message_text def edit_message_caption(self, chat_id=None, message_id=None, inline_message_id=None, caption=None, parse_mode=None, caption_entities=None, reply_markup=None): """ Use this method to edit captions of messages. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. https://core.telegram.org/bots/api#editmessagecaption Optional keyword parameters: :param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param message_id: Required if inline_message_id is not specified. Identifier of the message to edit :type message_id: int :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :type inline_message_id: str|unicode :param caption: New caption of the message, 0-1024 characters after entities parsing :type caption: str|unicode :param parse_mode: Mode for parsing entities in the message caption. See formatting options for more details. :type parse_mode: str|unicode :param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode :type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity :param reply_markup: A JSON-serialized object for an inline keyboard. 
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup Returns: :return: On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned :rtype: pytgbot.api_types.receivable.updates.Message | bool """ result = self._edit_message_caption__make_request(chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, caption=caption, parse_mode=parse_mode, caption_entities=caption_entities, reply_markup=reply_markup) return self._edit_message_caption__process_result(result) # end def edit_message_caption def edit_message_media(self, media, chat_id=None, message_id=None, inline_message_id=None, reply_markup=None): """ Use this method to edit animation, audio, document, photo, or video messages. If a message is part of a message album, then it can be edited only to an audio for audio albums, only to a document for document albums and to a photo or a video otherwise. When an inline message is edited, a new file can't be uploaded; use a previously uploaded file via its file_id or specify a URL. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. https://core.telegram.org/bots/api#editmessagemedia Parameters: :param media: A JSON-serialized object for a new media content of the message :type media: pytgbot.api_types.sendable.input_media.InputMedia Optional keyword parameters: :param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param message_id: Required if inline_message_id is not specified. Identifier of the message to edit :type message_id: int :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :type inline_message_id: str|unicode :param reply_markup: A JSON-serialized object for a new inline keyboard. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup Returns: :return: On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned :rtype: pytgbot.api_types.receivable.updates.Message | bool """ result = self._edit_message_media__make_request(media=media, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, reply_markup=reply_markup) return self._edit_message_media__process_result(result) # end def edit_message_media def edit_message_reply_markup(self, chat_id=None, message_id=None, inline_message_id=None, reply_markup=None): """ Use this method to edit only the reply markup of messages. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned. https://core.telegram.org/bots/api#editmessagereplymarkup Optional keyword parameters: :param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param message_id: Required if inline_message_id is not specified. Identifier of the message to edit :type message_id: int :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :type inline_message_id: str|unicode :param reply_markup: A JSON-serialized object for an inline keyboard. 
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup Returns: :return: On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned :rtype: pytgbot.api_types.receivable.updates.Message | bool """ result = self._edit_message_reply_markup__make_request(chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id, reply_markup=reply_markup) return self._edit_message_reply_markup__process_result(result) # end def edit_message_reply_markup def stop_poll(self, chat_id, message_id, reply_markup=None): """ Use this method to stop a poll which was sent by the bot. On success, the stopped Poll is returned. https://core.telegram.org/bots/api#stoppoll Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param message_id: Identifier of the original message with the poll :type message_id: int Optional keyword parameters: :param reply_markup: A JSON-serialized object for a new message inline keyboard. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup Returns: :return: On success, the stopped Poll is returned :rtype: pytgbot.api_types.receivable.media.Poll """ result = self._stop_poll__make_request(chat_id=chat_id, message_id=message_id, reply_markup=reply_markup) return self._stop_poll__process_result(result) # end def stop_poll def delete_message(self, chat_id, message_id): """ Use this method to delete a message, including service messages, with the following limitations:- A message can only be deleted if it was sent less than 48 hours ago.- A dice message in a private chat can only be deleted if it was sent more than 24 hours ago.- Bots can delete outgoing messages in private chats, groups, and supergroups.- Bots can delete incoming messages in private chats.- Bots granted can_post_messages permissions can delete outgoing messages in channels.- If the bot is an administrator of a group, it can delete any message there.- If the bot has can_delete_messages permission in a supergroup or a channel, it can delete any message there.Returns True on success. https://core.telegram.org/bots/api#deletemessage Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param message_id: Identifier of the message to delete :type message_id: int Returns: :return: Returns True on success :rtype: bool """ result = self._delete_message__make_request(chat_id=chat_id, message_id=message_id) return self._delete_message__process_result(result) # end def delete_message def send_sticker(self, chat_id, sticker, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send static .WEBP, animated .TGS, or video .WEBM stickers. On success, the sent Message is returned. https://core.telegram.org/bots/api#sendsticker Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param sticker: Sticker to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a .WEBP file from the Internet, or upload a new one using multipart/form-data. 
More info on Sending Files » :type sticker: pytgbot.api_types.sendable.files.InputFile | str|unicode Optional keyword parameters: :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_sticker__make_request(chat_id=chat_id, sticker=sticker, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_sticker__process_result(result) # end def send_sticker def get_sticker_set(self, name): """ Use this method to get a sticker set. On success, a StickerSet object is returned. https://core.telegram.org/bots/api#getstickerset Parameters: :param name: Name of the sticker set :type name: str|unicode Returns: :return: On success, a StickerSet object is returned :rtype: pytgbot.api_types.receivable.stickers.StickerSet """ result = self._get_sticker_set__make_request(name=name) return self._get_sticker_set__process_result(result) # end def get_sticker_set def upload_sticker_file(self, user_id, png_sticker): """ Use this method to upload a .PNG file with a sticker for later use in createNewStickerSet and addStickerToSet methods (can be used multiple times). Returns the uploaded File on success. https://core.telegram.org/bots/api#uploadstickerfile Parameters: :param user_id: User identifier of sticker file owner :type user_id: int :param png_sticker: PNG image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. More info on Sending Files » :type png_sticker: pytgbot.api_types.sendable.files.InputFile Returns: :return: Returns the uploaded File on success :rtype: pytgbot.api_types.receivable.media.File """ result = self._upload_sticker_file__make_request(user_id=user_id, png_sticker=png_sticker) return self._upload_sticker_file__process_result(result) # end def upload_sticker_file def create_new_sticker_set(self, user_id, name, title, emojis, png_sticker=None, tgs_sticker=None, webm_sticker=None, contains_masks=None, mask_position=None): """ Use this method to create a new sticker set owned by a user. The bot will be able to edit the sticker set thus created. You must use exactly one of the fields png_sticker, tgs_sticker, or webm_sticker. Returns True on success. 
https://core.telegram.org/bots/api#createnewstickerset Parameters: :param user_id: User identifier of created sticker set owner :type user_id: int :param name: Short name of sticker set, to be used in t.me/addstickers/ URLs (e.g., animals). Can contain only english letters, digits and underscores. Must begin with a letter, can't contain consecutive underscores and must end in "_by_<bot username>". <bot_username> is case insensitive. 1-64 characters. :type name: str|unicode :param title: Sticker set title, 1-64 characters :type title: str|unicode :param emojis: One or more emoji corresponding to the sticker :type emojis: str|unicode Optional keyword parameters: :param png_sticker: PNG image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. Pass a file_id as a String to send a file that already exists on the Telegram servers, pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files » :type png_sticker: pytgbot.api_types.sendable.files.InputFile | str|unicode :param tgs_sticker: TGS animation with the sticker, uploaded using multipart/form-data. See https://core.telegram.org/stickers#animated-sticker-requirements for technical requirements :type tgs_sticker: pytgbot.api_types.sendable.files.InputFile :param webm_sticker: WEBM video with the sticker, uploaded using multipart/form-data. See https://core.telegram.org/stickers#video-sticker-requirements for technical requirements :type webm_sticker: pytgbot.api_types.sendable.files.InputFile :param contains_masks: Pass True, if a set of mask stickers should be created :type contains_masks: bool :param mask_position: A JSON-serialized object for position where the mask should be placed on faces :type mask_position: pytgbot.api_types.receivable.stickers.MaskPosition Returns: :return: Returns True on success :rtype: bool """ result = self._create_new_sticker_set__make_request(user_id=user_id, name=name, title=title, emojis=emojis, png_sticker=png_sticker, tgs_sticker=tgs_sticker, webm_sticker=webm_sticker, contains_masks=contains_masks, mask_position=mask_position) return self._create_new_sticker_set__process_result(result) # end def create_new_sticker_set def add_sticker_to_set(self, user_id, name, emojis, png_sticker=None, tgs_sticker=None, webm_sticker=None, mask_position=None): """ Use this method to add a new sticker to a set created by the bot. You must use exactly one of the fields png_sticker, tgs_sticker, or webm_sticker. Animated stickers can be added to animated sticker sets and only to them. Animated sticker sets can have up to 50 stickers. Static sticker sets can have up to 120 stickers. Returns True on success. https://core.telegram.org/bots/api#addstickertoset Parameters: :param user_id: User identifier of sticker set owner :type user_id: int :param name: Sticker set name :type name: str|unicode :param emojis: One or more emoji corresponding to the sticker :type emojis: str|unicode Optional keyword parameters: :param png_sticker: PNG image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. Pass a file_id as a String to send a file that already exists on the Telegram servers, pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. 
More info on Sending Files » :type png_sticker: pytgbot.api_types.sendable.files.InputFile | str|unicode :param tgs_sticker: TGS animation with the sticker, uploaded using multipart/form-data. See https://core.telegram.org/stickers#animated-sticker-requirements for technical requirements :type tgs_sticker: pytgbot.api_types.sendable.files.InputFile :param webm_sticker: WEBM video with the sticker, uploaded using multipart/form-data. See https://core.telegram.org/stickers#video-sticker-requirements for technical requirements :type webm_sticker: pytgbot.api_types.sendable.files.InputFile :param mask_position: A JSON-serialized object for position where the mask should be placed on faces :type mask_position: pytgbot.api_types.receivable.stickers.MaskPosition Returns: :return: Returns True on success :rtype: bool """ result = self._add_sticker_to_set__make_request(user_id=user_id, name=name, emojis=emojis, png_sticker=png_sticker, tgs_sticker=tgs_sticker, webm_sticker=webm_sticker, mask_position=mask_position) return self._add_sticker_to_set__process_result(result) # end def add_sticker_to_set def set_sticker_position_in_set(self, sticker, position): """ Use this method to move a sticker in a set created by the bot to a specific position. Returns True on success. https://core.telegram.org/bots/api#setstickerpositioninset Parameters: :param sticker: File identifier of the sticker :type sticker: str|unicode :param position: New sticker position in the set, zero-based :type position: int Returns: :return: Returns True on success :rtype: bool """ result = self._set_sticker_position_in_set__make_request(sticker=sticker, position=position) return self._set_sticker_position_in_set__process_result(result) # end def set_sticker_position_in_set def delete_sticker_from_set(self, sticker): """ Use this method to delete a sticker from a set created by the bot. Returns True on success. https://core.telegram.org/bots/api#deletestickerfromset Parameters: :param sticker: File identifier of the sticker :type sticker: str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._delete_sticker_from_set__make_request(sticker=sticker) return self._delete_sticker_from_set__process_result(result) # end def delete_sticker_from_set def set_sticker_set_thumb(self, name, user_id, thumb=None): """ Use this method to set the thumbnail of a sticker set. Animated thumbnails can be set for animated sticker sets only. Video thumbnails can be set only for video sticker sets only. Returns True on success. https://core.telegram.org/bots/api#setstickersetthumb Parameters: :param name: Sticker set name :type name: str|unicode :param user_id: User identifier of the sticker set owner :type user_id: int Optional keyword parameters: :param thumb: A PNG image with the thumbnail, must be up to 128 kilobytes in size and have width and height exactly 100px, or a TGS animation with the thumbnail up to 32 kilobytes in size; see https://core.telegram.org/stickers#animated-sticker-requirements for animated sticker technical requirements, or a WEBM video with the thumbnail up to 32 kilobytes in size; see https://core.telegram.org/stickers#video-sticker-requirements for video sticker technical requirements. Pass a file_id as a String to send a file that already exists on the Telegram servers, pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files ». Animated sticker set thumbnails can't be uploaded via HTTP URL. 
:type thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode Returns: :return: Returns True on success :rtype: bool """ result = self._set_sticker_set_thumb__make_request(name=name, user_id=user_id, thumb=thumb) return self._set_sticker_set_thumb__process_result(result) # end def set_sticker_set_thumb def answer_inline_query(self, inline_query_id, results, cache_time=None, is_personal=None, next_offset=None, switch_pm_text=None, switch_pm_parameter=None): """ Use this method to send answers to an inline query. On success, True is returned.No more than 50 results per query are allowed. https://core.telegram.org/bots/api#answerinlinequery Parameters: :param inline_query_id: Unique identifier for the answered query :type inline_query_id: str|unicode :param results: A JSON-serialized array of results for the inline query :type results: list of pytgbot.api_types.sendable.inline.InlineQueryResult Optional keyword parameters: :param cache_time: The maximum amount of time in seconds that the result of the inline query may be cached on the server. Defaults to 300. :type cache_time: int :param is_personal: Pass True, if results may be cached on the server side only for the user that sent the query. By default, results may be returned to any user who sends the same query :type is_personal: bool :param next_offset: Pass the offset that a client should send in the next query with the same text to receive more results. Pass an empty string if there are no more results or if you don't support pagination. Offset length can't exceed 64 bytes. :type next_offset: str|unicode :param switch_pm_text: If passed, clients will display a button with specified text that switches the user to a private chat with the bot and sends the bot a start message with the parameter switch_pm_parameter :type switch_pm_text: str|unicode :param switch_pm_parameter: Deep-linking parameter for the /start message sent to the bot when user presses the switch button. 1-64 characters, only A-Z, a-z, 0-9, _ and - are allowed.Example: An inline bot that sends YouTube videos can ask the user to connect the bot to their YouTube account to adapt search results accordingly. To do this, it displays a 'Connect your YouTube account' button above the results, or even before showing any. The user presses the button, switches to a private chat with the bot and, in doing so, passes a start parameter that instructs the bot to return an OAuth link. Once done, the bot can offer a switch_inline button so that the user can easily return to the chat where they wanted to use the bot's inline capabilities. 
:type switch_pm_parameter: str|unicode Returns: :return: On success, True is returned :rtype: bool """ result = self._answer_inline_query__make_request(inline_query_id=inline_query_id, results=results, cache_time=cache_time, is_personal=is_personal, next_offset=next_offset, switch_pm_text=switch_pm_text, switch_pm_parameter=switch_pm_parameter) return self._answer_inline_query__process_result(result) # end def answer_inline_query def send_invoice(self, chat_id, title, description, payload, provider_token, currency, prices, max_tip_amount=None, suggested_tip_amounts=None, start_parameter=None, provider_data=None, photo_url=None, photo_size=None, photo_width=None, photo_height=None, need_name=None, need_phone_number=None, need_email=None, need_shipping_address=None, send_phone_number_to_provider=None, send_email_to_provider=None, is_flexible=None, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send invoices. On success, the sent Message is returned. https://core.telegram.org/bots/api#sendinvoice Parameters: :param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername) :type chat_id: int | str|unicode :param title: Product name, 1-32 characters :type title: str|unicode :param description: Product description, 1-255 characters :type description: str|unicode :param payload: Bot-defined invoice payload, 1-128 bytes. This will not be displayed to the user, use for your internal processes. :type payload: str|unicode :param provider_token: Payments provider token, obtained via Botfather :type provider_token: str|unicode :param currency: Three-letter ISO 4217 currency code, see more on currencies :type currency: str|unicode :param prices: Price breakdown, a JSON-serialized list of components (e.g. product price, tax, discount, delivery cost, delivery tax, bonus, etc.) :type prices: list of pytgbot.api_types.sendable.payments.LabeledPrice Optional keyword parameters: :param max_tip_amount: The maximum accepted amount for tips in the smallest units of the currency (integer, not float/double). For example, for a maximum tip of US$ 1.45 pass max_tip_amount = 145. See the exp parameter in currencies.json, it shows the number of digits past the decimal point for each currency (2 for the majority of currencies). Defaults to 0 :type max_tip_amount: int :param suggested_tip_amounts: A JSON-serialized array of suggested amounts of tips in the smallest units of the currency (integer, not float/double). At most 4 suggested tip amounts can be specified. The suggested tip amounts must be positive, passed in a strictly increased order and must not exceed max_tip_amount. :type suggested_tip_amounts: list of int :param start_parameter: Unique deep-linking parameter. If left empty, forwarded copies of the sent message will have a Pay button, allowing multiple users to pay directly from the forwarded message, using the same invoice. If non-empty, forwarded copies of the sent message will have a URL button with a deep link to the bot (instead of a Pay button), with the value used as the start parameter :type start_parameter: str|unicode :param provider_data: A JSON-serialized data about the invoice, which will be shared with the payment provider. A detailed description of required fields should be provided by the payment provider. :type provider_data: str|unicode :param photo_url: URL of the product photo for the invoice. 
Can be a photo of the goods or a marketing image for a service. People like it better when they see what they are paying for. :type photo_url: str|unicode :param photo_size: Photo size :type photo_size: int :param photo_width: Photo width :type photo_width: int :param photo_height: Photo height :type photo_height: int :param need_name: Pass True, if you require the user's full name to complete the order :type need_name: bool :param need_phone_number: Pass True, if you require the user's phone number to complete the order :type need_phone_number: bool :param need_email: Pass True, if you require the user's email address to complete the order :type need_email: bool :param need_shipping_address: Pass True, if you require the user's shipping address to complete the order :type need_shipping_address: bool :param send_phone_number_to_provider: Pass True, if user's phone number should be sent to provider :type send_phone_number_to_provider: bool :param send_email_to_provider: Pass True, if user's email address should be sent to provider :type send_email_to_provider: bool :param is_flexible: Pass True, if the final price depends on the shipping method :type is_flexible: bool :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: A JSON-serialized object for an inline keyboard. If empty, one 'Pay total price' button will be shown. If not empty, the first button must be a Pay button. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_invoice__make_request(chat_id=chat_id, title=title, description=description, payload=payload, provider_token=provider_token, currency=currency, prices=prices, max_tip_amount=max_tip_amount, suggested_tip_amounts=suggested_tip_amounts, start_parameter=start_parameter, provider_data=provider_data, photo_url=photo_url, photo_size=photo_size, photo_width=photo_width, photo_height=photo_height, need_name=need_name, need_phone_number=need_phone_number, need_email=need_email, need_shipping_address=need_shipping_address, send_phone_number_to_provider=send_phone_number_to_provider, send_email_to_provider=send_email_to_provider, is_flexible=is_flexible, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_invoice__process_result(result) # end def send_invoice def answer_shipping_query(self, shipping_query_id, ok, shipping_options=None, error_message=None): """ If you sent an invoice requesting a shipping address and the parameter is_flexible was specified, the Bot API will send an Update with a shipping_query field to the bot. Use this method to reply to shipping queries. On success, True is returned. 
https://core.telegram.org/bots/api#answershippingquery Parameters: :param shipping_query_id: Unique identifier for the query to be answered :type shipping_query_id: str|unicode :param ok: Specify True if delivery to the specified address is possible and False if there are any problems (for example, if delivery to the specified address is not possible) :type ok: bool Optional keyword parameters: :param shipping_options: Required if ok is True. A JSON-serialized array of available shipping options. :type shipping_options: list of pytgbot.api_types.sendable.payments.ShippingOption :param error_message: Required if ok is False. Error message in human readable form that explains why it is impossible to complete the order (e.g. "Sorry, delivery to your desired address is unavailable'). Telegram will display this message to the user. :type error_message: str|unicode Returns: :return: On success, True is returned :rtype: bool """ result = self._answer_shipping_query__make_request(shipping_query_id=shipping_query_id, ok=ok, shipping_options=shipping_options, error_message=error_message) return self._answer_shipping_query__process_result(result) # end def answer_shipping_query def answer_pre_checkout_query(self, pre_checkout_query_id, ok, error_message=None): """ Once the user has confirmed their payment and shipping details, the Bot API sends the final confirmation in the form of an Update with the field pre_checkout_query. Use this method to respond to such pre-checkout queries. On success, True is returned. Note: The Bot API must receive an answer within 10 seconds after the pre-checkout query was sent. https://core.telegram.org/bots/api#answerprecheckoutquery Parameters: :param pre_checkout_query_id: Unique identifier for the query to be answered :type pre_checkout_query_id: str|unicode :param ok: Specify True if everything is alright (goods are available, etc.) and the bot is ready to proceed with the order. Use False if there are any problems. :type ok: bool Optional keyword parameters: :param error_message: Required if ok is False. Error message in human readable form that explains the reason for failure to proceed with the checkout (e.g. "Sorry, somebody just bought the last of our amazing black T-shirts while you were busy filling out your payment details. Please choose a different color or garment!"). Telegram will display this message to the user. :type error_message: str|unicode Returns: :return: On success, True is returned :rtype: bool """ result = self._answer_pre_checkout_query__make_request(pre_checkout_query_id=pre_checkout_query_id, ok=ok, error_message=error_message) return self._answer_pre_checkout_query__process_result(result) # end def answer_pre_checkout_query def set_passport_data_errors(self, user_id, errors): """ Informs a user that some of the Telegram Passport elements they provided contains errors. The user will not be able to re-submit their Passport to you until the errors are fixed (the contents of the field for which you returned the error must change). Returns True on success. Use this if the data submitted by the user doesn't satisfy the standards your service requires for any reason. For example, if a birthday date seems invalid, a submitted document is blurry, a scan shows evidence of tampering, etc. Supply some details in the error message to make sure the user knows how to correct the issues. 
https://core.telegram.org/bots/api#setpassportdataerrors Parameters: :param user_id: User identifier :type user_id: int :param errors: A JSON-serialized array describing the errors :type errors: list of pytgbot.api_types.sendable.passport.PassportElementError Returns: :return: Returns True on success :rtype: bool """ result = self._set_passport_data_errors__make_request(user_id=user_id, errors=errors) return self._set_passport_data_errors__process_result(result) # end def set_passport_data_errors def send_game(self, chat_id, game_short_name, disable_notification=None, protect_content=None, reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None): """ Use this method to send a game. On success, the sent Message is returned. https://core.telegram.org/bots/api#sendgame Parameters: :param chat_id: Unique identifier for the target chat :type chat_id: int :param game_short_name: Short name of the game, serves as the unique identifier for the game. Set up your games via Botfather. :type game_short_name: str|unicode Optional keyword parameters: :param disable_notification: Sends the message silently. Users will receive a notification with no sound. :type disable_notification: bool :param protect_content: Protects the contents of the sent message from forwarding and saving :type protect_content: bool :param reply_to_message_id: If the message is a reply, ID of the original message :type reply_to_message_id: int :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found :type allow_sending_without_reply: bool :param reply_markup: A JSON-serialized object for an inline keyboard. If empty, one 'Play game_title' button will be shown. If not empty, the first button must launch the game. :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup Returns: :return: On success, the sent Message is returned :rtype: pytgbot.api_types.receivable.updates.Message """ result = self._send_game__make_request(chat_id=chat_id, game_short_name=game_short_name, disable_notification=disable_notification, protect_content=protect_content, reply_to_message_id=reply_to_message_id, allow_sending_without_reply=allow_sending_without_reply, reply_markup=reply_markup) return self._send_game__process_result(result) # end def send_game def set_game_score(self, user_id, score, force=None, disable_edit_message=None, chat_id=None, message_id=None, inline_message_id=None): """ Use this method to set the score of the specified user in a game message. On success, if the message is not an inline message, the Message is returned, otherwise True is returned. Returns an error, if the new score is not greater than the user's current score in the chat and force is False. https://core.telegram.org/bots/api#setgamescore Parameters: :param user_id: User identifier :type user_id: int :param score: New score, must be non-negative :type score: int Optional keyword parameters: :param force: Pass True, if the high score is allowed to decrease. This can be useful when fixing mistakes or banning cheaters :type force: bool :param disable_edit_message: Pass True, if the game message should not be automatically edited to include the current scoreboard :type disable_edit_message: bool :param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat :type chat_id: int :param message_id: Required if inline_message_id is not specified. 
Identifier of the sent message :type message_id: int :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :type inline_message_id: str|unicode Returns: :return: On success, if the message is not an inline message, the Message is returned, otherwise True is returned. Returns an error, if the new score is not greater than the user's current score in the chat and force is False :rtype: pytgbot.api_types.receivable.updates.Message | bool """ result = self._set_game_score__make_request(user_id=user_id, score=score, force=force, disable_edit_message=disable_edit_message, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id) return self._set_game_score__process_result(result) # end def set_game_score def get_game_high_scores(self, user_id, chat_id=None, message_id=None, inline_message_id=None): """ Use this method to get data for high score tables. Will return the score of the specified user and several of their neighbors in a game. On success, returns an Array of GameHighScore objects. This method will currently return scores for the target user, plus two of their closest neighbors on each side. Will also return the top three users if the user and his neighbors are not among them. Please note that this behavior is subject to change. https://core.telegram.org/bots/api#getgamehighscores Parameters: :param user_id: Target user id :type user_id: int Optional keyword parameters: :param chat_id: Required if inline_message_id is not specified. Unique identifier for the target chat :type chat_id: int :param message_id: Required if inline_message_id is not specified. Identifier of the sent message :type message_id: int :param inline_message_id: Required if chat_id and message_id are not specified. Identifier of the inline message :type inline_message_id: str|unicode Returns: :return: On success, returns an Array of GameHighScore objects :rtype: list of pytgbot.api_types.receivable.game.GameHighScore """ result = self._get_game_high_scores__make_request(user_id=user_id, chat_id=chat_id, message_id=message_id, inline_message_id=inline_message_id) return self._get_game_high_scores__process_result(result) # end def get_game_high_scores # end of generated functions # end class Bot # allow importing the bot as `pytgbot.bot.syncrounous.Bot`. Bot = SyncBot
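# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated API above): a minimal example
# of driving the synchronous Bot. The token below is a placeholder and the
# calls will fail without a real one; BotCommand is the sendable type
# referenced in the set_my_commands docstring.
if __name__ == '__main__':
    from pytgbot.api_types.sendable.command import BotCommand

    bot = Bot("123456:REPLACE-WITH-YOUR-BOT-TOKEN")  # placeholder token
    # register two commands for the default scope, then read them back
    bot.set_my_commands([
        BotCommand("start", "Start the bot"),
        BotCommand("help", "Show usage information"),
    ])
    print(bot.get_my_commands())
# end if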
nilq/baby-python
python
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import math import unittest import unittest.mock import numpy as np from phyre.interface.scene import ttypes as scene_if from phyre import simulator from phyre import creator import phyre.objects_util @creator.define_task def build_task(C): left = C.add('static bar', scale=0.3).set_bottom(0).set_left(10) right = C.add('dynamic bar', scale=0.3).set_bottom(0.8).set_left(left.right) # Always valid. C.update_task(body1=left, body2=right, relationships=[C.SpatialRelationship.LEFT_OF]) @creator.define_task def build_task_for_objects(C): left = C.add('static bar', scale=0.3).set_center_x(50).set_center_y(30).set_angle(-10) right = C.add('dynamic bar', scale=0.2).set_center_x(70).set_center_y(200) # Always valid. C.update_task(body1=left, body2=right, relationships=[C.SpatialRelationship.TOUCHING]) @creator.define_task def build_task_for_jars(C): left = C.add('static jar', scale=0.3).push(50, 30).set_angle(0) right = C.add('dynamic bar', scale=0.2).set_center_x(70).set_center_y(200) # Always valid. C.update_task(body1=left, body2=right, relationships=[C.SpatialRelationship.TOUCHING]) class SimulatorTest(unittest.TestCase): def setUp(self): [self._task] = build_task('test') [self._task_object_test] = build_task_for_objects('test_objects') [self._task_jar_test] = build_task_for_jars('test_jars') # Build a box at position 100, 100. points = [] for dx in range(10): for dy in range(10): points.append((100 + dx, 100 + dy)) self._box_compressed_user_input = (points, None, None) self._box_user_input = simulator.build_user_input(points=points) self._ball_user_input = simulator.build_user_input(balls=[100, 100, 5]) def test_simulate_scene(self): steps = 10 # Not too many steps. scenes = simulator.simulate_scene(self._task.scene, steps=steps) self.assertEqual(len(scenes), steps) def test_simulate_task(self): steps = 200 # Not too many steps, but more than steps_for_solution.. assert steps >= simulator.STEPS_FOR_SOLUTION result = simulator.simulate_task(self._task, steps=steps, stride=1) self.assertEqual(len(result.sceneList), simulator.STEPS_FOR_SOLUTION) # Empty solution should be valid. 
self.assertEqual(result.isSolution, True) def test_add_user_input_to_scene(self): raise unittest.SkipTest scene = simulator.add_user_input_to_scene(self._task.scene, self._box_user_input) self.assertEqual(len(scene.bodies), 6) self.assertEqual(len(scene.user_input_bodies), 1) def test_add_user_input_to_scene_ball(self): ball = [200, 200, 30] user_input = (None, None, [ball]) scene = simulator.add_user_input_to_scene(self._task.scene, user_input) self.assertEqual(len(scene.bodies), 6) self.assertEqual(len(scene.user_input_bodies), 1) def test_add_empy_user_input_to_scene(self): points = [] scene = simulator.add_user_input_to_scene( self._task.scene, simulator.build_user_input(points)) self.assertEqual(len(scene.bodies), 6) self.assertEqual(len(scene.user_input_bodies), 0) def test_add_input_and_simulate(self): steps = 10 # Check simulate_task_with_input is identical to add_user_input_to_scene # followed by simulate_task. combined_results = simulator.simulate_task_with_input( self._task, self._box_user_input, steps=steps) task = copy.copy(self._task) task.scene = simulator.add_user_input_to_scene(task.scene, self._box_user_input) bl_resuls = simulator.simulate_task(task, steps=steps) self.assertEqual(combined_results, bl_resuls) def test_add_input_and_ponies(self): steps = 10 task_simulation = simulator.simulate_task_with_input( self._task, self._ball_user_input, steps=steps, stride=1) is_solved, had_occlusions, images, scenes = simulator.magic_ponies( self._task, self._ball_user_input, steps=steps, stride=1, need_images=True, need_featurized_objects=True) self.assertEqual(is_solved, task_simulation.isSolution) self.assertEqual(len(images), steps) self.assertEqual(len(task_simulation.sceneList), steps) self.assertEqual( had_occlusions, task_simulation.sceneList[0].user_input_status == scene_if.UserInputStatus.HAD_OCCLUSIONS) # Check images match target scenes self.assertFalse( np.array_equal( images[0], simulator.scene_to_raster(task_simulation.sceneList[-1]))) self.assertTrue((images[-1] == simulator.scene_to_raster( task_simulation.sceneList[-1])).all()) # Test just images works _, _, only_images, _ = simulator.magic_ponies( self._task, self._ball_user_input, steps=steps, stride=1, need_images=True, need_featurized_objects=False) self.assertTrue(np.array_equal(images, only_images)) # Test just scenes works _, _, _, only_scenes = simulator.magic_ponies( self._task, self._ball_user_input, steps=steps, stride=1, need_images=False, need_featurized_objects=True) self.assertTrue(np.array_equal(scenes, only_scenes)) def test_is_solution_valid(self): steps = 200 assert steps >= simulator.STEPS_FOR_SOLUTION # Empty solution should be valid. 
self.assertTrue( simulator.magic_ponies(self._task, self._box_compressed_user_input, steps=steps)[0]) def test_render(self): array = simulator.scene_to_raster(self._task.scene) self.assertEqual(len(array.shape), 2) self.assertEqual(array.shape[0], self._task.scene.height) self.assertEqual(array.shape[1], self._task.scene.width) def test_render_with_input(self): scene = simulator.simulate_task_with_input(self._task, self._box_user_input, steps=1).sceneList[0] array = simulator.scene_to_raster(scene) self.assertEqual(len(array.shape), 2) self.assertEqual(array.shape[0], self._task.scene.height) self.assertEqual(array.shape[1], self._task.scene.width) def test_add_input_and_simulate_strided(self): steps = 10 full_results = simulator.simulate_task_with_input(self._task, self._box_user_input, stride=1, steps=steps) strided_results = simulator.simulate_task_with_input( self._task, self._box_user_input, stride=3, steps=steps) self.assertEqual(len(full_results.sceneList), steps) self.assertEqual(len(strided_results.sceneList), math.ceil(steps / 3)) self.assertEqual(len(full_results.solvedStateList), steps) self.assertEqual(len(strided_results.solvedStateList), math.ceil(steps / 3)) for i in range(0, steps, 3): self.assertEqual(full_results.sceneList[i], strided_results.sceneList[i // 3]) self.assertEqual(full_results.solvedStateList[i], strided_results.solvedStateList[i // 3]) def test_batched_magic_ponies(self): steps = 61 workers = 3 is_solved, _, images, _ = simulator.batched_magic_ponies( [self._task] * 100, [self._box_compressed_user_input] * 100, workers, steps, need_images=True) self.assertEqual(len(is_solved), 100) self.assertEqual(len(images), 100) self.assertEqual(images[0].shape, (2, 256, 256)) self.assertEqual(images[1].shape, (2, 256, 256)) def test_magic_ponies_objects(self): steps = 1 _, _, _, objects = simulator.magic_ponies(self._task_object_test, self._ball_user_input, steps=steps, stride=1, need_images=False, need_featurized_objects=True) ideal_vector = np.array([[ 50 / 256., 30 / 256., 350. / 360., 0.3, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0 ], [70 / 256., 200 / 256., 0.0, 0.2, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0], [ 100 / 256., 100 / 256., 0, 3.9062500e-02, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0 ]]) np.testing.assert_allclose(ideal_vector, objects[0], atol=1e-3) def test_magic_ponies_jars(self): def mock_center_of_mass(**kwargs): return (0, kwargs['diameter']) with unittest.mock.patch.object( phyre.creator.shapes.Jar, 'center_of_mass', side_effect=mock_center_of_mass) as mock_method: steps = 1 _, _, _, objects = simulator.magic_ponies( self._task_jar_test, self._ball_user_input, steps=steps, stride=1, need_images=False, need_featurized_objects=True) diameter = phyre.creator.shapes.Jar._diameter( **phyre.creator.shapes.Jar.default_sizes(0.3)) ideal_vector = np.array([[ 50 / 256., 30 / 256. + diameter / 256., 0.0, diameter / 256., 0, 0, 1, 0, 0, 0, 0, 1, 0, 0 ], [70 / 256., 200 / 256., 0.0, 0.2, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0], [ 100 / 256., 100 / 256., 0, 3.9062500e-02, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0 ]]) np.testing.assert_allclose(ideal_vector, objects[0], atol=1e-3) if __name__ == '__main__': unittest.main()
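# Hedged standalone sketch (not a test case): the same simulator entry points
# exercised above, outside of unittest. The task name and ball parameters are
# placeholders chosen for illustration.
#
#   [task] = build_task('demo')
#   user_input = simulator.build_user_input(balls=[100, 100, 5])
#   is_solved, had_occlusions, images, objects = simulator.magic_ponies(
#       task, user_input, steps=60, stride=5,
#       need_images=True, need_featurized_objects=True)
#   print(is_solved, images[0].shape)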
nilq/baby-python
python
from math import trunc
num = float(input("Enter a value: "))
print("\nThe value entered was {} and its integer portion is {}".format(num, trunc(num)))
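# Hedged aside (not part of the original exercise): trunc() always cuts toward
# zero, which is why it gives the "integer portion" above; it differs from
# floor() for negative inputs.
from math import floor
print("trunc(-3.7) =", trunc(-3.7))   # -3 (toward zero)
print("floor(-3.7) =", floor(-3.7))   # -4 (toward negative infinity)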
nilq/baby-python
python
from PyQt5 import QtWidgets import difflib from nixui.graphics import generic_widgets class DiffedOptionListSelector(generic_widgets.ScrollListStackSelector): ItemCls = generic_widgets.OptionListItem # TODO: remove break dependency with generic_widgets.py def __init__(self, updates, *args, **kwargs): self.updates_map = { attr: ( old_definition.expression_string if old_definition is not None else None, new_definition.expression_string if new_definition is not None else None ) for attr, (old_definition, new_definition) in updates.items() } super().__init__(*args, **kwargs) # hack: make text box 3x the width of the list view self.stack.setMinimumWidth(self.item_list.width() * 3) def insert_items(self): for option in self.updates_map: it = self.ItemCls(option) self.item_list.addItem(it) def change_selected_item(self): option = self.item_list.currentItem().option old_value, new_value = self.updates_map[option] diff = difflib.unified_diff( old_value.splitlines(1), new_value.splitlines(1), lineterm='' ) # blank lines and control lines diff = [line.strip() for line in diff][3:] diff_str = '\n'.join(diff) view = QtWidgets.QPlainTextEdit(diff_str) view.setReadOnly(True) # monospace font = view.document().defaultFont() font.setFamily("Courier New") view.document().setDefaultFont(font) old_widget = self.current_widget self.stack.addWidget(view) self.stack.setCurrentWidget(view) self.stack.removeWidget(old_widget) self.current_widget = view class DiffDialogBase(QtWidgets.QDialog): def __init__(self, statemodel, *args, **kwargs): super().__init__(*args, **kwargs) self.statemodel = statemodel diff_table = DiffedOptionListSelector(statemodel.get_diffs()) layout = QtWidgets.QVBoxLayout() layout.addWidget(diff_table) layout.addWidget(self.init_btn_box()) self.setSizePolicy(QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum) self.setLayout(layout) class DiffDialog(DiffDialogBase): def init_btn_box(self): btn_box = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok) btn_box.accepted.connect(self.accept) return btn_box class SaveDialog(DiffDialogBase): def init_btn_box(self): btn_box = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Save) btn_box.accepted.connect(self.save) btn_box.rejected.connect(self.reject) return btn_box def save(self): self.statemodel.persist_changes() self.accept()
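# Hedged illustration (standalone, not used by the widgets above): why
# change_selected_item drops the first three entries of the unified diff —
# difflib emits the ---/+++/@@ control lines before the actual +/- lines.
# The option name and values are made-up examples.
if __name__ == '__main__':
    _old = 'services.openssh.enable = true;\n'
    _new = 'services.openssh.enable = false;\n'
    _diff = difflib.unified_diff(_old.splitlines(1), _new.splitlines(1), lineterm='')
    _diff = [line.strip() for line in _diff][3:]
    print('\n'.join(_diff))
    # expected output:
    # -services.openssh.enable = true;
    # +services.openssh.enable = false;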
nilq/baby-python
python
""" This file will parse the input text file and get important knowledge from it and create a database known as Knowledge Base """ import json import os from engine.components.knowledge import Knowledge from engine.logger.logger import Log class KnowledgeBaseParser: """ Class the parse the file and create the Knowledge object list Attributes ---------- __knowledgeBase : list list of the Knowledge objects """ def __init__(self): self.__knowledgeBase = list() def __parseInputFile(self, inputFile): """ Reads the `knowledge.json` and retrieves the target and the rules for the target Parameters ---------- inputFile : str name and path of the file to parsse Returns ------- list list of the Knowledge objects """ # checking if the file exists if os.path.isfile(inputFile) is False: Log.e(f"Knowledge file {inputFile} does not exists") return # reading the file with open(inputFile, "r") as file: file = json.load(file) for knowledge in file['target']: knowledgeBase = Knowledge() for rule in knowledge['rules']: knowledgeBase.addRule(target=knowledge['name'], rule=knowledge['rules'][rule]) self.__knowledgeBase.append(knowledgeBase) return self.__knowledgeBase def getKnowledgeBase(self, inputFile): """ Parsing the input file and returning the list Parameters ---------- inputFile : str name and path of the file to parse Returns ------- list list of the Knowledge objects """ return self.__parseInputFile(inputFile)
nilq/baby-python
python
# Generated by Django 3.1.2 on 2021-04-23 20:30 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('recruiters', '0002_auto_20210423_2028'), ] operations = [ migrations.RemoveField( model_name='job', name='slug', ), ]
nilq/baby-python
python
""" Ringing artifact reduction example ================================== This example shows how to subtract the impulse response from a filter to reduce ringing artifacts. """ import matplotlib.pyplot as plt import numpy as np from scipy.signal import butter, lfilter from meegkit.detrend import reduce_ringing # import config # plotting utils np.random.seed(9) ############################################################################### # Detrending # ============================================================================= ############################################################################### # Basic example with a linear trend # ----------------------------------------------------------------------------- # Simulate the effect of filtering a signal containing a discontinuity, and try # to remove the resulting ringing artifact by subtracing the opposite of the # impulse response. x = np.arange(1000) < 1 [b, a] = butter(6, 0.2) # Butterworth filter design x = lfilter(b, a, x) * 50 # Filter data using above filter x = np.roll(x, 500) x = x[:, None] + np.random.randn(1000, 2) y = reduce_ringing(x, samples=np.array([500])) plt.figure() plt.plot(x + np.array([-10, 10]), 'C0', label='before') plt.plot(y + np.array([-10, 10]), 'C1:', label='after') plt.legend() plt.show()
nilq/baby-python
python
import numpy as np import statsmodels.formula.api as smf from patsy import dmatrix, build_design_matrices from pandas import DataFrame class QuantileSpline: def __init__(self, quantiles=0.5, df=3): self.quantiles = quantiles self.df = df self.label = 'Quantile Spline' self.filename = 'spline' def fit(self, X, y): # Build the design matrix via a tensor basis expansion of natural spline bases data = {'x{}'.format(i+1): x for i, x in enumerate(X.T)} design_matrix = dmatrix("te(" + ",".join(['cr(x{}, df={})'.format(i+1, self.df) for i in range(X.shape[1])]) + ", constraints='center')", data) # Save the design information for future predictions self.design_info = design_matrix.design_info # Fit the model using the basis mod = smf.quantreg('y ~ x - 1', {'y': y, 'x': design_matrix}) if np.isscalar(self.quantiles): self.model = mod.fit(q=self.quantiles) else: self.model = [mod.fit(q=q) for q in self.quantiles] def predict(self, X): data = {'x{}'.format(i+1): x for i, x in enumerate(X.T)} design_matrix = build_design_matrices([self.design_info], data)[0] if np.isscalar(self.quantiles): return self.model.predict({'x': design_matrix}) return np.array([m.predict({'x': design_matrix}) for m in self.model]).T
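# Hedged usage sketch on synthetic data (the input values below are made up,
# and the quantile-regression fit may emit convergence warnings on other data).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.uniform(0, 10, size=(200, 1))
    y = np.sin(X[:, 0]) + rng.normal(scale=0.3, size=200)

    model = QuantileSpline(quantiles=[0.1, 0.5, 0.9], df=4)
    model.fit(X, y)
    preds = model.predict(X[:5])
    print(preds.shape)  # (5, 3): one column per requested quantile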
nilq/baby-python
python
# Copyright 2008 German Aerospace Center (DLR) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Handling of principals for ACEs according to WebDAV ACP specification. """ from pyomni.webdav import Constants from pyomni.webdav.Connection import WebdavError __version__ = "$LastChangedRevision$" class Principal(object): """ This class provides functionality for handling principals according to the WebDAV ACP. @ivar displayname: Name of the principal for output @type displayname: C{string} @ivar principalURL: URL under which the principal can be referenced on the server. @type principalURL: C{string} @ivar property: Information on type of a pseudo/jproperty principal, e. g. DAV:owner, DAV:authenticated, etc. @type property: C{string} @cvar _TAG_LIST_PRINCIPALS: List of allowed XML tags within a principal declaration. @type _TAG_LIST_PRINCIPALS: C{tuple} of C{string}s @cvar _TAG_LIST_STATUS: List of XML tags for the status of a pseudo principal. @type _TAG_LIST_STATUS: C{tuple} of C{string}s """ # some local constants for this class to make things easier/more readable: _TAG_LIST_PRINCIPALS = (Constants.TAG_HREF, # directly by URL Constants.TAG_ALL, Constants.TAG_AUTHENTICATED, Constants.TAG_UNAUTHENTICATED, # by log-in status Constants.TAG_PROPERTY, # for property info, e. g. 'owner' Constants.TAG_SELF, # only if the resource is the principal itself Constants.TAG_PROP) # contains property info like 'displayname' _TAG_LIST_STATUS = (Constants.TAG_ALL, Constants.TAG_AUTHENTICATED, Constants.TAG_UNAUTHENTICATED) # restrict instance variables __slots__ = ('displayname', 'principalURL', 'property') def __init__(self, domroot=None, displayname=None, principalURL=None): """ Constructor should be called with either no parameters (create blank Principal), one parameter (a DOM tree), or two parameters (displayname and URL or property tag). @param domroot: A DOM tree (default: None). @type domroot: L{webdav.WebdavResponse.Element} object @param displayname: The display name of a principal (default: None). @type displayname: C{string} @param principalURL: The URL representing a principal (default: None). @type principalURL: C{string} @raise WebdavError: When non-valid parameters or sets of parameters are passed a L{WebdavError} is raised. 
""" self.displayname = None self.principalURL = None self.property = None if domroot: for child in domroot.children: if child.ns == Constants.NS_DAV and (child.name in self._TAG_LIST_PRINCIPALS): if child.name == Constants.TAG_PROP: self.displayname = \ child.find(Constants.PROP_DISPLAY_NAME, Constants.NS_DAV) elif child.name == Constants.TAG_HREF: self.principalURL = child.textof() if self.principalURL and self.property in self._TAG_LIST_STATUS: raise WebdavError('Principal cannot contain a URL and "%s"' % (self.property)) elif child.name == Constants.TAG_PROPERTY: if child.count() == 1: if self.property: raise WebdavError('Property for principal has already been set: old "%s", new "%s"' \ % (self.property, child.pop().name)) elif self.principalURL: raise WebdavError('Principal cannot contain a URL and "%s"' % (self.property)) else: self.property = child.pop().name else: raise WebdavError("There should be only one value in the property for a principal, we have: %s" \ % child.name) else: if self.property: raise WebdavError('Property for principal has already been set: old "%s", new "%s"' \ % (self.property, child.name)) else: self.property = child.name if self.principalURL and self.property in self._TAG_LIST_STATUS: raise WebdavError('Principal cannot contain a URL and "%s"' % (self.property)) else: # This shouldn't happen, something's wrong with the DOM tree raise WebdavError('Non-valid tag in principal DOM tree for constructor: %s' % child.name) elif displayname == None or principalURL == None: if displayname: self.displayname = displayname if principalURL: self.principalURL = principalURL else: # This shouldn't happen, someone screwed up with the params ... raise WebdavError('Non-valid parameters handed to Principal constructor.') def __cmp__(self, other): if not isinstance(other, Principal): return 1 if self.displayname == other.displayname \ and self.principalURL == other.principalURL \ and self.property == other.property: return 0 else: return 1 def __repr__(self): return '<class Principal: displayname: "%s", principalURL: "%s", property: "%s">' \ % (self.displayname, self.principalURL, self.property) def copy(self, other): """Copy Principal object. @param other: Another principal to copy. @type other: L{Principal} object @raise WebdavError: When an object that is not a L{Principal} is passed a L{WebdavError} is raised. """ if not isinstance(other, Principal): raise WebdavError('Non-Principal object passed to copy method: ' % other.__class__) self.displayname = other.displayname self.principalURL = other.principalURL self.property = other.property def isValid(self): """ Checks whether necessarry props for principal are set. @return: Validity of principal. @rtype: C{bool} """ return (self.displayname and (self.principalURL or self.property) and not (self.principalURL and self.property)) def toXML(self, invert=False, displayname=False, defaultNameSpace=None): """Returns string of Principal content in valid XML as described in WebDAV ACP. @param defaultNameSpace: Name space (default: None). @type defaultNameSpace: C(string) @param invert: True if principal should be inverted (default: False). @type invert: C{bool} @param displayname: True if displayname should be in output (default: False). @type displayname: C{bool} """ # this check is needed for setting principals only: # assert self.isValid(), "principal is not initialized or does not contain valid content!" 
PRINCIPAL = 'D:' + Constants.TAG_PRINCIPAL res = '' if self.principalURL: res += '<D:%s>%s</D:%s>' % (Constants.TAG_HREF, self.principalURL, Constants.TAG_HREF) elif self.property in self._TAG_LIST_STATUS \ or self.property == Constants.TAG_SELF: res += '<D:%s/>' % (self.property) elif self.property: res += '<D:%s><D:%s/></D:%s>' \ % (Constants.TAG_PROPERTY, self.property, Constants.TAG_PROPERTY) if self.displayname and displayname: res += '<D:%s><D:%s>%s</D:%s></D:%s>' \ % (Constants.TAG_PROP, Constants.PROP_DISPLAY_NAME, self.displayname, Constants.PROP_DISPLAY_NAME, Constants.TAG_PROP) if invert: res = '<D:invert>%s</D:invert>' % (res) return '<%s>%s</%s>' % (PRINCIPAL, res, PRINCIPAL)
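
# A minimal usage sketch for the class above; the 'owner' property name is only
# an illustrative choice, not something mandated by the original module.
if __name__ == '__main__':
    example = Principal()
    example.displayname = 'Administrators'
    example.property = 'owner'
    # A display name plus exactly one of principalURL/property makes it valid.
    assert example.isValid()
    print(example.toXML(displayname=True))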
nilq/baby-python
python
from django.test import TestCase from . import factories from .. import models class TestCard(TestCase): model = models.Card def test_str(self): """A card's str representation is its name.""" name = 'Leeroy Jenkins' card = factories.CardFactory.create(name=name) self.assertEqual(str(card), name) def test_get_template(self): """The method returns the template name if it has "custom/" already.""" template_name = 'custom/leeroy.html' card = factories.CardFactory.create(template_name=template_name) self.assertEqual(card.get_template(), template_name) def test_get_template_adjustment(self): """The method returns the template name with "custom/" added if necessary.""" template_name = 'leeroy.html' card = factories.CardFactory.create(template_name=template_name) self.assertEqual(card.get_template(), 'custom/' + template_name)
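
# A standalone sketch of the contract these tests exercise; the real Card model
# lives in ..models and may differ, so this stand-in is illustration only.
class _CardTemplateSketch:
    def __init__(self, template_name):
        self.template_name = template_name

    def get_template(self):
        # Prepend "custom/" only when the stored name does not already carry it.
        if self.template_name.startswith('custom/'):
            return self.template_name
        return 'custom/' + self.template_name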
nilq/baby-python
python
from foolbox.zoo import git_cloner import os import hashlib import pytest from foolbox.zoo.git_cloner import GitCloneError def test_git_clone(): # given git_uri = "https://github.com/bethgelab/convex_adversarial.git" expected_path = _expected_path(git_uri) # when path = git_cloner.clone(git_uri) # then assert path == expected_path def test_wrong_git_uri(): git_uri = "git@github.com:bethgelab/non-existing-repo.git" with pytest.raises(GitCloneError): git_cloner.clone(git_uri) def _expected_path(git_uri): home = os.path.expanduser('~') m = hashlib.sha256() m.update(git_uri.encode()) hash = m.hexdigest() expected_path = os.path.join(home, '.foolbox_zoo', hash) return expected_path
nilq/baby-python
python
# pip3 install 'gym[atari,accept-rom-license]==0.22.0' import matplotlib.pyplot as plt import gym from gym import wrappers import random import numpy as np env = gym.make('ALE/MsPacman-v5', render_mode='human') height, width, channels = env.observation_space.shape actions = env.action_space.n episodes = 1 random_model_scores = [] # Saves .mp4 and .json files env = wrappers.Monitor(env, "./stream_test/gym-results", force=True) game_arrays = [] for episode in range(1, episodes+1): state = env.reset() done = False score = 0 while not done: # action = random.choice([0,1,2,3,4,5,6,7,8]) action = env.action_space.sample() n_state, reward, done, info = env.step(action) game_arrays.append(env.render(mode='rgb_array')) # action: int - 0,1,2,3,4,5,6,7,8 # n_state: numpy array - dimensions (210, 160, 3) --> (height, width, RGB channels) # reward: float - 0.0 # done: boolean - True, False # info: dictionary - {'lives': 1, 'episode_frame_number': 1892, 'frame_number': 1892} score += reward print('Episode:{} Score:{}'.format(episode, score)) # env.play() random_model_scores.append(score) env.close() # Get numpy array of game to plot later on game_array = np.stack(game_arrays, axis=3) game_array = np.rollaxis(game_array, -1) print(game_array.shape) # for i in range(20): # plt.imshow(game_array[i,:,:,:]) # plt.show() # Create video stream from numpy arrays in matplotlib # https://ben.bolte.cc/matplotlib-videos
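
# Following up on the link above, one way to play the captured frames back as a
# matplotlib animation.  This is a sketch: saving to mp4 additionally assumes
# ffmpeg is installed, which is why that line is left commented out.
from matplotlib import animation

fig = plt.figure()
frame_image = plt.imshow(game_array[0], animated=True)
plt.axis('off')

def _update(frame_idx):
    # Swap the next captured frame into the existing image artist.
    frame_image.set_array(game_array[frame_idx])
    return (frame_image,)

anim = animation.FuncAnimation(fig, _update, frames=game_array.shape[0],
                               interval=50, blit=True)
# anim.save('mspacman.mp4', writer=animation.FFMpegWriter(fps=20))
plt.show()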
nilq/baby-python
python
from django import template

register = template.Library()

@register.filter
def mul(value, arg):
    """Multiply ``value`` by ``arg`` (coerced to int) and return an int."""
    arg = int(arg)
    return int(value * arg)
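
# Usage sketch in a template.  The tag-library name "math_extras" is only a
# placeholder for however this project names its templatetags module:
#
#   {% load math_extras %}
#   {{ item.quantity|mul:"3" }}   {# renders item.quantity * 3 as an integer #}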
nilq/baby-python
python
############################################################ # -*- coding: utf-8 -*- # # # # # # # #### # ## ## # ## # # # # # # # # # # # ### # # ## # ## ## # # # # # # # #### # # Python-based Tool for interaction with the 10micron mounts # GUI with PyQT5 for python # Python v3.6.5 # # Michael Würtenberger # (c) 2016, 2017, 2018 # # Licence APL2.0 # ########################################################### import logging import time import PyQt5 import requests from requests_toolbelt.multipart import encoder from baseclasses import checkIP import json import collections import copy class AstrometryClient: logger = logging.getLogger(__name__) solveData = {'session': '12345', 'allow_commercial_use': 'd', 'allow_modifications': 'd', 'publicly_visible': 'n', 'scale_units': 'arcsecperpix', 'scale_type': 'ev', 'scale_est': 1.3, 'scale_err': 20, 'center_ra': 315, 'center_dec': 68, 'radius': 1, 'downsample_factor': 2, 'use_sextractor': False, 'crpix_center': True, 'parity': 2 } def __init__(self, main, app, data): self.main = main self.app = app self.data = data self.application = dict() self.cancel = False self.mutexCancel = PyQt5.QtCore.QMutex() self.checkIP = checkIP.CheckIP() self.application = { 'AstrometryHost': '192.168.2.161', 'AstrometryPort': 3499, 'URLLogin': '', 'URLAPI': '', 'APIKey': '', 'TimeoutMax': 60, 'Connected': False, 'Available': True, 'Name': 'ASTROMETRY.NET', 'Status': '' } self.app.ui.le_AstrometryHost.editingFinished.connect(self.changeIPSettings) self.app.ui.le_AstrometryPort.editingFinished.connect(self.changeIPSettings) self.app.ui.le_AstrometryAPIKey.editingFinished.connect(self.changeIPSettings) def initConfig(self): try: if 'AstrometryTimeout' in self.app.config: self.app.ui.le_astrometryTimeout.setText(self.app.config['AstrometryTimeout']) if 'AstrometryHost' in self.app.config: self.app.ui.le_AstrometryHost.setText(self.app.config['AstrometryHost']) if 'AstrometryPort' in self.app.config: self.app.ui.le_AstrometryPort.setText(self.app.config['AstrometryPort']) if 'AstrometryAPIKey' in self.app.config: self.app.ui.le_AstrometryAPIKey.setText(self.app.config['AstrometryAPIKey']) if 'AstrometryDownsample' in self.app.config: self.app.ui.astrometryDownsampling.setValue(self.app.config['AstrometryDownsample']) if 'AstrometryRadius' in self.app.config: self.app.ui.astrometryRadius.setValue(self.app.config['AstrometryRadius']) except Exception as e: self.logger.error('Item in config.cfg for astrometry client could not be initialized, error:{0}'.format(e)) finally: pass self.changeIPSettings() def storeConfig(self): self.app.config['AstrometryPort'] = self.app.ui.le_AstrometryPort.text() self.app.config['AstrometryHost'] = self.app.ui.le_AstrometryHost.text() self.app.config['AstrometryAPIKey'] = self.app.ui.le_AstrometryAPIKey.text() self.app.config['AstrometryTimeout'] = self.app.ui.le_astrometryTimeout.text() self.app.config['AstrometryDownsample'] = self.app.ui.astrometryDownsampling.value() self.app.config['AstrometryRadius'] = self.app.ui.astrometryRadius.value() def start(self): pass def stop(self): pass def setCancelAstrometry(self): self.mutexCancel.lock() self.cancel = True self.mutexCancel.unlock() def changeIPSettings(self): self.data['Status'] = 'ERROR' self.data['CONNECTION']['CONNECT'] = 'Off' host = self.app.ui.le_AstrometryHost.text() port = self.app.ui.le_AstrometryPort.text() self.application['AstrometryHost'] = host self.application['AstrometryPort'] = int(port) self.application['URLAPI'] = 'http://' + host + ':' + port + '/api' 
self.application['URLLogin'] = 'http://' + host + ':' + port + '/api/login' self.application['APIKey'] = self.app.ui.le_AstrometryAPIKey.text() self.application['Name'] = 'Astrometry' self.application['TimeoutMax'] = float(self.app.ui.le_astrometryTimeout.text()) self.app.messageQueue.put('Setting IP address for astrometry to: {0}:{1}\n'.format(self.application['AstrometryHost'], self.application['AstrometryPort'])) self.logger.info('Setting IP address for astrometry to: {0}:{1}, key: {2}'.format(self.application['AstrometryHost'], self.application['AstrometryPort'], self.application['APIKey'])) def getStatus(self): if self.application['URLAPI'] == '': return if self.checkIP.checkIPAvailable(self.application['AstrometryHost'], self.application['AstrometryPort']): self.application['Status'] = 'OK' self.data['CONNECTION']['CONNECT'] = 'On' else: self.data['Status'] = 'ERROR' self.data['CONNECTION']['CONNECT'] = 'Off' def callbackUpload(self, monitor): self.main.astrometrySolvingTime.emit('{0:3d}%'.format(int(monitor.bytes_read / monitor.len * 100))) def solveImage(self, imageParams): self.mutexCancel.lock() self.cancel = False self.mutexCancel.unlock() downsampleFactor = self.app.ui.astrometryDownsampling.value() radius = self.app.ui.astrometryRadius.value() # waiting for start solving timeSolvingStart = time.time() # defining start values errorState = False result = '' response = '' stat = '' submissionID = '' jobID = '' headers = dict() imageParams['Message'] = '' self.main.astrometryStatusText.emit('START') # check if we have the online solver running self.main.astrometrySolvingTime.emit('{0:02.0f}'.format(time.time()-timeSolvingStart)) if self.application['APIKey'] != '': # we have to login with the api key for the online solver to get the session key try: response = requests.post(self.application['URLLogin'], data={'request-json': json.dumps({"apikey": self.application['APIKey']})}, headers={}) result = json.loads(response.text) except Exception as e: self.logger.error('Problem setting api key, error: {0}, result: {1}, response: {2}' .format(e, result, response)) imageParams['Message'] = 'Login with api key failed' errorState = True finally: pass if not errorState: if 'status' in result: if result['status'] == 'error': self.app.messageQueue.put('Get session key for ASTROMETRY.NET failed because: {0}\n'.format(result['errormessage'])) self.logger.error('Get session key failed because: {0}'.format(result['errormessage'])) errorState = True elif result['status'] == 'success': self.solveData['session'] = result['session'] self.app.messageQueue.put('\tSession key for ASTROMETRY.NET is [{0}]\n'.format(result['session'])) else: imageParams['Message'] = 'Malformed result in login procedure' errorState = True else: # local solve runs with dummy session key self.solveData['session'] = '12345' self.main.astrometrySolvingTime.emit('{0:02.0f}'.format(time.time()-timeSolvingStart)) # loop for upload self.main.astrometryStatusText.emit('UPLOAD') # start uploading the data and define the parameters data = copy.copy(self.solveData) data['downsample_factor'] = downsampleFactor # check if you want to use this parameter. 
if 0, than remove it if radius > 0: data['radius'] = radius else: if 'radius' in data: del data['radius'] data['scale_est'] = float(imageParams['ScaleHint']) # ra is in hours data['center_ra'] = imageParams['RaJ2000'] * 360 / 24 data['center_dec'] = float(imageParams['DecJ2000']) if not errorState: fields = collections.OrderedDict() fields['request-json'] = json.dumps(data) fields['file'] = (imageParams['Imagepath'], open(imageParams['Imagepath'], 'rb'), 'application/octet-stream') encodedMultipart = encoder.MultipartEncoder(fields) monitorMultipart = encoder.MultipartEncoderMonitor(encodedMultipart, self.callbackUpload) try: result = '' response = requests.post(self.application['URLAPI'] + '/upload', data=monitorMultipart, headers={'Content-Type': monitorMultipart.content_type}) result = json.loads(response.text) stat = result['status'] self.logger.info('Result upload: {0}, reply: {1}'.format(result, response)) except Exception as e: self.logger.error('Problem upload, error: {0}, result: {1}, response: {2}'.format(e, result, response)) errorState = True imageParams['Message'] = 'Error upload' finally: pass if not errorState: if stat != 'success': self.logger.warning('Could not upload image to astrometry server, error: {0}'.format(result)) imageParams['Message'] = 'Upload failed' errorState = True else: submissionID = result['subid'] self.main.astrometrySolvingTime.emit('{0:02.0f}'.format(time.time()-timeSolvingStart)) # loop for solve self.main.astrometryStatusText.emit('SOLVE-Sub') # wait for the submission = star detection algorithm to take place while not self.cancel and not errorState: data = {'request-json': ''} headers = {} try: result = '' response = requests.get(self.application['URLAPI'] + '/submissions/{0}' .format(submissionID), data=data, headers=headers) result = json.loads(response.text) self.logger.info('Result submissions: {0}, reply: {1}'.format(result, response)) except Exception as e: self.logger.error('Problem submissions, error: {0}, result: {1}, response: {2}' .format(e, result, response)) errorState = True imageParams['Message'] = 'Error submissions' break finally: pass if 'jobs' in result: jobs = result['jobs'] else: self.logger.error('Problem submissions, job not found, result: {0}, response: {1}'.format(result, response)) errorState = True break if len(jobs) > 0: if jobs[0] is not None: jobID = jobs[0] break if time.time()-timeSolvingStart > self.application['TimeoutMax']: # timeout after timeoutMax seconds errorState = True imageParams['Message'] = 'Timeout' break self.main.astrometrySolvingTime.emit('{0:02.0f}'.format(time.time()-timeSolvingStart)) time.sleep(1) # waiting for the solving results done by jobs are present self.main.astrometryStatusText.emit('SOLVE-Job') while not self.cancel and not errorState: data = {'request-json': ''} headers = {} try: result = '' response = requests.get(self.application['URLAPI'] + '/jobs/{0}' .format(jobID), data=data, headers=headers) result = json.loads(response.text) self.logger.info('Result jobs: {0}, reply: {1}'.format(result, response)) except Exception as e: self.logger.error('Problem jobs, error: {0}, result: {1}, response: {2}'.format(e, result, response)) errorState = True imageParams['Message'] = 'Error jobs' finally: pass if 'status' in result: stat = result['status'] else: self.logger.error('Problem jobs, status not found, result: {0}, response: {1}'.format(result, response)) errorState = True break if stat == 'success': break if stat == 'failure': errorState = True break if time.time()-timeSolvingStart > 
self.application['TimeoutMax']: # timeout after timeoutMax seconds errorState = True imageParams['Message'] = 'Timeout' break self.main.astrometrySolvingTime.emit('{0:02.0f}'.format(time.time()-timeSolvingStart)) time.sleep(1) # Loop for data self.main.imageSolved.emit() self.main.astrometryStatusText.emit('GET DATA') # now get the solving data and results if not self.cancel and not errorState: try: result = '' response = requests.get(self.application['URLAPI'] + '/jobs/{0}/calibration' .format(jobID), data=data, headers=headers) result = json.loads(response.text) self.logger.info('Result calibration: {0}, reply: {1}'.format(result, response)) imageParams['Solved'] = True imageParams['RaJ2000Solved'] = result['ra'] * 24 / 360 imageParams['DecJ2000Solved'] = result['dec'] imageParams['Scale'] = result['pixscale'] imageParams['Angle'] = result['orientation'] imageParams['TimeTS'] = time.time()-timeSolvingStart self.main.astrometrySolvingTime.emit('{0:02.0f}'.format(time.time()-timeSolvingStart)) imageParams['Message'] = 'Solved with success' except Exception as e: self.logger.error('Problem get calibration data, error: {0}, result: {1}, response: {2}'.format(e, result, response)) imageParams['RaJ2000Solved'] = 0 imageParams['DecJ2000Solved'] = 0 imageParams['Scale'] = 0 imageParams['Angle'] = 0 imageParams['TimeTS'] = time.time()-timeSolvingStart imageParams['Solved'] = False imageParams['Message'] = 'Solve failed' finally: pass else: imageParams['Solved'] = False imageParams['Message'] = 'Solve failed' # finally idle self.main.imageDataDownloaded.emit() self.main.astrometryStatusText.emit('IDLE') self.main.astrometrySolvingTime.emit('')
nilq/baby-python
python
import logging import os import arrow from humanfriendly import parse_size from .api import delete_file, get_all_results, upload_file from .utils_fs import download_file, validate_metadata MAX_SIZE_DEFAULT = '128m' class OHProject: """ Work with an Open Humans Project. """ def __init__(self, master_access_token): self.master_access_token = master_access_token self.project_data = None self.update_data() @staticmethod def _get_member_file_data(member_data): file_data = {} for datafile in member_data['data']: basename = datafile['basename'] if (basename not in file_data or arrow.get(datafile['created']) > arrow.get(file_data[basename]['created'])): file_data[basename] = datafile return file_data def update_data(self): url = ('https://www.openhumans.org/api/direct-sharing/project/' 'members/?access_token={}'.format(self.master_access_token)) results = get_all_results(url) self.project_data = {result['project_member_id']: result for result in results} @classmethod def download_member_project_data(cls, member_data, target_member_dir, max_size=MAX_SIZE_DEFAULT): """ Download files to sync a local dir to match OH member project data. """ logging.debug('Download member project data...') sources_shared = member_data['sources_shared'] file_data = cls._get_member_file_data(member_data) for basename in file_data: # This is using a trick to identify a project's own data in an API # response, without knowing the project's identifier: if the data # isn't a shared data source, it must be the project's own data. if file_data[basename]['source'] in sources_shared: continue target_filepath = os.path.join(target_member_dir, basename) download_file(download_url=file_data[basename]['download_url'], target_filepath=target_filepath, max_bytes=parse_size(max_size)) @classmethod def download_member_shared(cls, member_data, target_member_dir, source=None, max_size=MAX_SIZE_DEFAULT): """ Download files to sync a local dir to match OH member shared data. Files are downloaded to match their "basename" on Open Humans. If there are multiple files with the same name, the most recent is downloaded. """ logging.debug('Download member shared data...') sources_shared = member_data['sources_shared'] file_data = cls._get_member_file_data(member_data) logging.info('Downloading member data to {}'.format(target_member_dir)) for basename in file_data: # If not in sources shared, it's the project's own data. Skip. if file_data[basename]['source'] not in sources_shared: continue # Filter source if specified. Determine target directory for file. 
if source: if source == file_data[basename]['source']: target_filepath = os.path.join(target_member_dir, basename) else: continue else: source_data_dir = os.path.join(target_member_dir, file_data[basename]['source']) if not os.path.exists(source_data_dir): os.mkdir(source_data_dir) target_filepath = os.path.join(source_data_dir, basename) download_file(download_url=file_data[basename]['download_url'], target_filepath=target_filepath, max_bytes=parse_size(max_size)) def download_all(self, target_dir, source=None, project_data=False, memberlist=None, excludelist=None, max_size=MAX_SIZE_DEFAULT): members = self.project_data.keys() for member in members: if not (memberlist is None) and member not in memberlist: logging.debug('Skipping {}, not in memberlist'.format(member)) continue if excludelist and member in excludelist: logging.debug('Skipping {}, in excludelist'.format(member)) continue member_dir = os.path.join(target_dir, member) if not os.path.exists(member_dir): os.mkdir(member_dir) if project_data: self.download_member_project_data( member_data=self.project_data[member], target_member_dir=member_dir, max_size=max_size) else: self.download_member_shared( member_data=self.project_data[member], target_member_dir=member_dir, source=source, max_size=max_size) @staticmethod def upload_member_from_dir(member_data, target_member_dir, metadata, access_token, mode='default', max_size=MAX_SIZE_DEFAULT): """ Upload files in target directory to an Open Humans member's account. The default behavior is to overwrite files with matching filenames on Open Humans, but not otherwise delete files. If the 'mode' parameter is 'safe': matching filenames will not be overwritten. If the 'mode' parameter is 'sync': files on Open Humans that are not in the local directory will be deleted. """ if not validate_metadata(target_member_dir, metadata): raise ValueError('Metadata should match directory contents!') project_data = {f['basename']: f for f in member_data['data'] if f['source'] not in member_data['sources_shared']} for filename in metadata: if filename in project_data and mode == 'safe': logging.info('Skipping {}, remote exists with matching' ' name'.format(filename)) continue filepath = os.path.join(target_member_dir, filename) remote_file_info = (project_data[filename] if filename in project_data else None) upload_file(target_filepath=filepath, metadata=metadata[filename], access_token=access_token, project_member_id=member_data['project_member_id'], remote_file_info=remote_file_info) if mode == 'sync': for filename in project_data: if filename not in metadata: logging.debug("Deleting {}".format(filename)) delete_file( file_basename=filename, access_token=access_token, project_member_id=member_data['project_member_id'])
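
# A minimal usage sketch; the access token and target directory are placeholders.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    os.makedirs('oh-data', exist_ok=True)
    project = OHProject(master_access_token='MASTER_ACCESS_TOKEN')
    # Downloads each member's shared files to ./oh-data/<project_member_id>/<source>/
    project.download_all(target_dir='oh-data', max_size='64m')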
nilq/baby-python
python
# ============================================================================= # # Copyright (c) Kitware, Inc. # All rights reserved. # See LICENSE.txt for details. # # This software is distributed WITHOUT ANY WARRANTY; without even # the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR # PURPOSE. See the above copyright notice for more information. # # ============================================================================= from ._smtkPybindOperation import * """ Operation parameters are simply instances of smtk.attribute.Attribute. We wrap smtk.operation.Operation.parameters() in a python <_create_api> that constructs a more user-friendly API to the python interface for operation parameters. """ def _create_api(parameters): def bind(instance, name, func): """ Bind the function *func* to *instance* with provided name *name*. The provided *func* should accept the instance as the first argument, i.e. "self". """ bound_method = func.__get__(instance, instance.__class__) setattr(instance, name, bound_method) return bound_method def api_for_item(parameters, item): name = item.name() import re nameList = re.split(r"[^a-zA-Z0-9]", item.name().title()) if hasattr(item, 'value'): set_attr = 'set' + ''.join(nameList) get_attr = nameList[0].lower() if len(nameList) > 1: get_attr += ''.join(nameList[1:]) def setValue(self, *argv): index = 0 if len(argv) > 1: index = argv[0] return self.find(name).setValue(index, argv[-1]) bind(parameters, set_attr, setValue) def value(self, index=0): return self.find(name).value(index) bind(parameters, get_attr, value) if item.isOptional(): enable_attr = 'enable' + ''.join(nameList) enabled_attr = nameList[0].lower() if len(nameList) > 1: enabled_attr += ''.join(nameList[1:]) enabled_attr += 'Enabled' def enable(self, choice): return self.find(name).setIsEnabled(choice) bind(parameters, enable_attr, enable) def isEnabled(self): return self.find(name).isEnabled() bind(parameters, enabled_attr, isEnabled) if hasattr(item, 'isDiscrete'): setindex_attr = 'set' + ''.join(nameList) + 'Index' def setDiscreteIndex(self, index, value): return self.find(name).setDiscreteIndex(index, value) bind(parameters, setindex_attr, setDiscreteIndex) getindex_attr = nameList[0].lower() if len(nameList) > 1: getindex_attr += ''.join(nameList[1:]) getindex_attr += "Index" def discreteIndex(self, index): return self.find(name).discreteIndex(index) bind(parameters, getindex_attr, discreteIndex) for i in range(parameters.numberOfItems()): api_for_item(parameters, parameters.item(i)) def _params(self): params = self._parameters() _create_api(params) return params setattr(Operation, 'parameters', _params) del _params """ Provide a method to register all operations in a module. """ def _registerModuleOperations(self, module): """Register all SMTK operations in a python module to this manager. Note this does not recurse modules; only operations directly inside the module are imported. """ for item in dir(module): try: thing = getattr(module, item) if issubclass(thing, Operation): self.registerOperation(module.__name__, item) except: continue setattr(Manager, 'registerModuleOperations', _registerModuleOperations) del _registerModuleOperations
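
# A sketch of the attribute names the binding above generates.  For an item
# whose name() is "file name" (a hypothetical item used purely for illustration),
# _create_api would attach to the parameters object:
#
#   params.setFileName('/tmp/out.smtk')   # wraps params.find('file name').setValue(0, ...)
#   params.fileName()                     # wraps params.find('file name').value(0)
#   params.enableFileName(True)           # only generated if the item is optional
#   params.fileNameEnabled()              # only generated if the item is optional
#   params.setFileNameIndex(0, 2)         # only generated if the item is discrete
#   params.fileNameIndex(0)               # only generated if the item is discrete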
nilq/baby-python
python
tri = [ [0, 75, 0], [0, 95, 64, 0], [0, 17, 47, 82, 0], [0, 18, 35, 87, 10, 0], [0, 20, 4, 82, 47, 65, 0], [0, 19, 1, 23, 75, 3, 34, 0], [0, 88, 2, 77, 73, 7, 63, 67, 0], [0, 99, 65, 4, 28, 6, 16, 70, 92, 0], [0, 41, 41, 26, 56, 83, 40, 80, 70, 33, 0], [0, 41, 48, 72, 33, 47, 32, 37, 16, 94, 29, 0], [0, 53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14, 0], [0, 70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57, 0], [0, 91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48, 0], [0, 63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31, 0], [0, 4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23, 0], ] for i in range(1,len(tri)): for j in range(1,len(tri[i])-1): left = tri[i-1][j-1] right = tri[i-1][j] tri[i][j] = tri[i][j] + max(left, right) print(max(tri[-1]))
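
# The loop above is a top-down dynamic programme: after row i is processed,
# tri[i][j] holds the largest path sum ending at that cell, and the zero padding
# on both ends lets each cell read its two parents without bounds checks.
# A small worked example using the same padding convention:
#
#     3            [0,  3,  0]
#     7 4     ->   [0, 10,  7,  0]          # 7+3, 4+3
#     2 4 6        [0, 12, 14, 13,  0]      # best path = 14 (3 + 7 + 4)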
nilq/baby-python
python
from .model import ( set_db_path, Model, Field, IntField, DateTimeField, StrField )
nilq/baby-python
python
from typing import Union, Optional import torch from falkon.options import FalkonOptions from falkon.sparse.sparse_tensor import SparseTensor from falkon.utils import TicToc, decide_cuda from falkon.la_helpers import mul_triang, copy_triang, trsm, vec_mul_triang from falkon.utils.tensor_helpers import create_same_stride, is_f_contig, create_fortran from .preconditioner import Preconditioner from .pc_utils import * class FalkonPreconditioner(Preconditioner): r"""Approximated Cholesky Preconditioner for FALKON. The preconditioner is based on the :math:`K_{MM}` kernel between the inducing points. A two step approximation of the inverse matrix via two Cholesky decompositions is performed. Starting with :math:`K_{MM}` we obtain :math:`T = \mathrm{chol}(K_{MM})`. Then we can obtain :math:`A = \mathrm{chol}(\frac{1}{M} T T^\top + \lambda)` via another Cholesky decomposition. Both `T` and `A` are upper triangular: the first gets stored in the upper triangle of the :math:`K_{MM}` matrix (called `fC` in the code), while the second is stored in the lower triangle. Whenever we want to use one of the two triangles we must reset the matrix diagonal, since it is shared between the two matrices. Parameters ----------- penalty : float The regularization parameter for KRR. Must be greater than 0. kernel : falkon.kernels.kernel.Kernel The kernel object. This is used to compute the M*M kernel between inducing points. The kernel matrix is then overwritten by the preconditioner itself. opt : FalkonOptions Additional options to be used in computing the preconditioner. Relevant options are: - pc_epsilon : the jitter to add to the kernel matrix to make it positive-definite and allow Cholesky decomposition. This can be either a float, or a dictionary mapping from torch datatypes (e.g. float32, float64) to an appropriate float. Typically float32 requires more jitter than float64. - cpu_preconditioner : a boolean value which overrides CPU/GPU settings and forces the function to compute the whole preconditioner on the CPU. If set to False, we fall back to the usual CPU/GPU settings (i.e. 'use_cpu' option and the availability of a GPU). """ def __init__(self, penalty: float, kernel, opt: FalkonOptions, weight_vec: torch.Tensor = None): super().__init__() self.params = opt self._use_cuda = decide_cuda(self.params) and not self.params.cpu_preconditioner self._lambda = penalty self.kernel = kernel self.weight_vec = weight_vec self.fC: Optional[torch.Tensor] = None self.dT: Optional[torch.Tensor] = None self.dA: Optional[torch.Tensor] = None def init(self, X: Union[torch.Tensor, SparseTensor]): """Initialize the preconditioner matrix. This method must be called before the preconditioner can be used. Parameters ---------- X : torch.Tensor The (M x D) matrix of Nystroem centers """ dtype = X.dtype dev = X.device if X.is_cuda and not self._use_cuda: raise RuntimeError("use_cuda is set to False, but data is CUDA tensor. 
" "Check your options.") eps = self.params.pc_epsilon(X.dtype) M = X.size(0) with TicToc("Kernel", debug=self.params.debug): if isinstance(X, torch.Tensor): C = create_same_stride((M, M), X, dtype=dtype, device=dev, pin_memory=self._use_cuda) else: # If sparse tensor we need fortran for kernel calculation C = create_fortran((M, M), dtype=dtype, device=dev, pin_memory=self._use_cuda) self.kernel(X, X, out=C, opt=self.params) if not is_f_contig(C): C = C.T with TicToc("Cholesky 1", debug=self.params.debug): # Compute T: lower(fC) = T.T inplace_add_diag_th(C, eps * M) C = potrf_wrapper(C, clean=False, upper=False, use_cuda=self._use_cuda, opt=self.params) # Save the diagonal which will be overwritten when computing A self.dT = C.diag() with TicToc("Copy triangular", debug=self.params.debug): # Copy lower(fC) to upper(fC): upper(fC) = T. copy_triang(C, upper=False) if self.weight_vec is not None: with TicToc("Add weight to lower triangular", debug = self.params.debug): self.weight_vec.sqrt_() vec_mul_triang(C, self.weight_vec.numpy().reshape(-1), side=0, upper=False) if self._use_cuda: with TicToc("LAUUM", debug=self.params.debug): # Product upper(fC) @ upper(fC).T : lower(fC) = T @ T.T C = lauum_wrapper(C, upper=True, use_cuda=self._use_cuda, opt=self.params) else: with TicToc("LAUUM", debug=self.params.debug): # Product lower(fC).T @ lower(fC) : lower(fC) = T @ T.T C = lauum_wrapper(C, upper=False, use_cuda=self._use_cuda, opt=self.params) with TicToc("Cholesky 2", debug=self.params.debug): # lower(fC) = 1/M * T@T.T mul_triang(C, upper=False, preserve_diag=False, multiplier=1 / M) # lower(fC) = 1/M * T@T.T + lambda * I inplace_add_diag_th(C, self._lambda) # Cholesky on lower(fC) : lower(fC) = A.T C = potrf_wrapper(C, clean=False, upper=False, use_cuda=self._use_cuda, opt=self.params) self.dA = C.diag() self.fC = C def to(self, device): if self.fC is not None: self.fC = self.fC.to(device) if self.dT is not None: self.dT = self.dT.to(device) if self.dA is not None: self.dA = self.dA.to(device) return self @check_init("fC", "dT", "dA") def invA(self, v: torch.Tensor) -> torch.Tensor: r"""Solve the system of equations :math:`Ax = v` for unknown vector :math:`x`. Multiple right-hand sides are supported (by simply passing a 2D tensor for `v`) Parameters ---------- v The right-hand side of the triangular system of equations Returns ------- x The solution, computed with the `trsm` function. See Also -------- :func:`~falkon.preconditioner.pc_utils.trsm` : the function used to solve the system of equations """ inplace_set_diag_th(self.fC, self.dA) return trsm(v, self.fC, alpha=1.0, lower=1, transpose=1) @check_init("fC", "dT", "dA") def invAt(self, v: torch.Tensor) -> torch.Tensor: r"""Solve the system of equations :math:`A^\top x = v` for unknown vector :math:`x`. Multiple right-hand sides are supported (by simply passing a 2D tensor for `v`) Parameters ---------- v The right-hand side of the triangular system of equations Returns ------- x The solution, computed with the `trsm` function. See Also -------- :func:`falkon.preconditioner.pc_utils.trsm` : the function used to solve the system of equations """ inplace_set_diag_th(self.fC, self.dA) return trsm(v, self.fC, alpha=1.0, lower=1, transpose=0) @check_init("fC", "dT", "dA") def invT(self, v: torch.Tensor) -> torch.Tensor: r"""Solve the system of equations :math:`Tx = v` for unknown vector :math:`x`. 
Multiple right-hand sides are supported (by simply passing a 2D tensor for `v`) Parameters ---------- v The right-hand side of the triangular system of equations Returns ------- x The solution, computed with the `trsm` function. See Also -------- :func:`falkon.preconditioner.pc_utils.trsm` : the function used to solve the system of equations """ inplace_set_diag_th(self.fC, self.dT) return trsm(v, self.fC, alpha=1.0, lower=0, transpose=0) @check_init("fC", "dT", "dA") def invTt(self, v: torch.Tensor) -> torch.Tensor: r"""Solve the system of equations :math:`T^\\top x = v` for unknown vector :math:`x`. Multiple right-hand sides are supported (by simply passing a 2D tensor for `v`) Parameters ---------- v The right-hand side of the triangular system of equations Returns ------- x The solution, computed with the `trsm` function. See Also -------- :func:`falkon.preconditioner.pc_utils.trsm` : the function used to solve the system of equations """ inplace_set_diag_th(self.fC, self.dT) return trsm(v, self.fC, alpha=1.0, lower=0, transpose=1) @check_init("fC", "dT", "dA") def apply(self, v: torch.Tensor) -> torch.Tensor: r"""Solve two systems of equations :math:`ATx = v` for unknown vector :math:`x`. Multiple right-hand sides are supported (by simply passing a 2D tensor for `v`) Parameters ---------- v The right-hand side of the triangular system of equations Returns ------- x The solution, computed with the `trsm` function. See Also -------- :func:`falkon.preconditioner.pc_utils.trsm` : the function used to solve the system of equations """ return self.invT(self.invA(v)) @check_init("fC", "dT", "dA") def apply_t(self, v: torch.Tensor) -> torch.Tensor: r"""Solve two systems of equations :math:`A^\top T^\top x = v` for unknown vector :math:`x`. Multiple right-hand sides are supported (by simply passing a 2D tensor for `v`) Parameters ---------- v The right-hand side of the triangular system of equations Returns ------- x The solution, computed with the `trsm` function. See Also -------- :func:`falkon.preconditioner.pc_utils.trsm` : the function used to solve the system of equations """ return self.invAt(self.invTt(v)) def __str__(self): return f"FalkonPreconditioner(_lambda={self._lambda}, kernel={self.kernel})"
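
# A rough usage sketch.  GaussianKernel comes from the same falkon package this
# module belongs to; the sigma, penalty and sizes below are placeholder values.
if __name__ == '__main__':
    from falkon.kernels import GaussianKernel

    centers = torch.randn(500, 10)                    # M Nystroem centers
    opt = FalkonOptions(use_cpu=True)
    prec = FalkonPreconditioner(penalty=1e-6, kernel=GaussianKernel(3.0), opt=opt)
    prec.init(centers)
    v = torch.randn(500, 1)
    x = prec.apply(v)                                 # solves A T x = v, as documented above
    print(x.shape)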
nilq/baby-python
python
from IPython import get_ipython # %% #################### # GRAPH GENERATION # #################### # TODO: remove duplicate of nbIndividuals in viz nbIndividuals = 1000 # number of people in the graph | nombre d'individus dans le graphe initHealthy = 0.85 # proportion of healthy people at start | la proportion de personnes saines à l'intant initial initCured = 0.1 # proportion of cured people at start | proportion de personnes guéries à l'instant initial # The other people are 60% presymptomatic and 40% asymptomatic at start | Les autres personnes sont 40% d'asymptomatiques et 60% de présymptomatiques au départ # graph generation for exponential degrees distribution #------------------------------------------------------ deg_avg = 100 # average number of connexions per person | le nombre moyen de connexions par personne av_household_size = 6 # average size of household | la taille moyenne d'un foyer household_proba = 1 # probability of meeting a person of the same household | la probabilité de contact par jour entre membres d'un même foyer extern_contact_proba = 0.3 # probabilty of meeting a person of a different household | la probabilité de contact par jour entre personne de foyers différents # average contacts per day = 0.3*(100-6) + 1*6 = 34.2 # graph generation with organization in households #------------------------------------------------- household_size = (3, 5) # min and max size of an household (uniform distribution) | extremums de la taille d'un foyer household_link = 1 # probability of contact between members of a household | proba de contact entre membres d'un foyer number_of_households = 300 # 2500 is good but a bit slow | number of households in the community | nombre de foyers dans une communauté community_link = 0.3 # probability of contact across households | proba de contact entre foyers av_deg_by_household = 400 # number of link from a household | nombre moyen de liens depuis un foyer # average external degree of an individual : 400/4 (4 is the average size of an household) # average contacts per day = (400/4)*0.3 + 4 = 34 ############## # APP PARAMS # ############## daysNotif = 0 # number of days the app checks back for contact notification | nombre de jours vérifiés par l'appli pour notifier un contact utilApp = 0.8 # percentage of people having the app | la proportion d'utilisateurs de l'application dans la population générale pDetection = 0.9 # prob. that the app detects a contact | proba que l'appli détecte un contact pReport = 0.9 # prob. that a user reports his symptoms | proba qu'un utilisateur alerte de ses symptômes pReadNotif = 0.8 # probablity of taking a notification into account (ask for a test, quarantine) | proba de prendre en compte une notification (demande de test, quarantaine) pSymptomsNotCovid = 0.005 # every day, everyone sends a notification with prob. pSymptomsNotCovid | chaque jour, tout le monde envoie une notif avec proba PSymptomsNotCovid ############ # POLICIES # ############ # people warn the app immediately after having symptoms | on prévient l'application directement après avoir développé les symptômes warningAfterSymptoms = False # upon notification, an individual asks for a test (with some prob.) 
# if true, user waits for test results in quarantine, else he goes in quarantine only upon reception of positive test results # | # à la reception d'une notif, l'utilisateur demande un test (avec une certaine proba) # si vrai, il attend les résultats en quarantaine, sinon il ne se met en quarantaine qu'aux résultats d'un test positif quarantineAfterNotification = True ############### # TEST PARAMS # ############### testWindow = (3, 10) # tests are only effective in a given window (time since infection) | les tests ne sont efficaces que dans une fenêtre de temps après infection daysUntilResult = 2 # attente pour l'obtention des résultats pFalseNegative = 0.15 # prob. of false negative | proba d'avoir un faux négatif daysBetweenTests = 0 ############## # QUARANTINE # ############## pQSymptoms = 0.9 # probability of going into quarantine when one has symptoms | proba de confinement lors de détection des symptômes quarantineFactor = 100 # reduction factor applied to the probabilities when one is in quarantine | réduction des probas de rencontre lors du confinement daysQuarantine = 14 # duration of the quarantine | durée de la quarantaine ################# # PROBABILITIES # ################# # !! Probabilities are given for 1 step of the process, thus overall prob. follows a geometric law for which expected values have been calculated # paramters estimated -> a limit of the model pCloseContact = 0.375 # prob. that a contact is a close contact (those detected by the app) | proba qu'un contact soit rapproché (ceux détectés par l'appli) pContaminationCloseContact = 0.02 # prob. of contamination after close contact with an infected person | proba de contamination après contact rapproché avec qqn d'infecté #according to https://www.who.int/docs/default-source/coronaviruse/who-china-joint-mission-on-covid-19-final-report.pdf -> around 1 to 5% of close contact lead to virus transmission pContaminationCloseContactAsymp = 0.006 # infectiousness of asymptomatic people appears to be very low according to [4] and "Temporal dynamics in viral shedding and transmissibility of COVID-19" [6] pContaminationFar = 0.001 # prob. of contamination upon non close contact (environmental or short contact) | proba de contamination par contact environnemental ou bref pContaminationFarAsymp = 0.0003 # we took R0=2 estimate from [4] and : 34 contacts/day, an average time of infectiousness of 10 days (pre symptomatic + begining of symptoms period) #average number of infected by symptomatic : (0.375*0.02+0.625*0.001)*34*10 = 2.76 #average number of infected by asymptomatic : (0.375*0.006+0.625*0.0003)*34*10 = 0.83 # this gives 0.6*2.76 + 0.4*0.83 = 1.99 persons infected in average by an infected # this is plausible given the estimate of R0 and the fact that asymptomatic contamination appears to be minor # [4] and [6] # and (0.6*0.625*0.001 + 0.4*0.625*0.0003)*34*10 / R0 = 0.0765 -> the proportion of contaminations which are not due to close contact (environmental / short contact) (contaminations by asymptomatic people are neglected) estimated according to environmental contamination estimate in [4] # thus most infections (92%) are susceptible to be noticed by the app # -> the proportion of contaminations by asympt. 
people is : 0.4*0.83/(0.6*2.76 + 0.4*0.0.83) = 0.17 plausible according to the presumed low infectiosity shown in [4], but this is a conservative estimate (not the 0.06 given by this paper) given the high uncertainty around the results pAsympt = 0.4 # probability of being asymptomatic when infected | proba qu'une personne infectée soit asymptomatique # according to [4] and Diamond Princess estimates # parameters for the lognormal law of the incubation period | paramètres pour la loi lognormale de la période d'incubation incubMeanlog = 1.644 # -> ~5.5 days incubSdlog = 0.363 # -> ~2.1 days # according to [4] pAtoG = 0.1 # probability of going from asymptomatic state to cured | proba de passer de asymptomatique à guéri # according to "Clinical characteristics of 24 asymptomatic infections with COVID-19 screened among close contacts in Nanjing, China" [7] pIStoC = 0.07 # probability of going from symptomatic state to cured | proba de passer de avec symptômes à gueri pIStoD = 0.003 # probability of dying when symptomatic | proba de décès d'une personne présentant des symptômes # average time with symptoms : 1/(0.07+0.003) = 13.7 days : plausible according to [4] # death rate when symptoms : 0.003/0.07 = 4.3% : plausible in France according to estimate of 1.6M cases with symptoms and 6 000 deaths the 3 April # https://www.mgfrance.org/publication/communiquepresse/2525-enquete-mg-france-plus-d-un-million-et-demi-de-personnes-prises-en-charge-par-leur-medecin-generaliste-pour-le-covid-19-entre-le-17-mars-et-le-3-avril # # Libs and defs # Librairies import random import numpy as np # -> sliders from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets HEALTHY = 0 ASYMP = 1 PRESYMP = 2 SYMP = 3 CURED = 4 DEAD = 5 class Graph: """ Object holding the representation of the graph and some metrics """ def __init__(self): self.individuals = [] self.adj = [] self.encounters = [[[] for day in range(daysNotif)] for individual in range(nbIndividuals)] self.nbHealthy = 0 # number of healthy people self.nbAS = 0 # number of asymptomatic people self.nbPS = 0 # number of premptomatic people self.nbS = 0 # number of symptomatic people self.nbCured = 0 # number of cured persons self.nbDead = 0 # number of deceased people self.nbQuarantineI = 0 # number of infected people in quarantine self.nbQuarantineNonI = 0 # number of non infected people in quarantine self.nbTest = 0 # number of tests made # cumulative counters : self.nbQuarantineTotal = 0 # number of people in quarantine self.nbInfectedByASPS = 0 # number of people infected by asymp. + presymp. 
people #to compute Rt self.stepNb = 0 self.contaminations = [] # number of people contaminated at a given time self.numInfectedByNewInfected = [] # total number of people who will get infected by people contaminated at a given time class Individual: """ Object holding the representation of an individual """ def __init__(self, state, daysQuarantine, app, sentNotification, daysIncubation, timeSinceInfection, timeLeftForTestResult): self.state = state self.daysQuarantine = daysQuarantine self.app = app self.sentNotification = sentNotification self.daysIncubation = daysIncubation self.timeSinceInfection = timeSinceInfection self.timeSinceLastTest = np.inf # we don't want to test people too often self.timeLeftForTestResult = timeLeftForTestResult self.nbInfected = 0 def in_state(self, state): return self.state == state def is_infected(self): return self.state in [PRESYMP, ASYMP, SYMP] def has_no_covid(self): return self.state in [HEALTHY, CURED] def in_quarantine(self): return self.daysQuarantine > 0 def go_quarantine(self): if self.daysQuarantine <= 0: self.daysQuarantine = daysQuarantine # goes into quarantine if isn't already # # Graph generation def create_individuals(graph): graph.contaminations.append(0) for i in range(nbIndividuals): app = False if random.uniform(0,1) < utilApp: app = True s = PRESYMP time_since_infection = -1 incub = 0 r = random.random() if r < initHealthy: s = HEALTHY graph.nbHealthy += 1 elif r < initHealthy + initCured: s = CURED graph.nbCured += 1 else: graph.contaminations[0] += 1 # we start as if a proportion of the population just got infected time_since_infection = 0 if random.random() < pAsympt: s = ASYMP graph.nbAS += 1 else: s = PRESYMP incub = round(np.random.lognormal(incubMeanlog, incubSdlog)) graph.nbPS += 1 # state, quarantine, app, notif, incubation, timeSinceInfection, timeLeftForTestResult graph.individuals.append(Individual(s, 0, app, False, incub, time_since_infection, -1)) def init_graph_exp(graph): """ Graph initialisation based on exponential ditribution of degrees """ create_individuals(graph) # affecting degrees to vertices degrees = np.around(np.random.exponential(deg_avg, nbIndividuals)) # to get an even number of total degrees S = sum(degrees) if S%2 == 1: degrees[0] += 1 S += 1 graph.adj = [[] for i in range(nbIndividuals)] while S > 0: # creating an edge [p1, p2] = np.random.choice(len(degrees), 2, replace=False, p=degrees/S) if degrees[p1] <= av_household_size or degrees[p2] <= av_household_size: # the last edges created are edges within households graph.adj[p1].append({"node" : p2, "proba" : household_proba}) graph.adj[p2].append({"node" : p1, "proba" : household_proba}) else: graph.adj[p1].append({"node" : p2, "proba" : extern_contact_proba}) graph.adj[p2].append({"node" : p1, "proba" : extern_contact_proba}) degrees[p1] -= 1 degrees[p2] -= 1 S -= 2 def init_graph_household(graph): """ Graph generation based on households organisation """ global nbIndividuals # creation of the households graph.adj = [] for i in range(number_of_households): size = random.randint(household_size[0], household_size[1]) nb = len(graph.adj) for i in range(nb, nb+size): household = [] for j in range(nb, nb+size): if (i != j): household.append({"node": j, "proba": household_link}) graph.adj.append(household) # linkage of the households for i in range(av_deg_by_household*number_of_households): [p1, p2] = np.random.choice(len(graph.adj), 2, replace=False) graph.adj[p1].append({"node": p2, "proba": community_link}) graph.adj[p2].append({"node": p1, "proba": 
community_link}) nbIndividuals = len(graph.adj) create_individuals(graph) graph.encounters = [[[] for day in range(daysNotif)] for individual in range(nbIndividuals)] # # Updating the graph def contamination(graph, i, j, closeContact): """ Individuals i and j have come into contact, leading to a possible contamination | Les individus i et j sont entrés en contact, une contamination est possible """ if graph.individuals[i].state == graph.individuals[j].state: return if graph.individuals[i].in_state(HEALTHY): contamination(graph, j, i, closeContact) return # i is the infected individual if graph.individuals[i].is_infected(): if graph.individuals[j].in_state(HEALTHY): if closeContact: pContamination = pContaminationCloseContact pContaminationAsymp = pContaminationCloseContactAsymp else: pContamination = pContaminationFar pContaminationAsymp = pContaminationFarAsymp if (random.random() < pContamination and (not graph.individuals[i].in_state(ASYMP))) or \ (random.random() < pContaminationAsymp and graph.individuals[i].in_state(ASYMP)): # j becomes infected # for Rt computation graph.contaminations[graph.stepNb] += 1 graph.numInfectedByNewInfected[graph.stepNb - graph.individuals[i].timeSinceInfection] += 1 # parent infection took place timeSinceInfection ago if graph.individuals[i].in_state(ASYMP) or graph.individuals[i].in_state(PRESYMP): graph.nbInfectedByASPS += 1 graph.individuals[j].timeSinceInfection = 0 graph.individuals[i].nbInfected += 1 # i has infected one more person graph.nbHealthy -= 1 if random.random() < pAsympt: graph.individuals[j].state = ASYMP graph.nbAS += 1 else: graph.individuals[j].state = PRESYMP graph.individuals[j].daysIncubation = round(np.random.lognormal(incubMeanlog, incubSdlog)) graph.nbPS += 1 def test_individual(individual, graph): # if there is a test incoming, the person is not tested again if individual.timeLeftForTestResult >= 0 or individual.in_state(DEAD): return # the person was tested not long ago if individual.timeSinceLastTest < daysBetweenTests: return # the person is tested individual.timeSinceLastTest = 0 graph.nbTest += 1 individual.timeLeftForTestResult = daysUntilResult if individual.has_no_covid(): individual.latestTestResult = False # we assume that there are no false positives return if individual.timeSinceInfection < testWindow[0] or individual.timeSinceInfection > testWindow[1]: individual.latestTestResult = False # not in the detection window, the test fails return # otherwise the person is ill # the test result depends whether we have a false negative or not individual.latestTestResult = not (random.random() < pFalseNegative) def send_notification(graph, i): """ Send notification to people who have been in touch with i | Envoi d'une notif aux personnes ayant été en contact avec i """ if graph.individuals[i].sentNotification: return # notifications already sent graph.individuals[i].sentNotification = True for daysEncounter in graph.encounters[i]: # note: graph.encounter[i] is empty if i does not have the app so there is no need to have an additional condition for contact in daysEncounter: if random.random() < pReadNotif: # if the person takes the notification into account # the person is always tested (TODO: change this ?) 
test_individual(graph.individuals[contact], graph) # asks for a test if quarantineAfterNotification: # in this case, the person waits for test results in quarantine graph.individuals[contact].go_quarantine() def make_encounters(graph, i): """ Assess all encounters made by i in one day | Détermine toutes les rencontres faites par i en un jour """ for edge in graph.adj[i]: j = edge['node'] if j < i: continue # only check one way of the edge | on ne regarde qu'un sens de chaque arête # if i and/or j are in quarantine, reduce the probability that they meet | si i et/ou j sont confinés, réduction de leur proba de rencontre factor = 1 if graph.individuals[i].in_quarantine(): factor *= quarantineFactor if graph.individuals[j].in_quarantine(): factor *= quarantineFactor if random.random() < edge['proba'] / factor: if random.random() < pCloseContact: # if this is a close contact # if i and j have the app, we save their encounter | si i et j ont l'appli, on note la rencontre if graph.individuals[i].app and graph.individuals[j].app and random.random() < pDetection: # contact detections are symmetric in our model graph.encounters[i][-1].append(j) graph.encounters[j][-1].append(i) contamination(graph, i, j, True) else: contamination(graph, i, j, False) def step(graph): """ Step from a day to the next day | Passage au jour suivant du graphe """ graph.nbTest = 0 for encounter in graph.encounters: encounter.append([]) # will contain every encounter of the day | contiendra les nouvelles rencontres du jour graph.contaminations.append(0) graph.numInfectedByNewInfected.append(0) ## go through each possible encounter | on constate toutes les rencontres entre individus for i in range(nbIndividuals): make_encounters(graph, i) ## update the states | on met à jour les états des individus for i, individual in enumerate(graph.individuals): if individual.in_state(ASYMP): if random.random() < pAtoG: graph.nbAS -= 1 graph.nbCured += 1 individual.state = CURED elif individual.in_state(PRESYMP): if individual.daysIncubation == 0: # the person develops symptoms graph.nbPS -= 1 graph.nbS += 1 individual.state = SYMP # send the notifications (encounters[i] is empty if i doesn't have the app) | envoi des notifs (encounters[i] vide si i n'a pas l'appli) if random.random() < pReport and warningAfterSymptoms: send_notification(graph, i) if random.random() < pQSymptoms: # go into quarantine if symptoms appear | mise en confinement à la détection des symptômes individual.go_quarantine() test_individual(individual, graph) # all individuals developing symptoms are tested (TODO: add prob. to parameters ?) 
elif individual.in_state(SYMP): action = random.random() if action < pIStoC: graph.nbS -= 1 graph.nbCured += 1 individual.state = CURED elif action > 1 - pIStoD: graph.nbS -= 1 graph.nbDead += 1 individual.state = DEAD # if warningAfterSymptoms is True, each individual has a probability of sending a false notification due to symptoms that are misinterpreted as from COVID-19 # | si warningAfterSymptoms est vrai, chaque individu a une probabilité d'envoyer une notification en raison de symptômes faussement perçus comme relevant du COVID-19 if warningAfterSymptoms and random.random() < pSymptomsNotCovid: send_notification(graph, i) # reception of test results | réception des résultats de test if individual.timeLeftForTestResult == 0: if individual.in_quarantine() and individual.latestTestResult == False: # is in quarantine and gets a negative test individual.daysQuarantine = 0 # end of quarantine if individual.latestTestResult == True: individual.go_quarantine() individual.timeLeftForTestResult = np.inf # people tested positive are not tested again if random.random() < pReport: # not everyone reports a positive test to the app send_notification(graph, i) individual.app = False # unsubscribe from the app in order to not consider new notifications individual.timeLeftForTestResult -= 1 ## results of the day | bilan du jour graph.nbQuarantineNonI = 0 graph.nbQuarantineI = 0 for individual in graph.individuals: if individual.in_state(DEAD): continue individual.daysQuarantine -= 1 individual.daysIncubation -= 1 individual.timeSinceLastTest += 1 # if there are still symptoms we don't end the quarantine if (not individual.in_quarantine()) and individual.in_state(SYMP): individual.daysQuarantine = 1 if individual.in_quarantine(): graph.nbQuarantineTotal += 1/nbIndividuals if not individual.is_infected(): graph.nbQuarantineNonI += 1 else: graph.nbQuarantineI += 1 if individual.timeSinceInfection >= 0: individual.timeSinceInfection += 1 ## deleting oldest recorded day | suppression du plus vieux jour de l'historique for encounter in graph.encounters: encounter.pop(0) graph.stepNb += 1 # # Display # Interactive model below (it takes about 10-15 sec to appear and to run a simulation) # ! 
uncomment for the notebook version : # %matplotlib notebook import matplotlib.pyplot as plt fig, ((ax, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=[15,10]) axRt = ax3.twinx() xs = [] y_D = [] y_MS = [] y_MPS = [] y_MAS = [] y_S = [] y_G = [] y_Q = [] y_InfectByASPS = [] y_QuarantineNonI = [] y_QuarantineI = [] y_QuarantineNonITotal = [] y_Test = [] y_TestTotal = [] y_Rt = [] ax.set_ylim([0, nbIndividuals]) def update_viz(graph): if y_QuarantineNonITotal != []: y_QuarantineNonITotal.append((graph.nbQuarantineNonI + nbIndividuals*y_QuarantineNonITotal[-1])/nbIndividuals) y_TestTotal.append((graph.nbTest + nbIndividuals*y_TestTotal[-1])/nbIndividuals) else: y_QuarantineNonITotal.append(graph.nbQuarantineNonI/nbIndividuals) y_TestTotal.append(graph.nbTest/nbIndividuals) xs.append(len(xs)) y_D.append(graph.nbDead/nbIndividuals*100) y_MS.append(graph.nbS/nbIndividuals*100) y_MPS.append(graph.nbPS/nbIndividuals*100) y_MAS.append(graph.nbAS/nbIndividuals*100) y_S.append(graph.nbHealthy/nbIndividuals*100) y_G.append(graph.nbCured/nbIndividuals*100) y_Q.append(graph.nbQuarantineTotal) y_InfectByASPS.append(graph.nbInfectedByASPS) y_QuarantineNonI.append(graph.nbQuarantineNonI/nbIndividuals*100) y_QuarantineI.append(graph.nbQuarantineI/nbIndividuals*100) y_Test.append(graph.nbTest/nbIndividuals*100) def draw_viz(graph): ax.clear() ax2.clear() ax3.clear() ax4.clear() axRt.clear() ax.set_xlabel("Days") ax2.set_xlabel("Days") ax3.set_xlabel("Days") ax4.set_xlabel("Days") # computing Rt | calcul de Rt for i in range(graph.stepNb): if graph.contaminations[i] != 0 and graph.contaminations[i] > 5: # we just take into account days where there were more than 5 contaminations to reduce random fluctuations y_Rt.append(graph.numInfectedByNewInfected[i]/graph.contaminations[i]) else: y_Rt.append(0) for i in range(1, graph.stepNb-1): # smoothing Rt curve if y_Rt[i] == 0: y_Rt[i] = (y_Rt[i-1] + y_Rt[i+1])/2 labels = [ "Symptomatic", "Deceased", "Asymptomatic","Presymptomatic", "Cured", "Healthy"] ax.stackplot(xs, y_MS, y_D, y_MAS,y_MPS, y_G, y_S, labels=labels, edgecolor="black", colors=["red", "darkred", "orange","yellow", "dodgerblue", "mediumseagreen"]) ax.set_ylabel("Proportion of the population") labels2 = ["In quarantine and non infected (percentage)", "In quarantine and infected (percentage)"] ax2.stackplot(xs, y_QuarantineNonI, y_QuarantineI, labels=labels2) ax2.set_ylabel("Proportion of the population") #line, = ax3.plot(xs, y_InfectByASPS) #line.set_label("Total infections by asympt.") ax3.set_ylabel("Quarantine days / Tests") line, = ax3.plot(xs, y_Q) line.set_label("Cumulative quarantine days per person") line, = ax3.plot(xs, y_QuarantineNonITotal) line.set_label("Cumulative quarantine days of healthy people per person") line, = ax3.plot(xs, y_TestTotal) line.set_label("Cumulative number of tests per person") axRt.set_ylabel("Rt", color = 'red') line, = axRt.plot(xs, y_Rt, color = 'red') line.set_label("Rt (average number of infections caused by one infected)") line, = ax4.plot(xs, y_Test) line.set_label("Number of tests (in percentage of population)") ax4.set_ylabel("Tests") ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=3) ax2.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=1) #ax3.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=1) ax3.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=2) #axRt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=1) #to avoid legend on top 
of the other ax4.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), shadow=True, ncol=2) plt.tight_layout() def update_prob(app_use_rate, report_to_app, read_notif, warning_after_symptoms, quarantine_after_notification): global nbIndividuals global utilApp global pReport global pReadNotif global quarantineAfterNotification global warningAfterSymptoms global xs, y_D, y_MS, y_MPS, y_MAS, y_S, y_G, y_Q, y_InfectByASPS, y_Rt global y_QuarantineNonI, y_QuarantineNonITotal, y_QuarantineI, y_Test, y_TestTotal # TODO: clarify/simplify ? utilApp = app_use_rate pReport = report_to_app pReadNotif = read_notif warningAfterSymptoms = warning_after_symptoms quarantineAfterNotification = quarantine_after_notification nbSteps = 60 nbIndividuals = 4000 # you may change the number of individuals for the exponential distribution graph here graph = Graph() init_graph_household(graph) # default graph generation using households structure, as shown in the Results section # uncomment this to get a graph with degrees following an exponential distribution #init_graph_exp(graph) xs.clear() y_D.clear() y_MS.clear() y_MPS.clear() y_MAS.clear() y_S.clear() y_G.clear() y_Q.clear() y_InfectByASPS.clear() y_QuarantineNonI.clear() y_QuarantineNonITotal.clear() y_QuarantineI.clear() y_Test.clear() y_TestTotal.clear() y_Rt.clear() maxSymp = 0 for step_ind in range(nbSteps): # update matplotlib update_viz(graph) # update simulation step(graph) print(f'Progress : {(100*step_ind/nbSteps):.1f} %') maxSymp = max(maxSymp, graph.nbS) # print("Total individuals:", nbIndividuals) # print("Number of deceased:", graph.nbDead) # print("Max. nb of symptomatic people:", maxSymp) # print("Test per people:", y_TestTotal[-1]) # print("Final healthy:", y_S[-1]) print(maxSymp/nbIndividuals,",", y_S[-1],",", y_Q[-1], ",", y_TestTotal[-1]) draw_viz(graph) plt.show() update_prob(utilApp, pReport, pReadNotif, warningAfterSymptoms, quarantineAfterNotification) # interact_manual(update_prob, \ # app_use_rate = widgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=utilApp), \ # report_to_app = widgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=pReport), \ # read_notif = widgets.FloatSlider(min=0.0, max=1.0, step=0.01, value=pReadNotif), \ # warning_after_symptoms = widgets.Checkbox(value=warningAfterSymptoms), \ # quarantine_after_notification = widgets.Checkbox(value=quarantineAfterNotification))
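# --- Hedged illustration (added; not part of the original notebook) ---
# A minimal, self-contained sketch of the Rt computation/smoothing used in draw_viz():
# Rt for a day is (infections caused by that day's newly infected) / (that day's contaminations),
# days with 5 or fewer contaminations are first set to 0 to damp random fluctuations,
# then filled in with the average of their neighbours. The toy numbers below are made up.
def smoothed_rt(contaminations, num_infected_by_new_infected):
    rt = []
    for c, caused in zip(contaminations, num_infected_by_new_infected):
        rt.append(caused / c if c > 5 else 0)
    for i in range(1, len(rt) - 1):
        if rt[i] == 0:
            rt[i] = (rt[i - 1] + rt[i + 1]) / 2
    return rt

# Example: a day with only 3 contaminations (index 1) gets interpolated from its neighbours.
print(smoothed_rt([10, 3, 12], [25, 9, 18]))  # -> [2.5, 2.0, 1.5]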
nilq/baby-python
python
# -*- coding: utf-8 -*-
import time
import tempfile, random, string
from common.common import BaseService
import HTMLParser
import imgkit
from common import logger


class BaseBot(BaseService):
    type = None

    '''
    Send a message to the given group and user.
    Since we currently rarely message individual users, there is no need to define a send(user, message) interface.
    group:   QQ group name, QQ discussion group name, or WeChat group name
    user:    nickname in the QQ group or WeChat group
    message: the message to send
    '''
    def send(self, group, user, message, html):
        raise NotImplementedError

    def send_image(self, group, user, img_path):
        raise NotImplementedError

    def register(self, groups):
        raise NotImplementedError

    # To avoid sending too frequently, wait a random interval of up to 3 seconds
    def random_wait(self):
        time.sleep(random.random() * 3)

    # Alert emails have to be converted to images before being sent to the WeChat bot.
    # Font problems made the resulting out.jpg unreadable; after repeated experiments the fix is:
    # the HTML page must be modified to include <meta charset="UTF-8"> inside it.
    def __insert_meta(self, html):
        html_parser = HTMLParser.HTMLParser()
        html = html_parser.unescape(html)  # first convert &quot; back to "
        html_pos = html.find("<head>")
        if html_pos == -1:
            logger.warn("Could not find the <head> tag in the email HTML; failed to insert meta charset")
            return html
        meta = "<meta charset=\"UTF-8\">"
        html_pos = html_pos + 6
        html = html[:html_pos] + meta + html[html_pos:]
        return html

    # Convert HTML to an image stored in the system temp directory
    def html2img(self, html, dir=None):
        html = self.__insert_meta(html)
        if dir is None:
            dir = tempfile.gettempdir()
        random_file_name = ''.join(random.sample(string.ascii_letters + string.digits, 8))
        temp_file = dir + "/" + random_file_name + ".jpg"
        try:
            # the {"xvfb": "", "encoding": "UTF-8"} options were found to be necessary when testing on Ubuntu
            imgkit.from_string(html, temp_file, {"xvfb": "", "encoding": "UTF-8"})
        except Exception as e:
            logger.exception(e, "Failed to convert HTML to an image: %s,\n%s", str(e), html)
            return None
        return temp_file
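# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# How a concrete bot might subclass BaseBot and combine html2img() with send_image().
# "ConsoleBot", the helper function and the group/user names are hypothetical; the sketch
# assumes the common.* and imgkit dependencies imported above are available.
class ConsoleBot(BaseBot):
    type = "console"

    def send(self, group, user, message, html):
        print("[%s] %s -> %s: %s" % (self.type, group, user, message))

    def send_image(self, group, user, img_path):
        print("[%s] %s -> %s: image at %s" % (self.type, group, user, img_path))

    def register(self, groups):
        self.groups = groups


def send_alert_as_image(bot, group, user, html):
    # Render the HTML alert to a temporary image and send it; fall back to text on failure.
    img_path = bot.html2img(html)
    if img_path is None:
        bot.send(group, user, "alert rendering failed", html)
        return
    bot.random_wait()
    bot.send_image(group, user, img_path)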
nilq/baby-python
python
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import datetime from six import iteritems import frappe from frappe import _ from frappe.utils import flt, formatdate from erpnext.controllers.trends import get_period_date_ranges, get_period_month_ranges def execute(filters=None): if not filters: filters = {} columns = get_columns(filters) if filters.get("budget_against_filter"): dimensions = filters.get("budget_against_filter") else: dimensions = get_cost_centers(filters) period_month_ranges = get_period_month_ranges(filters["period"], filters["from_fiscal_year"]) cam_map = get_dimension_account_month_map(filters) #frappe.msgprint(dimensions) data = [] for dimension in dimensions: dimension_items = cam_map.get(dimension) #frappe.msgprint(json.loads(dimension_items)) if dimension_items: #frappe.msgprint("Got here") data = get_final_data(dimension, dimension_items, filters, period_month_ranges, data, 0) else: DCC_allocation = frappe.db.sql('''SELECT parent, sum(percentage_allocation) as percentage_allocation FROM `tabDistributed Cost Center` WHERE cost_center IN %(dimension)s AND parent NOT IN %(dimension)s GROUP BY parent''',{'dimension':[dimension]}) if DCC_allocation: filters['budget_against_filter'] = [DCC_allocation[0][0]] cam_map = get_dimension_account_month_map(filters) dimension_items = cam_map.get(DCC_allocation[0][0]) if dimension_items: data = get_final_data(dimension, dimension_items, filters, period_month_ranges, data, DCC_allocation[0][1]) chart = get_chart_data(filters, columns, data) return columns, data, None, chart def get_final_data(dimension, dimension_items, filters, period_month_ranges, data, DCC_allocation): for account, monthwise_data in iteritems(dimension_items): row = [dimension, account] totals = [0, 0, 0, 0] for year in get_fiscal_years(filters): last_total = 0 for relevant_months in period_month_ranges: period_data = [0, 0, 0, 0] for month in relevant_months: if monthwise_data.get(year[0]): month_data = monthwise_data.get(year[0]).get(month, {}) for i, fieldname in enumerate(["target", "actual","commitments", "balance"]): value = flt(month_data.get(fieldname)) period_data[i] += value totals[i] += value period_data[0] += last_total if DCC_allocation: period_data[0] = period_data[0]*(DCC_allocation/100) period_data[1] = period_data[1]*(DCC_allocation/100) if(filters.get("show_cumulative")): last_total = period_data[0] - period_data[1] #COMMITMENT = COMMITED ORDERS - ACTUAL INVOICE AMOUNT period_data[2] = period_data[2] - period_data[1] period_data[3] = period_data[0] - period_data[1] - period_data[2] row += period_data #period_data[2] = flt(1000) #frappe.msgprint(totals[2]) totals[2] = totals[2] - totals[1] totals[3] = totals[0] - totals[1] - totals[2] if filters["period"] != "Yearly" : row += totals data.append(row) ##Added Today if(filters.get("show_votebook_trends")) and data: pass return data def get_columns(filters): columns = [ { 'label': _(filters.get("budget_against")), 'fieldtype': 'Link', 'fieldname': 'budget_against', 'options': filters.get('budget_against'), 'width': 150 }, { 'label': _('Account'), 'fieldname': 'Account', 'fieldtype': 'Link', 'options': 'Account', 'width': 150 } ] group_months = False if filters["period"] == "Monthly" else True fiscal_year = get_fiscal_years(filters) for year in fiscal_year: for from_date, to_date in get_period_date_ranges(filters["period"], year[0]): if filters["period"] == "Yearly": labels = [ 
_("Budget") + " " + str(year[0]), _("Actual") + " " + str(year[0]), _("Commitments") + " " + str(year[0]), _("Balance") + " " + str(year[0]) ] for label in labels: columns.append({ 'label': label, 'fieldtype': 'Float', 'fieldname': frappe.scrub(label), 'width': 150 }) else: for label in [ _("Budget") + " (%s)" + " " + str(year[0]), _("Actual") + " (%s)" + " " + str(year[0]), _("Commitments") + " (%s)" + " " + str(year[0]), _("Balance") + " (%s)" + " " + str(year[0]) ]: if group_months: label = label % ( formatdate(from_date, format_string="MMM") + "-" + formatdate(to_date, format_string="MMM") ) else: label = label % formatdate(from_date, format_string="MMM") columns.append({ 'label': label, 'fieldtype': 'Float', 'fieldname': frappe.scrub(label), 'width': 150 }) if filters["period"] != "Yearly": for label in [_("Total Budget"), _("Total Actual"), _("Total Commitments"), _("Total Balance")]: columns.append({ 'label': label, 'fieldtype': 'Float', 'fieldname': frappe.scrub(label), 'width': 150 }) return columns else: return columns def get_cost_centers(filters): order_by = "" if filters.get("budget_against") == "Cost Center": order_by = "order by lft" if filters.get("budget_against") in ["Cost Center", "Project"]: return frappe.db.sql_list( """ select name from `tab{tab}` where company = %s {order_by} """.format(tab=filters.get("budget_against"), order_by=order_by), filters.get("company")) else: return frappe.db.sql_list( """ select name from `tab{tab}` """.format(tab=filters.get("budget_against"))) # nosec # Get dimension & target details def get_dimension_target_details(filters): budget_against = frappe.scrub(filters.get("budget_against")) cond = "" if filters.get("budget_against_filter"): cond += """ and b.{budget_against} in (%s)""".format( budget_against=budget_against) % ", ".join(["%s"] * len(filters.get("budget_against_filter"))) return frappe.db.sql( """ select b.{budget_against} as budget_against, b.monthly_distribution, ba.account, ba.budget_amount, b.fiscal_year from `tabBudget` b, `tabBudget Account` ba where b.name = ba.parent and b.docstatus = 1 and b.fiscal_year between %s and %s and b.budget_against = %s and b.company = %s {cond} order by b.fiscal_year """.format( budget_against=budget_against, cond=cond, ), tuple( [ filters.from_fiscal_year, filters.to_fiscal_year, filters.budget_against, filters.company, ] + (filters.get("budget_against_filter") or []) ), as_dict=True) # Get target distribution details of accounts of cost center def get_target_distribution_details(filters): target_details = {} for d in frappe.db.sql( """ select md.name, mdp.month, mdp.percentage_allocation from `tabMonthly Distribution Percentage` mdp, `tabMonthly Distribution` md where mdp.parent = md.name and md.fiscal_year between %s and %s order by md.fiscal_year """, (filters.from_fiscal_year, filters.to_fiscal_year), as_dict=1): target_details.setdefault(d.name, {}).setdefault( d.month, flt(d.percentage_allocation) ) return target_details # Get actual details from gl entry def get_actual_details(name, filters): budget_against = frappe.scrub(filters.get("budget_against")) cond = "" if filters.get("budget_against") == "Cost Center": cc_lft, cc_rgt = frappe.db.get_value("Cost Center", name, ["lft", "rgt"]) cond = """ and lft >= "{lft}" and rgt <= "{rgt}" """.format(lft=cc_lft, rgt=cc_rgt) ac_details = frappe.db.sql( """ select gl.account, gl.debit, gl.credit, gl.fiscal_year, MONTHNAME(gl.posting_date) as month_name, b.{budget_against} as budget_against from `tabGL Entry` gl, `tabBudget Account` ba, 
`tabBudget` b where b.name = ba.parent and b.docstatus = 1 and ba.account=gl.account and b.{budget_against} = gl.{budget_against} and gl.fiscal_year between %s and %s and b.{budget_against} = %s and exists( select name from `tab{tab}` where name = gl.{budget_against} {cond} ) group by gl.name order by gl.fiscal_year """.format(tab=filters.budget_against, budget_against=budget_against, cond=cond), (filters.from_fiscal_year, filters.to_fiscal_year, name), as_dict=1) cc_actual_details = {} for d in ac_details: cc_actual_details.setdefault(d.account, []).append(d) return cc_actual_details def get_dimension_account_month_map(filters): dimension_target_details = get_dimension_target_details(filters) tdd = get_target_distribution_details(filters) cam_map = {} for ccd in dimension_target_details: actual_details = get_actual_details(ccd.budget_against, filters) for month_id in range(1, 13): month = datetime.date(2013, month_id, 1).strftime("%B") cam_map.setdefault(ccd.budget_against, {}).setdefault( ccd.account, {} ).setdefault(ccd.fiscal_year, {}).setdefault( month, frappe._dict({"target": 0.0, "commitments": 0.0, "actual":0.0}) ) tav_dict = cam_map[ccd.budget_against][ccd.account][ccd.fiscal_year][month] month_percentage = ( tdd.get(ccd.monthly_distribution, {}).get(month, 0) if ccd.monthly_distribution else 100.0 / 12 ) tav_dict.target = flt(ccd.budget_amount) * month_percentage / 100 tav_dict.commitments = flt(total_periodic_commitments(ccd.budget_against,ccd.account,ccd.fiscal_year,month_id)[0][0]) tav_dict.actual = flt(total_invoices_for_pos(ccd.budget_against,ccd.account,ccd.fiscal_year,month_id)[0][0]) #for ad in actual_details.get(ccd.account, []): # if ad.month_name == month and ad.fiscal_year == ccd.fiscal_year: # tav_dict.actual += flt(ad.debit) - flt(ad.credit) return cam_map def total_periodic_commitments(budget_against, account, fiscal_year, month): return frappe.db.sql("""SELECT sum(coalesce(amount,0)) AS total_commitments FROM `tabPurchase Order Item` WHERE (department = '{0}' OR cost_center = '{1}' OR project = '{2}') AND expense_account ='{3}' AND parent IN (SELECT po.name FROM `tabPurchase Order` po, `tabFiscal Year` fy WHERE month(po.transaction_date) = {4} AND fy.year ='{5}' AND po.transaction_date BETWEEN fy.year_start_date AND fy.year_end_date) """.format(budget_against, budget_against, budget_against, account, month, fiscal_year)) def total_invoices_for_pos(budget_against, account, fiscal_year, month): #WHERE pii.parent IN (SELECT reference_name FROM `tabPayment Request`)\ return frappe.db.sql("""SELECT sum(coalesce(pii.amount,0)) AS total_amount FROM `tabPurchase Invoice Item` pii WHERE pii.parent IN (SELECT invoice_number FROM `tabPayment Request Invoice` where docstatus = 1)\ AND pii.po_detail IN (SELECT poi.name FROM `tabPurchase Order Item` poi WHERE poi.docstatus = 1 AND (poi.department = '{0}' OR poi.cost_center = '{1}' OR poi.project = '{2}') AND poi.expense_account ='{3}' AND poi.parent IN (SELECT po.name FROM `tabPurchase Order` po, `tabFiscal Year` fy WHERE month(po.transaction_date) = {4} AND po.docstatus = 1 AND fy.year ='{5}' AND po.transaction_date BETWEEN fy.year_start_date AND fy.year_end_date)) """.format(budget_against, budget_against, budget_against, account, month, fiscal_year)) def get_fiscal_years(filters): fiscal_year = frappe.db.sql( """ select name from `tabFiscal Year` where name between %(from_fiscal_year)s and %(to_fiscal_year)s """, { "from_fiscal_year": filters["from_fiscal_year"], "to_fiscal_year": filters["to_fiscal_year"] }) return 
fiscal_year def get_chart_data(filters, columns, data): if not data: return None labels = [] fiscal_year = get_fiscal_years(filters) group_months = False if filters["period"] == "Monthly" else True for year in fiscal_year: for from_date, to_date in get_period_date_ranges(filters["period"], year[0]): if filters['period'] == 'Yearly': labels.append(year[0]) else: if group_months: label = formatdate(from_date, format_string="MMM") + "-" \ + formatdate(to_date, format_string="MMM") labels.append(label) else: label = formatdate(from_date, format_string="MMM") labels.append(label) no_of_columns = len(labels) budget_values, actual_values, commit_values = [0] * no_of_columns, [0] * no_of_columns, [0] * no_of_columns for d in data: values = d[3:] index = 0 for i in range(no_of_columns): budget_values[i] += values[index] actual_values[i] += values[index+1] commit_values[i] += values[index+2] index += 4 return { 'data': { 'labels': labels, 'datasets': [ {'name': 'Budget', 'chartType': 'bar', 'values': budget_values}, {'name': 'Actual Expense', 'chartType': 'bar', 'values': actual_values}, {'name': 'Committed Expense', 'chartType': 'bar', 'values': commit_values} ] } }
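# --- Hedged illustration (added; not part of the original report) ---
# A self-contained sketch of the per-period arithmetic applied in get_final_data():
# the raw "commitments" figure is committed purchase-order value, from which the invoiced
# (actual) amount is subtracted, and the balance is budget minus actual minus open commitments.
# The numbers are made up and the optional distributed-cost-centre percentage is applied as in the report.
def period_figures(target, actual, committed_orders, dcc_allocation=0):
    if dcc_allocation:
        target = target * (dcc_allocation / 100.0)
        actual = actual * (dcc_allocation / 100.0)
    commitments = committed_orders - actual
    balance = target - actual - commitments
    return [target, actual, commitments, balance]

print(period_figures(target=1000.0, actual=300.0, committed_orders=500.0))
# -> [1000.0, 300.0, 200.0, 500.0]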
nilq/baby-python
python
import sys
import os
import re
import networkx as nx
import random
import numpy as np
from alias_table_sampling import AliasTable as at

class BatchStrategy(object):
    # G is a DiGraph with edge weights
    def __init__(self, G, num_new, mapp, rmapp, num_modify, params=None):
        self.edges = []
        probs_in = []
        probs_out = []
        n = G.number_of_nodes()
        # edges touching the most recently modified nodes (taken from the end of rmapp)
        for i in range(num_modify):
            idx = len(rmapp) - i - 1
            u = rmapp[idx]
            for v in G[u]:
                probs_in.append(G[u][v]['weight'])
                probs_out.append(G[v][u]['weight'])
                if v >= len(mapp):
                    self.edges.append((idx, v))
                else:
                    self.edges.append((idx, mapp[v]))
        # edges touching the newly added nodes (the last num_new node ids)
        for u in range(n - num_new, n):
            for v in G[u]:
                probs_in.append(G[u][v]['weight'])
                probs_out.append(G[v][u]['weight'])
                if v >= len(mapp):
                    self.edges.append((u, v))
                else:
                    self.edges.append((u, mapp[v]))
        # alias tables for weight-proportional sampling of in- and out-direction edges
        self.sampling_handler_in = at(probs_in)
        self.sampling_handler_out = at(probs_out)

    def get_batch(self, batch_size):
        batch_labels_in = []
        batch_labels_out = []
        batch_x_in = []
        batch_x_out = []
        for _ in range(batch_size):
            idx = self.sampling_handler_in.sample()
            batch_x_in.append(self.edges[idx][0])
            batch_labels_in.append([self.edges[idx][1]])
            idx = self.sampling_handler_out.sample()
            batch_x_out.append(self.edges[idx][1])
            batch_labels_out.append([self.edges[idx][0]])
        return batch_x_in, batch_x_out, batch_labels_in, batch_labels_out
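# --- Hedged illustration (added; not part of the original module, Python 3) ---
# The AliasTable above draws edge indices proportionally to edge weight in O(1) per draw.
# This is a minimal pure-Python stand-in using random.choices so the batching idea can be
# tried without the alias_table_sampling dependency; the toy edges and weights are made up.
import random

toy_edges = [(0, 1), (0, 2), (1, 2)]
toy_weights = [0.5, 0.3, 0.2]

def toy_batch(batch_size):
    batch_x, batch_labels = [], []
    for idx in random.choices(range(len(toy_edges)), weights=toy_weights, k=batch_size):
        u, v = toy_edges[idx]
        batch_x.append(u)
        batch_labels.append([v])
    return batch_x, batch_labels

print(toy_batch(4))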
nilq/baby-python
python
from django import forms from .models import squirrel_data class SquirreldataForm(forms.ModelForm): ''' Class to handle ModelForms that are used in the Add Sighting form ''' class Meta: model = squirrel_data fields = '__all__'
nilq/baby-python
python
import logging import pandas as pd from flask import request from mlpiper.components.connectable_component import ConnectableComponent from datarobot_drum.drum.common import LOGGER_NAME_PREFIX from datarobot_drum.drum.exceptions import DrumCommonException from datarobot_drum.profiler.stats_collector import StatsCollector, StatsOperation from datarobot_drum.drum.memory_monitor import MemoryMonitor from datarobot_drum.drum.common import RunLanguage from datarobot_drum.drum.server import ( HTTP_200_OK, HTTP_422_UNPROCESSABLE_ENTITY, HTTP_500_INTERNAL_SERVER_ERROR, get_flask_app, base_api_blueprint, ) logger = logging.getLogger(LOGGER_NAME_PREFIX + "." + __name__) class PredictionServer(ConnectableComponent): def __init__(self, engine): super(PredictionServer, self).__init__(engine) self._show_perf = False self._stats_collector = None self._memory_monitor = None self._run_language = None self._predictor = None def configure(self, params): super(PredictionServer, self).configure(params) self._threaded = self._params.get("threaded", False) self._show_perf = self._params.get("show_perf") self._stats_collector = StatsCollector(disable_instance=not self._show_perf) self._stats_collector.register_report( "run_predictor_total", "finish", StatsOperation.SUB, "start" ) self._memory_monitor = MemoryMonitor() self._run_language = RunLanguage(params.get("run_language")) if self._run_language == RunLanguage.PYTHON: from datarobot_drum.drum.language_predictors.python_predictor.python_predictor import ( PythonPredictor, ) self._predictor = PythonPredictor() elif self._run_language == RunLanguage.JAVA: from datarobot_drum.drum.language_predictors.java_predictor.java_predictor import ( JavaPredictor, ) self._predictor = JavaPredictor() elif self._run_language == RunLanguage.R: # this import is here, because RPredictor imports rpy library, # which is not installed for Java and Python cases. from datarobot_drum.drum.language_predictors.r_predictor.r_predictor import RPredictor self._predictor = RPredictor() else: raise DrumCommonException( "Prediction server doesn't support language: {} ".format(self._run_language) ) self._predictor.configure(params) def _materialize(self, parent_data_objs, user_data): model_api = base_api_blueprint() @model_api.route("/health/", methods=["GET"]) def health(): return {"message": "OK"}, HTTP_200_OK @model_api.route("/predict/", methods=["POST"]) def predict(): response_status = HTTP_200_OK file_key = "X" logger.debug("Entering predict() endpoint") REGRESSION_PRED_COLUMN = "Predictions" filename = request.files[file_key] if file_key in request.files else None logger.debug("Filename provided under X key: {}".format(filename)) if not filename: wrong_key_error_message = "Samples should be provided as a csv file under `{}` key.".format( file_key ) logger.error(wrong_key_error_message) response_status = HTTP_422_UNPROCESSABLE_ENTITY return {"message": "ERROR: " + wrong_key_error_message}, response_status in_df = pd.read_csv(filename) # TODO labels have to be provided as command line arguments or within configure endpoint self._stats_collector.enable() self._stats_collector.mark("start") out_df = self._predictor.predict(in_df) num_columns = len(out_df.columns) # float32 is not JSON serializable, so cast to float, which is float64 out_df = out_df.astype("float") if num_columns == 1: # df.to_json() is much faster. # But as it returns string, we have to assemble final json using strings. 
df_json = out_df[REGRESSION_PRED_COLUMN].to_json(orient="records") response_json = '{{"predictions":{df_json}}}'.format(df_json=df_json) elif num_columns == 2: # df.to_json() is much faster. # But as it returns string, we have to assemble final json using strings. df_json_str = out_df.to_json(orient="records") response_json = '{{"predictions":{df_json}}}'.format(df_json=df_json_str) else: ret_str = ( "Predictions dataframe has {} columns; " "Expected: 1 - for regression, 2 - for binary classification.".format( num_columns ) ) response_json = {"message": "ERROR: " + ret_str} response_status = HTTP_422_UNPROCESSABLE_ENTITY self._stats_collector.mark("finish") self._stats_collector.disable() return response_json, response_status @model_api.route("/stats/", methods=["GET"]) def stats(): mem_info = self._memory_monitor.collect_memory_info() ret_dict = {"mem_info": mem_info._asdict()} self._stats_collector.round() ret_dict["time_info"] = {} for name in self._stats_collector.get_report_names(): d = self._stats_collector.dict_report(name) ret_dict["time_info"][name] = d self._stats_collector.stats_reset() return ret_dict, HTTP_200_OK @model_api.errorhandler(Exception) def handle_exception(e): logger.exception(e) return {"message": "ERROR: {}".format(e)}, HTTP_500_INTERNAL_SERVER_ERROR app = get_flask_app(model_api) logging.getLogger("werkzeug").setLevel(logger.getEffectiveLevel()) host = self._params.get("host", None) port = self._params.get("port", None) try: app.run(host, port, threaded=self._threaded) except OSError as e: raise DrumCommonException("{}: host: {}; port: {}".format(e, host, port)) if self._stats_collector: self._stats_collector.print_reports() return []
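# --- Hedged illustration (added; not part of the original component) ---
# A self-contained sketch of how the /predict/ endpoint assembles its JSON body from the
# predictor output: a single "Predictions" column for regression, two columns for binary
# classification, anything else is treated as an error. The example frame is made up.
import json
import pandas as pd

def build_response(out_df):
    out_df = out_df.astype("float")  # float32 is not JSON serializable
    num_columns = len(out_df.columns)
    if num_columns == 1:
        df_json = out_df["Predictions"].to_json(orient="records")
        return '{{"predictions":{df_json}}}'.format(df_json=df_json), 200
    elif num_columns == 2:
        df_json = out_df.to_json(orient="records")
        return '{{"predictions":{df_json}}}'.format(df_json=df_json), 200
    return json.dumps({"message": "ERROR: unexpected number of columns"}), 422

print(build_response(pd.DataFrame({"Predictions": [1.5, 2.25]})))
# -> ('{"predictions":[1.5,2.25]}', 200)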
nilq/baby-python
python
"""Adds config flow for NorwegianWeather.""" import logging from homeassistant import config_entries from homeassistant.core import callback from homeassistant.helpers.aiohttp_client import async_create_clientsession from homeassistant.const import CONF_MONITORED_CONDITIONS from homeassistant.helpers import config_validation as cv import voluptuous as vol from .api import NorwegianWeatherApiClient from .const import ( CONF_LAT, CONF_LONG, CONF_PLACE, DOMAIN, ENTITIES, PLATFORMS, ) _LOGGER: logging.Logger = logging.getLogger(__package__) class NorwegianWeatherFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): """Config flow for NorwegianWeather.""" VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL def __init__(self): """Initialize.""" self._errors = {} async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" self._errors = {} # Uncomment the next 2 lines if only a single instance of the integration is allowed: # if self._async_current_entries(): # return self.async_abort(reason="single_instance_allowed") if user_input is not None: valid = await self._test_credentials( user_input[CONF_PLACE], user_input[CONF_LAT], user_input[CONF_LONG], ) if valid: entry = self.async_create_entry( title=user_input[CONF_PLACE], data=user_input ) _LOGGER.debug(f"ConfigEntry: {entry}") # entry.con # entry["options"].update(user_input[CONF_MONITORED_CONDITIONS]) # entry = self.async_create_entry( # title=entry.data.get(CONF_PLACE), data=entry["data"] # ) return entry else: self._errors["base"] = "auth" return await self._show_config_form(user_input) return await self._show_config_form(user_input) async def _show_config_form(self, user_input): # pylint: disable=unused-argument """Show the configuration form to edit location data.""" entity_multi_select = {x: x for x in list(ENTITIES)} return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required( CONF_PLACE, default=self.hass.config.location_name ): str, vol.Required( CONF_LAT, default=self.hass.config.latitude ): vol.Coerce(float), vol.Required( CONF_LONG, default=self.hass.config.longitude ): vol.Coerce(float), vol.Optional( CONF_MONITORED_CONDITIONS, default=list(ENTITIES), ): cv.multi_select(entity_multi_select), } ), errors=self._errors, ) async def _test_credentials(self, place, latitude, longitude): """Return true if credentials is valid.""" try: _LOGGER.debug("Checking credentials.") session = async_create_clientsession(self.hass) client = NorwegianWeatherApiClient(place, latitude, longitude, session) await client.async_get_data() return True except Exception as e: # pylint: disable=broad-except _LOGGER.error(f"Failed during testing of credentials: {e}") # pass return False @staticmethod @callback def async_get_options_flow(config_entry): return NorwegianWeatherOptionsFlowHandler(config_entry) class NorwegianWeatherOptionsFlowHandler(config_entries.OptionsFlow): """NorwegianWeather config flow options handler.""" def __init__(self, config_entry): """Initialize HACS options flow.""" self.config_entry = config_entry self.options = dict(config_entry.options) async def async_step_init(self, user_input=None): """Manage the options.""" errors = {} if user_input is not None: self.options.update(user_input) return await self._update_options() entity_multi_select = {x: x for x in list(ENTITIES)} return self.async_show_form( step_id="init", data_schema=vol.Schema( { vol.Optional( CONF_MONITORED_CONDITIONS, default=self.config_entry.options.get( CONF_MONITORED_CONDITIONS, 
list(ENTITIES) ), ): cv.multi_select(entity_multi_select), } ), errors=errors, ) async def _update_options(self): """Update config entry options.""" return self.async_create_entry( title=self.config_entry.data.get(CONF_PLACE), data=self.options )
nilq/baby-python
python
count_weekday_years = survey_data.groupby([survey_data["eventDate"].dt.year, survey_data["eventDate"].dt.dayofweek]).size().unstack()
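# --- Hedged illustration (added; the survey_data frame itself is not shown in the source) ---
# A self-contained example of the same year x weekday pivot on a tiny made-up frame:
# rows are counted per (year, day-of-week) and unstacked so weekdays become columns.
import pandas as pd

survey_data = pd.DataFrame(
    {"eventDate": pd.to_datetime(["2018-10-06", "2018-10-07", "2019-10-06", "2019-10-07", "2019-10-08"])}
)
count_weekday_years = (
    survey_data.groupby(
        [survey_data["eventDate"].dt.year, survey_data["eventDate"].dt.dayofweek]
    )
    .size()
    .unstack()
)
print(count_weekday_years)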
nilq/baby-python
python
import os; import bvpl_octree_batch import multiprocessing import Queue import time import random import optparse import sys from numpy import log, ceil from xml.etree.ElementTree import ElementTree class dbvalue: def __init__(self, index, type): self.id = index # unsigned integer self.type = type # string class gauss_smoothing_job(): def __init__(self,scene, sigma , block_i, block_j, block_k, output_path, cell_length): self.scene = scene; self.sigma = sigma; self.block_i = block_i; self.block_j = block_j; self.block_k = block_k; self.output_path = output_path; self.cell_length = cell_length; def execute_jobs(jobs, num_procs=5): work_queue=multiprocessing.Queue(); result_queue=multiprocessing.Queue(); for job in jobs: work_queue.put(job) for i in range(num_procs): worker= gauss_kernel_worker(work_queue,result_queue) worker.start(); print("worker with name ",worker.name," started!") # collect the results off the queue #important: having a result queue makes the execute_jobs wait for all jobs in the queue before exiting results = [] while len(results) < len(jobs): result = result_queue.get() results.append(result) return results class gauss_kernel_worker(multiprocessing.Process): def __init__(self,work_queue,result_queue): # base class initialization multiprocessing.Process.__init__(self) # job management stuff self.work_queue = work_queue self.result_queue = result_queue self.kill_received = False def run(self): while not self.kill_received: # get a task try: job = self.work_queue.get_nowait() except Queue.Empty: break start_time = time.time(); print("Creating Gauss kernel"); bvpl_octree_batch.init_process("bvpl_create_gauss3d_kernel_process"); bvpl_octree_batch.set_input_float(0,job.sigma); bvpl_octree_batch.set_input_float(1,job.sigma); bvpl_octree_batch.set_input_float(2,job.sigma); bvpl_octree_batch.set_input_float(3,1.0); bvpl_octree_batch.set_input_float(4,0.0); bvpl_octree_batch.set_input_float(5,0.0); bvpl_octree_batch.set_input_float(6,0.0); bvpl_octree_batch.run_process(); (kernel_id,kernel_type)= bvpl_octree_batch.commit_output(0); kernel = dbvalue(kernel_id,kernel_type); print("Running Kernel"); bvpl_octree_batch.init_process("bvplBlockKernelOperatorProcess"); bvpl_octree_batch.set_input_from_db(0,job.scene); bvpl_octree_batch.set_input_from_db(1,kernel); bvpl_octree_batch.set_input_int(2, job.block_i); bvpl_octree_batch.set_input_int(3, job.block_j) bvpl_octree_batch.set_input_int(4, job.block_k) bvpl_octree_batch.set_input_string(5,"algebraic"); bvpl_octree_batch.set_input_string(6, job.output_path); bvpl_octree_batch.set_input_double(7, job.cell_length); bvpl_octree_batch.run_process(); print ("Runing time for worker:", self.name) print(time.time() - start_time); #output exit code in this case #important: having a result queue makes the execute_jobs wait for all jobs in the queue before exiting self.result_queue.put(0); if __name__=="__main__": bvpl_octree_batch.register_processes(); bvpl_octree_batch.register_datatypes(); parser = optparse.OptionParser(description='Run Taylor Kernels'); parser.add_option('--model_dir', action="store", dest="model_dir"); parser.add_option('--num_cores', action="store", dest="num_cores", type="int", default=4); parser.add_option('--nblocks_x', action="store", dest="nblocks_x", type="int"); parser.add_option('--nblocks_y', action="store", dest="nblocks_y", type="int"); parser.add_option('--nblocks_z', action="store", dest="nblocks_z", type="int"); options, args = parser.parse_args(); model_dir = options.model_dir; nblocks_x = options.nblocks_x; 
nblocks_y = options.nblocks_y; nblocks_z = options.nblocks_z; num_cores = options.num_cores; if not os.path.isdir(model_dir +"/"): print "Invalid Model Dir" sys.exit(-1); print("Creating a Scene"); bvpl_octree_batch.init_process("boxmCreateSceneProcess"); bvpl_octree_batch.set_input_string(0, model_dir +"/site12_pmvs.xml"); bvpl_octree_batch.run_process(); (scene_id, scene_type) = bvpl_octree_batch.commit_output(0); scene= dbvalue(scene_id, scene_type); #Begin multiprocessing t1=time.time(); job_list=[]; blocks_x = [i for i in range(0,nblocks_x)]; blocks_y = [i for i in range(0,nblocks_y)]; blocks_z = [i for i in range(0,nblocks_z)]; random.shuffle(blocks_x); random.shuffle(blocks_y); random.shuffle(blocks_y); #Enqueue jobs for i in range(0, len(blocks_x)): for j in range(0, len(blocks_y)): for k in range(0, len(blocks_z)): block_i = blocks_x[i]; block_j = blocks_y[j]; block_k = blocks_z[k]; current_job = gauss_smoothing_job(scene, 3.0, block_i, block_j, block_k, model_dir, 1.0); job_list.append(current_job); # wait for all the jobs results = execute_jobs(job_list, num_cores); print("Creating a Scene"); bvpl_octree_batch.init_process("boxmCreateSceneProcess"); bvpl_octree_batch.set_input_string(0, model_dir +"/float_response_scene.xml"); bvpl_octree_batch.run_process(); (scene_id, scene_type) = bvpl_octree_batch.commit_output(0); scene= dbvalue(scene_id, scene_type); print("Save Scene"); bvpl_octree_batch.init_process("boxmSaveSceneRawProcess"); bvpl_octree_batch.set_input_from_db(0,scene); bvpl_octree_batch.set_input_string(1,model_dir + "/drishti/gauss_scene"); bvpl_octree_batch.set_input_unsigned(2,0); bvpl_octree_batch.set_input_unsigned(3,1); bvpl_octree_batch.run_process();
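# --- Hedged illustration (added; not part of the original script, Python 3) ---
# A minimal, self-contained version of the execute_jobs()/worker pattern above, with the
# bvpl_octree_batch calls replaced by a trivial computation so the queue mechanics can be
# run anywhere. The job payloads are made up.
import multiprocessing
import queue

def toy_worker(work_queue, result_queue):
    while True:
        try:
            job = work_queue.get_nowait()
        except queue.Empty:
            break
        result_queue.put(job * job)

def toy_execute_jobs(jobs, num_procs=2):
    work_queue = multiprocessing.Queue()
    result_queue = multiprocessing.Queue()
    for job in jobs:
        work_queue.put(job)
    workers = [multiprocessing.Process(target=toy_worker, args=(work_queue, result_queue))
               for _ in range(num_procs)]
    for w in workers:
        w.start()
    # collecting exactly len(jobs) results makes the caller wait for all jobs, as in execute_jobs()
    results = [result_queue.get() for _ in jobs]
    for w in workers:
        w.join()
    return results

if __name__ == "__main__":
    print(sorted(toy_execute_jobs([1, 2, 3, 4])))  # -> [1, 4, 9, 16]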
nilq/baby-python
python
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User

from app.models import Question, Answer, Tag

CONFIRMATION = 'remove database'


class Command(BaseCommand):
    help = 'Remove all data from the database'
    requires_migrations_checks = True

    def add_arguments(self, parser):
        # store_true makes --yes a flag, so it can be passed without a value
        parser.add_argument('--yes', action='store_true',
                            help='Remove confirmation dialogue')

    def drop_db(self):
        Answer.objects.all().delete()
        Question.objects.all().delete()
        Tag.objects.all().delete()
        User.objects.all().delete()

    def handle(self, *args, **options):
        no_confirm = options['yes']
        if not no_confirm:
            check = input('Are you sure you want to DROP database? '
                          'This action will WIPE all the data.\n'
                          f'Type "{CONFIRMATION}" to proceed: ')
            if check != CONFIRMATION:
                print('Abort')
                return
        print('Removing all data from the database')
        self.drop_db()
        print('All records removed')
nilq/baby-python
python
import numpy
from PIL import Image


def get_origin(canny_img):
    """Return the centre of mass (x, y) of the edge pixels in a Canny edge image."""
    image = canny_img.load()
    pixels_x = []
    pixels_y = []
    # collect the coordinates of every non-zero (edge) pixel
    for x in range(0, canny_img.size[0]):
        for y in range(0, canny_img.size[1]):
            if image[x, y] != 0:
                pixels_x.append(x)
                pixels_y.append(y)
    pixels_x = numpy.asarray(pixels_x)
    pixels_y = numpy.asarray(pixels_y)
    # the origin is the mean edge-pixel position, rounded down to integer coordinates
    x_mean = int(numpy.mean(pixels_x))
    y_mean = int(numpy.mean(pixels_y))
    return (x_mean, y_mean)
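# --- Hedged illustration (added; not part of the original module) ---
# The same centroid-of-edge-pixels idea, but vectorized with numpy instead of the per-pixel
# loop above; the tiny array below stands in for a Canny edge image.
def get_origin_vectorized(edge_array):
    ys, xs = numpy.nonzero(edge_array)  # row (y) and column (x) indices of edge pixels
    return int(numpy.mean(xs)), int(numpy.mean(ys))

toy_edges = numpy.zeros((5, 5), dtype=numpy.uint8)
toy_edges[1, 1] = 255
toy_edges[3, 3] = 255
print(get_origin_vectorized(toy_edges))  # -> (2, 2)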
nilq/baby-python
python
"""A Python library for perturbation-based classifiers. ``Perturbation Classifier`` is a library containing the implementation of the Perturbation-based Classifier (PerC) and subconcept Perturbation-based Classifier (sPerC). Subpackages ----------- subconcept The implementation of subconcept Perturbation-based Classifier (sPerC). util The implementation of probability function and load keel dataset format. """ from perturbation_classifiers.perc import PerC # list of all modules available in the library __all__ = ['PerC', 'subconcept', 'util'] __version__ = '0.1.dev'
nilq/baby-python
python
import goprolib.HERO4.HERO4 as HERO4
import datetime
import time


def main(path='/media/xyoz/XYOZ-INT1000E/Pictures/2016_07_13 GoPro Auto'):
    h4 = HERO4.HERO4()
    h4.download_all(delete_after_download=True, path=path)


if __name__ == '__main__':
    # retry forever: on any error, log the time and try again after a short pause
    while True:
        try:
            main('/media/xyoz/XYOZ-INT1000E/Pictures/2016_07_15_GoPro Tests')
        except Exception:
            print(datetime.datetime.now())
            time.sleep(5)
nilq/baby-python
python
# GNU MediaGoblin -- federated, autonomous media hosting # Copyright (C) 2011, 2012 MediaGoblin contributors. See AUTHORS. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from mediagoblin.tools.staticdirect import PluginStatic from mediagoblin.tools import pluginapi from pkg_resources import resource_filename def setup_plugin(): routes = [ ('staticstuff.static_demo', '/staticstuff/', 'mediagoblin.tests.testplugins.staticstuff.views:static_demo')] pluginapi.register_routes(routes) hooks = { 'setup': setup_plugin, 'static_setup': lambda: PluginStatic( 'staticstuff', resource_filename( 'mediagoblin.tests.testplugins.staticstuff', 'static'))}
nilq/baby-python
python
import re import six import ast import json import global_params from utils import run_command from ast_helper import AstHelper class Source: def __init__(self, filename): self.filename = filename self.content = self._load_content() self.line_break_positions = self._load_line_break_positions() def _load_content(self): with open(self.filename, 'r') as f: content = f.read() return content def _load_line_break_positions(self): return [i for i, letter in enumerate(self.content) if letter == '\n'] class SourceMap: parent_filename = "" position_groups = {} sources = {} ast_helper = None func_to_sig_by_contract = {} remap = "" allow_paths = "" def __init__(self, cname, parent_filename, input_type, root_path="", remap="", allow_paths=""): self.root_path = root_path self.cname = cname self.input_type = input_type if not SourceMap.parent_filename: SourceMap.remap = remap SourceMap.allow_paths = allow_paths SourceMap.parent_filename = parent_filename if input_type == "solidity": SourceMap.position_groups = SourceMap._load_position_groups() elif input_type == "standard json": SourceMap.position_groups = SourceMap._load_position_groups_standard_json() else: raise Exception("There is no such type of input") SourceMap.ast_helper = AstHelper(SourceMap.parent_filename, input_type, SourceMap.remap, SourceMap.allow_paths) SourceMap.func_to_sig_by_contract = SourceMap._get_sig_to_func_by_contract() self.source = self._get_source() self.positions = self._get_positions() self.instr_positions = {} self.var_names = self._get_var_names() self.func_call_names = self._get_func_call_names() self.callee_src_pairs = self._get_callee_src_pairs() self.func_name_to_params = self._get_func_name_to_params() self.sig_to_func = self._get_sig_to_func() def get_source_code(self, pc): try: pos = self.instr_positions[pc] except: return "" begin = pos['begin'] end = pos['end'] return self.source.content[begin:end] def get_source_code_for_block(self,pc_start,pc_end,instructions): try: pos1 = self.instr_positions[pc_start] pos2 = self.instr_positions[pc_end] except: return "" begin = pos1['begin'] end = pos2['end'] if begin <= 0: if "JUMPI" in instructions[pc_end]: return "function select" return "contract start" if end >= len(self.source.content)-1: return "contract end" # return self.get_buggy_line((pc_start+pc_end)/2) if begin > end or ((end-begin) > len(self.source.content)/3): begin = pos2['begin'] return self.source.content[begin:end] return self.source.content[begin:end] def get_source_code_from_src(self, src): src = src.split(":") start = int(src[0]) end = start + int(src[1]) return self.source.content[start:end] def get_buggy_line(self, pc): try: pos = self.instr_positions[pc] except: return "" location = self.get_location(pc) begin = self.source.line_break_positions[location['begin']['line'] - 1] + 1 end = pos['end'] return self.source.content[begin:end] def get_buggy_line_from_src(self, src): pos = self._convert_src_to_pos(src) location = self.get_location_from_src(src) begin = self.source.line_break_positions[location['begin']['line'] - 1] + 1 end = pos['end'] return self.source.content[begin:end] def get_location(self, pc): pos = self.instr_positions[pc] return self._convert_offset_to_line_column(pos) def get_location_from_src(self, src): pos = self._convert_src_to_pos(src) return self._convert_offset_to_line_column(pos) def get_parameter_or_state_var(self, var_name): try: names = [ node.id for node in ast.walk(ast.parse(var_name)) if isinstance(node, ast.Name) ] if names[0] in self.var_names: return var_name except: 
return None return None def _convert_src_to_pos(self, src): pos = {} src = src.split(":") pos['begin'] = int(src[0]) length = int(src[1]) pos['end'] = pos['begin'] + length - 1 return pos def _get_sig_to_func(self): func_to_sig = SourceMap.func_to_sig_by_contract[self.cname]['hashes'] return dict((sig, func) for func, sig in six.iteritems(func_to_sig)) def _get_func_name_to_params(self): func_name_to_params = SourceMap.ast_helper.get_func_name_to_params(self.cname) for func_name in func_name_to_params: calldataload_position = 0 for param in func_name_to_params[func_name]: if param['type'] == 'ArrayTypeName': param['position'] = calldataload_position calldataload_position += param['value'] else: param['position'] = calldataload_position calldataload_position += 1 return func_name_to_params def _get_source(self): fname = self.get_filename() if fname not in SourceMap.sources: SourceMap.sources[fname] = Source(fname) return SourceMap.sources[fname] def _get_callee_src_pairs(self): return SourceMap.ast_helper.get_callee_src_pairs(self.cname) def _get_var_names(self): return SourceMap.ast_helper.extract_state_variable_names(self.cname) def _get_func_call_names(self): func_call_srcs = SourceMap.ast_helper.extract_func_call_srcs(self.cname) func_call_names = [] for src in func_call_srcs: src = src.split(":") start = int(src[0]) end = start + int(src[1]) func_call_names.append(self.source.content[start:end]) return func_call_names @classmethod def _get_sig_to_func_by_contract(cls): if cls.allow_paths: cmd = 'solc --combined-json hashes %s %s --allow-paths %s' % (cls.remap, cls.parent_filename, cls.allow_paths) else: cmd = 'solc --combined-json hashes %s %s' % (cls.remap, cls.parent_filename) out = run_command(cmd) out = json.loads(out) return out['contracts'] @classmethod def _load_position_groups_standard_json(cls): with open('standard_json_output', 'r') as f: output = f.read() output = json.loads(output) return output["contracts"] @classmethod def _load_position_groups(cls): if cls.allow_paths: cmd = "solc --combined-json asm %s %s --allow-paths %s" % (cls.remap, cls.parent_filename, cls.allow_paths) else: cmd = "solc --combined-json asm %s %s" % (cls.remap, cls.parent_filename) out = run_command(cmd) out = json.loads(out) return out['contracts'] def _get_positions(self): if self.input_type == "solidity": asm = SourceMap.position_groups[self.cname]['asm']['.data']['0'] else: filename, contract_name = self.cname.split(":") asm = SourceMap.position_groups[filename][contract_name]['evm']['legacyAssembly']['.data']['0'] positions = asm['.code'] while(True): try: positions.append(None) positions += asm['.data']['0']['.code'] asm = asm['.data']['0'] except: break return positions def _convert_offset_to_line_column(self, pos): ret = {} ret['begin'] = None ret['end'] = None if pos['begin'] >= 0 and (pos['end'] - pos['begin'] + 1) >= 0: ret['begin'] = self._convert_from_char_pos(pos['begin']) ret['end'] = self._convert_from_char_pos(pos['end']) return ret def _convert_from_char_pos(self, pos): line = self._find_lower_bound(pos, self.source.line_break_positions) if self.source.line_break_positions[line] != pos: line += 1 begin_col = 0 if line == 0 else self.source.line_break_positions[line - 1] + 1 col = pos - begin_col return {'line': line, 'column': col} def _find_lower_bound(self, target, array): start = 0 length = len(array) while length > 0: half = length >> 1 middle = start + half if array[middle] <= target: length = length - 1 - half start = middle + 1 else: length = half return start - 1 def 
get_filename(self): return self.cname.split(":")[0]
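# --- Hedged illustration (added; not part of the original module) ---
# A self-contained sketch of the character-offset -> (line, column) conversion that
# _convert_from_char_pos() implements with its hand-written lower-bound search, done here
# with the standard bisect module on the same "positions of line breaks" representation.
import bisect

def offset_to_line_column(content, pos):
    line_breaks = [i for i, ch in enumerate(content) if ch == '\n']
    line = bisect.bisect_left(line_breaks, pos)
    begin_col = 0 if line == 0 else line_breaks[line - 1] + 1
    return {'line': line, 'column': pos - begin_col}

toy_source = "contract A {\n  uint x;\n}\n"
print(offset_to_line_column(toy_source, toy_source.index("uint")))  # -> {'line': 1, 'column': 2}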
nilq/baby-python
python
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import os import copy from functools import partial import math import numpy as np from pathlib import Path import random from typing import Callable, Dict, List, Tuple, Union import torch from torch.utils.data import Dataset, Subset, DataLoader from torchvision.transforms import ColorJitter import xml.etree.ElementTree as ET from PIL import Image from .plot import plot_detections, plot_grid from .bbox import AnnotationBbox from .mask import binarise_mask from .references.utils import collate_fn from .references.transforms import Compose, ToTensor from ..common.gpu import db_num_workers Trans = Callable[[object, dict], Tuple[object, dict]] def _flip_keypoints(keypoints, width, hflip_inds): """ Variation of `references.transforms._flip_coco_person_keypoints` with additional hflip_inds. """ flipped_keypoints = keypoints[:, hflip_inds] flipped_keypoints[..., 0] = width - flipped_keypoints[..., 0] # Maintain COCO convention that if visibility == 0, then x, y = 0 inds = flipped_keypoints[..., 2] == 0 flipped_keypoints[inds] = 0 return flipped_keypoints class RandomHorizontalFlip(object): """ Variation of `references.transforms.RandomHorizontalFlip` to make sure flipping works on custom keypoints. """ def __init__(self, prob): self.prob = prob def __call__(self, im, target): if random.random() < self.prob: height, width = im.shape[-2:] im = im.flip(-1) bbox = target["boxes"] bbox[:, [0, 2]] = width - bbox[:, [2, 0]] target["boxes"] = bbox if "masks" in target: target["masks"] = target["masks"].flip(-1) if "keypoints" in target: assert ( "hflip_inds" in target ), "To use random horizontal flipping, 'hflip_inds' needs to be specified" keypoints = target["keypoints"] keypoints = _flip_keypoints( keypoints, width, target["hflip_inds"] ) target["keypoints"] = keypoints return im, target class ColorJitterTransform(object): """ Wrapper for torchvision's ColorJitter to make sure 'target object is passed along """ def __init__(self, brightness, contrast, saturation, hue): self.brightness = brightness self.contrast = contrast self.saturation = saturation self.hue = hue def __call__(self, im, target): im = ColorJitter( brightness=self.brightness, contrast=self.contrast, saturation=self.saturation, hue=self.hue, )(im) return im, target def get_transform(train: bool) -> Trans: """ Gets basic the transformations to apply to images. Source: https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html#writing-a-custom-dataset-for-pennfudan Args: train: whether or not we are getting transformations for the training set. Returns: A list of transforms to apply. """ transforms = [] # transformations to apply before image is turned into a tensor if train: transforms.append( ColorJitterTransform( brightness=0.2, contrast=0.2, saturation=0.4, hue=0.05 ) ) # transform im to tensor transforms.append(ToTensor()) # transformations to apply after image is turned into a tensor if train: transforms.append(RandomHorizontalFlip(0.5)) return Compose(transforms) def parse_pascal_voc_anno( anno_path: str, labels: List[str] = None, keypoint_meta: Dict = None ) -> Tuple[List[AnnotationBbox], Union[str, Path], np.ndarray]: """ Extract the annotations and image path from labelling in Pascal VOC format. Args: anno_path: the path to the annotation xml file labels: list of all possible labels, used to compute label index for each label name keypoint_meta: meta data of keypoints which should include at least "labels". 
Return A tuple of annotations, the image path and keypoints. Keypoints is a numpy array of shape (N, K, 3), where N is the number of objects of the category that defined the keypoints, and K is the number of keypoints defined in the category. `len(keypoints)` would be 0 if no keypoints found. """ anno_bboxes = [] keypoints = [] tree = ET.parse(anno_path) root = tree.getroot() # get image path from annotation. Note that the path field might not be set. anno_dir = os.path.dirname(anno_path) if root.find("path") is not None: im_path = os.path.realpath( os.path.join(anno_dir, root.find("path").text) ) else: im_path = os.path.realpath( os.path.join(anno_dir, root.find("filename").text) ) # extract bounding boxes, classification and keypoints objs = root.findall("object") for obj in objs: label = obj.find("name").text # Get keypoints if any. # For keypoint detection, currently only one category (except # background) is allowed. We assume all annotated objects are of that # category. if keypoint_meta is not None: kps = [] kps_labels = keypoint_meta["labels"] # Assume keypoints are available kps_annos = obj.find("keypoints") if kps_annos is None: raise Exception(f"No keypoints found in {anno_path}") assert set([kp.tag for kp in kps_annos]).issubset( kps_labels ), "Incompatible keypoint labels" # Read keypoint coordinates: [x, y, visibility] # Visibility 0 means invisible, non-zero means visible for name in kps_labels: kp_anno = kps_annos.find(name) if kp_anno is None: # return 0 for invisible keypoints kps.append([0, 0, 0]) else: kps.append( [ int(float(kp_anno.find("x").text)), int(float(kp_anno.find("y").text)), 1, ] ) keypoints.append(kps) # get bounding box bnd_box = obj.find("bndbox") left = int(bnd_box.find("xmin").text) top = int(bnd_box.find("ymin").text) right = int(bnd_box.find("xmax").text) bottom = int(bnd_box.find("ymax").text) # Set mapping of label name to label index if labels is None: label_idx = None else: label_idx = labels.index(label) anno_bbox = AnnotationBbox.from_array( [left, top, right, bottom], label_name=label, label_idx=label_idx, im_path=im_path, ) assert anno_bbox.is_valid() anno_bboxes.append(anno_bbox) return anno_bboxes, im_path, np.array(keypoints) class DetectionDataset: """ An object detection dataset. The implementation of the dunder methods __init__, __getitem__, and __len__ were inspired from code found here: https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html#writing-a-custom-dataset-for-pennfudan """ def __init__( self, root: Union[str, Path], batch_size: int = 2, train_transforms: Trans = get_transform(train=True), test_transforms: Trans = get_transform(train=False), train_pct: float = 0.5, anno_dir: str = "annotations", im_dir: str = "images", mask_dir: str = None, keypoint_meta: Dict = None, seed: int = None, allow_negatives: bool = False, ): """ initialize dataset This class assumes that the data is formatted in two folders: - annotation folder which contains the Pascal VOC formatted annotations - image folder which contains the images Args: root: the root path of the dataset containing the image and annotation folders batch_size: batch size for dataloaders train_transforms: the transformations to apply to the train set test_transforms: the transformations to apply to the test set train_pct: the ratio of training to testing data anno_dir: the name of the annotation subfolder under the root directory im_dir: the name of the image subfolder under the root directory. 
If set to 'None' then infers image location from annotation .xml files allow_negatives: is false (default) then will throw an error if no annotation .xml file can be found for a given image. Otherwise use image as negative, ie assume that the image does not contain any of the objects of interest. mask_dir: the name of the mask subfolder under the root directory if the dataset is used for instance segmentation keypoint_meta: meta data of keypoints which should include "labels", "skeleton" and "hflip_inds". seed: random seed for splitting dataset to training and testing data """ self.root = Path(root) self.train_transforms = train_transforms self.test_transforms = test_transforms self.im_dir = im_dir self.anno_dir = anno_dir self.mask_dir = mask_dir self.batch_size = batch_size self.train_pct = train_pct self.allow_negatives = allow_negatives self.seed = seed self.keypoint_meta = keypoint_meta # read annotations self._read_annos() # create training and validation datasets self.train_ds, self.test_ds = self.split_train_test( train_pct=train_pct ) # create training and validation data loaders self.init_data_loaders() def _read_annos(self) -> None: """ Parses all Pascal VOC formatted annotation files to extract all possible labels. """ # All annotation files are assumed to be in the anno_dir directory. # If im_dir is provided then find all images in that directory, and # it's assumed that the annotation filenames end with .xml. # If im_dir is not provided, then the image paths are read from inside # the .xml annotations. if self.im_dir is None: anno_filenames = sorted(os.listdir(self.root / self.anno_dir)) else: im_filenames = sorted(os.listdir(self.root / self.im_dir)) im_paths = [ os.path.join(self.root / self.im_dir, s) for s in im_filenames ] anno_filenames = [ os.path.splitext(s)[0] + ".xml" for s in im_filenames ] # Read all annotations self.im_paths = [] self.anno_paths = [] self.anno_bboxes = [] self.mask_paths = [] self.keypoints = [] for anno_idx, anno_filename in enumerate(anno_filenames): anno_path = self.root / self.anno_dir / str(anno_filename) # Parse annotation file if present if os.path.exists(anno_path): anno_bboxes, im_path, keypoints = parse_pascal_voc_anno( anno_path, keypoint_meta=self.keypoint_meta ) # When meta provided, we assume this is keypoint # detection. if self.keypoint_meta is not None: self.keypoints.append(keypoints) else: if not self.allow_negatives: raise FileNotFoundError(anno_path) anno_bboxes = [] im_path = im_paths[anno_idx] # Torchvision needs at least one ground truth bounding box per image. Hence for images without a single # annotated object, adding a tiny bounding box with "background" label 0. 
if len(anno_bboxes) == 0: anno_bboxes = [ AnnotationBbox.from_array( [1, 1, 5, 5], label_name=None, label_idx=0, im_path=im_path, ) ] if self.im_dir is None: self.im_paths.append(im_path) else: self.im_paths.append(im_paths[anno_idx]) if self.mask_dir: # Assume mask image name matches image name but has .png # extension mask_name = os.path.basename(self.im_paths[-1]) mask_name = mask_name[: mask_name.rindex(".")] + ".png" mask_path = self.root / self.mask_dir / mask_name # For mask prediction, if no mask provided and negatives not # allowed (), raise exception if not mask_path.exists(): if not self.allow_negatives: raise FileNotFoundError(mask_path) else: self.mask_paths.append(None) else: self.mask_paths.append(mask_path) self.anno_paths.append(anno_path) self.anno_bboxes.append(anno_bboxes) assert len(self.im_paths) == len(self.anno_paths) # Get list of all labels labels = [] for anno_bboxes in self.anno_bboxes: for anno_bbox in anno_bboxes: if anno_bbox.label_name is not None: labels.append(anno_bbox.label_name) self.labels = list(set(labels)) # Set for each bounding box label name also what its integer representation is for anno_bboxes in self.anno_bboxes: for anno_bbox in anno_bboxes: if ( anno_bbox.label_name is None ): # background rectangle is assigned id 0 by design anno_bbox.label_idx = 0 else: anno_bbox.label_idx = ( self.labels.index(anno_bbox.label_name) + 1 ) def split_train_test( self, train_pct: float = 0.8 ) -> Tuple[Dataset, Dataset]: """ Split this dataset into a training and testing set Args: train_pct: the ratio of images to use for training vs testing Return A training and testing dataset in that order """ test_num = math.floor(len(self) * (1 - train_pct)) if self.seed: torch.manual_seed(self.seed) indices = torch.randperm(len(self)).tolist() train = copy.deepcopy(Subset(self, indices[test_num:])) train.dataset.transforms = self.train_transforms test = copy.deepcopy(Subset(self, indices[:test_num])) test.dataset.transforms = self.test_transforms return train, test def init_data_loaders(self): """ Create training and validation data loaders """ self.train_dl = DataLoader( self.train_ds, batch_size=self.batch_size, shuffle=True, num_workers=db_num_workers(), collate_fn=collate_fn, ) self.test_dl = DataLoader( self.test_ds, batch_size=self.batch_size, shuffle=False, num_workers=db_num_workers(), collate_fn=collate_fn, ) def add_images( self, im_paths: List[str], anno_bboxes: List[List[AnnotationBbox]], target: str = "train", mask_paths: List[str] = None, keypoints: List[np.ndarray] = None, ): """ Add new images to either the training or test set. Args: im_paths: path to the images. anno_bboxes: ground truth boxes for each image. target: specify if images are to be added to the training or test set. Valid options: "train" or "test". mask_paths: path to the masks. keypoints: list of numpy array of shape (N, K, 3), where N is the number of objects of the category that defined the keypoints, and K is the number of keypoints defined in the category. 
Raises: Exception if `target` variable is neither 'train' nor 'test' """ assert len(im_paths) == len(anno_bboxes) for i, (im_path, anno_bbox) in enumerate(zip(im_paths, anno_bboxes)): self.im_paths.append(im_path) self.anno_bboxes.append(anno_bbox) if mask_paths is not None: self.mask_paths.append(mask_paths[i]) if keypoints is not None: self.keypoints.append(keypoints[i]) if target.lower() == "train": self.train_ds.dataset.im_paths.append(im_path) self.train_ds.dataset.anno_bboxes.append(anno_bbox) if mask_paths is not None: self.train_ds.dataset.mask_paths.append(mask_paths[i]) if keypoints is not None: self.train_ds.dataset.keypoints.append(keypoints[i]) self.train_ds.indices.append(len(self.im_paths) - 1) elif target.lower() == "test": self.test_ds.dataset.im_paths.append(im_path) self.test_ds.dataset.anno_bboxes.append(anno_bbox) if mask_paths is not None: self.test_ds.dataset.mask_paths.append(mask_paths[i]) if keypoints is not None: self.test_ds.dataset.keypoints.append(keypoints[i]) self.test_ds.indices.append(len(self.im_paths) - 1) else: raise Exception(f"Target {target} unknown.") # Re-initialize the data loaders self.init_data_loaders() def show_ims(self, rows: int = 1, cols: int = 3, seed: int = None) -> None: """ Show a set of images. Args: rows: the number of rows images to display cols: cols to display, NOTE: use 3 for best looking grid seed: random seed for selecting images Returns None but displays a grid of annotated images. """ if seed or self.seed: random.seed(seed or self.seed) def helper(im_paths): idx = random.randrange(len(im_paths)) detection = { "idx": idx, "im_path": im_paths[idx], "det_bboxes": [], } return detection, self, None, None plot_grid( plot_detections, partial(helper, self.im_paths), rows=rows, cols=cols, ) def show_im_transformations( self, idx: int = None, rows: int = 1, cols: int = 3 ) -> None: """ Show a set of images after transformations have been applied. Args: idx: the index to of the image to show the transformations for. rows: number of rows to display cols: number of cols to display, NOTE: use 3 for best looking grid Returns None but displays a grid of randomly applied transformations. """ if not hasattr(self, "transforms"): print( ( "Transformations are not applied ot the base dataset object.\n" "Call this function on either the train_ds or test_ds instead:\n\n" " my_detection_data.train_ds.dataset.show_im_transformations()" ) ) else: if idx is None: idx = random.randrange(len(self.anno_paths)) def plotter(im, ax): ax.set_xticks([]) ax.set_yticks([]) ax.imshow(im) def im_gen() -> torch.Tensor: return self[idx][0].permute(1, 2, 0) plot_grid(plotter, im_gen, rows=rows, cols=cols) print(f"Transformations applied on {self.im_paths[idx]}:") [print(transform) for transform in self.transforms.transforms] def _get_binary_mask(self, idx: int) -> Union[np.ndarray, None]: """ Return binary masks for objects in the mask image. """ binary_masks = None if self.mask_paths: if self.mask_paths[idx] is not None: binary_masks = binarise_mask(Image.open(self.mask_paths[idx])) else: # for the tiny bounding box in _read_annos(), make the mask to # be the whole box mask = np.zeros( Image.open(self.im_paths[idx]).size[::-1], dtype=np.uint8 ) binary_masks = binarise_mask(mask) return binary_masks def __getitem__(self, idx): """ Make iterable. 
""" # get box/labels from annotations im_path = self.im_paths[idx] anno_bboxes = self.anno_bboxes[idx] boxes = [ [anno_bbox.left, anno_bbox.top, anno_bbox.right, anno_bbox.bottom] for anno_bbox in anno_bboxes ] labels = [anno_bbox.label_idx for anno_bbox in anno_bboxes] # convert everything into a torch.Tensor boxes = torch.as_tensor(boxes, dtype=torch.float32) labels = torch.as_tensor(labels, dtype=torch.int64) # get area for evaluation with the COCO metric, to separate the # metric scores between small, medium and large boxes. area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]) # suppose all instances are not crowd (torchvision specific) iscrowd = torch.zeros((len(boxes),), dtype=torch.int64) # unique id im_id = torch.tensor([idx]) # setup target dic target = { "boxes": boxes, "labels": labels, "image_id": im_id, "area": area, "iscrowd": iscrowd, } # get masks binary_masks = self._get_binary_mask(idx) if binary_masks is not None: target["masks"] = torch.as_tensor(binary_masks, dtype=torch.uint8) # get keypoints if self.keypoints: target["keypoints"] = torch.as_tensor( self.keypoints[idx], dtype=torch.float32 ) if "hflip_inds" in self.keypoint_meta: target["hflip_inds"] = torch.as_tensor( self.keypoint_meta["hflip_inds"], dtype=torch.int64 ) # get image im = Image.open(im_path).convert("RGB") # and apply transforms if any if self.transforms is not None: im, target = self.transforms(im, target) return im, target def __len__(self): return len(self.im_paths)
nilq/baby-python
python
from django import template register = template.Library() @register.filter(name='get_vulnerable_endpoints') def get_vulnerable_endpoints(endpoints): return endpoints.filter(remediated=False) @register.filter(name='get_remediated_endpoints') def get_remediated_endpoints(endpoints): return endpoints.filter(remediated=True)
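# Usage sketch: in a Django template these filters split an endpoint queryset by
# remediation state, e.g. (the tag-library name "endpoint_tags" is hypothetical):
#
#   {% load endpoint_tags %}
#   {% for ep in finding.endpoints.all|get_vulnerable_endpoints %} ... {% endfor %}
#
# They also work as plain functions on any queryset whose model has a boolean
# `remediated` field:
#
#   open_eps = get_vulnerable_endpoints(finding.endpoints.all())
#   fixed_eps = get_remediated_endpoints(finding.endpoints.all())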
nilq/baby-python
python
class Node():
    def __init__(self, key, data):
        """Create a new node for a singly linked list.

        Arguments:
            key -- lookup key stored on the node
            data -- payload associated with the key
        """
        self.key = key
        self.data = data
        self.next = None
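# Minimal usage sketch: chain a few nodes through their `next` pointers and walk
# the list. The traversal loop below is illustrative, not part of the class.
if __name__ == "__main__":
    head = Node("a", 1)
    head.next = Node("b", 2)
    head.next.next = Node("c", 3)

    node = head
    while node is not None:
        print(node.key, node.data)   # a 1, then b 2, then c 3
        node = node.next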
nilq/baby-python
python
if __name__ == '__main__': # print("a") # ord: characters -> ASCII code # print(ord('a')) # chr: ASCII code -> characters # print(chr(97)) result = chr(ord('a') + 1) print(result)
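# The same ord()/chr() arithmetic generalizes to shifting whole strings. A small
# sketch; wrapping within the lowercase alphabet is an added assumption, not part
# of the original snippet.
def shift_letter(ch, n=1):
    # map 'a'..'z' to 0..25, shift with wrap-around, then map back to a character
    return chr((ord(ch) - ord('a') + n) % 26 + ord('a'))


if __name__ == '__main__':
    print(shift_letter('a'))                           # b
    print(shift_letter('z'))                           # a
    print("".join(shift_letter(c, 3) for c in "abc"))  # def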
nilq/baby-python
python
"""download and/or process data""" import torch import torch.nn as nn import torchaudio import pandas as pd from sonopy import power_spec, mel_spec, mfcc_spec, filterbanks class MFCC(nn.Module): def __init__(self, sample_rate, fft_size=400, window_stride=(400, 200), num_filt=40, num_coeffs=40): super(MFCC, self).__init__() self.sample_rate = sample_rate self.window_stride = window_stride self.fft_size = fft_size self.num_filt = num_filt self.num_coeffs = num_coeffs self.mfcc = lambda x: mfcc_spec( x, self.sample_rate, self.window_stride, self.fft_size, self.num_filt, self.num_coeffs ) def forward(self, x): return torch.Tensor(self.mfcc(x.squeeze(0).numpy())).transpose(0, 1).unsqueeze(0) def get_featurizer(sample_rate): return MFCC(sample_rate=sample_rate) class RandomCut(nn.Module): """Augmentation technique that randomly cuts start or end of audio""" def __init__(self, max_cut=10): super(RandomCut, self).__init__() self.max_cut = max_cut def forward(self, x): """Randomly cuts from start or end of batch""" side = torch.randint(0, 1, (1,)) cut = torch.randint(1, self.max_cut, (1,)) if side == 0: return x[:-cut,:,:] elif side == 1: return x[cut:,:,:] class SpecAugment(nn.Module): """Augmentation technique to add masking on the time or frequency domain""" def __init__(self, rate, policy=3, freq_mask=2, time_mask=4): super(SpecAugment, self).__init__() self.rate = rate self.specaug = nn.Sequential( torchaudio.transforms.FrequencyMasking(freq_mask_param=freq_mask), torchaudio.transforms.TimeMasking(time_mask_param=time_mask) ) self.specaug2 = nn.Sequential( torchaudio.transforms.FrequencyMasking(freq_mask_param=freq_mask), torchaudio.transforms.TimeMasking(time_mask_param=time_mask), torchaudio.transforms.FrequencyMasking(freq_mask_param=freq_mask), torchaudio.transforms.TimeMasking(time_mask_param=time_mask) ) policies = { 1: self.policy1, 2: self.policy2, 3: self.policy3 } self._forward = policies[policy] def forward(self, x): return self._forward(x) def policy1(self, x): probability = torch.rand(1, 1).item() if self.rate > probability: return self.specaug(x) return x def policy2(self, x): probability = torch.rand(1, 1).item() if self.rate > probability: return self.specaug2(x) return x def policy3(self, x): probability = torch.rand(1, 1).item() if probability > 0.5: return self.policy1(x) return self.policy2(x) def collate_fn(data): """Batch and pad wakeword data""" rand_cut = RandomCut(max_cut=10) mfccs = [] labels = [] for d in data: mfcc, label = d mfccs.append(mfcc.squeeze(0).transpose(0, 1)) labels.append(label) # pad mfccs to ensure all tensors are same size in the time dim mfccs = nn.utils.rnn.pad_sequence(mfccs, batch_first=True) # batch, seq_len, feature mfccs = mfccs.transpose(0, 1) # seq_len, batch, feature mfccs = rand_cut(mfccs) #print(mfccs.shape) labels = torch.Tensor(labels) return mfccs, labels
nilq/baby-python
python
from substance.monads import * from substance.logs import * from substance import (Engine, Command) from substance.exceptions import (SubstanceError) class Env(Command): def getUsage(self): return "substance engine env [ENGINE NAME]" def getHelpTitle(self): return "Print the shell variables to set up the local docker client environment" def getShellOptions(self, optparser): return optparser def main(self): name = self.getInputName() self.core.loadEngine(name) \ .bind(Engine.loadConfigFile) \ .bind(self.outputDockerEnv) \ .catch(self.exitError) def outputDockerEnv(self, engine): env = engine.getDockerEnv() for k, v in env.items(): print(("export %s=\"%s\"" % (k, v))) return OK(None)
nilq/baby-python
python
from PIL import Image import math import os DATASET_PATH = 'A:/temp/temp' output_path = 'image_resize/' MAXIMUM_RESOLUTION = 1280*720 def img_resize(img, maximum_resolution): img_width = img.width img_height = img.height img_definition = img_width * img_height img_dpi = img.info['dpi'] if img_definition > maximum_resolution: reduction_ratio = img_definition / maximum_resolution reduction_ratio = math.sqrt(reduction_ratio) img_width_r = int(img_width / reduction_ratio) img_height_r = int(img_height / reduction_ratio) img = img.resize((img_width_r, img_height_r)) return img, img_dpi def main(): file_list = os.listdir(DATASET_PATH) if not os.path.exists(output_path): os.mkdir(output_path) for idx, fn in enumerate(file_list): img_path = os.path.join(DATASET_PATH, fn) img = Image.open(img_path) img, dpi = img_resize(img, maximum_resolution=MAXIMUM_RESOLUTION) img.save(os.path.join(output_path, fn), quality=100, dpi=dpi) print(fn + ' Done!') if __name__ == '__main__': main()
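# Worked example of the downscale arithmetic in img_resize(): a 4000x3000 image has
# 12,000,000 pixels against the 921,600-pixel cap (1280*720), an area ratio of about
# 13.02, so each side is divided by sqrt(13.02) ~= 3.61, giving roughly 1108x831 and
# landing just under the cap. The same math as a quick check:
if __name__ == '__main__':
    w, h, cap = 4000, 3000, 1280 * 720
    ratio = math.sqrt(w * h / cap)
    print(int(w / ratio), int(h / ratio))  # about 1108 831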
nilq/baby-python
python
from kivy.uix.screenmanager import Screen from kivy.properties import BooleanProperty, StringProperty from kivy.event import EventDispatcher from kivy.network.urlrequest import UrlRequest from kivy.app import App from kivy.lang import Builder from kivy.factory import Factory import sys sys.path.append("/".join(x for x in __file__.split("/")[:-1])) from json import dumps import os.path # Load the kv files folder = os.path.dirname(os.path.realpath(__file__)) Builder.load_file(folder + "/homescreen.kv") Builder.load_file(folder + "/logout.kv") Builder.load_file(folder + "/spinnerscreen.kv") from homescreen import HomeScreen from spinnerscreen import SpinnerScreen from logout import Logout class WelcomeScreen(Screen, EventDispatcher): refresh_token = "" logout_success = BooleanProperty(False) # Called upon successful sign out refresh_token_file = App.get_running_app().user_data_dir + "/refresh_token.txt" google_token_file = "token.pickle" def on_logout_success(self, *args): """Overwrite this method to switch to your app's home screen. """ print("Logged out successfully", "<Screen name='firebase_login_screen'>, True") def create_refresh_token(self, refresh_token): """Saves the refresh token in a local file to enable automatic sign in next time the app is opened. """ if os.path.exists(refresh_token): print("The file exists") else: f = open(refresh_token, "x") def log_out(self): """Overwrite this method to switch to your app's home screen. """ if os.path.exists(self.refresh_token_file): os.remove(self.refresh_token_file) self.create_refresh_token(self.refresh_token_file) if os.path.exists(self.google_token_file): os.remove(self.google_token_file) self.logout_success = True
nilq/baby-python
python
"""A module that fails the tests""" long_string = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" def bad_function(a: int) -> int: """Return input + 2 Parameters ---------- a : int input integer Returns ------- int input + 2 """ return a + 2
nilq/baby-python
python
TESTING=True """ TESTING=False IN CASE OF PRODUCTION TESTING=True IN CASE OF TESTING """ from flask import (Flask, abort, jsonify, make_response, request) from flask_sqlalchemy import SQLAlchemy import secrets import os from __init__ import db, SECRET from models import (NotReceived, User, #Product, Order, #Image, db_drop_and_create_all, populate_tables) from auth import (requires_auth, auth_cookie_response , auth_cookie_response_new, validate_token, generate_token) from datetime import timedelta,datetime from flask_cors import CORS from pydantic_models import (validate_model_id_pydantic, UserPost, UserUpdatePassword, UserLogin#, ProductPost, OrderPost, OrderUpdate ) from flask_pydantic import validate from functions import validate_model_id if "SECRET" in os.environ: SECRET = os.environ["SECRET"] class config: #SECRET_KEY=os.urandom(32) SECRET_KEY=secrets.token_urlsafe(5000) basedir = os.path.abspath(os.path.dirname(__file__)) DEBUG = False SQLALCHEMY_DATABASE_URI = "sqlite:///databases/database.sqlite" SQLALCHEMY_TRACK_MODIFICATIONS= False class config_test: DEBUG = True SQLALCHEMY_DATABASE_URI = "sqlite:///databases/test.sqlite" class config_docker: SQLALCHEMY_DATABASE_URI = "sqlite:////database//database.sqlite" def create_app(DOCKER=False,testing=TESTING): app = Flask(__name__) app.config.from_object(config) if TESTING: app.config.from_object(config_test) if DOCKER: app.config.from_object(config_docker) db.app = app db.init_app(app) db.create_all() CORS(app,resources={r"*":{"origins":"*"}}) @app.after_request def after_request(response): response.headers.add("Access-Control-allow-Origin","*") response.headers.add("Access-Control-allow-Headers", "*,Content-Type,true") response.headers.add("Access-Control-allow-Methods", "GET,PUT,POST,DELETE,OPTIONS") response.headers.add("Access-Control-Expose-Headers", "Authorization,Set-Cookie") db.session.rollback() return response @app.route('/r', methods=['GET']) def raised(): # Testng the ability to raise custom responses abort(make_response(jsonify({"sucess":True}),200)) return jsonify({"success":False}) """ 1) "/clear_tables"-------->"GET" , "OPTIONS" """ @app.route("/clear_tables", methods=["GET"]) def clear_all_tables(): test_only() db_drop_and_create_all() """ Tests: test_02_populate_test """ return jsonify({"success":True}) """ 2) "/populate" ->--------->"GET" , "OPTIONS" """ @app.route("/populate", methods=["GET"]) def populate_all_tables(): test_only() #This endpoint will clear all the data in the database and #populate with new data try: populate_tables() return jsonify({"success":True}) except: abort(422) #Unprocessible """ Tests: test_01_clear_tables """ """ User endpoints: post_users delete users login """ @app.route("/users/who", methods=["POST"]) def users_who(): #This endpoint will tell if the user should pass or not #and if his token expired, it will refresh it the_401_error = jsonify({ "error": 401,"message": "unauthorized", "success": False}) the_401_error.headers.add("Authorization","") if "Authorization" not in request.headers: return the_401_error,401 #Now the cookie exists token = request.headers["Authorization"] #print(SECRET,flush=True) #print(request.cookies,flush=True) token_validation = validate_token( token=token,secret=SECRET) #print(token_validation,flush=True) #print("WHO: "+str(token_validation),flush=True) if token_validation["case"]==3: return the_401_error,401 if token_validation["case"]==2: res=jsonify({"success":True}) user_id=token_validation["payload"]["uid"] response=auth_cookie_response( 
response={"success":True, "result":"refreshed expired token", "user_id":user_id}, user_id=user_id) return response else: res = jsonify({"success":True, "result":"user is logged in", "user_id":token_validation["payload"]["uid"]}) res.headers.add("Authorization",token) return res @app.route("/users", methods=["POST"]) @validate() def post_users(body:UserPost): #This endpoint will add a new user username = body.username password = body.password1 #return jsonify({"success":True, #"username":username,"password":password #}) #Create the user new_user = User(username=username, password=password) #Insert the user in the database try: new_user.insert() response=auth_cookie_response( response={"success":True,"user":new_user.simple()}, user_id=new_user.id) return response except Exception as e: raise(e) abort(500) @app.route("/users", methods=["DELETE"]) @requires_auth() def delete_users(payload): #This endpoint will delete an existing user user_id=payload["uid"] users_query=User.query user_id_validation=validate_model_id( input_id=user_id,model_query=users_query ,model_name_string="user") if user_id_validation["case"]==1: #The user exists user=user_id_validation["result"] else: #No user with this id, can not convert to int, # or id is missing (Impossible) return my_error( status=user_id_validation["result"]["status"], description=user_id_validation ["result"]["description"]) #Now, we have "user", this is essential try: # Finally, deleting the user itself user.delete() r=jsonify({"success":True, "result":"user deleted successfully"}) cookies=request.cookies for co in cookies: r.set_cookie(co,value="",expires=-50) return r #return jsonify({"success":True, # "result":"user deleted successfully"}) except Exception as e: raise(e) db.session.rollback() abort(500) @app.route("/users/login", methods=["POST"]) @validate() def login_users(body:UserLogin): #This endpoint will log the user in the_user_id = body.password response=auth_cookie_response( response={"success":True, "result":"logged in successfully", "user_id":the_user_id}, user_id=the_user_id) return response @app.route("/users/logout", methods=["POST"]) def logout_users(): #This endpoint will log the user out cookies = request.cookies r=jsonify({"success":True, "result":"logged out successfully"}) r.headers.add("Authorization","") return r @app.route("/users/login/test", methods=["POST"]) def login_test(): test_only() #This endpoint will log the user in response=auth_cookie_response_new( response={"success":True, "result":"logged in successfully", "user_id":1}, user_id=1) return response @app.route("/users/login/expired", methods=["POST"]) def login_expired(): test_only() #This endpoint will log the user in with expired token res = jsonify( {"success":True, "result":"setting expired token successfully"}) expired_token=generate_token(user_id=1,secret=SECRET, expiration_delta=timedelta(days=-7), issued_at=datetime.now()) res.headers.add('Authorization',expired_token["result"]) return res,200 @app.errorhandler(400) def bad_request(error): return jsonify({"success":False,"error":400, "message":"bad request"}),400 @app.errorhandler(401) def unauthorized(error): return jsonify({"success":False,"error":401, "message":"unauthorized"}),401 @app.errorhandler(403) def forbidden(error): return jsonify({"success":False,"error":403, "message":"forbidden"}),403 @app.errorhandler(404) def not_found(error): return jsonify({"success":False,"error":404, "message":"not found"}),404 @app.errorhandler(405) def method_not_allowed(error): return 
jsonify({"success":False,"error":405, "message":"method not allowed"}),405 @app.errorhandler(422) def unprocessible(error): return jsonify({"success":False,"error":422, "message":"unprocessible"}),422 @app.errorhandler(500) def internal_server_error(error): return jsonify({"success":False,"error":500, "message":"internal server error"}),500 def test_only(): if testing == False: abort(404) return app if __name__ == '__main__': create_app().run()
nilq/baby-python
python
import pickle import numpy as np import torch from torch import Tensor from torch.utils.data import Dataset import arguments as args class CrepeDataset(Dataset): def __init__(self, data_path: str, sample_len: int, scaler, device: str ): """Dataset class for CREPE features of audios example one sample: (song_tensor, hum_tensor, song_id) Args: annotation_path: path to annotation file data_path: path to all data. Expected to be like (filename, crepe_tensor) sample_len: number of frequencies for each sample. sample longer than this number will be cut, shorter will be padded. device: cpu or cuda """ self.data_path = data_path self.sample_len = sample_len self.scaler = scaler self.device = device # load all data to RAM self.data = pickle.load(open(data_path, 'rb')) self._scale_data() self._cut_and_pad_if_necessary() def _scale_data(self,) -> None: """Scaling if self.scaler is not None, """ for i in range(len(self.data)): self.data[i] = list(self.data[i]) self.data[i][-2] = self.scaler(self.data[i][-2]) self.data[i][-1] = self.scaler(self.data[i][-1]) def _cut_and_pad_if_necessary(self,)-> None: for i in range(len(self.data)): # cut tail if longer than self.sample_len if self.data[i][-2].shape[0] > self.sample_len: self.data[i][-2] = self.data[i][-2][:self.sample_len] if self.data[i][-1].shape[0] > self.sample_len: self.data[i][-1] = self.data[i][-1][:self.sample_len] # pad tail if shorter than self.sample_len if self.data[i][-2].shape[0] < self.sample_len: padding_size = self.sample_len - self.data[i][-2].shape[0] padding_ = np.zeros(padding_size) self.data[i][-2] = np.concatenate([self.data[i][-2], padding_]) if self.data[i][-1].shape[0] < self.sample_len: padding_size = self.sample_len - self.data[i][-1].shape[0] padding_ = np.zeros(padding_size) self.data[i][-1] = np.concatenate([self.data[i][-1], padding_]) def __getitem__(self, index): # random crop 4secs here index = index%len(self.data) item = self.data[index] # cut_point = np.random.randint(0, args.sample_len - args.chunk_len*100) song_freq = item[-2] hum_freq = item[-1] return (torch.tensor(song_freq, dtype=torch.float, device=self.device), torch.tensor(hum_freq, dtype=torch.float, device=self.device), torch.tensor(item[0], dtype=torch.long)) def __len__(self): return len(self.data)*args.epoch_hack if __name__ == '__main__': mydataset = CrepeDataset(args.train_data_path, args.sample_len, args.scaler, args.device) dataloader = torch.utils.data.DataLoader(mydataset, args.batch_size, shuffle = True) for song_tensor, hum_tensor, music_ids in dataloader: print(song_tensor.shape) print(hum_tensor.shape) print( music_ids) print(song_tensor) print(hum_tensor) print(len(mydataset)) break
nilq/baby-python
python
def obter_dados_canal(lista): for _ in range(lista): nome,inscritos,monetizacao,ehpremium = input().split(';') inscritos = int(inscritos) monetizacao = float(monetizacao) ehpremium = ehpremium == 'sim' canais.append([nome, inscritos, monetizacao, ehpremium]) def calcular_bonificacao(valor_premium, valor_nao_premium): lista_de_bonificacao = [] for canal in canais: nome = canal[0] incrito = canal[1] valor_da_monetizacao = canal[2] ehpremium = canal[3] if (ehpremium): valor_da_monetizacao += incrito // 1000 * valor_premium else: valor_da_monetizacao += incrito // 1000 * valor_nao_premium lista_de_bonificacao.append([nome, valor_da_monetizacao]) return lista_de_bonificacao def exibir_bonificacao(bonus): print('-----') print('BÔNUS') print('-----') for bonificacao in bonus: nome = bonificacao[0] valor = bonificacao[1] print(f'{nome}: R$ {valor:.2f}') canais = [] quantidade_de_canais = int(input()) if (1 <= quantidade_de_canais <= 200): obter_dados_canal(quantidade_de_canais) valor_premium = float(input()) valor_nao_premium = float(input()) exibir_bonificacao(calcular_bonificacao(valor_premium, valor_nao_premium))
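# Worked example of the bonus rule above: a premium channel with 12,345 subscribers
# has 12 complete blocks of 1,000 subscribers, so with a premium rate of R$ 10.00
# per block its monetization of R$ 1000.00 grows to R$ 1120.00. The channel data
# below is made up for illustration and reuses the module's global `canais` list.
def exemplo_bonificacao():
    canais.clear()
    canais.append(['CanalExemplo', 12345, 1000.0, True])
    exibir_bonificacao(calcular_bonificacao(10.0, 4.0))
    # prints the BÔNUS header followed by: CanalExemplo: R$ 1120.00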
nilq/baby-python
python
# -*- coding: utf-8 -*-
from datetime import datetime

from sqlalchemy import Column
from sqlalchemy.dialects.mysql import INTEGER, VARCHAR, TINYINT, TIMESTAMP

from webspider import constants
from webspider.models.base import BaseModel


class JobModel(BaseModel):
    __tablename__ = 'job'

    id = Column(INTEGER, nullable=False, primary_key=True, autoincrement=True)
    lg_job_id = Column(INTEGER, nullable=False, doc=u'job id used by the upstream API')
    city_id = Column(INTEGER, nullable=False, doc=u'city id')
    company_id = Column(INTEGER, nullable=False, doc=u'company id')
    title = Column(VARCHAR(64), nullable=False, default='', doc=u'job title')
    work_year = Column(TINYINT, nullable=False, doc=u'required years of work experience')
    department = Column(VARCHAR(64), nullable=False, doc=u'hiring department')
    salary = Column(VARCHAR(32), nullable=False, doc=u'salary')
    education = Column(TINYINT, nullable=False, doc=u'required education level')
    nature = Column(TINYINT, nullable=False, doc=u'employment type')
    description = Column(VARCHAR(constants.JOB_DESCRIPTION_MAX_LEN), nullable=False, doc=u'additional description')
    advantage = Column(VARCHAR(constants.JOB_ADVANTAGE_MAX_LEN), nullable=False, doc=u'job perks')
    created_at = Column(TIMESTAMP, nullable=False, default=datetime.now, doc=u'job record creation time')
    updated_at = Column(TIMESTAMP, nullable=False, default=datetime.now, onupdate=datetime.now,
                        doc=u'job record last update time')
nilq/baby-python
python
""" :date_created: 2021-11-18 """ from do_py.abc import ABCRestrictions from db_able.base_model.database_abc import Database from db_able.client import DBClient @ABCRestrictions.require('save_params') class Savable(Database): """ This is a mixin designed to access DB with a standard method action, `save`. Supplants the "U" of CRUD. :requirement save_params: list or Params; usually load_params + create_params """ _is_abstract_ = True @classmethod def __compile__(cls): """ Extend compilation checks to validate defined params. """ super(Savable, cls).__compile__() cls._validate_params('save_params') def save(self): """ Save `DataObject`. Uses data in instance to update DB. Refer to `self.save_params` to see what fields are update-able. Expects to call the stored procedure: '%s_save' % cls.__name__, i.e. 'MyDataObject_save' Note: Standard Savable implementation uses Loadable internally in the stored procedure. Example: >>> from db_able import Loadable, Creatable, Savable, Params >>> from do_py import R >>> >>> class A(Creatable, Loadable, Savable): >>> db = 'schema_name' >>> _restrictions = { >>> 'id': R.INT, >>> 'x': R.INT.with_default(0), >>> 'y': R.INT.with_default(1) >>> } >>> load_params = Params('id') # version=2 allows versioning of the SP, i.e. `A_load_v2` >>> create_params = Params('x', 'y') >>> save_params = Params('id', 'x', 'y') >>> >>> a = A.create(x=1, y=2) >>> loaded = A.load(id=a.id) >>> assert a == loaded :rtype: bool """ stored_procedure = '%s_save%s' % (self.__class__.__name__, self.save_params.version) validated_args = self.kwargs_validator(*self.save_params, **self) with DBClient(self.db, stored_procedure, *validated_args, rollback=True) as conn: assert conn.data, 'DB response required for `%s`.`%s`.' % (self.db, stored_procedure) for row in conn.data: # Note: this is a weakness. Should always return one and only one row. self(data=row) return True
nilq/baby-python
python
from .orient import ImageOrienter
from recipes.dicts import pformat


class keep:
    pass


class CalibrationImage:
    """Descriptor class for calibration images"""

    # Orientation = ImageOrientBase

    def __init__(self, name):
        self.name = f'_{name}'

    def __get__(self, instance, owner):
        if instance is None:
            return self
        return getattr(instance, self.name)

    def __set__(self, instance, value):
        if value is keep:
            return
        if value is not None:
            # ensure consistent orientation
            # note getting array here!!
            assert len(value.shape) == 2, 'Calibration image must be 2d'
            value = value.oriented[:]
        setattr(instance, self.name, value)

    def __delete__(self, instance):
        setattr(instance, self.name, None)


class ImageCalibration(ImageOrienter):
    """
    Do calibration arithmetic for CCD images on the fly
    """

    # init the descriptors
    bias = CalibrationImage('bias')
    flat = CalibrationImage('flat')

    def __init__(self, hdu, bias=keep, flat=keep):
        super().__init__(hdu)
        self._bias = self._flat = None
        self.bias = bias
        self.flat = flat

    def __str__(self):
        return pformat(dict(bias=self.bias, flat=self.flat),
                       self.__class__.__name__)

    def __repr__(self):
        return str(self)

    def __call__(self, data):
        """
        Do calibration arithmetic on `data` ignoring orientation.

        Parameters
        ----------
        data : array-like
            Raw frame data.

        Returns
        -------
        array-like
            Bias-subtracted, flat-fielded data.
        """
        # debias
        if self.bias is not None:
            data = data - self.bias

        # flat field
        if self.flat is not None:
            data = data / self.flat

        return data

    def __getitem__(self, item):
        return self(super().__getitem__(item))
nilq/baby-python
python
import pandas as pd import numpy as np import math import json from tqdm import tqdm from time import time from datetime import datetime, timedelta import sys import warnings if not sys.warnoptions: warnings.simplefilter("ignore") import matplotlib.pyplot as plt import matplotlib.cm from matplotlib.colors import ListedColormap, LinearSegmentedColormap import geopy.distance import geopandas as gpd import contextily as ctx def saveFIG(filename='tmp.pdf'): import pylab as plt plt.subplots_adjust( top=1, bottom=0, right=1, left=0, hspace=0, wspace=0) plt.margins(0, 0) # plt.gca().xaxis.set_major_locator(plt.NullLocator()) # plt.gca().yaxis.set_major_locator(plt.NullLocator()) plt.savefig(filename, dpi=300, bbox_inches=0, transparent=True) return def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100): new_cmap = LinearSegmentedColormap.from_list( 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), cmap(np.linspace(minval, maxval, n))) return new_cmap def lighten_color(color, amount=0.5): """ By Ian Hincks from stack overflow Lightens the given color by multiplying (1-luminosity) by the given amount. Input can be matplotlib color string, hex string, or RGB tuple. Examples: >> lighten_color('g', 0.3) >> lighten_color('#F034A3', 0.6) >> lighten_color((.3,.55,.1), 0.5) """ import matplotlib.colors as mc import colorsys try: c = mc.cnames[color] except: c = color c = colorsys.rgb_to_hls(*mc.to_rgb(c)) return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2]) # =========================== DF column names ==========================START day_col = 'day' actual_event_col = 'actual_event' variable_col = 'target' source_col = 'source' predictin_col = 'predictions' lon_col = 'lon2' lat_col = 'lat2' source = None grace = 1 # =========================== DF column names ==========================END def df_intersect(df1, df2, columns=[]): df1__ = df1[columns] df2__ = df2[columns] df1__m = df1__.apply(lambda x: hash(tuple(x)), axis=1) df2__m = df2__.apply(lambda x: hash(tuple(x)), axis=1) df_ = df1[df1__m.isin(df2__m)] return df_ def df_setdiff(df1, df2, columns=[]): df1__ = df1[columns] df2__ = df2[columns] df1__m = df1__.apply(lambda x: hash(tuple(x)), axis=1) df2__m = df2__.apply(lambda x: hash(tuple(x)), axis=1) df_ = df1[~df1__m.isin(df2__m)] return df_ def df_union(df_1, df_2, columns=[], count_only=False): dfh_1 = df_1[columns].apply(lambda x: hash(tuple(x)), axis=1) dfh_2 = df_2[columns].apply(lambda x: hash(tuple(x)), axis=1) diff = df_1[~dfh_1.isin(dfh_2)] union = pd.concat([diff, df_2], axis=0, sort=False) if count_only: return len(union) else: return union def transCMAP(cmap=plt.cm.RdBu,linear=True): cmap1 = cmap(np.arange(cmap.N)) if linear: cmap1[:,-1] = np.linspace(0, 1, cmap.N) else: cmap1[:,-1] = np.logspace(0, 1, cmap.N) return ListedColormap(cmap1) def getHausdorf(coord,pt): return np.min([geopy.distance.distance(pt,i).miles for i in coord]) def getHausdorf_df(df, pt, EPS=0.0001): if len(df) == 0: return np.inf, [] while True: T = [tuple(i) for i in df[(np.abs(df.lat-pt[0])<EPS) & (np.abs(df.lon-pt[1])<EPS)].values] if len(T)>0: break else: EPS=2*EPS return getHausdorf(T,tuple(pt)),T def get_intensity(intensity,lon_mesh,lat_mesh,pt_,sigma=3,radius=2): ''' single point spread calculation with Gaussian diffusion ''' lon_del=lon_mesh[0,:] lat_del=lat_mesh[:,0] lon_index=np.arange(len(lon_del))[(pt_[1]-lon_del<radius)*(pt_[1]-lon_del>-radius)] lat_index=np.arange(len(lat_del))[(pt_[0]-lat_del<radius)*(pt_[0]-lat_del>-radius)] mu=np.mean(lon_index) bins=lon_index 
intensity_lon=1/(sigma*np.sqrt(2*np.pi))*np.exp(-(bins - mu)**2/(2 * sigma**2)) mu=np.mean(lat_index) bins=lat_index intensity_lat=1/(sigma*np.sqrt(2*np.pi))*np.exp(-(bins - mu)**2/(2 * sigma**2)) for i in np.arange(len(lon_index)): for j in np.arange(len(lat_index)): intensity[lat_index[j],lon_index[i]]=intensity[lat_index[j],lon_index[i]] +intensity_lon[i]*intensity_lat[j] return intensity def get_mesh(df0,lat_min,lat_max,lon_min,lon_max,radius=2,detail=0.25): coord_=df0[[lat_col,lon_col]].values lon_grid=np.arange(lon_min-radius,lon_max+radius,detail) lat_grid=np.arange(lat_min-radius,lat_max+radius,detail) lon_mesh,lat_mesh=np.meshgrid(lon_grid,lat_grid) return lon_mesh,lat_mesh,coord_ def get_prediction( df, days, types, lat_min, lat_max, lon_min, lon_max, sigma=3.5, #=======YI made sigma a parameter radius=0.01, detail=0.2, Z=1.0, miles=50 #=======YI made miles in spatial relaxation a paramter ): # =========================== DF column names ==========================START day_col = 'day' actual_event_col = 'actual_event' variable_col = 'target' source_col = 'source' predictin_col = 'predictions' lon_col = 'lon2' lat_col = 'lat2' source = None grace = 1 # =========================== DF column names ==========================END df = df[df[day_col].between(days - grace,days + grace)] df = df[df[variable_col].isin(types)] # df = df[df[source_col] == source] df_gnd = df[(df[day_col]==days) & (df[actual_event_col]==1)] df_prd0 = df[(df[day_col]==days) & (df[predictin_col]==1)] df_prd1 = df[(df[day_col]==days - grace) & (df[predictin_col]==1)] df_prd2 = df[(df[day_col]==days + grace) & (df[predictin_col]==1)] df_prd0_tp = df_prd0[df_prd0[actual_event_col]==1] # UPDXX calculate tp df_gndB = df[(df[day_col]==days-grace) & (df[actual_event_col]==1)] df_gndF = df[(df[day_col]==days+grace) & (df[actual_event_col]==1)] df_tpB = df_intersect(df_prd0,df_gndB, columns=[lat_col, lon_col]) df_tpF = df_intersect(df_prd0,df_gndF, columns=[lat_col, lon_col]) df_tp = df_union( df_union(df_prd0_tp, df_tpB, columns=[lat_col, lon_col]), df_tpF, columns=[lat_col, lon_col]) tp = df_tp.index.size df_fp = df_setdiff(df_prd0,df_tp,columns=[lat_col, lon_col]) fp = df_fp.index.size df_fn0 = df[(df[day_col]==days) & (df[actual_event_col]==1) & (df[predictin_col]==0)] df_fn1 = df[(df[day_col]==days - grace) & (df[predictin_col]==0)] df_fn2 = df[(df[day_col]==days + grace) & (df[predictin_col]==0)] df_fn = df_intersect(df_intersect(df_fn0,df_fn1,columns=[lat_col, lon_col]), df_fn2,columns=[lat_col, lon_col]) fn= df_fn.index.size print('tmporal comp: --> ', 'tp ',tp, ' fp ', fp, ' fn ',fn) # SPATIAL ADJUSTMENT lon_grid = np.arange(lon_min - radius, lon_max + radius, detail) lat_grid = np.arange(lat_min - radius, lat_max + radius, detail) lon_mesh, lat_mesh = np.meshgrid(lon_grid,lat_grid) lon_mesh0, lat_mesh0, coord_= get_mesh( df_prd0, lat_min, lat_max, lon_min, lon_max, radius=radius, detail=detail) intensity = np.zeros(lat_mesh0.shape) for i in coord_: intensity = get_intensity( intensity, lon_mesh0, lat_mesh0, i, sigma=sigma, radius=radius) intensity0 = np.multiply(intensity, (intensity > Z)) intensity0=(1. 
/ intensity0.max()) * intensity0 lon_del=lon_mesh0[0,:] lat_del=lat_mesh0[:,0] A=(intensity0>Z).nonzero() coordNZ=[(lat_del[A[0][i]],lon_del[A[1][i]]) for i in np.arange(len(A[0]))] df_cnz=pd.DataFrame(coordNZ,columns=['lat','lon']) xgfp = np.array([getHausdorf_df(df_cnz,tuple(i),EPS=0.01)[0] for i in (df_fp[[lat_col,lon_col]].drop_duplicates().values)]) fp = np.sum(xgfp < miles) xgfn = np.array([getHausdorf_df(df_cnz, tuple(i), EPS=0.01)[0] for i in (df_fn[[lat_col,lon_col]].drop_duplicates().values)]) fn = np.sum(xgfn > 2 * miles) df_tp_0 = df_intersect(df_tp, df_prd0,columns=[lat_col, lon_col]) return fn, tp, fp, tp/(tp+fp), tp/(tp+fn), lon_mesh0, lat_mesh0, intensity, intensity0, df_gnd, df_fn,df_tp,df_fp,df_tp_0
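# Small self-contained check of the hash-based set helpers defined above: two tiny
# frames that share exactly one (lat, lon) pair.
if __name__ == "__main__":
    a = pd.DataFrame({"lat": [41.8, 41.9], "lon": [-87.6, -87.7], "v": [1, 2]})
    b = pd.DataFrame({"lat": [41.8, 42.0], "lon": [-87.6, -87.8], "v": [3, 4]})
    print(len(df_intersect(a, b, columns=["lat", "lon"])))          # 1 shared row
    print(len(df_setdiff(a, b, columns=["lat", "lon"])))            # 1 row only in a
    print(df_union(a, b, columns=["lat", "lon"], count_only=True))  # 3 distinct rows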
nilq/baby-python
python
import random from donphan.utils import not_creatable from tests.utils import async_test from donphan import Column, Table, SQLType from unittest import TestCase class _TestAlterColumnsTable(Table): a: Column[SQLType.Text] = Column(primary_key=True) class AlterColumnsTest(TestCase): def test_query_drop_column(self): assert ( _TestAlterColumnsTable._query_drop_column(_TestAlterColumnsTable._columns_dict["b"]) == r"ALTER TABLE public.__test_alter_columns_table DROP COLUMN b" ) def test_query_add_column(self): column = Column.create("b", SQLType.Text) assert ( _TestAlterColumnsTable._query_add_column(column) == r"ALTER TABLE public.__test_alter_columns_table ADD COLUMN b TEXT" ) @async_test async def test_a_table_create(self): await _TestAlterColumnsTable.create(None) @async_test async def test_c_table_add_column(self): column = Column.create("b", SQLType.Text) await _TestAlterColumnsTable.add_column(None, column) column = Column.create("c", SQLType.Text) await _TestAlterColumnsTable.add_column(None, column) @async_test async def test_d_table_drop_column(self): await _TestAlterColumnsTable.drop_column(None, _TestAlterColumnsTable._columns_dict["b"]) @async_test async def test_e_table_migrate(self): @not_creatable class Migrator(Table, _name="__test_alter_columns_table"): a: Column[SQLType.Text] = Column(primary_key=True) b: Column[SQLType.Text] await _TestAlterColumnsTable.migrate_to(None, Migrator) @async_test async def test_f_table_delete(self): await _TestAlterColumnsTable.drop(None)
nilq/baby-python
python
import torch class AutocastCPUTestLists(object): # Supplies ops and arguments for test_autocast_* in test/test_cpu.py def __init__(self, dev): super().__init__() n = 8 # Utility arguments, created as one-element tuples pointwise0_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) pointwise1_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) pointwise2_bf16 = (torch.randn(n, dtype=torch.bfloat16, device=dev),) mat0_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) mat1_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) mat2_bf16 = (torch.randn((n, n), dtype=torch.bfloat16, device=dev),) dummy_dimsets = ((n,), (n, n), (n, n, n), (n, n, n, n), (n, n, n, n, n)) dummy_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev),) for dimset in dummy_dimsets] dimsets = ((n, n, n), (n, n, n, n), (n, n, n, n, n)) conv_args_bf16 = [(torch.randn(dimset, dtype=torch.bfloat16, device=dev), torch.randn(dimset, dtype=torch.bfloat16, device=dev)) for dimset in dimsets] conv_args_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev), torch.randn(dimset, dtype=torch.float32, device=dev)) for dimset in dimsets] bias_fp32 = (torch.randn((n,), dtype=torch.float32, device=dev),) element0_fp32 = (torch.randn(1, dtype=torch.float32, device=dev),) pointwise0_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) pointwise1_fp32 = (torch.randn(n, dtype=torch.float32, device=dev),) mat0_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) mat1_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) mat2_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) mat3_fp32 = (torch.randn((n, n), dtype=torch.float32, device=dev),) dummy_fp32 = [(torch.randn(dimset, dtype=torch.float32, device=dev),) for dimset in dummy_dimsets] # The lists below organize ops that autocast needs to test. # self.list_name corresponds to test_autocast_list_name in test/test_cpu.py. # Each op is associated with a tuple of valid arguments. # Some ops implement built-in type promotion. These don't need autocasting, # but autocasting relies on their promotion, so we include tests to double-check. self.torch_expect_builtin_promote = [ ("eq", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("ge", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("gt", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("le", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("lt", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("ne", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("add", pointwise0_fp32 + pointwise1_bf16, torch.float32), ("div", pointwise0_fp32 + pointwise1_bf16, torch.float32), ("mul", pointwise0_fp32 + pointwise1_bf16, torch.float32), ] self.methods_expect_builtin_promote = [ ("__eq__", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("__ge__", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("__gt__", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("__le__", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("__lt__", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("__ne__", pointwise0_fp32 + pointwise1_bf16, torch.bool), ("__add__", pointwise0_fp32 + pointwise1_bf16, torch.float32), ("__div__", pointwise0_fp32 + pointwise1_bf16, torch.float32), ("__mul__", pointwise0_fp32 + pointwise1_bf16, torch.float32), ] # The remaining lists organize ops that autocast treats explicitly. 
self.torch_bf16 = [ ("conv1d", conv_args_fp32[0]), ("conv2d", conv_args_fp32[1]), ("conv3d", conv_args_fp32[2]), ("conv_transpose1d", conv_args_fp32[0]), ("conv_transpose2d", conv_args_fp32[1]), ("conv_transpose3d", conv_args_fp32[2]), ("bmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32))), ("mm", mat0_fp32 + mat1_fp32), ("baddbmm", (torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32))), ("addmm", mat1_fp32 + mat2_fp32 + mat3_fp32), ("addbmm", mat0_fp32 + (torch.randn((n, n, n), device=dev, dtype=torch.float32), torch.randn((n, n, n), device=dev, dtype=torch.float32))), ] self.torch_fp32 = [ # ("instance_norm", dummy_bf16[2], {"weight": None, "bias": None, "running_mean": torch.rand((n), dtype=torch.float32), # "running_var": torch.rand((n), dtype=torch.float32), "use_input_stats": False, # "momentum": 0.1, "eps": 1e-5, "cudnn_enabled": False}), ("fmod", (torch.tensor([[1, 2, 3, 4]], dtype=torch.bfloat16), 1.5)), ("prod", torch.tensor([[1, 2, 3, 4]], dtype=torch.bfloat16)), ("quantile", (torch.tensor([0.1, 0.2, 0.3, 0.4], dtype=torch.bfloat16), torch.tensor([0.1, 0.2, 0.3, 0.4], dtype=torch.bfloat16))), ("nanquantile", (torch.tensor([0.1, 0.2, 0.3, 0.4], dtype=torch.bfloat16), torch.tensor([0.1, 0.2, 0.3, 0.4], dtype=torch.bfloat16))), ("stft", (torch.tensor([[1, 2, 3, 4]], dtype=torch.bfloat16), 1, 1)), ("cdist", (dummy_bf16[1][0], dummy_bf16[1][0])), ("cumprod", (torch.tensor([[1, 2, 3, 4]], dtype=torch.bfloat16), 1)), ("cumsum", (torch.tensor([[1, 2, 3, 4]], dtype=torch.bfloat16), 1)), ("diag", (torch.tensor([[1, 2, 3, 4]], dtype=torch.bfloat16), 1)), ("diagflat", (torch.tensor([[1, 2, 3, 4]], dtype=torch.bfloat16), 1)), ("histc", (torch.tensor([[1, 2, 3, 4]], dtype=torch.bfloat16), 1)), ("logcumsumexp", (torch.tensor([[1, 2, 3, 4]], dtype=torch.bfloat16), 1)), ("vander", (torch.tensor([[1, 2, 3, 4]], dtype=torch.bfloat16))), ("inverse", mat2_bf16), ("pinverse", mat2_bf16), ("max_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}), ("group_norm", torch.randn(1, 6, 10, 10).to(torch.bfloat16), {"num_groups": 1}), ("conv_tbc", (torch.randn(2, 1, 8).to(torch.bfloat16), torch.randn(3, 8, 8).to(torch.bfloat16), dummy_bf16[0][0])), ] self.nn_bf16 = [ ("linear", mat0_fp32 + mat1_fp32), ] self.fft_fp32 = [ ("fft_fft", torch.randn(1, 4).to(torch.bfloat16)), ("fft_ifft", torch.randn(1, 4).to(torch.bfloat16)), ("fft_fft2", torch.randn(1, 4).to(torch.bfloat16), {"dim": -1}), ("fft_ifft2", torch.randn(1, 4).to(torch.bfloat16), {"dim": -1}), ("fft_fftn", torch.randn(1, 4).to(torch.bfloat16)), ("fft_ifftn", torch.randn(1, 4).to(torch.bfloat16)), ("fft_rfft", torch.randn(1, 4).to(torch.bfloat16)), ("fft_irfft", torch.randn(1, 4).to(torch.bfloat16)), ("fft_rfft2", torch.randn(1, 4).to(torch.bfloat16), {"dim": -1}), ("fft_irfft2", torch.randn(1, 4).to(torch.bfloat16), {"dim": -1}), ("fft_rfftn", torch.randn(1, 4).to(torch.bfloat16)), ("fft_irfftn", torch.randn(1, 4).to(torch.bfloat16)), ("fft_hfft", torch.randn(1, 4).to(torch.bfloat16)), ("fft_ihfft", torch.randn(1, 4).to(torch.bfloat16)), ] self.special_fp32 = [ ] self.linalg_fp32 = [ ("linalg_matrix_norm", dummy_bf16[2]), ("linalg_cond", dummy_bf16[2]), ("linalg_matrix_rank", dummy_bf16[2]), ("linalg_solve", dummy_bf16[2], {"other": dummy_bf16[2][0]}), ("linalg_cholesky", torch.mm(dummy_bf16[1][0], dummy_bf16[1][0].t()).reshape(1, 8, 8)), 
("linalg_svdvals", dummy_bf16[2]), ("linalg_eigvals", dummy_bf16[2]), ("linalg_eigvalsh", dummy_bf16[2]), ("linalg_inv", dummy_bf16[2]), ("linalg_householder_product", (dummy_bf16[1][0], dummy_bf16[0][0])), ("linalg_tensorinv", dummy_bf16[1], {"ind": 1}), ("linalg_tensorsolve", (torch.eye(2 * 3 * 4).reshape((2 * 3, 4, 2, 3, 4)).to(torch.bfloat16), torch.randn(2 * 3, 4).to(torch.bfloat16))), ("linalg_qr", dummy_bf16[1]), ("linalg_cholesky_ex", dummy_bf16[1]), ("linalg_svd", dummy_bf16[1]), ("linalg_eig", dummy_bf16[1]), ("linalg_eigh", dummy_bf16[1]), ("linalg_lstsq", (dummy_bf16[1][0], dummy_bf16[1][0])), ] self.nn_fp32 = [ ("avg_pool3d", dummy_bf16[3], {"kernel_size": (3, 3, 3), "stride": (1, 1, 1)}), ("binary_cross_entropy", (torch.rand((n, n), device=dev, dtype=torch.bfloat16),) + (torch.rand((n, n), device=dev, dtype=torch.bfloat16),)), ("reflection_pad1d", dummy_bf16[2], {"padding": (3, 3)}), ("adaptive_avg_pool3d", torch.randn(1, 64, 10, 9, 8).to(torch.bfloat16), {"output_size": 7}), ("reflection_pad1d", torch.arange(8, dtype=torch.float).reshape(1, 2, 4).to(torch.bfloat16), {"padding": 2}), ("reflection_pad2d", torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3).to(torch.bfloat16), {"padding": 2}), ("replication_pad1d", torch.arange(8, dtype=torch.float).reshape(1, 2, 4).to(torch.bfloat16), {"padding": 2}), ("replication_pad2d", torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3).to(torch.bfloat16), {"padding": 2}), ("replication_pad3d", torch.arange(1 * 3 * 8 * 320 * 480, dtype=torch.float).reshape(1, 3, 8, 320, 480).to(torch.bfloat16), {"padding": 3}), ("mse_loss", (torch.randn(3, 5, requires_grad=True).to(torch.bfloat16), torch.randn(3, 5).to(torch.bfloat16))), ] self.torch_need_autocast_promote = [ ("cat", (pointwise0_bf16 + pointwise1_fp32,)), ("stack", (pointwise0_bf16 + pointwise1_fp32,)), ("index_copy", (torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.bfloat16), 0, torch.tensor([0, 1, 2]), torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float))), ] self.blacklist_non_float_output_pass_test = [ ] self.torch_fp32_multi_output = [ ("eig", (torch.randn(10, 10).to(torch.bfloat16), True)), ("geqrf", (torch.randn(10, 10).to(torch.bfloat16), )), ("lstsq", (torch.randn(10, 10).to(torch.bfloat16), torch.randn(10, 10).to(torch.bfloat16))), ("_lu_with_info", (torch.randn(10, 10).to(torch.bfloat16), True)), ("qr", (torch.randn(10, 10).to(torch.bfloat16), True)), ("solve", (torch.randn(10, 10).to(torch.bfloat16), torch.randn(10, 10).to(torch.bfloat16))), ("svd", (torch.randn(10, 10).to(torch.bfloat16), True)), ("symeig", (torch.randn(10, 10).to(torch.bfloat16), True)), ("triangular_solve", (torch.randn(10, 10).to(torch.bfloat16), torch.randn(10, 10).to(torch.bfloat16))), ("adaptive_max_pool3d", (torch.randn(100, 100, 100, 100).to(torch.bfloat16), (13, 13, 13))), ] self.nn_fp32_multi_output = [ ("fractional_max_pool2d", (torch.randn(100, 100, 100).to(torch.bfloat16), 2, (13, 12), torch.randn(10, 10, 10))), ("fractional_max_pool3d", (torch.randn(100, 100, 100, 100).to(torch.bfloat16), 2, (13, 12, 1), torch.randn(10, 10, 10))), ]
nilq/baby-python
python
from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import glob
import skimage.io as io
import skimage.transform as trans

khong = [0,0,0]
vua = [0,0,128]
nang = [0,128,0]
ratnang = [128,128,0]
lut = [128,0,0]

COLOR_DICT = np.array([khong, vua, nang, ratnang, lut])

# Normalizes the pixel values of the training images and labels; the reshaping
# below is what enables multi-class prediction.
def adjustData(img,mask,flag_multi_class,num_class):
    if(flag_multi_class):
        img = img / 255
        # shorthand if/else over (batch_size, width, height)
        mask = mask[:,:,:,0] if(len(mask.shape) == 4) else mask[:,:,0]
        # np.zeros takes a shape tuple; appending (num_class,) extends the mask with
        # one channel per class, giving a one-hot layout along the class axis
        new_mask = np.zeros(mask.shape + (num_class,))
        for i in range(num_class):
            #for one pixel in the image, find the class in mask and convert it into one-hot vector
            #index = np.where(mask == i)
            #index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)
            #new_mask[index_mask] = 1
            new_mask[mask == i,i] = 1
        new_mask = np.reshape(new_mask,(new_mask.shape[0],new_mask.shape[1]*new_mask.shape[2],new_mask.shape[3])) if flag_multi_class else np.reshape(new_mask,(new_mask.shape[0]*new_mask.shape[1],new_mask.shape[2]))
        mask = new_mask
    elif(np.max(img) > 1):
        img /= 255
        mask = mask /255
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
    return (img,mask)


def trainGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,image_color_mode = "grayscale",
                    mask_color_mode = "grayscale",image_save_prefix = "image",mask_save_prefix = "mask",
                    flag_multi_class = False,num_class = 2,save_to_dir = None,target_size = (256,256),seed = 1):
    '''
    can generate image and mask at the same time
    use the same seed for image_datagen and mask_datagen to ensure the transformations for image and mask are the same
    if you want to visualize the results of the generator, set save_to_dir = "your path"
    '''
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(
        train_path,  # path of the training directory
        classes = [image_folder],  # which subfolder (class) to read and augment
        class_mode = None,  # do not return labels
        color_mode = image_color_mode,  # grayscale, single-channel mode
        target_size = target_size,  # target image size after the transformation
        batch_size = batch_size,  # number of images produced per batch
        save_to_dir = save_to_dir,  # directory to save the generated images to
        save_prefix = image_save_prefix,  # prefix of the generated images; only used when save_to_dir is given
        seed = seed)
    mask_generator = mask_datagen.flow_from_directory(
        train_path,
        classes = [mask_folder],
        class_mode = None,
        color_mode = mask_color_mode,
        target_size = target_size,
        batch_size = batch_size,
        save_to_dir = save_to_dir,
        save_prefix = mask_save_prefix,
        seed = seed)
    train_generator = zip(image_generator, mask_generator)  # combine the two into one generator
    # with a batch size of 2, two images are returned at a time, i.e. img is an array of 2 grayscale images, [2,256,256]
    for (img,mask) in train_generator:
        img,mask = adjustData(img,mask,flag_multi_class,num_class)
        yield (img,mask)
    # two images and two labels are produced per step; see the Python docs on `yield` if generators are unfamiliar
# The function above builds a data-augmenting image generator; it is a convenient way to produce images continuously


def testGenerator(test_path,num_image = 21,target_size = (256,256),flag_multi_class = False,as_gray = True):
    for i in range(num_image):
        img = io.imread(os.path.join(test_path,"%d.png"%i),as_gray = as_gray)
        img = img / 255
        img = trans.resize(img,target_size)
        img = np.reshape(img,img.shape+(1,)) if (not flag_multi_class) else img
        img = np.reshape(img,(1,)+img.shape)
        yield img


def geneTrainNpy(image_path,mask_path,flag_multi_class = False,num_class = 2,image_prefix = "image",mask_prefix = "mask",image_as_gray = True,mask_as_gray = True):
    image_name_arr = glob.glob(os.path.join(image_path,"%s*.png"%image_prefix))
    # equivalent to a file search: collect the files whose names match the pattern in the path
    image_arr = []
    mask_arr = []
    for index,item in enumerate(image_name_arr):
        img = io.imread(item,as_gray = image_as_gray)
        img = np.reshape(img,img.shape + (1,)) if image_as_gray else img
        mask = io.imread(item.replace(image_path,mask_path).replace(image_prefix,mask_prefix),as_gray = mask_as_gray)
        # look up the corresponding mask (label) image in the mask_path directory
        mask = np.reshape(mask,mask.shape + (1,)) if mask_as_gray else mask
        img,mask = adjustData(img,mask,flag_multi_class,num_class)
        image_arr.append(img)
        mask_arr.append(mask)
    image_arr = np.array(image_arr)
    mask_arr = np.array(mask_arr)
    return image_arr,mask_arr
# This function searches the training image and label directories, adds a channel dimension, and returns the data
# as arrays; it is the way to read a directory of data when data augmentation is not used


def labelVisualize(num_class,color_dict,img):
    img = img[:,:,0] if len(img.shape) == 3 else img
    img_out = np.zeros(img.shape + (3,))
    # move to RGB space, since the per-class colours can only be shown in RGB
    for i in range(num_class):
        img_out[img == i,:] = color_dict[i]
        # give each class its own colour: color_dict[i] is the colour assigned to class i, and
        # img_out[img == i,:] selects the pixels of img_out where img equals class i
    return img_out / 255
# The function above colours the prediction output; it only matters in the multi-class case
# and is not needed for binary (two-class) segmentation


def saveResult(save_path,npyfile,flag_multi_class = False,num_class = 2):
    for i,item in enumerate(npyfile):
        img = labelVisualize(num_class,COLOR_DICT,item) if flag_multi_class else item[:,:,0]
        io.imsave(os.path.join(save_path,"%d_predict.png"%i),img)
        # with multiple classes the saved image is coloured; with only two classes it is black and white
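# Usage sketch (directory names and augmentation values are hypothetical): build an
# augmenting generator over a Keras-style folder layout and pull one batch from it.
if __name__ == "__main__":
    data_gen_args = dict(rotation_range=0.2, width_shift_range=0.05,
                         height_shift_range=0.05, zoom_range=0.05,
                         horizontal_flip=True, fill_mode='nearest')
    gen = trainGenerator(2, 'data/train', 'image', 'label', data_gen_args, save_to_dir=None)
    img, mask = next(gen)
    print(img.shape, mask.shape)   # (2, 256, 256, 1) for both in the binary case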
nilq/baby-python
python
# -*- coding: utf-8 -*- # import general neural network model from .dnn import NN # import multilayer perceptron model from .mlp import * # import NEAT model from .neat_model import NEATModel # import convolutional neural network from .cnn import * # import recurrent neural network from .rnn import * # import auto-encoder from .ae import * # import variational auto-encoder from .vae import * # import generative adversarial networks from .gan import *
nilq/baby-python
python
import django_tables2 as tables from nautobot.utilities.tables import ( BaseTable, ButtonsColumn, ToggleColumn, ) from dummy_plugin.models import DummyModel class DummyModelTable(BaseTable): """Table for list view of `DummyModel` objects.""" pk = ToggleColumn() name = tables.LinkColumn() actions = ButtonsColumn(DummyModel) class Meta(BaseTable.Meta): model = DummyModel fields = ["pk", "name", "number"]
nilq/baby-python
python
from stonehenge import Application, Route, Router, run from stonehenge.modules import DefaultModules from stonehenge.admin import AdminRouter from stonehenge.cms import CMSRouter from blog import BlogModule from handlers import home, about, portfolio, subpage, blog_handler, user_handler class App(Application): modules = DefaultModules + [ BlogModule(), ] router = Router( routes=[ Route(methods=["GET"], path="/", handler=home), Route(methods=["GET"], path="/about", handler=about), Route(methods=["GET"], path="/portfolio", handler=portfolio), Route( methods=["GET"], path="/company/:company_id<int>/user/:username<str>/", handler=user_handler, ), Route(methods=["GET"], path="/blog/:slug<slug>/", handler=blog_handler), Router( path="/pages", routes=[ Route(methods=["GET"], path="/subpage", handler=subpage), ], ), AdminRouter(path="/secret-hidden-admin"), CMSRouter(), ], request_middlewares=[], response_middlewares=[], ) app = App() if __name__ == "__main__": run(app)
nilq/baby-python
python
import pytorch_lightning as pl from pytorch_lightning import loggers from l5kit.configs import load_config_data from raster.lyft import LyftTrainerModule, LyftDataModule from pathlib import Path import argparse import torch from raster.utils import boolify import pandas as pd parser = argparse.ArgumentParser(description='Manage running job') parser.add_argument('--seed', type=int, default=313, help='random seed to use') parser.add_argument('--config', type=str, help='config yaml path') parser.add_argument('--checkpoint-path', type=str, default=None, help='initial weights to transfer on') parser.add_argument('--challenge-submission', type=boolify, default=False, help='whether test is for challenge submission') parser.add_argument('--test-csv-path', type=str, default=None, help='where to save result of test') parser.add_argument('--test-mask-path', type=str, default=None, help='mask applied over test') parser = LyftTrainerModule.add_model_specific_args(parser) parser = LyftDataModule.add_model_specific_args(parser) parser = pl.Trainer.add_argparse_args(parser) if __name__ == '__main__': args = parser.parse_args() # initializing various parts pl.seed_everything(args.seed) # initializing training trainer = pl.Trainer.from_argparse_args(args, checkpoint_callback=False, logger=False) config = load_config_data(args.config) args_dict = vars(args) args_dict['config'] = config training_procedure = LyftTrainerModule.load_from_checkpoint(checkpoint_path=args_dict['checkpoint_path'], test_csv_path =args_dict['test_csv_path']) args_dict['config'] = training_procedure.hparams.config training_procedure.datamodule = LyftDataModule(**args_dict) trainer.test(training_procedure) if args_dict['challenge_submission']: validate_csv = pd.read_csv(args_dict['test_csv_path'] + "/full_result.csv") validate_csv.pop('idx') validate_csv.pop('grads/semantics') validate_csv.pop('grads/vehicles') validate_csv.pop('grads/total') validate_csv.pop('nll') validate_csv.pop('loss') validate_csv.to_csv(index=False, path_or_buf=args_dict['test_csv_path'] + "/submission.csv")
nilq/baby-python
python
# Copyright (c) 2018 Steven R. Brandt # Copyright (c) 2018 R. Tohid # # Distributed under the Boost Software License, Version 1.0. (See accompanying # file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) import phylanx from phylanx import Phylanx @Phylanx def sumn(): n = 0 sum = 0 for n in range(4): sum += n return sum assert sumn() == 6 @Phylanx def sumn2(): n = 0 sum = 0 for n in range(1, 4): sum += n return sum assert sumn2() == 6 @Phylanx def sumn3(): n = 0 sum = 0 c = 0 for n in range(3, 0, -1): sum += n c += 1 return sum + c assert sumn3() == 9
nilq/baby-python
python
from django.apps import AppConfig class ScannerappConfig(AppConfig): name = 'scannerapp'
nilq/baby-python
python
""" Developed by : Adem Boussetha Email : ademboussetha@gmail.com """ import cv2 import datetime import os face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_default.xml') # Read the input image #img = cv2.imread('test.png') cap = cv2.VideoCapture(0) print ("you're gonna be added to db face recognition.") name = input("enter your name please : ") dirname='images/'+name os.makedirs(dirname) while True: _, img = cap.read() gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.1, 4) for (x, y , w ,h) in faces: cv2.rectangle(img, (x,y), (x+w, y+h), (255, 0 , 0), 3) filename= f'{name}-'+str(datetime.datetime.now()).replace(" ","_")+'.png' print(filename) dirname= dirname+"/" print(dirname) isWritten =cv2.imwrite(os.path.join(dirname,filename),img[y:y+h,x:x+w]) if isWritten: print("image is successfully saved as file") # Display the output cv2.imshow('img', img) if cv2.waitKey(1) & 0xFF == ord('q'): break cap.release()
nilq/baby-python
python
""" 523. Continuous Subarray Sum Given a list of non-negative numbers and a target integer k, write a function to check if the array has a continuous subarray of size at least 2 that sums up to the multiple of k, that is, sums up to n*k where n is also an integer. Example 1: Input: [23, 2, 4, 6, 7], k=6 Output: True Explanation: Because [2, 4] is a continuous subarray of size 2 and sums up to 6. Example 2: Input: [23, 2, 6, 4, 7], k=6 Output: True Explanation: Because [23, 2, 6, 4, 7] is an continuous subarray of size 5 and sums up to 42. Note: The length of the array won't exceed 10,000. You may assume the sum of all the numbers is in the range of a signed 32-bit integer. """ class Solution: def checkSubarraySum(self, nums, k): """ :type nums: List[int] :type k: int :rtype: bool """ st = set() cur, pre = 0, 0 for i in nums: cur, pre = (cur + i) % k if k!=0 else cur + i, cur if cur in st: return True st.add(pre) return False class Solution: def checkSubarraySum(self, nums, k): """ :type nums: List[int] :type k: int :rtype: bool """ dic = collections.defaultdict(int) cur = 0 dic[0] = -1 for i, v in enumerate(nums): cur = (cur + v) % k if k!=0 else cur + v if cur in st and i - dic[cur]> 1: return True dic[cur] = i return False class Solution: def checkSubarraySum(self, nums, k): dic = collections.defaultdict(int) cur = 0 dic[0] = -1 for i, v in enumerate(nums): cur = (cur + v) % k if k!=0 else cur + v if cur in dic and i - dic[cur]> 1: return True elif cur not in dic: dic[cur] = i return False
nilq/baby-python
python
#!/usr/bin/env python3 import xmlrpc.client import time test1 = xmlrpc.client.ServerProxy('http://localhost:8081') print(test1.system.listMethods()) test1.start_trial() test1.turnLeft() test1.turnRight() test1.end_trial()
nilq/baby-python
python
class Solution: def missingNumber(self, nums: [int]) -> int: nums_set = set(nums) for i in range(len(nums) + 1): if i not in nums_set: return i
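
# Hypothetical usage check plus an O(1) extra-space alternative for comparison:
# the sum 0 + 1 + ... + n equals n * (n + 1) / 2, so the expected total minus the
# actual total is exactly the missing number. Added for illustration only.
def missing_number_by_sum(nums: [int]) -> int:
    n = len(nums)
    return n * (n + 1) // 2 - sum(nums)


assert Solution().missingNumber([3, 0, 1]) == 2
assert missing_number_by_sum([3, 0, 1]) == 2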
nilq/baby-python
python
#
#                                                                  gemini_python
#
#                                                        recipe_system.reduction
#                                                               reduceActions.py
# ------------------------------------------------------------------------------
"""
This module provides a number of "action" classes, subclassed from the
argparse.Action class. These classes only override the __call__() method.

This action class library supplies ad hoc functionality to DPDG requirements
on the reduce command line interface.

Action classes provided:

    PosArgAction          - positional argument
    BooleanAction         - optional switches
    UnitaryArgumentAction - single value options
    ParameterAction       - user parameters (-p, --param)
    CalibrationAction     - user calibration services (--user_cal)

Because of requirements on the reduce interface, any new reduce options should
specify one of these actions in the add_argument() call. But only one (1)
PosArgAction should occur in a given parser.

These actions may be used in the add_argument() method call, such as,

    parser.add_argument('-f', '--foo', action=BooleanAction,
                        help="Switch on foo.")

"""
from argparse import Action


class PosArgAction(Action):
    def __call__(self, parser, namespace, values, option_string=None):
        if values:
            setattr(namespace, self.dest, values)
        return


class BooleanAction(Action):
    def __call__(self, parser, namespace, values, option_string=None):
        # 'values' is a list, which may have accumulated pos args
        _pos_args = []
        _switch_state = bool(getattr(namespace, self.dest))
        _pos_args.extend([f for f in values if ".fits" in f])

        # Configure namespace with new files
        if _pos_args:
            setattr(namespace, 'files', _pos_args)

        # Toggle switch.
        setattr(namespace, self.dest, not _switch_state)
        return


class UnitaryArgumentAction(Action):
    def __call__(self, parser, namespace, values, option_string=None):
        # 'values' is a list, which may have accumulated pos args
        _pos_args = []
        _par_args = []
        _extant_pos_args = getattr(namespace, 'files')
        _extant_par_args = getattr(namespace, self.dest)
        for value in values:
            if ".fits" in value:
                _pos_args.extend([value])
            else:
                _par_args.extend([value])

        # set new pos args
        if _pos_args:
            setattr(namespace, 'files', _pos_args)

        # Received (new) unitary argument types
        # override any previous namespace self.dest
        setattr(namespace, self.dest, _par_args)
        return


class ParameterAction(Action):
    def __call__(self, parser, namespace, values, option_string=None):
        # 'values' is a list, which may have accumulated pos args
        _pos_args = []
        _par_args = []
        _extant_pos_args = getattr(namespace, 'files')
        _extant_par_args = getattr(namespace, self.dest)
        for value in values:
            if "=" not in value:
                _pos_args.extend([value])
            else:
                _par_args.extend([value])

        # set new pos args
        if _pos_args:
            setattr(namespace, 'files', _pos_args)

        # Handle parameter args already in namespace.
        # Override only those specific parameters.
if _par_args and not _extant_par_args: setattr(namespace, self.dest, _par_args) if _extant_par_args: reemed = [_extant_par_args.remove(z) for z in [x for x in _extant_par_args if x.split('=')[0] in [y.split('=')[0] for y in _par_args]] ] print("Overriding", len(reemed), "parameter(s).\n") _extant_par_args.extend(_par_args) setattr(namespace, self.dest, _extant_par_args) return class CalibrationAction(Action): def __call__(self, parser, namespace, values, option_string=None): # 'values' is a list, which may have accumulated pos args _pos_args = [] _cal_args = [] _extant_pos_args = getattr(namespace, 'files') _extant_cal_args = getattr(namespace, self.dest) for value in values: if ":" not in value: _pos_args.extend([value]) else: _cal_args.extend([value]) # set new pos args if _pos_args: setattr(namespace, 'files', _pos_args) # Handle cal args already in namespace. # Override specific parameters. if _cal_args and not _extant_cal_args: setattr(namespace, self.dest, _cal_args) if _extant_cal_args: reemed = [_extant_cal_args.remove(z) for z in [x for x in _extant_cal_args if x.split(':')[0] in [y.split(':')[0] for y in _cal_args]] ] print("Overriding", len(reemed), "calibration source(s).\n") _extant_cal_args.extend(_cal_args) setattr(namespace, self.dest, _extant_cal_args) return
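
# A minimal, hypothetical sketch of how these actions could be wired into a parser,
# following the module docstring above. The option names and nargs settings are
# illustrative assumptions, not the actual reduce command line definition.
if __name__ == '__main__':
    import argparse
    demo = argparse.ArgumentParser(description="reduceActions demo")
    demo.add_argument('files', nargs='*', action=PosArgAction, default=[],
                      help="input FITS files")
    demo.add_argument('-p', '--param', nargs='*', action=ParameterAction,
                      help="user parameters, e.g. par=value")
    demo.add_argument('--user_cal', nargs='*', action=CalibrationAction,
                      help="calibration sources, e.g. processed_bias:bias.fits")
    print(demo.parse_args(['test.fits', '-p', 'x=1', 'y=2']))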
nilq/baby-python
python
from fastapi import APIRouter from fastapi import Depends, HTTPException, status from fastapi.responses import ORJSONResponse from fastapi.security import OAuth2PasswordRequestForm from fastapidi import get_dependency from app.modules.auth.depends import validate_jwt_token from app.modules.auth.dtos.token import Token, Check, RefreshToken from app.modules.auth.dtos.user_data import UserData from app.modules.auth.use_cases.interfaces import IJwtService, IPasswordHashService, IRefreshTokenService from app.modules.users.use_cases.interfaces import IUserService router = APIRouter() @router.post("/sign_in", response_model=Token) async def login_for_access_token( jwt_service=get_dependency(IJwtService), user_service=get_dependency(IUserService), password_hash_service=get_dependency(IPasswordHashService), refresh_token_service=get_dependency(IRefreshTokenService), form_data: OAuth2PasswordRequestForm = Depends() ): user = await user_service.get_by_email(form_data.username) if user is None: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Incorrect username", headers={"WWW-Authenticate": "Bearer"}, ) if not password_hash_service.verify_password(form_data.password, user.password): raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Incorrect password", headers={"WWW-Authenticate": "Bearer"}, ) payload_data = {"sub": user.email, "id": user.id} access_token = jwt_service.create_access_token(data=payload_data) refresh_token = jwt_service.create_refresh_token(data=payload_data) refresh_token_in_db = await refresh_token_service.create(refresh_token, user.id) if refresh_token_in_db is None: raise HTTPException(status_code=400, detail="Failed to write refresh token") return ORJSONResponse({"access_token": access_token, "refresh_token": refresh_token, "token_type": "bearer"}) @router.post("/refresh_token", response_model=Token) async def update_tokens( refresh_token: RefreshToken, jwt_service=get_dependency(IJwtService), user_service=get_dependency(IUserService), refresh_token_service=get_dependency(IRefreshTokenService) ): deleted_refresh_token = await refresh_token_service.delete_by_token(token=refresh_token.refresh_token) if deleted_refresh_token is None: raise HTTPException(status_code=400, detail="Failed to delete refresh token") user = await user_service.get_by_id(deleted_refresh_token.user_id) payload_data = {"sub": user.email, "id": user.id} refresh_token = jwt_service.create_refresh_token(data=payload_data) refresh_token_in_db = await refresh_token_service.create(refresh_token, user.id) if refresh_token_in_db is None: raise HTTPException(status_code=400, detail="Failed to write refresh token") access_token = jwt_service.create_access_token(data=payload_data) return ORJSONResponse({"access_token": access_token, "refresh_token": refresh_token, "token_type": "bearer"}) @router.post("/register", response_model=Token, status_code=201) async def register_new_user( user_date: UserData, jwt_service=get_dependency(IJwtService), user_service=get_dependency(IUserService), refresh_token_service=get_dependency(IRefreshTokenService) ): user = await user_service.create(**user_date.dict()) if user is None: raise HTTPException(status_code=400, detail="Failed to create user") payload_data = {"sub": user.email, "id": user.id} refresh_token = jwt_service.create_refresh_token(data=payload_data) refresh_token_in_db = await refresh_token_service.create(refresh_token, user.id) if refresh_token_in_db is None: raise HTTPException(status_code=400, detail="Failed to write refresh 
token") access_token = jwt_service.create_access_token(data=payload_data) return ORJSONResponse({"access_token": access_token, "refresh_token": refresh_token, "token_type": "bearer"}, 201) @router.get("/check_token", response_model=Check, status_code=200) async def check_token(_: str = Depends(validate_jwt_token)): return ORJSONResponse(Check(status=True).dict())
nilq/baby-python
python
""" fasta - manipulations with FASTA databases ========================================== FASTA is a simple file format for protein sequence databases. Please refer to `the NCBI website <http://www.ncbi.nlm.nih.gov/blast/fasta.shtml>`_ for the most detailed information on the format. Data manipulation ----------------- Classes ....... Several classes of FASTA parsers are available. All of them have common features: - context manager support; - header parsing; - direct iteration. Available classes: :py:class:`FASTABase` - common ancestor, suitable for type checking. Abstract class. :py:class:`FASTA` - text-mode, sequential parser. Good for iteration over database entries. :py:class:`IndexedFASTA` - binary-mode, indexing parser. Supports direct indexing by header string. :py:class:`TwoLayerIndexedFASTA` - additionally supports indexing by extracted header fields. :py:class:`UniProt` and :py:class:`IndexedUniProt`, :py:class:`UniParc` and :py:class:`IndexedUniParc`, :py:class:`UniMes` and :py:class:`IndexedUniMes`, :py:class:`UniRef` and :py:class:`IndexedUniRef`, :py:class:`SPD` and :py:class:`IndexedSPD`, :py:class:`NCBI` and :py:class:`IndexedNCBI` - format-specific parsers. Functions ......... :py:func:`read` - returns an instance of the appropriate reader class, for sequential iteration or random access. :py:func:`chain` - read multiple files at once. :py:func:`chain.from_iterable` - read multiple files at once, using an iterable of files. :py:func:`write` - write entries to a FASTA database. :py:func:`parse` - parse a FASTA header. Decoy sequence generation ------------------------- :py:func:`decoy_sequence` - generate a decoy sequence from a given sequence, using one of the other functions listed in this section or any other callable. :py:func:`reverse` - generate a reversed decoy sequence. :py:func:`shuffle` - generate a shuffled decoy sequence. :py:func:`fused_decoy` - generate a "fused" decoy sequence. Decoy database generation ------------------------- :py:func:`write_decoy_db` - generate a decoy database and write it to a file. :py:func:`decoy_db` - generate entries for a decoy database from a given FASTA database. :py:func:`decoy_chain` - a version of :py:func:`decoy_db` for multiple files. :py:func:`decoy_chain.from_iterable` - like :py:func:`decoy_chain`, but with an iterable of files. Auxiliary --------- :py:data:`std_parsers` - a dictionary with parsers for known FASTA header formats. ------------------------------------------------------------------------------- """ # Copyright 2012 Anton Goloborodko, Lev Levitsky # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import random from collections import namedtuple import re from . import auxiliary as aux Protein = namedtuple('Protein', ('description', 'sequence')) class FASTABase(object): """Abstract base class for FASTA file parsers. Can be used for type checking. 
""" parser = None _ignore_comments = False _comments = set('>;') def __init__(self, source, **kwargs): self._ignore_comments = kwargs.pop('ignore_comments', False) parser = kwargs.pop('parser', None) if parser is not None: self.parser = parser super(FASTABase, self).__init__(source, **kwargs) def _is_comment(self, line): return line[0] in self._comments def get_entry(self, key): raise NotImplementedError class FASTA(FASTABase, aux.FileReader): """Text-mode, sequential FASTA parser. Suitable for iteration over the file to obtain all entries in order. """ def __init__(self, source, ignore_comments=False, parser=None, encoding=None): """Create a new FASTA parser object. Supports iteration, yields `(description, sequence)` tuples. Supports `with` syntax. Parameters ---------- source : str or file-like File to read. If file object, it must be opened in *text* mode. ignore_comments : bool, optional If :py:const:`True` then ignore the second and subsequent lines of description. Default is :py:const:`False`, which concatenates multi-line descriptions into a single string. parser : function or None, optional Defines whether the FASTA descriptions should be parsed. If it is a function, that function will be given the description string, and the returned value will be yielded together with the sequence. The :py:data:`std_parsers` dict has parsers for several formats. Hint: specify :py:func:`parse` as the parser to apply automatic format recognition. Default is :py:const:`None`, which means return the header "as is". encoding : str or None, optional File encoding (if it is given by name). """ super(FASTA, self).__init__(source, mode='r', parser_func=self._read, pass_file=False, args=(), kwargs={}, encoding=encoding, ignore_comments=ignore_comments, parser=parser) def _read(self): accumulated_strings = [] # Iterate through '>' after the file is over to retrieve the last entry. for string in itertools.chain(self._source, '>'): stripped_string = string.strip() # Skip empty lines. if not stripped_string: continue is_comment = self._is_comment(stripped_string) if is_comment: # If it is a continuing comment if len(accumulated_strings) == 1: if not self._ignore_comments: accumulated_strings[0] += (' ' + stripped_string[1:]) else: continue elif accumulated_strings: description = accumulated_strings[0] sequence = ''.join(accumulated_strings[1:]) # Drop the translation stop sign. if sequence and sequence[-1] == '*': sequence = sequence[:-1] if self.parser is not None: description = self.parser(description) yield Protein(description, sequence) accumulated_strings = [stripped_string[1:]] else: # accumulated_strings is empty; we're probably reading # the very first line of the file accumulated_strings.append(stripped_string[1:]) else: accumulated_strings.append(stripped_string) def get_entry(self, key): raise aux.PyteomicsError('Direct indexing is not supported. ' 'Use IndexedFASTA and its subclasses') def _reconstruct(cls, args, kwargs): kwargs['_skip_index'] = True return cls(*args, **kwargs) class IndexedFASTA(FASTABase, aux.TaskMappingMixin, aux.IndexedTextReader): """Indexed FASTA parser. Supports direct indexing by matched labels.""" delimiter = '\n>' label = r'^[\n]?>(.*)\s*' def __init__(self, source, ignore_comments=False, parser=None, **kwargs): """Create an indexed FASTA parser object. Parameters ---------- source : str or file-like File to read. If file object, it must be opened in *binary* mode. 
ignore_comments : bool, optional If :py:const:`True` then ignore the second and subsequent lines of description. Default is :py:const:`False`, which concatenates multi-line descriptions into a single string. parser : function or None, optional Defines whether the FASTA descriptions should be parsed. If it is a function, that function will be given the description string, and the returned value will be yielded together with the sequence. The :py:data:`std_parsers` dict has parsers for several formats. Hint: specify :py:func:`parse` as the parser to apply automatic format recognition. Default is :py:const:`None`, which means return the header "as is". encoding : str or None, optional, keyword only File encoding. Default is UTF-8. block_size : int or None, optional, keyword only Number of bytes to consume at once. delimiter : str or None, optional, keyword only Overrides the FASTA record delimiter (default is ``'\n>'``). label : str or None, optional, keyword only Overrides the FASTA record label pattern. Default is ``'^[\n]?>(.*)'``. label_group : int or str, optional, keyword only Overrides the matched group used as key in the byte offset index. This in combination with `label` can be used to extract fields from headers. However, consider using :py:class:`TwoLayerIndexedFASTA` for this purpose. """ super(IndexedFASTA, self).__init__(source, ignore_comments=ignore_comments, parser=parser, parser_func=self._read, pass_file=False, args=(), kwargs={}, **kwargs) self._init_args = (source, ignore_comments, parser) self._init_kwargs = kwargs def __reduce_ex__(self, protocol): return (_reconstruct, (self.__class__, self._init_args, self._init_kwargs), self.__getstate__()) def _read_protein_lines(self, lines): description = [] sequence = [] for string in lines: stripped_string = string.strip() if not stripped_string: continue is_comment = self._is_comment(stripped_string) if is_comment: if not description or not self._ignore_comments: description.append(stripped_string[1:]) else: sequence.append(stripped_string) description = ' '.join(description) sequence = ''.join(sequence) # Drop the translation stop sign. if sequence and sequence[-1] == '*': sequence = sequence[:-1] if self.parser is not None: description = self.parser(description) return Protein(description, sequence) def _item_from_offsets(self, offsets): start, end = offsets lines = self._read_lines_from_offsets(start, end) return self._read_protein_lines(lines) def _read(self, **kwargs): for key, offsets in self._offset_index.items(): yield self._item_from_offsets(offsets) def get_entry(self, key): return self.get_by_id(key) class TwoLayerIndexedFASTA(IndexedFASTA): """Parser with two-layer index. Extracted groups are mapped to full headers (where possible), full headers are mapped to byte offsets. When indexed, the key is looked up in both indexes, allowing access by meaningful IDs (like UniProt accession) and by full header string. """ header_group = 1 header_pattern = None def __init__(self, source, header_pattern=None, header_group=None, ignore_comments=False, parser=None, **kwargs): """Open `source` and create a two-layer index for convenient random access both by full header strings and extracted fields. Parameters ---------- source : str or file-like File to read. If file object, it must be opened in *binary* mode. header_pattern : str or RE or None, optional Pattern to match the header string. Must capture the group used for the second index. If :py:const:`None` (default), second-level index is not created. 
header_group : int or str or None, optional Defines which group is used as key in the second-level index. Default is 1. ignore_comments : bool, optional If :py:const:`True` then ignore the second and subsequent lines of description. Default is :py:const:`False`, which concatenates multi-line descriptions into a single string. parser : function or None, optional Defines whether the FASTA descriptions should be parsed. If it is a function, that function will be given the description string, and the returned value will be yielded together with the sequence. The :py:data:`std_parsers` dict has parsers for several formats. Hint: specify :py:func:`parse` as the parser to apply automatic format recognition. Default is :py:const:`None`, which means return the header "as is". Other arguments : the same as for :py:class:`IndexedFASTA`. """ super(TwoLayerIndexedFASTA, self).__init__(source, ignore_comments, parser, **kwargs) if header_group is not None: self.header_group = header_group if header_pattern is not None: self.header_pattern = header_pattern if not kwargs.get('_skip_index', False): self.build_second_index() self._init_args = (source, header_pattern, header_group, ignore_comments, parser) self._init_kwargs = kwargs def build_second_index(self): """Create the mapping from extracted field to whole header string.""" if self.header_pattern is None: self._id2header = None else: index = {} for key in self._offset_index: match = re.match(self.header_pattern, key) if match: index[match.group(self.header_group)] = key self._id2header = index def __getstate__(self): state = super(TwoLayerIndexedFASTA, self).__getstate__() state['id2header'] = self._id2header return state def __setstate__(self, state): super(TwoLayerIndexedFASTA, self).__setstate__(state) self._id2header = state['id2header'] def get_by_id(self, key): """Get the entry by value of header string or extracted field.""" try: return super(TwoLayerIndexedFASTA, self).get_by_id(key) except KeyError: if self._id2header: header = self._id2header.get(key) if header is not None: return super(TwoLayerIndexedFASTA, self).get_entry(header) raise KeyError(key) def __contains__(self, key): return super(TwoLayerIndexedFASTA, self).__contains__(key) or key in self._id2header class FlavoredMixin(): """Parser aimed at a specific FASTA flavor. Subclasses should define `parser` and `header_pattern`. The `parse` argument in :py:meth:`__init__` defines whether description is parsed in output. """ def __init__(self, parse=True): if not parse: self.parser = None class UniProtMixin(FlavoredMixin): header_pattern = r'^(\w+)\|([-\w]+)\|(\w+)\s+([^=]*\S)((\s+\w+=[^=]+(?!\w*=))+)\s*$' header_group = 2 def parser(self, header): db, ID, entry, name, pairs, _ = re.match(self.header_pattern, header).groups() gid, taxon = entry.split('_') info = {'db': db, 'id': ID, 'entry': entry, 'name': name, 'gene_id': gid, 'taxon': taxon} info.update(_split_pairs(pairs)) _intify(info, ('PE', 'SV')) return info def _add_init(cls): """Add an __init__ method to a flavored parser class, which simply calls __init__ of its two bases.""" flavor, typ = cls.__bases__ newdict = cls.__dict__.copy() def __init__(self, source, parse=True, **kwargs): typ.__init__(self, source, **kwargs) flavor.__init__(self, parse) self._init_args = (source, parse) self._init_kwargs = kwargs flavor_name = flavor.__name__[:-5] type_name = "Text-mode" if typ is FASTA else "Indexed" __init__.__doc__ = """Creates a :py:class:`{}` object. Parameters ---------- source : str or file The file to read. 
If a file object, it needs to be in *{}* mode. parse : bool, optional Defines whether the descriptions should be parsed in the produced tuples. Default is :py:const:`True`. kwargs : passed to the :py:class:`{}` constructor. """.format(cls.__name__, 'text' if typ is FASTA else 'binary', typ.__name__) newdict['__init__'] = __init__ newdict['__doc__'] = """{} parser for {} FASTA files.""".format(type_name, flavor_name) return type(cls.__name__, (flavor, typ), newdict) @_add_init class UniProt(UniProtMixin, FASTA): pass @_add_init class IndexedUniProt(UniProtMixin, TwoLayerIndexedFASTA): pass class UniRefMixin(FlavoredMixin): header_pattern = r'^(\S+)\s+([^=]*\S)((\s+\w+=[^=]+(?!\w*=))+)\s*$' def parser(self, header): assert 'Tax' in header ID, cluster, pairs, _ = re.match(self.header_pattern, header).groups() info = {'id': ID, 'cluster': cluster} info.update(_split_pairs(pairs)) gid, taxon = info['RepID'].split('_') type_, acc = ID.split('_') info.update({'taxon': taxon, 'gene_id': gid, 'type': type_, 'accession': acc}) _intify(info, ('n',)) return info @_add_init class UniRef(UniRefMixin, FASTA): pass @_add_init class IndexedUniRef(UniRefMixin, TwoLayerIndexedFASTA): pass class UniParcMixin(FlavoredMixin): header_pattern = r'(\S+)\s+status=(\w+)\s*$' def parser(self, header): ID, status = re.match(self.header_pattern, header).groups() return {'id': ID, 'status': status} @_add_init class UniParc(UniParcMixin, FASTA): pass @_add_init class IndexedUniParc(UniParcMixin, TwoLayerIndexedFASTA): pass class UniMesMixin(FlavoredMixin): header_pattern = r'^(\S+)\s+([^=]*\S)((\s+\w+=[^=]+(?!\w*=))+)\s*$' def parser(self, header): assert 'OS=' in header and 'SV=' in header and 'PE=' not in header ID, name, pairs, _ = re.match(self.header_pattern, header).groups() info = {'id': ID, 'name': name} info.update(_split_pairs(pairs)) _intify(info, ('SV',)) return info @_add_init class UniMes(UniMesMixin, FASTA): pass @_add_init class IndexedUniMes(UniMesMixin, TwoLayerIndexedFASTA): pass class SPDMixin(FlavoredMixin): header_pattern = r'^([^|]+?)\s*\|\s*(([^|]+?)_([^|]+?))\s*\|\s*([^|]+?)\s*$' def parser(self, header): assert '=' not in header ID, gene, gid, taxon, d = re.match(self.header_pattern, header).groups() return {'id': ID, 'gene': gene, 'description': d, 'taxon': taxon, 'gene_id': gid} @_add_init class SPD(SPDMixin, FASTA): pass @_add_init class IndexedSPD(SPDMixin, TwoLayerIndexedFASTA): pass class NCBIMixin(FlavoredMixin): header_pattern = r'^(\S+)\s+(.*\S)\s+\[(.*)\]' def parser(self, header): ID, description, organism = re.match(self.header_pattern, header).groups() return {'id': ID, 'description': description, 'taxon': organism} @_add_init class NCBI(NCBIMixin, FASTA): pass @_add_init class IndexedNCBI(NCBIMixin, TwoLayerIndexedFASTA): pass class RefSeqMixin(FlavoredMixin): header_pattern = r'^ref\|([^|]+)\|\s*([^\[]*\S)\s*\[(.*)\]' def parser(self, header): ID, description, organism = re.match(self.header_pattern, header).groups() return {'id': ID, 'description': description, 'taxon': organism} @_add_init class RefSeq(RefSeqMixin, FASTA): pass @_add_init class IndexedRefSeq(RefSeqMixin, TwoLayerIndexedFASTA): pass def read(source=None, use_index=None, flavor=None, **kwargs): """Parse a FASTA file. This function serves as a dispatcher between different parsers available in this module. Parameters ---------- source : str or file or None, optional A file object (or file name) with a FASTA database. Default is :py:const:`None`, which means read standard input. 
use_index : bool, optional If :py:const:`True`, the created parser object will be an instance of :py:class:`IndexedFASTA`. If :py:const:`False` (default), it will be an instance of :py:class:`FASTA`. flavor : str or None, optional A supported FASTA header format. If specified, a format-specific parser instance is returned. .. note:: See :py:data:`std_parsers` for supported flavors. Returns ------- out : iterator of tuples A named 2-tuple with FASTA header (str or dict) and sequence (str). Attributes 'description' and 'sequence' are also provided. """ try: parser = std_parsers[flavor and flavor.lower()] except KeyError: raise aux.PyteomicsError('No parser for flavor: {}. Supported flavors: {}'.format( flavor, ', '.join(map(str, std_parsers)))) use_index = aux._check_use_index(source, use_index, False) return parser[use_index](source, **kwargs) @aux._file_writer() def write(entries, output=None): """ Create a FASTA file with `entries`. Parameters ---------- entries : iterable of (str, str) tuples An iterable of 2-tuples in the form (description, sequence). output : file-like or str, optional A file open for writing or a path to write to. If the file exists, it will be opened for appending. Default is :py:const:`None`, which means write to standard output. file_mode : str, keyword only, optional If `output` is a file name, defines the mode the file will be opened in. Otherwise will be ignored. Default is 'a'. Returns ------- output_file : file object The file where the FASTA is written. """ for descr, seq in entries: output.write('>' + descr.replace('\n', '\n;') + '\n') output.write(''.join([('%s\n' % seq[i:i+70]) for i in range(0, len(seq), 70)]) + '\n') return output.file def reverse(sequence, keep_nterm=False, keep_cterm=False): """ Create a decoy sequence by reversing the original one. Parameters ---------- sequence : str The initial sequence string. keep_nterm : bool, optional If :py:const:`True`, then the N-terminal residue will be kept. Default is :py:const:`False`. keep_cterm : bool, optional If :py:const:`True`, then the C-terminal residue will be kept. Default is :py:const:`False`. Returns ------- decoy_sequence : str The decoy sequence. """ start = 1 if keep_nterm else 0 end = len(sequence)-1 if keep_cterm else len(sequence) if start == end: return sequence return sequence[:start] + sequence[start:end][::-1] + sequence[end:] def shuffle(sequence, keep_nterm=False, keep_cterm=False): """ Create a decoy sequence by shuffling the original one. Parameters ---------- sequence : str The initial sequence string. keep_nterm : bool, optional If :py:const:`True`, then the N-terminal residue will be kept. Default is :py:const:`False`. keep_cterm : bool, optional If :py:const:`True`, then the C-terminal residue will be kept. Default is :py:const:`False`. Returns ------- decoy_sequence : str The decoy sequence. """ start = 1 if keep_nterm else 0 end = len(sequence)-1 if keep_cterm else len(sequence) if start == end: return sequence elif keep_cterm or keep_nterm: return sequence[:start] + shuffle(sequence[start:end]) + sequence[end:] modified_sequence = list(sequence) random.shuffle(modified_sequence) return ''.join(modified_sequence) def fused_decoy(sequence, decoy_mode='reverse', sep='R', **kwargs): """ Create a "fused" decoy sequence by concatenating a decoy sequence with the original one. The method and its use cases are described in: Ivanov, M. V., Levitsky, L. I., & Gorshkov, M. V. (2016). `Adaptation of Decoy Fusion Strategy for Existing Multi-Stage Search Workflows. 
<http://doi.org/10.1007/s13361-016-1436-7>`_ Journal of The American Society for Mass Spectrometry, 27(9), 1579-1582. Parameters ---------- sequence : str The initial sequence string. decoy_mode : str or callable, optional Type of decoy sequence to use. Should be one of the standard modes or any callable. Standard modes are: - 'reverse' for :py:func:`reverse`; - 'shuffle' for :py:func:`shuffle`; - 'fused' for :py:func:`fused_decoy` (if you love recursion). Default is 'reverse'. sep : str, optional Amino acid motif that separates the decoy sequence from the target one. This setting should reflect the enzyme specificity used in the search against the database being generated. Default is 'R', which is suitable for trypsin searches. **kwargs : given to the decoy generation function. Examples -------- >>> fused_decoy('PEPT') 'TPEPRPEPT' >>> fused_decoy('MPEPT', 'shuffle', 'K', keep_nterm=True) 'MPPTEKMPEPT' """ decoy = decoy_sequence(sequence, decoy_mode, **kwargs) return decoy + sep + sequence _decoy_functions = {'reverse': reverse, 'shuffle': shuffle, 'fused': fused_decoy} def decoy_sequence(sequence, mode='reverse', **kwargs): """ Create a decoy sequence out of a given sequence string. Parameters ---------- sequence : str The initial sequence string. mode : str or callable, optional Type of decoy sequence. Should be one of the standard modes or any callable. Standard modes are: - 'reverse' for :py:func:`reverse`; - 'shuffle' for :py:func:`shuffle`; - 'fused' for :py:func:`fused_decoy`. Default is 'reverse'. **kwargs : given to the decoy function. Returns ------- decoy_sequence : str The decoy sequence. """ fmode = mode if isinstance(mode, str): fmode = _decoy_functions.get(mode) if fmode is None: raise aux.PyteomicsError('Unsupported decoy mode: {}'.format(mode)) return fmode(sequence, **kwargs) @aux._file_reader() def decoy_db(source=None, mode='reverse', prefix='DECOY_', decoy_only=False, ignore_comments=False, parser=None, **kwargs): """Iterate over sequences for a decoy database out of a given ``source``. Parameters ---------- source : file-like object or str or None, optional A path to a FASTA database or a file object itself. Default is :py:const:`None`, which means read standard input. mode : str or callable, optional Algorithm of decoy sequence generation. 'reverse' by default. See :py:func:`decoy_sequence` for more information. prefix : str, optional A prefix to the protein descriptions of decoy entries. The default value is `'DECOY_'`. decoy_only : bool, optional If set to :py:const:`True`, only the decoy entries will be written to `output`. If :py:const:`False`, the entries from `source` will be written first. :py:const:`False` by default. ignore_comments : bool, optional If True then ignore the second and subsequent lines of description. Default is :py:const:`False`. parser : function or None, optional Defines whether the fasta descriptions should be parsed. If it is a function, that function will be given the description string, and the returned value will be yielded together with the sequence. The :py:data:`std_parsers` dict has parsers for several formats. Hint: specify :py:func:`parse` as the parser to apply automatic format guessing. Default is :py:const:`None`, which means return the header "as is". **kwargs : given to :py:func:`decoy_sequence`. Returns ------- out : iterator An iterator over entries of the new database. 
""" # store the initial position pos = source.tell() if not decoy_only: with read(source, ignore_comments, parser) as f: for x in f: yield x # return to the initial position in the source file to read again source.seek(pos) parser = parser or (lambda x: x) with read(source, ignore_comments) as f: for descr, seq in f: yield Protein(parser(prefix + descr), decoy_sequence(seq, mode, **kwargs)) @aux._file_writer() def write_decoy_db(source=None, output=None, mode='reverse', prefix='DECOY_', decoy_only=False, **kwargs): """Generate a decoy database out of a given ``source`` and write to file. If `output` is a path, the file will be open for appending, so no information will be lost if the file exists. Although, the user should be careful when providing open file streams as `source` and `output`. The reading and writing will start from the current position in the files, which is where the last I/O operation finished. One can use the :py:func:`file.seek` method to change it. Parameters ---------- source : file-like object or str or None, optional A path to a FASTA database or a file object itself. Default is :py:const:`None`, which means read standard input. output : file-like object or str, optional A path to the output database or a file open for writing. Defaults to :py:const:`None`, the results go to the standard output. mode : str or callable, optional Algorithm of decoy sequence generation. 'reverse' by default. See :py:func:`decoy_sequence` for more details. prefix : str, optional A prefix to the protein descriptions of decoy entries. The default value is `'DECOY_'` decoy_only : bool, optional If set to :py:const:`True`, only the decoy entries will be written to `output`. If :py:const:`False`, the entries from `source` will be written as well. :py:const:`False` by default. file_mode : str, keyword only, optional If `output` is a file name, defines the mode the file will be opened in. Otherwise will be ignored. Default is 'a'. **kwargs : given to :py:func:`decoy_sequence`. Returns ------- output : file A (closed) file object for the created file. """ with decoy_db(source, mode, prefix, decoy_only, **kwargs) as entries: write(entries, output) return output.file # auxiliary functions for parsing of FASTA headers def _split_pairs(s): return dict(map(lambda x: x.strip(), x.split('=')) for x in re.split(r' (?=\w+=)', s.strip())) def _intify(d, keys): for k in keys: if k in d: d[k] = int(d[k]) std_parsers = {'uniprot': (UniProt, IndexedUniProt), 'uniref': (UniRef, IndexedUniRef), 'uniparc': (UniParc, IndexedUniParc), 'unimes': (UniMes, IndexedUniMes), 'spd': (SPD, IndexedSPD), 'ncbi': (NCBI, IndexedNCBI), 'refseq': (RefSeq, IndexedRefSeq), None: (FASTA, IndexedFASTA)} """A dictionary with parsers for known FASTA header formats. For now, supported formats are those described at `UniProt help page <http://www.uniprot.org/help/fasta-headers>`_.""" _std_mixins = {'uniprot': UniProtMixin, 'uniref': UniRefMixin, 'uniparc': UniParcMixin, 'unimes': UniMesMixin, 'spd': SPDMixin, 'ncbi': NCBIMixin, 'refseq': RefSeqMixin} def parse(header, flavor='auto', parsers=None): """Parse the FASTA header and return a nice dictionary. Parameters ---------- header : str FASTA header to parse flavor : str, optional Short name of the header format (case-insensitive). Valid values are :py:const:`'auto'` and keys of the `parsers` dict. Default is :py:const:`'auto'`, which means try all formats in turn and return the first result that can be obtained without an exception. 
parsers : dict, optional A dict where keys are format names (lowercased) and values are functions that take a header string and return the parsed header. Returns ------- out : dict A dictionary with the info from the header. The format depends on the flavor. """ parser_function = lambda cls: cls().parser flavor = flavor.lower() # accept strings with and without leading '>' if header and header[0] == '>': header = header[1:] # choose the format known = parsers or _std_mixins if flavor == 'auto': for parser in known.values(): try: return parser_function(parser)(header) except Exception: pass raise aux.PyteomicsError('Unknown FASTA header format: ' + header) elif flavor in known: try: return parser_function(known[flavor])(header) except Exception as e: raise aux.PyteomicsError('Could not parse header as "{}". ' 'The error message was: {}: {}. Header: "{}"'.format( flavor, type(e).__name__, e.args[0], header)) raise aux.PyteomicsError('Unknown flavor: {}'.format(flavor)) chain = aux._make_chain(read, 'read') decoy_chain = aux._make_chain(decoy_db, 'decoy_db')
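
# A short usage sketch for the readers defined above, added for illustration only.
# The file name 'proteins.fasta' and the accession 'P12345' are placeholders.
if __name__ == '__main__':
    # Sequential, text-mode iteration with automatic header format recognition.
    with read('proteins.fasta', parser=parse) as reader:
        for description, sequence in reader:
            print(description, len(sequence))

    # Random access by UniProt accession (or full header) via the two-layer index.
    with IndexedUniProt('proteins.fasta') as db:
        print(db.get_entry('P12345').sequence)

    # Write a reversed-decoy database alongside the target entries.
    write_decoy_db('proteins.fasta', 'target_decoy.fasta', mode='reverse', prefix='DECOY_')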
nilq/baby-python
python
from log import LOG from .image import Image from .digits import Digits
nilq/baby-python
python
# V0 # V1 # https://blog.csdn.net/fuxuemingzhu/article/details/82083609 class Solution(object): def numSpecialEquivGroups(self, A): """ :type A: List[str] :rtype: int """ B = set() for a in A: B.add(''.join(sorted(a[0::2])) + ''.join(sorted(a[1::2]))) return len(B) # V2 # Time: O(n * l) # Space: O(n) class Solution(object): def numSpecialEquivGroups(self, A): """ :type A: List[str] :rtype: int """ def count(word): result = [0]*52 for i, letter in enumerate(word): result[ord(letter)-ord('a') + 26*(i%2)] += 1 return tuple(result) return len({count(word) for word in A})
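
# Illustrative check (not part of the original solutions): two strings are
# special-equivalent when sorting their even-indexed and odd-indexed characters
# independently makes them equal, so this list splits into 3 groups.
assert Solution().numSpecialEquivGroups(["abcd", "cdab", "cbad", "xyzz", "zzxy", "zzyx"]) == 3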
nilq/baby-python
python
# ====================================================================== # Dirac Dice # Advent of Code 2021 Day 21 -- Eric Wastl -- https://adventofcode.com # # Python implementation by Dr. Dean Earl Wright III # ====================================================================== # ====================================================================== # t e s t _ p l a y e r . p y # ====================================================================== "Test Player for Advent of Code 2021 day 21, Dirac Dice" # ---------------------------------------------------------------------- # import # ---------------------------------------------------------------------- import unittest import player # ---------------------------------------------------------------------- # constants # ---------------------------------------------------------------------- EXAMPLE_TEXT = "Player 1 starting position: 4" # ====================================================================== # TestPlayer # ====================================================================== class TestPlayer(unittest.TestCase): # pylint: disable=R0904 "Test Player object" def test_empty_init(self): "Test the default Player creation" # 1. Create default Player object myobj = player.Player() # 2. Make sure it has the default values self.assertEqual(myobj.part2, False) self.assertEqual(myobj.text, None) self.assertEqual(myobj.number, 0) self.assertEqual(myobj.position, 0) self.assertEqual(myobj.score, 0) self.assertEqual(myobj.wins, 0) def test_text_init(self): "Test the Player object creation from text" # 1. Create Player object from text myobj = player.Player(text=EXAMPLE_TEXT) # 2. Make sure it has the expected values self.assertEqual(myobj.part2, False) self.assertEqual(len(myobj.text), 29) self.assertEqual(myobj.number, 1) self.assertEqual(myobj.position, 3) self.assertEqual(myobj.score, 0) self.assertEqual(myobj.wins, 0) # ---------------------------------------------------------------------- # module initialization # ---------------------------------------------------------------------- if __name__ == '__main__': pass # ====================================================================== # end t e s t _ p l a y e r . p y end # ======================================================================
nilq/baby-python
python
from rest_framework import viewsets from rest_framework.permissions import IsAdminUser from src.apps.users.models import User from src.apps.users.serializers import FullUserSerializer, LimitedUserSerializer from src.contrib.permission import ReadOnly class UserViewSet(viewsets.ModelViewSet): """ API endpoint that allows users to be viewed or edited. """ queryset = User.objects.all().order_by("-date_joined") permission_classes = [IsAdminUser | ReadOnly] def get_serializer_class(self): if self.request.user.is_staff: return FullUserSerializer return LimitedUserSerializer
nilq/baby-python
python
# -*- coding: utf-8 -*- """rackio/managers/api.py Thi module implements API Manager. """ import falcon from falcon import api_helpers as helpers from falcon_auth import FalconAuthMiddleware, TokenAuthBackend from falcon_multipart.middleware import MultipartMiddleware from falcon_cors import CORS from ..api import TagResource, TagCollectionResource from ..api import GroupResource, GroupCollectionResource from ..api import TagHistoryResource, TrendResource, TrendCollectionResource from ..api import WaveformResource, WaveformCollectionResource from ..api import LoggerResource from ..api import ControlResource, ControlCollectionResource from ..api import RuleResource, RuleCollectionResource from ..api import AlarmResource, AlarmCollectionResource from ..api import EventCollectionResource from ..api import AppSummaryResource from ..api import BlobCollectionResource, BlobResource from ..api import LoginResource, LogoutResource from ..web import StaticResource, resource_pairs from ..dao import AuthDAO def user_loader(token): dao = AuthDAO() user = dao.read_by_key(token) if not user: return None username = user.username return {'username': username} class API(falcon.API): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.multipart_middleware = MultipartMiddleware() self.auth_backend = TokenAuthBackend(user_loader, auth_header_prefix='Token') self.auth_middleware = FalconAuthMiddleware(self.auth_backend, exempt_routes=['/api/login'], exempt_methods=['HEAD']) self.auth = False self.cors_origins = list() def set_auth(self, enabled=False): self.auth = enabled def auth_enabled(self): return self.auth def set_cors(self, allow_origins): self.cors_origins = allow_origins def get_cors(self): return self.cors_origins def set_middleware(self, independent_middleware=True): middleware = [self.multipart_middleware] if self.auth: middleware.append(self.auth_middleware) if self.cors_origins: cors = CORS(allow_origins_list=self.cors_origins) middleware.append(cors.middleware) self._middleware = helpers.prepare_middleware( middleware, independent_middleware=independent_middleware) self._independent_middleware = independent_middleware class APIManager: def __init__(self): self.app = API() self.port = 8000 self.mode = "development" self.init_api() self.init_web() def set_mode(self, mode): self.mode = mode def enable_auth(self): self.app.set_auth(enabled=True) def disable_auth(self): self.app.set_auth(False) def auth_enabled(self): return self.app.auth_enabled() def set_cors(self, allow_origins): self.app.set_cors(allow_origins) def get_cors(self): return self.app.get_cors() def set_port(self, port): self.port = port def init_api(self): _tag = TagResource() _tags = TagCollectionResource() _group = GroupResource() _groups = GroupCollectionResource() _tag_history = TagHistoryResource() _tag_trend = TrendResource() _tag_trends = TrendCollectionResource() _tag_waveform = WaveformResource() _tag_waveforms = WaveformCollectionResource() _logger = LoggerResource() _control = ControlResource() _controls = ControlCollectionResource() _rule = RuleResource() _rules = RuleCollectionResource() _alarm = AlarmResource() _alarms = AlarmCollectionResource() _events = EventCollectionResource() _summary = AppSummaryResource() _blobs = BlobCollectionResource() _blob = BlobResource() _login = LoginResource() _logout = LogoutResource() self.app.add_route('/api/tags/{tag_id}', _tag) self.app.add_route('/api/tags', _tags) self.app.add_route('/api/groups/{group_id}', _group) self.app.add_route('/api/groups', _groups) 
self.app.add_route('/api/history/{tag_id}', _tag_history) self.app.add_route('/api/trends/{tag_id}', _tag_trend) self.app.add_route('/api/trends', _tag_trends) self.app.add_route('/api/waveforms/{tag_id}', _tag_waveform) self.app.add_route('/api/waveforms', _tag_waveforms) self.app.add_route('/api/logger', _logger) self.app.add_route('/api/controls/{control_name}', _control) self.app.add_route('/api/controls', _controls) self.app.add_route('/api/rules/{rule_name}', _rule) self.app.add_route('/api/rules', _rules) self.app.add_route('/api/alarms/{alarm_name}', _alarm) self.app.add_route('/api/alarms', _alarms) self.app.add_route('/api/events', _events) self.app.add_route('/api/summary', _summary) self.app.add_route('/api/blobs', _blobs) self.app.add_route('/api/blobs/{blob_name}', _blob) self.app.add_route('/api/login', _login) self.app.add_route('/api/logout', _logout) def init_web(self): web = self.app _static = StaticResource() pairs = resource_pairs() for path, route in pairs: route += "/{filename}" web.add_route(route, _static) def add_route(self, route, resource): self.app.add_route(route, resource)
nilq/baby-python
python
#!/usr/bin/env python3 import os import sys import json import zipfile import datetime import shutil from wearebeautiful import model_params as param MAX_SCREENSHOT_SIZE = 256000 # 256Kb is enough! bundles_json_file = "bundles.json" def bundle_setup(bundle_dir_arg): ''' Make the bundle dir, in case it doesn't exist ''' global bundle_dir bundle_dir = bundle_dir_arg try: os.makedirs(bundle_dir) except FileExistsError: pass def create_bundle_index(): ''' Iterate the bundles directory and read the manifest files ''' bundles = [] for path in os.listdir(bundle_dir): if path[0:6].isdigit() and path[6] == '-': with open(os.path.join(bundle_dir, path, "manifest.json"), "r") as f: manifest = json.loads(f.read()) bundles.append(manifest) with open(os.path.join(bundle_dir, bundles_json_file), "w") as out: out.write(json.dumps(bundles)) return bundles def load_bundle_data_into_redis(app): ''' Read the bundles.json file and load into ram ''' redis = app.redis bundles = [] loaded_bundles = [] try: with open(os.path.join(bundle_dir, bundles_json_file), "r") as f: loaded_bundles = json.loads(f.read()) except IOError as err: print("ERROR: Cannot read bundles.json.", err) except ValueError as err: print("ERROR: Cannot read bundles.json.", err) # Clean up old redis keys for k in redis.scan_iter("m:*"): redis.delete(k) redis.delete("m:ids") redis.delete("b:index") # Now add new redis keys bundles = [] ids = {} for bundle in loaded_bundles: redis.set("m:%s:%s:%s" % (bundle['id'], bundle['bodypart'], bundle['pose']), json.dumps(bundle)) data = { 'id' : bundle['id'], 'bodypart' : bundle['bodypart'], 'pose' : bundle['pose'] } bundles.append(data) if not bundle['id'] in ids: ids[bundle['id']] = [] ids[bundle['id']].append(data) redis.set("b:index", json.dumps(bundles)) redis.set("m:ids", json.dumps(ids)) return len(bundles) def get_bundle_id_list(redis): """ Get the list of current ids """ bundles = redis.get("b:index") or "[]" return json.loads(bundles) def get_model_id_list(redis): """ Get the list of model ids """ ids = redis.get("m:ids") or "{}" return json.loads(ids) def get_bundle(redis, id, bodypart, pose): """ Get the manifest of the given bundle """ manifest = redis.get("m:%s:%s:%s" % (id, bodypart, pose)) return json.loads(manifest) def import_bundle(bundle_file): """ unzip and read bundle file """ allowed_files = ['manifest.json', 'surface-low.stl', 'surface-medium.stl', 'solid.stl', 'surface-orig.stl', 'screenshot.jpg'] try: zipf = zipfile.ZipFile(bundle_file) except zipfile.BadZipFile: return "Invalid zip file." files = zipf.namelist() for f in files: if not f in allowed_files: return "file %s is not part of a normal bundle. don't fuck it up, ok?" 
% f

    try:
        rmanifest = zipf.read("manifest.json")
    except IOError:
        return "Cannot read manifest.json"

    try:
        manifest = json.loads(rmanifest)
    except json.decoder.JSONDecodeError as err:
        return err

    err = validate_manifest(manifest)
    if err:
        return err

    # The bundle looks ok, copy it into place
    dest_dir = os.path.join(bundle_dir, "%s-%s-%s" % (manifest['id'], manifest['bodypart'], manifest['pose']))
    while True:
        try:
            os.mkdir(dest_dir)
            break
        except FileExistsError:
            try:
                shutil.rmtree(dest_dir)
            except IOError as err:
                print("Failed to erase old bundle.", err)
                return err

    try:
        for member in allowed_files:
            print(os.path.join(dest_dir, member))
            zipf.extract(member, dest_dir)
    except IOError as err:
        print("IO error: ", err)
        return err

    return ""


def validate_date(date, partial=False):

    if partial:
        try:
            date_obj = datetime.datetime.strptime(date, '%Y-%m')
        except ValueError as err:
            print("Invalid date format. Must be YYYY-MM. (%s)" % err)
            return False
    else:
        try:
            date_obj = datetime.datetime.strptime(date, '%Y-%m-%d')
        except ValueError as err:
            print("Invalid date format. Must be YYYY-MM-DD. (%s)" % err)
            return False

    if date_obj.year < 2019 or date_obj.year > datetime.datetime.now().year:
        print("Invalid year.")
        return False

    return True


def validate_manifest(manifest):

    if manifest['version'] != param.FORMAT_VERSION:
        return "Incorrect format version. This script can only accept version %s" % param.FORMAT_VERSION

    # All required top-level keys must be present.
    missing = list(set(param.REQUIRED_KEYS) - set(manifest.keys()))
    if missing:
        return "Some top level fields are missing. %s\n" % ",".join(missing)

    if len(manifest['id']) != 6 or not manifest['id'].isdigit():
        return "Incorrect ID length or non digits in ID."

    if not validate_date(manifest['created'], partial=True):
        return "Incorrect created date. Must be in YYYY-MM format and minimally specify year and month."

    if not validate_date(manifest['released']):
        return "Incorrect released date. Must be in YYYY-MM-DD format"

    try:
        id = int(manifest['id'])
    except ValueError:
        return "Incorrect ID format. Must be a 6 digit number."

    if manifest['gender'] not in param.GENDERS:
        return "Invalid gender. Must be one of: %s" % (param.GENDERS,)

    if manifest['bodypart'] not in param.BODYPART:
        return "Invalid bodypart. Must be one of: %s" % (param.BODYPART,)

    if manifest['pose'] not in param.POSE:
        return "Invalid pose. Must be one of: %s" % (param.POSE,)

    if manifest['pose'] == 'variant':
        if 'pose_variant' not in manifest:
            return "pose_variant field required for variant poses."
        if len(manifest['pose_variant']) < param.MIN_FREETEXT_FIELD_LEN:
            return "pose_variant field too short. Must be at least %d characters." % param.MIN_FREETEXT_FIELD_LEN

    if manifest['pose'] != 'variant':
        if 'pose_variant' in manifest:
            return "pose_variant field must be omitted when pose is not variant."

    if len(manifest['country']) != 2:
        return "Incorrect country code length."

    if manifest['country'] not in param.COUNTRIES:
        return "Invalid country. Must be one of %s" % (param.COUNTRIES,)

    try:
        age = int(manifest['age'])
    except ValueError:
        return "Cannot parse age."

    if age < 18 or age > 200:
        return "Invalid age. Must be 18-200"

    if manifest['body_type'] not in param.BODY_TYPES:
        return "Invalid body type. Must be one of %s" % (param.BODY_TYPES,)

    if manifest['mother'] not in param.MOTHER:
        return "Invalid value for the field mother. Must be one of %s" % (param.MOTHER,)

    if len(manifest['ethnicity']) < param.MIN_FREETEXT_FIELD_LEN:
        return "ethnicity field too short. Must be at least %d characters." % param.MIN_FREETEXT_FIELD_LEN

    if 'modification' in manifest:
        if type(manifest['modification']) != list:
            return "modification must be a list."
        # Each listed modification must be a known value.
        if len(manifest['modification']) > 0 and any(m not in param.MODIFICATIONS for m in manifest['modification']):
            return "modification must be one of: %s" % (param.MODIFICATIONS,)

    return ""
nilq/baby-python
python
from flask import render_template, flash, redirect, url_for, session, Markup from flask_login import login_user, logout_user, login_required from app import app, db, lm from app.models.forms import * from app.models.tables import * @lm.user_loader def load_user(id): return Usuario.query.filter_by(id=id).first() @app.route("/index") @app.route("/") def index(): return render_template('index.html') #Iniciando parte de login/logoff @app.route("/login", methods=["GET", "POST"]) def login(): form = LoginForm() if form.validate_on_submit(): user = Usuario.query.filter_by(username=form.username.data).first() if user and user.password == form.password.data: login_user(user) flash("Usuário logado") return redirect(url_for("index")) else: flash("Login inválido") #else: # return "erro no login" return render_template('login.html', form=form) @app.route("/logout") @login_required def logout(): logout_user() flash("Usuário Deslogado") return redirect(url_for("index")) #Iniciando parte de pedidos @app.route("/pedidos", methods=["GET", "POST"]) @login_required def pedidos(): form = PedidoForm() if form.validate_on_submit(): i = Pedido(form.servico.data, form.observacao.data, form.data_pedido.data, form.quantidade.data, form.preco.data, form.status_conclusao.data) db.session.add(i) db.session.commit() flash("Pedido adicionado com sucesso!!") return render_template('pedidos.html', form=form) @app.route("/visualizar", methods=["GET", "POST"]) @login_required def visualizar(): pedidos_ativos = Pedido.query.filter_by(status_conclusao=False).all() pedidos_concluidos = Pedido.query.filter_by(status_conclusao=True).all() return render_template('visualizar.html', pedidos_ativos=pedidos_ativos, pedidos_concluidos=pedidos_concluidos) @app.route('/visualizar/complete/<id>') @login_required def complete(id): pedido = Pedido.query.filter_by(id=int(id)).first_or_404() pedido.status_conclusao = True db.session.commit() return redirect(url_for('visualizar')) @app.route("/visualizar/delete/<id>") @login_required def delete(id): pedido = Pedido.query.filter_by(id=int(id)).first_or_404() db.session.delete(pedido) db.session.commit() return redirect(url_for('visualizar')) @app.route("/visualizar/confirmacao/<id>") @login_required def confirmacao(id): flash(Markup("Confirma a exclusão do pedido?</br></br><a href='/visualizar/delete/" + str(id) + "' class='btn btn-outline-danger btn-sm mr-3 ml-3'>Sim</a><a href='/visualizar' class='btn btn-outline-primary btn-sm ml-3 mr-3'>Não</a>")) return redirect(url_for('visualizar')) #Iniciando parte de controle de estoque @app.route('/estoque') @login_required def estoque(): estoque = Estoque.query.order_by(Estoque.id).all() return render_template('estoque.html', estoque=estoque) @app.route('/estoque/adicionar', methods=["GET", "POST"]) @login_required def adicionarEstoque(): form = EstoqueForm() if form.validate_on_submit(): i = Estoque(form.nome_item.data, form.quantidade_estoque.data, form.quantidade_minimo.data, form.data_atualizacao.data) db.session.add(i) db.session.commit() flash("Item adicionado com sucesso!!") return render_template('adicionar_estoque.html', form=form) @app.route('/estoque/atualizar/<id>', methods=["GET", "POST"]) @login_required def atualizarItem(id): item = Estoque.query.filter_by(id=int(id)).first() form = EstoqueForm() if form.validate_on_submit(): item.nome_item = form.nome_item.data item.quantidade_estoque = form.quantidade_estoque.data item.quantidade_minimo = form.quantidade_minimo.data item.data_atualizacao = form.data_atualizacao.data i = 
Estoque(item.nome_item, item.quantidade_estoque, item.quantidade_minimo, item.data_atualizacao) db.session.commit() flash("Atualização concluída..") return redirect(url_for('estoque')) return render_template('atualizar_estoque.html', form=form, item=item) @app.route("/estoque/delete/<id>") @login_required def deleteItem(id): item = Estoque.query.filter_by(id=int(id)).first_or_404() db.session.delete(item) db.session.commit() return redirect(url_for('estoque')) @app.route("/estoque/confirmacao/<id>") @login_required def confirmacaoEstoque(id): flash(Markup("Confirma a exclusão do item?</br></br><a href='/estoque/delete/" + str(id) + "' class='btn btn-outline-danger btn-sm mr-3 ml-3'>Sim</a><a href='/estoque' class='btn btn-outline-primary btn-sm ml-3 mr-3'>Não</a>")) return redirect(url_for('estoque'))
nilq/baby-python
python
import logging import paho.mqtt.client as mqtt import time # from utils_intern.messageLogger import MessageLogger logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s: %(message)s', level=logging.DEBUG) logger = logging.getLogger(__file__) class MQTTClient: def __init__(self, host, mqttPort, client_id, keepalive=60, username=None, password=None, ca_cert_path=None, set_insecure=False, id=None): # self.logger = MessageLogger.get_logger(__file__, id) self.host = host self.port = int(mqttPort) self.keepalive = keepalive self.receivedMessages = [] self.topic_sub_ack = [] self.callback_function = None self.client_id = client_id self.id = id self.connected = False self.messages = "" self.client = mqtt.Client(client_id, clean_session=False) if username is not None and password is not None: logger.debug("u " + username + " p " + password) self.client.username_pw_set(username, password) if ca_cert_path is not None and len(ca_cert_path) > 0: logger.debug("ca " + ca_cert_path) self.client.tls_set(ca_certs=ca_cert_path) logger.debug("insec " + str(set_insecure)) if not isinstance(set_insecure, bool): set_insecure = bool(set_insecure) self.client.tls_insecure_set(set_insecure) self.client.on_message = self.on_message self.client.on_publish = self.on_publish self.client.on_connect = self.on_connect self.client.on_subscribe = self.on_subscribe self.client.on_disconnect = self.on_disconnect logger.info("Trying to connect to the MQTT broker " + str(self.host) + " " + str(self.port)) try: self.client.connect(self.host, self.port, self.keepalive) except Exception as e: self.connected = False msg = "Invalid MQTT host " + str(self.host) + " " + str(self.port) logger.error("Error connecting client " + str(self.host) + " " + str(self.port) + " " + str(e)) raise InvalidMQTTHostException(msg) # self.client.loop_forever() self.client.loop_start() # Blocking call that processes network traffic, dispatches callbacks and # handles reconnecting. # Other loop*() functions are available that give a threaded interface and a # manual interface. 
def on_connect(self, client, userdata, flags, rc): logger.info("Connected with result code " + str(rc)) if rc == 0: self.connected = True client.connected_Flag = True logger.info("Connected to the broker") else: logger.error("Error connecting to broker " + str(rc)) def on_disconnect(self, *args): logger.error("Disconnected to broker") logger.info(str(args)) def on_message(self, client, userdata, message): # print("Message received") self.callback_function(message.payload.decode()) def sendResults(self, topic, data, qos): try: if self.connected: logger.debug("Sending results to this topic: " + topic) self.publish(topic, data, qos=qos) logger.debug("Results published") except Exception as e: logger.error(e) def publish(self, topic, message, waitForAck=False, qos=2): if self.connected: mid = self.client.publish(topic, message, qos)[1] if (waitForAck): while mid not in self.receivedMessages: logger.debug("waiting for pub ack for topic " + str(topic)) time.sleep(0.25) def on_publish(self, client, userdata, mid): self.receivedMessages.append(mid) def MQTTExit(self): logger.debug("Disconnecting MQTT") self.client.disconnect() logger.debug("Disconnected from the MQTT clients") self.client.loop_stop() logger.debug("MQTT service disconnected") def subscribe_to_topics(self, topics_qos, callback_function): count = 0 while not self.connected: time.sleep(1) count += 1 if count > 15: raise Exception mid = self.subscribe(topics_qos, callback_function) while not self.subscribe_ack_wait(mid): mid = self.subscribe(topics_qos, callback_function) logger.error("Topic subscribe missing ack") def subscribe(self, topics_qos, callback_function): # topics_qos is a list of tuples. eg [("topic",0)] try: if self.connected: logger.info("Subscribing to topics with qos: " + str(topics_qos)) result, mid = self.client.subscribe(topics_qos) if result == 0: logger.debug( "Subscribed to topics: " + str(topics_qos) + " result = " + str(result) + " , mid = " + str( mid)) self.callback_function = callback_function return mid else: logger.info("error on subscribing " + str(result)) return -1 except Exception as e: logger.error(e) return -1 def on_subscribe(self, client, userdata, mid, granted_qos): """check mid values from topic ack list""" self.topic_sub_ack.append(mid) def subscribe_ack_wait(self, mid): if mid < 0: return False count = 0 if self.connected: while count < 15: if mid in self.topic_sub_ack: return True else: logger.info("topic sub ack len = " + str(len(self.topic_sub_ack))) time.sleep(1) count += 1 self.topic_sub_ack.remove(mid) return False class InvalidMQTTHostException(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return repr(self.msg)
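# Hedged usage sketch (added for illustration, not part of the original module).
# It shows the intended call sequence for the MQTTClient defined above. The broker
# address "localhost", port 1883, the client id and the topic name are placeholder
# values, and a reachable MQTT broker at that address is assumed.
if __name__ == "__main__":
    def on_data(payload):
        logger.info("received: " + payload)

    client = MQTTClient(host="localhost", mqttPort=1883, client_id="example-client")
    client.subscribe_to_topics([("example/topic", 1)], on_data)
    client.publish("example/topic", "hello", qos=1)
    time.sleep(2)  # give the broker a moment to deliver the message back to us
    client.MQTTExit()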
nilq/baby-python
python
from gym_trafficnetwork.envs.parallel_network import Cell
import numpy as np

# For the simplest road type
def homogeneous_road(num_cells, vfkph, cell_length, num_lanes):
    r = []
    for _ in range(num_cells):
        r.append(Cell(vfkph, cell_length, num_lanes))
    return r

# For roads whose cells have the number of lanes as n-n-n-m-n
def road_with_single_bottleneck(num_cells, vfkph, cell_length, num_lanes, bottleneck_id, bottleneck_num_lanes):
    # bottleneck_id is the id of the cell that has bottleneck_num_lanes-many lanes (0 is the first cell, and num_cells-1 is the last)
    # I know we will say "let's say we have 5 cells and the last one is the bottleneck, so bottleneck_id is 5". Let's correct it.
    if bottleneck_id >= num_cells:
        import warnings
        warnings.warn("bottleneck_id is invalid! I am setting it to be the last cell.")
        import time
        time.sleep(5)
        bottleneck_id = num_cells - 1
    r = []
    for _ in range(num_cells - 1):
        r.append(Cell(vfkph, cell_length, num_lanes))
    r.insert(bottleneck_id, Cell(vfkph, cell_length, bottleneck_num_lanes))
    return r

# For roads whose cells have the number of lanes as n-n-n-m-m
def two_partition_road(firstpart_num_cells, secondpart_num_cells, vfkph, cell_length, firstpart_num_lanes, secondpart_num_lanes):
    r = []
    for _ in range(firstpart_num_cells):
        r.append(Cell(vfkph, cell_length, firstpart_num_lanes))
    for _ in range(secondpart_num_cells):
        r.append(Cell(vfkph, cell_length, secondpart_num_lanes))
    return r

# Generalization of the two_partition_road (and homogeneous_road) to n-partition roads. All parameters will be either an array or a scalar
def n_partition_road(num_cells, vfkph, cell_length, num_lanes):
    if not (isinstance(num_cells, list) or isinstance(num_cells, np.ndarray)):
        num_cells = [num_cells]
    if not (isinstance(vfkph, list) or isinstance(vfkph, np.ndarray)):
        vfkph = [vfkph]
    if not (isinstance(cell_length, list) or isinstance(cell_length, np.ndarray)):
        cell_length = [cell_length]
    if not (isinstance(num_lanes, list) or isinstance(num_lanes, np.ndarray)):
        num_lanes = [num_lanes]
    num_partitions = np.max([len(num_cells), len(vfkph), len(cell_length), len(num_lanes)])
    if len(num_cells) == 1:
        num_cells = [num_cells[0]]*num_partitions
    if len(vfkph) == 1:
        vfkph = [vfkph[0]]*num_partitions
    if len(cell_length) == 1:
        cell_length = [cell_length[0]]*num_partitions
    if len(num_lanes) == 1:
        num_lanes = [num_lanes[0]]*num_partitions
    r = []
    for i in range(len(num_cells)):
        for _ in range(num_cells[i]):
            r.append(Cell(vfkph[i], cell_length[i], num_lanes[i]))
    return r
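# Hedged usage sketch (added for illustration, not part of the original module).
# The calls below only show how the builders are meant to be combined; the Cell
# constructor signature is taken from the code above, and the concrete numbers
# (60 km/h free-flow speed, 0.5 km cells, 2-3 lanes) are made-up example values.
if __name__ == "__main__":
    plain = homogeneous_road(num_cells=5, vfkph=60.0, cell_length=0.5, num_lanes=3)
    squeezed = road_with_single_bottleneck(5, 60.0, 0.5, num_lanes=3,
                                           bottleneck_id=3, bottleneck_num_lanes=2)
    mixed = n_partition_road(num_cells=[3, 2], vfkph=60.0, cell_length=0.5, num_lanes=[3, 2])
    print(len(plain), len(squeezed), len(mixed))  # each builder returns a list of Cell objects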
nilq/baby-python
python
import re
import typing as tp
from time import time

from loguru import logger


def time_execution(func: tp.Any) -> tp.Any:
    """This decorator shows the execution time of the function object passed"""

    def wrap_func(*args: tp.Any, **kwargs: tp.Any) -> tp.Any:
        t1 = time()
        result = func(*args, **kwargs)
        t2 = time()
        logger.debug(f"Function {func.__name__!r} executed in {(t2 - t1):.4f}s")
        return result

    return wrap_func


def get_headers(rfid_card_id: str) -> tp.Dict[str, str]:
    """Return a dict with all the headers required for using the backend"""
    return {"rfid-card-id": rfid_card_id}


def is_a_ean13_barcode(string: str) -> bool:
    """Determine whether the barcode scanner input is a valid EAN13 barcode"""
    return bool(re.fullmatch(r"\d{13}", string))
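# Hedged usage sketch (added for illustration, not part of the original module).
# The RFID value and barcodes below are arbitrary example strings.
if __name__ == "__main__":
    @time_execution
    def slow_sum(n: int) -> int:
        return sum(range(n))

    slow_sum(1_000_000)                          # logs the elapsed time via loguru
    print(get_headers("0008513976"))             # {'rfid-card-id': '0008513976'}
    print(is_a_ean13_barcode("4006381333931"))   # True: exactly 13 digits
    print(is_a_ean13_barcode("12345"))           # False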
nilq/baby-python
python
import os import time import argparse import numpy as np import cv2 from datetime import datetime import nnabla as nn import nnabla.functions as F import nnabla.parametric_functions as PF import nnabla.solvers as S import nnabla.logger as logger import nnabla.utils.save as save from nnabla.monitor import Monitor, MonitorSeries, MonitorImageTile from dataset import prepare_dataloader from model import depth_cnn_model, l1_loss from auxiliary import convert_depth2colormap def main(args): from numpy.random import seed seed(46) # Get context. from nnabla.ext_utils import get_extension_context ctx = get_extension_context('cudnn', device_id='0', type_config='float') nn.set_default_context(ctx) # Create CNN network # === TRAIN === # Create input variables. image = nn.Variable([args.batch_size, 3, args.img_height, args.img_width]) label = nn.Variable([args.batch_size, 1, args.img_height, args.img_width]) # Create prediction graph. pred = depth_cnn_model(image, test=False) pred.persistent = True # Create loss function. loss = l1_loss(pred, label) # === VAL === #vimage = nn.Variable([args.batch_size, 3, args.img_height, args.img_width]) #vlabel = nn.Variable([args.batch_size, 1, args.img_height, args.img_width]) #vpred = depth_cnn_model(vimage, test=True) #vloss = l1_loss(vpred, vlabel) # Prepare monitors. monitor = Monitor(os.path.join(args.log_dir, 'nnmonitor')) monitors = { 'train_epoch_loss': MonitorSeries('Train epoch loss', monitor, interval=1), 'train_itr_loss': MonitorSeries('Train itr loss', monitor, interval=100), # 'val_epoch_loss': MonitorSeries('Val epoch loss', monitor, interval=1), 'train_viz': MonitorImageTile('Train images', monitor, interval=1000, num_images=4) } # Create Solver. If training from checkpoint, load the info. if args.optimizer == "adam": solver = S.Adam(alpha=args.learning_rate, beta1=0.9, beta2=0.999) elif args.optimizer == "sgd": solver = S.Momentum(lr=args.learning_rate, momentum=0.9) solver.set_parameters(nn.get_parameters()) # Initialize DataIterator data_dic = prepare_dataloader(args.dataset_path, datatype_list=['train', 'val'], batch_size=args.batch_size, img_size=(args.img_height, args.img_width)) # Training loop. logger.info("Start training!!!") total_itr_index = 0 for epoch in range(1, args.epochs + 1): ## === training === ## total_train_loss = 0 index = 0 while index < data_dic['train']['size']: # Preprocess image.d, label.d = data_dic['train']['itr'].next() loss.forward(clear_no_need_grad=True) # Initialize gradients solver.zero_grad() # Backward execution loss.backward(clear_buffer=True) # Update parameters by computed gradients if args.optimizer == 'sgd': solver.weight_decay(1e-4) solver.update() # Update log index += 1 total_itr_index += 1 total_train_loss += loss.d # Pass to monitor monitors['train_itr_loss'].add(total_itr_index, loss.d) # Visualization pred.forward(clear_buffer=True) train_viz = np.concatenate([image.d, convert_depth2colormap(label.d), convert_depth2colormap(pred.d)], axis=3) monitors['train_viz'].add(total_itr_index, train_viz) # Logger logger.info("[{}] {}/{} Train Loss {} ({})".format(epoch, index, data_dic['train']['size'], total_train_loss / index, loss.d)) # Pass training loss to a monitor. 
train_error = total_train_loss / data_dic['train']['size'] monitors['train_epoch_loss'].add(epoch, train_error) # Save Parameter out_param_file = os.path.join(args.log_dir, 'checkpoint' + str(epoch) + '.h5') nn.save_parameters(out_param_file) ## === Validation === ## #total_val_loss = 0.0 #val_index = 0 # while val_index < data_dic['val']['size']: # # Inference # vimage.d, vlabel.d = data_dic['val']['itr'].next() # vpred.forward(clear_buffer=True) # vloss.forward(clear_buffer=True) # total_val_loss += vloss.d # val_index += 1 # break # Pass validation loss to a monitor. #val_error = total_val_loss / data_dic['val']['size'] #monitors['val_epoch_loss'].add(epoch, val_error) if __name__ == "__main__": parser = argparse.ArgumentParser('depth-cnn-nnabla') parser.add_argument('--dataset-path', type=str, default="~/datasets/nyudepthv2") parser.add_argument('--batch-size', type=int, default=8) parser.add_argument('--img-height', type=int, default=228) parser.add_argument('--img-width', type=int, default=304) parser.add_argument('--optimizer', type=str, default='sgd') parser.add_argument('--learning-rate', type=float, default=1e-3) parser.add_argument('--epochs', type=int, default=30) parser.add_argument('--log-dir', default='./log') args = parser.parse_args() main(args)
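# Hedged usage note (added for illustration): the script is driven entirely by the
# argparse flags defined above. The dataset path is a placeholder, the file name
# "train_depth_cnn.py" is assumed (use whatever name this file is saved under), and
# the remaining values simply restate the declared defaults.
#
#   python train_depth_cnn.py --dataset-path ~/datasets/nyudepthv2 \
#       --batch-size 8 --img-height 228 --img-width 304 \
#       --optimizer sgd --learning-rate 1e-3 --epochs 30 --log-dir ./log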
nilq/baby-python
python
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: get_app_health_config_v2.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from monitor_sdk.model.monitor_config import app_health_config_pb2 as monitor__sdk_dot_model_dot_monitor__config_dot_app__health__config__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='get_app_health_config_v2.proto', package='app_health', syntax='proto3', serialized_options=None, serialized_pb=_b('\n\x1eget_app_health_config_v2.proto\x12\napp_health\x1a\x38monitor_sdk/model/monitor_config/app_health_config.proto\"-\n\x1bGetAppHealthConfigV2Request\x12\x0e\n\x06\x61pp_id\x18\x01 \x01(\t\"h\n\x1cGetAppHealthConfigV2Response\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0b\n\x03msg\x18\x02 \x01(\t\x12-\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1f.monitor_config.AppHealthConfig\"\x8f\x01\n#GetAppHealthConfigV2ResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x36\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32(.app_health.GetAppHealthConfigV2Responseb\x06proto3') , dependencies=[monitor__sdk_dot_model_dot_monitor__config_dot_app__health__config__pb2.DESCRIPTOR,]) _GETAPPHEALTHCONFIGV2REQUEST = _descriptor.Descriptor( name='GetAppHealthConfigV2Request', full_name='app_health.GetAppHealthConfigV2Request', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='app_id', full_name='app_health.GetAppHealthConfigV2Request.app_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=104, serialized_end=149, ) _GETAPPHEALTHCONFIGV2RESPONSE = _descriptor.Descriptor( name='GetAppHealthConfigV2Response', full_name='app_health.GetAppHealthConfigV2Response', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='app_health.GetAppHealthConfigV2Response.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='msg', full_name='app_health.GetAppHealthConfigV2Response.msg', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='app_health.GetAppHealthConfigV2Response.data', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ 
], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=151, serialized_end=255, ) _GETAPPHEALTHCONFIGV2RESPONSEWRAPPER = _descriptor.Descriptor( name='GetAppHealthConfigV2ResponseWrapper', full_name='app_health.GetAppHealthConfigV2ResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='app_health.GetAppHealthConfigV2ResponseWrapper.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='app_health.GetAppHealthConfigV2ResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='app_health.GetAppHealthConfigV2ResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='app_health.GetAppHealthConfigV2ResponseWrapper.data', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=258, serialized_end=401, ) _GETAPPHEALTHCONFIGV2RESPONSE.fields_by_name['data'].message_type = monitor__sdk_dot_model_dot_monitor__config_dot_app__health__config__pb2._APPHEALTHCONFIG _GETAPPHEALTHCONFIGV2RESPONSEWRAPPER.fields_by_name['data'].message_type = _GETAPPHEALTHCONFIGV2RESPONSE DESCRIPTOR.message_types_by_name['GetAppHealthConfigV2Request'] = _GETAPPHEALTHCONFIGV2REQUEST DESCRIPTOR.message_types_by_name['GetAppHealthConfigV2Response'] = _GETAPPHEALTHCONFIGV2RESPONSE DESCRIPTOR.message_types_by_name['GetAppHealthConfigV2ResponseWrapper'] = _GETAPPHEALTHCONFIGV2RESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) GetAppHealthConfigV2Request = _reflection.GeneratedProtocolMessageType('GetAppHealthConfigV2Request', (_message.Message,), { 'DESCRIPTOR' : _GETAPPHEALTHCONFIGV2REQUEST, '__module__' : 'get_app_health_config_v2_pb2' # @@protoc_insertion_point(class_scope:app_health.GetAppHealthConfigV2Request) }) _sym_db.RegisterMessage(GetAppHealthConfigV2Request) GetAppHealthConfigV2Response = _reflection.GeneratedProtocolMessageType('GetAppHealthConfigV2Response', (_message.Message,), { 'DESCRIPTOR' : _GETAPPHEALTHCONFIGV2RESPONSE, '__module__' : 'get_app_health_config_v2_pb2' # @@protoc_insertion_point(class_scope:app_health.GetAppHealthConfigV2Response) }) _sym_db.RegisterMessage(GetAppHealthConfigV2Response) GetAppHealthConfigV2ResponseWrapper = _reflection.GeneratedProtocolMessageType('GetAppHealthConfigV2ResponseWrapper', (_message.Message,), { 'DESCRIPTOR' : _GETAPPHEALTHCONFIGV2RESPONSEWRAPPER, '__module__' : 
'get_app_health_config_v2_pb2' # @@protoc_insertion_point(class_scope:app_health.GetAppHealthConfigV2ResponseWrapper) }) _sym_db.RegisterMessage(GetAppHealthConfigV2ResponseWrapper) # @@protoc_insertion_point(module_scope)
nilq/baby-python
python
#!/usr/bin/env python
import os
import sys
import visa
import time

#-------------------------------------------------------------#
## main function
# @param there is no parameter for main function
def main():
    rm = visa.ResourceManager()
    print rm.list_resources()
    instr1 = rm.open_resource('USB0::0x05E6::0x2280::4106469::INSTR')
    print instr1.query("*IDN?")
    for i in xrange(60):
        print "output voltage %sV" % i
        instr1.write(":VOLTage %s" % i)
        time.sleep(0.5)
    print "OK"

#-------------------------------------------------------------#
## if statement
if __name__ == '__main__':
    main()
nilq/baby-python
python
import os, time from this import d import numpy as np import pandas as pd import tensorflow as tf from sklearn.preprocessing import MinMaxScaler from datetime import datetime, timedelta from detector import detect_anomaly from decomposition import load_STL_results, decompose_model from models import * from data_loader import _create_sequences, _decreate_sequences, _count_anomaly_segments, _wavelet from data_loader import convert_datetime, get_dummies, add_temporal_info os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ['CUDA_VISIBLE_DEVICES']= "-1" def get_dataset_name(column_names): # 9 -> IoT, 10 -> samsung, 4 -> kpi, 43 -> energy n_columns = len(column_names) dataset_names = {9: 'IoT', 10: 'samsung', 4: 'kpi', 34: 'energy'} return dataset_names[n_columns] def preprocess_uploaded_file(filepath): if filepath.split('.')[-1] == 'csv': df = pd.read_csv(filepath) dataset_name = get_dataset_name(df.columns) anomaly_scores, th = run_detector(df) anomaly_scores = _decreate_sequences(anomaly_scores) print('Threshold ==>', th) chart_data = [] if dataset_name == 'samsung': columns = df.columns[1:] for col in columns[:-2]: for i in range(df.shape[0]): chart_data.append({ 'date': df['date'].iloc[i][:-3], 'value': float(df[col].iloc[i]), 'column': col, 'score': float(anomaly_scores[i]), 'label': int(df['label'].iloc[i]) }) display_columns = columns[:-2].tolist() elif dataset_name == 'energy': columns = df.columns[1:] for col in columns[:-2]: for i in range(df.shape[0]): chart_data.append({ 'date': df['date'].iloc[i][:-3], 'value': float(df[col].iloc[i]), 'column': col, 'score': float(anomaly_scores[i]), 'label': int(df['label'].iloc[i]) }) display_columns = columns[:-2].tolist() os.remove(filepath) return {'status': 200, 'data': chart_data, 'columns': display_columns, 'anomaly_scores': anomaly_scores, 'threshold': th} else: return {'status': 400, 'message': 'upsupported file type'} def preprocess_samsung_file(df, seq_length, stride, weight, wavelet_num, historical=False, temporal=False, decomposition=False, segmentation=False): x_test, y_test = [], [] y_segment_test = [] x_test_resid = [] label_seq, test_seq = [], [] # Samsung test_df = df if temporal == True: test_df = np.array(add_temporal_info('samsung', test_df, test_df.date)) test_df = test_df[:, 6:-1].astype(float) else: if decomposition == True: test_holiday = np.array(add_temporal_info('samsung', test_df, test_df.date)['holiday']) test_weekend = np.array(add_temporal_info('samsung', test_df, test_df.date)['is_weekend']) test_temporal = (test_holiday + test_weekend).reshape(-1, 1) test_df = np.array(test_df) labels = test_df[:, -1].astype(int) test_df = test_df[:, 1:-1].astype(float) scaler = MinMaxScaler() test_df = scaler.fit_transform(test_df) if decomposition == True: stl_loader = load_STL_results(test_df) test_seasonal = stl_loader['test_seasonal'] test_trend = stl_loader['test_trend'] test_normal = test_seasonal + test_trend x_test_normal = _create_sequences(test_normal, seq_length, stride, historical) print("#"*10, "Deep Decomposer Generating...", "#"*10) deep_pattern = decompose_model(x_test_normal, 'samsung') deep_test = deep_pattern['rec_test'] deep_test_pattern = _decreate_sequences(deep_test) test_resid = (test_df - deep_test_pattern) * (1 + weight * test_temporal) # Wavelet transformation test_resid_wav = _wavelet(test_resid) test_resid_wavelet = _wavelet(test_resid_wav) for _ in range(wavelet_num): test_resid_wavelet = _wavelet(test_resid_wavelet) if temporal == True: if seq_length > 0: 
x_test.append(_create_sequences(test_df, seq_length, stride, historical)) else: x_test.append(test_df) else: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) y_test.append(_create_sequences(labels, seq_length, stride, historical)) else: x_test.append(test_df) y_test.append(labels) if decomposition == True: x_test_resid.append(_create_sequences(test_resid_wavelet, seq_length, stride, historical)) y_segment_test.append(_count_anomaly_segments(labels)[1]) label_seq.append(labels) # For plot traffic raw data test_seq.append(test_df) # Only return temporal auxiliary information if temporal == True: return {'x_test': x_test} # There are four cases. # 1) Decompose time series and evaluate through traditional metrics if (decomposition == True) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'x_test_resid': x_test_resid, 'label_seq': label_seq, 'test_seq': test_seq} # 2) Decompose time series and evalutate new metrics elif (decomposition == True) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test, 'x_test_resid': x_test_resid} # 3) Evaluate through new metrics with common methods elif (decomposition == False) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test} # 4) Evaluate through traditional metrics with common methods elif (decomposition == False) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'label_seq': label_seq, 'test_seq': test_seq} def preprocess_iot_file(df, seq_length, stride, weight, wavelet_num, historical=False, temporal=False, decomposition=False, segmentation=False): x_test, y_test = [], [] y_segment_test = [] x_test_resid = [] label_seq, test_seq = [], [] # IoT Modbus date_format = '%d-%b-%y' time_format = '%H:%M:%S' df['date'] = [datetime.strptime(date, date_format) for date in df['date']] df['date'] = df['date'].dt.date df['time'] = df['time'].str.strip() df['time'] = pd.to_datetime(df['time'], format=time_format).dt.time datetimes = ['date', 'time'] df['timestamp'] =df[datetimes].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) df.insert(0, 'timestamp', df.pop('timestamp')) df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y-%m-%d %H:%M:%S') df.sort_values('timestamp', inplace=True) df.reset_index(drop=True, inplace=True) drop_list = ['ts', 'date', 'time', 'type'] df = df.drop(drop_list, axis=1) if temporal == True: test_df = df test_df = add_temporal_info('IoT_modbus', test_df, test_df.timestamp) test_df.set_index(test_df['timestamp'], inplace=True) test_df = np.array(test_df.drop(['timestamp'], axis=1)) test_df = test_df[:, 3:-1].astype(float) labels = test_df[:, -1].astype(int) else: if decomposition == True: test_df = df test_holiday = np.array(add_temporal_info('IoT_modbus', test_df, test_df.timestamp)['holiday']) test_weekend = np.array(add_temporal_info('IoT_modbus', test_df, test_df.timestamp)['is_weekend']) test_temporal = (test_holiday + test_weekend).reshape(-1, 1) test_df = np.array(df) labels = test_df[:, -1].astype(int) test_df = test_df[:, 1:-1].astype(float) scaler = MinMaxScaler(feature_range=(0, 1)) test_df = scaler.fit_transform(test_df) if decomposition == True: stl_loader = load_STL_results(test_df) test_seasonal = stl_loader['test_seasonal'] test_trend = stl_loader['test_trend'] test_normal = test_seasonal + test_trend x_test_normal = _create_sequences(test_normal, seq_length, stride, historical) print("#"*10, "Deep Decomposer 
Generating...", "#"*10) start_time = time.time() deep_pattern = decompose_model(x_test_normal, seq_length) deep_test = deep_pattern['rec_test'] deep_test_pattern = _decreate_sequences(deep_test) print(f"Deep Decomposer Taken: {time.time() - start_time}") test_resid = (test_df - deep_test_pattern) * (1 + weight * test_temporal) # Wavelet transformation start_time = time.time() print('Start Wavelet Transform') test_resid_wav = _wavelet(test_resid) test_resid_wavelet = _wavelet(test_resid_wav) for _ in range(wavelet_num): test_resid_wavelet = _wavelet(test_resid_wavelet) print(f'Wavelet Transform Taken: {time.time() - start_time}') if temporal == True: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) else: x_test.append(test_df) else: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) y_test.append(_create_sequences(labels, seq_length, stride, historical)) else: x_test.append(test_df) y_test.append(labels) if decomposition == True: x_test_resid.append(_create_sequences(test_resid_wavelet, seq_length, stride, historical)) y_segment_test.append(_count_anomaly_segments(labels)[1]) label_seq.append(labels) # For plot traffic raw data test_seq.append(test_df) # Only return temporal auxiliary information if temporal == True: return {'x_test': x_test} # There are four cases. # 1) Decompose time series and evaluate through traditional metrics if (decomposition == True) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'x_test_resid': x_test_resid, 'label_seq': label_seq, 'test_seq': test_seq} # 2) Decompose time series and evalutate new metrics elif (decomposition == True) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test, 'x_test_resid': x_test_resid} # 3) Evaluate through new metrics with common methods elif (decomposition == False) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test} # 4) Evaluate through traditional metrics with common methods elif (decomposition == False) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'label_seq': label_seq, 'test_seq': test_seq} def preprocess_kpi_file(df, seq_length, stride, weight, wavelet_num, historical=False, temporal=False, decomposition=False, segmentation=False): x_train, x_test, y_test = [], [], [] y_segment_test = [] x_train_resid, x_test_resid = [], [] label_seq, test_seq = [], [] # for avoid RuntimeWarning: invalid value encountered in true_divide (wavelet) df['value'] = df['value'] * 1e+6 if temporal == True: test_df = df[['timestamp', 'value', 'label']] test_df['timestamp'] = pd.to_datetime(test_df['timestamp'], unit='s') test_df = np.array(add_temporal_info('kpi', test_df, test_df.timestamp)) test_df = test_df[:, 2:-1].astype(float) else: if decomposition == True: test_df = df[['timestamp', 'value', 'label']] test_df['timestamp'] = pd.to_datetime(test_df['timestamp'], unit='s') test_holiday = np.array(add_temporal_info('kpi', test_df, test_df.timestamp)['holiday']) test_weekend = np.array(add_temporal_info('kpi', test_df, test_df.timestamp)['is_weekend']) test_temporal = (test_holiday + test_weekend).reshape(-1, 1) test_df = df['value'].values.reshape(-1, 1) labels = df['label'].values.astype(int) scaler = MinMaxScaler(feature_range=(0, 1)) test_df = scaler.fit_transform(test_df) if decomposition == True: stl_loader = load_STL_results(test_df) test_seasonal = stl_loader['test_seasonal'] test_trend = 
stl_loader['test_trend'] test_normal = test_seasonal + test_trend x_test_normal = _create_sequences(test_normal, seq_length, stride, historical) print("#"*10, "Deep Decomposer Generating...", "#"*10) deep_pattern = decompose_model(x_test_normal, 'kpi') deep_test = deep_pattern['rec_test'] deep_test_pattern = _decreate_sequences(deep_test) test_resid = (test_df - deep_test_pattern) * (1 + weight * test_temporal) if temporal == True: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) else: x_test.append(test_df) else: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) y_test.append(_create_sequences(labels, seq_length, stride, historical)) else: x_test.append(test_df) y_test.append(labels) if decomposition == True: x_test_resid.append(_create_sequences(test_resid, seq_length, stride, historical)) y_segment_test.append(_count_anomaly_segments(labels)[1]) label_seq.append(labels) # For plot traffic raw data test_seq.append(test_df) # Only return temporal auxiliary information if temporal == True: return {'x_test': x_test} # There are four cases. # 1) Decompose time series and evaluate through traditional metrics if (decomposition == True) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'x_test_resid': x_test_resid, 'label_seq': label_seq, 'test_seq': test_seq} # 2) Decompose time series and evalutate new metrics elif (decomposition == True) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test, 'x_test_resid': x_test_resid} # 3) Evaluate through new metrics with common methods elif (decomposition == False) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test} # 4) Evaluate through traditional metrics with common methods elif (decomposition == False) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'label_seq': label_seq, 'test_seq': test_seq} def preprocess_energy_file(df, seq_length, stride, weight, wavelet_num, historical=False, temporal=False, decomposition=False, segmentation=False): x_train, x_test, y_test = [], [], [] y_segment_test = [] x_train_resid, x_test_resid = [], [] label_seq, test_seq = [], [] test_df = df # test_df['date'] = pd.to_datetime(test_df['date'], format='%Y-%m-%d %H:%M:%S') if temporal == True: test_df = np.array(add_temporal_info('energy', test_df, test_df.date)) test_df = test_df[:, 1:-1].astype(float) labels = test_df[:, -1].astype(int) else: if decomposition == True: test_holiday = np.array(add_temporal_info('energy', test_df, test_df.date)['holiday']) test_weekend = np.array(add_temporal_info('energy', test_df, test_df.date)['is_weekend']) test_temporal = (test_holiday + test_weekend).reshape(-1, 1) test_df = np.array(test_df) labels = test_df[:, -1].astype(int) test_df = test_df[:, 1:-1].astype(float) scaler = MinMaxScaler(feature_range=(0, 1)) test_df = scaler.fit_transform(test_df) if decomposition == True: stl_loader = load_STL_results(test_df) test_seasonal = stl_loader['test_seasonal'] test_trend = stl_loader['test_trend'] test_normal = test_seasonal + test_trend x_test_normal = _create_sequences(test_normal, seq_length, stride, historical) print("#"*10, "Deep Decomposer Generating...", "#"*10) deep_pattern = decompose_model(x_test_normal, 'energy') deep_test = deep_pattern['rec_test'] deep_test_pattern = _decreate_sequences(deep_test) test_resid = (test_df - deep_test_pattern) * (1 + weight * test_temporal) # Wavelet 
transformation test_resid_wav = _wavelet(test_resid) test_resid_wavelet = _wavelet(test_resid_wav) for iter in range(wavelet_num): test_resid_wavelet = _wavelet(test_resid_wavelet) if temporal == True: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) else: x_test.append(test_df) else: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) y_test.append(_create_sequences(labels, seq_length, stride, historical)) else: x_test.append(test_df) y_test.append(labels) if decomposition == True: x_test_resid.append(_create_sequences(test_resid_wavelet, seq_length, stride, historical)) y_segment_test.append(_count_anomaly_segments(labels)[1]) label_seq.append(labels) # For plot traffic raw data test_seq.append(test_df) # Only return temporal auxiliary information if temporal == True: return {'x_test': x_test} # There are four cases. # 1) Decompose time series and evaluate through traditional metrics if (decomposition == True) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'x_test_resid': x_test_resid, 'label_seq': label_seq, 'test_seq': test_seq} # 2) Decompose time series and evalutate new metrics elif (decomposition == True) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test, 'x_test_resid': x_test_resid} # 3) Evaluate through new metrics with common methods elif (decomposition == False) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test} # 4) Evaluate through traditional metrics with common methods elif (decomposition == False) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'label_seq': label_seq, 'test_seq': test_seq} def load_detector(dataset_name): return tf.keras.models.load_model(f'pretrained_models/AD_{dataset_name}') # tf.keras.models.load_model('pretrained_models/Samsung') def run_detector(upload_data): stride = 1 SEED = 0 MODEL = "Bi-GRU" TEMPORAL = 0 DECOMPOSITION = 1 # 0 SEGMENTATION = 1 # 0 lamda_t = -0.7 wavelet_num = 3 dataset_name = get_dataset_name(upload_data.columns) detector = load_detector(dataset_name) # implicitly check dataset_name by feature number ? column names ? etc ? if dataset_name == "IoT": preprocessor = preprocess_iot_file seq_length = 60 elif dataset_name == 'samsung': preprocessor = preprocess_samsung_file seq_length = 36 elif dataset_name == 'energy': preprocessor = preprocess_energy_file seq_length = 60 aux_data = None if TEMPORAL: aux_data = preprocessor(upload_data, seq_length, stride, lamda_t, wavelet_num, temporal=TEMPORAL) data = preprocessor(upload_data, seq_length, stride, lamda_t, wavelet_num, decomposition=DECOMPOSITION, segmentation=SEGMENTATION) # preprocess file print('start detection phase') start_time = time.time() anomaly_scores, th = detect_anomaly(data, aux_data, detector, detector, MODEL, TEMPORAL, DECOMPOSITION, SEGMENTATION) print(f'dection phase taken {time.time() - start_time}') return anomaly_scores, th
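# Hedged usage sketch (added for illustration, not part of the original module).
# preprocess_uploaded_file() drives the whole pipeline above; the CSV path below is
# a placeholder, and the file is assumed to have a column count that
# get_dataset_name() maps to one of the known datasets (9, 10, 4 or 34 columns).
if __name__ == "__main__":
    result = preprocess_uploaded_file("uploads/example_samsung.csv")
    if result["status"] == 200:
        print("threshold:", result["threshold"], "columns:", result["columns"])
    else:
        print(result["message"])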
nilq/baby-python
python
import datetime import time from open_publishing.core.enums import EventTarget, EventAction, EventType class Events(object): def __init__(self, ctx): self._ctx = ctx def get(self, references=None, target=None, action=None, type=None, filters=None, since=None, till=None, history=False): """ Return specified events. Since parameter filters all events since given timestamp. Till parameter filters all events till given timestamp. If history is set to False (default) per object only the latest event will be returned. If history is set to True all events will be returned. """ event_types = self._get_event_types(target, action, type, filters) references = self._normalize_references(references) from_timestamp = self._normalize_timestamp(since) to_timestamp = self._normalize_timestamp(till) method= 'history' if history else 'list_status' response = self._ctx.gjp.fetch_events(method=method, event_types=event_types, references=references, from_timestamp=from_timestamp, to_timestamp=to_timestamp) execution_timestamp = datetime.datetime.fromtimestamp(response['execution_timestamp']) result = EventsList(execution_timestamp) def add_items(items): for item in items: timestamp = None if 'last_modified' in item: timestamp = item['last_modified'] if 'log_time' in item: timestamp = item['log_time'] result.append(EventsList.Event(target=EventTarget.from_id(item['target']), action=EventAction.from_id(item['action']), type=EventType.from_id(item['type']), timestamp=datetime.datetime.fromtimestamp(timestamp), guid=(item['source_type'] + '.' + str(item['reference_id'])).lower(), app=item.get('app', None), uuid=item.get('uuid', None))) add_items(response['items']) while 'resumption_token' in response: response = self._ctx.gjp.fetch_events(method=method, resumption_token=response['resumption_token']) add_items(response['items']) result.sort(key=lambda a: a.timestamp) return result def last_event(self, references, target=None, action=None, type=None, filters=None): event_types = self._get_event_types(target, action, type, filters) if isinstance(references, (list, tuple)): str_references = ','.join(set(references)) else: raise TypeError('references: expected list or tuple, got: {0}'.format(type(references))) events = {} def add_items(items): for item in items: guid = (item['source_type'] + '.' 
+ str(item['reference_id'])).lower() if guid not in events or events[guid]['last_modified'] < item['last_modified']: events[guid] = item response = self._ctx.gjp.fetch_events(method='list_status', event_types=event_types, references=str_references) execution_timestamp = datetime.datetime.fromtimestamp(response['execution_timestamp']) add_items(response['items']) while 'resumption_token' in response: response = self._ctx.gjp.fetch_events('list_status', resumption_token=response['resumption_token']) add_items(response['items']) result = EventsList(execution_timestamp) for ref in references: guid = ref.lower() if guid in events: result.append(EventsList.Event(target=EventTarget.from_id(events[guid]['target']), action=EventAction.from_id(events[guid]['action']), type=EventType.from_id(events[guid]['type']), timestamp=datetime.datetime.fromtimestamp(events[guid]['last_modified']), guid=guid)) else: result.append(None) return result def _get_event_types(self, target, action, type, filters): if target is not None or action is not None or type is not None: if filters is not None: raise KeyError('filters or target/action/type should be set, not both') elif ((target is not None and target not in EventTarget) or (action is not None and action not in EventAction) or (type is not None and type not in EventType)): raise ValueError('target/action/type should be None or from op.events.target/action.type respectively, got: {0}, {1}, {2}'.format(target, action, type)) else: event_types = '({target},{action},{type})'.format(target=target if target is not None else '', action=action if action is not None else '', type=type if type is not None else '') else: if filters is None: event_types = '(,,)' #All events else: if not isinstance(filters, list): raise ValueError('filters should be list of tuples of (op.events.target, op.events.action, op.event.type), got: {0}'.format(filters)) event_types = [] for target, action, type in filters: if ((target is not None and target not in EventTarget) or (action is not None and action not in EventAction) or (type is not None and type not in EventType)): raise ValueError('filters should be list of tuples of (op.events.target|None, op.events.action|None, op.event.type|None), got: {0}'.format(filters)) else: event_types.append('({target},{action},{type})'.format(target=target if target is not None else '', action=action if action is not None else '', type=type if type is not None else '')) event_types = ';'.join(event_types) return event_types @staticmethod def _normalize_timestamp(timestamp): """Normalize timestamp to the format needed by API.""" if timestamp is None: return None if not isinstance(timestamp, (datetime.datetime, datetime.date)): raise TypeError('since should be datetime.datetime or datetime.date, got {0}'.format(timestamp)) return int(time.mktime(timestamp.timetuple())) @staticmethod def _normalize_references(references): if references is None: return None if not isinstance(references, (list, tuple)): raise TypeError('references: expected list or tuple, got: {0}'.format(type(references))) return ','.join(references) class EventsList(list): """List of Open Publishing Events.""" class Event(object): """Open Publishing Event object.""" def __init__(self, target, action, type, timestamp, guid, app=None, uuid=None): self._target = target self._action = action self._type = type self._timestamp = timestamp self._guid = guid self._app = app self._uuid = uuid @property def target(self): return self._target @property def action(self): return self._action @property def 
type(self): return self._type @property def tuple(self): return (self.target, self.action, self.type) @property def timestamp(self): return self._timestamp @property def guid(self): return self._guid @property def app(self): return self._app @property def uuid(self): return self._uuid def __repr__(self): '''Returns representation of the object''' return("{}(guid={}, target={}, action={}, type={}, app={})".format(self.__class__.__name__, self.guid, self.target, self.action, self.type, self.app)) def __init__(self, execution_timestamp): super(EventsList, self).__init__([]) self._execution_timestamp = execution_timestamp @property def execution_timestamp(self): return self._execution_timestamp
nilq/baby-python
python
def add(x, y):
    return x + y


def double(x):
    return x + x
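# Hedged usage example (added for illustration): trivial calls to the two helpers.
if __name__ == "__main__":
    print(add(2, 3))   # 5
    print(double(4))   # 8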
nilq/baby-python
python
import math
import datetime

block_size = 0.5


def block_name(lat, lon):
    discretized_lat = (math.floor(lat/block_size)+0.5)*block_size
    discretized_lon = (math.floor(lon/block_size)+0.5)*block_size
    return (discretized_lat, discretized_lon)


def inside_polygon(x, y, points):
    """
    Return True if a coordinate (x, y) is inside a polygon defined by
    a list of vertices [(x1, y1), (x2, y2), ... , (xN, yN)].

    Reference: http://www.ariel.com.au/a/python-point-int-poly.html
    """
    n = len(points)
    inside = False
    p1y, p1x = points[0]
    for i in range(1, n + 1):
        p2y, p2x = points[i % n]
        if y > min(p1y, p2y):
            if y <= max(p1y, p2y):
                if x <= max(p1x, p2x):
                    if p1y != p2y:
                        xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                        if p1x == p2x or x <= xinters:
                            inside = not inside
        p1x, p1y = p2x, p2y
    return inside


def get_covered_blocks(polygon):
    if polygon[0] != polygon[-1]:
        polygon.append(polygon[0])
    lats = [pos[1] for pos in polygon]
    max_lat = max(lats)
    min_lat = min(lats)
    longs = [pos[0] for pos in polygon]
    max_long = max(longs)
    min_long = min(longs)
    max_block = block_name(max_lat, max_long)
    min_block = block_name(min_lat, min_long)
    covered_blocks = []
    for lat_i in range(int((max_block[0] - min_block[0])/block_size)):
        for long_i in range(int((max_block[1] - min_block[1])/block_size)):
            la, lo = min_block[0] + lat_i * block_size, min_block[1] + long_i * block_size
            if inside_polygon(la, lo, polygon):
                covered_blocks.append((la, lo))
    return covered_blocks


def add_1_day(string):
    new_date = datetime.datetime.strptime(string, "%Y%m%d") + datetime.timedelta(days=1)
    return datetime.datetime.strftime(new_date, '%Y%m%d')


def sub_1_day(string):
    new_date = datetime.datetime.strptime(string, "%Y%m%d") - datetime.timedelta(days=1)
    return datetime.datetime.strftime(new_date, '%Y%m%d')


def wx_json_2_timestamp(string):
    return int(datetime.datetime.strftime(datetime.datetime.strptime(string, "%Y-%m-%dT%H:%M:%SZ"), '%s')) * 1000
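# Hedged usage sketch (added for illustration, not part of the original module).
# The polygon vertices, dates and timestamp below are arbitrary example values in
# the coordinate order and string formats that the functions above expect. Note
# that wx_json_2_timestamp relies on the platform-specific '%s' strftime directive.
if __name__ == "__main__":
    print(block_name(40.7, -74.0))   # centre of the enclosing 0.5-degree block
    polygon = [(40.0, -75.0), (42.0, -75.0), (42.0, -72.0), (40.0, -72.0)]
    print(get_covered_blocks(polygon))   # blocks whose centres fall inside the polygon
    print(add_1_day("20240228"), sub_1_day("20240301"))
    print(wx_json_2_timestamp("2024-03-01T12:00:00Z"))   # milliseconds since the epoch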
nilq/baby-python
python
""" Results represent Prefect Task inputs and outputs. In particular, anytime a Task runs, its output is encapsulated in a `Result` object. This object retains information about what the data is, and how to "handle" it if it needs to be saved / retrieved at a later time (for example, if this Task requests for its outputs to be cached or checkpointed). An instantiated Result object has the following attributes: - a `value`: the value of a Result represents a single piece of data - a `safe_value`: this attribute maintains a reference to a `SafeResult` object which contains a "safe" representation of the `value`; for example, the `value` of a `SafeResult` might be a URI or filename pointing to where the raw data lives - a `result_handler` that holds onto the `ResultHandler` used to read / write the value to / from its handled representation To distinguish between a Task that runs but does not return output from a Task that has yet to run, Prefect also provides a `NoResult` object representing the _absence_ of computation / data. This is in contrast to a `Result` whose value is `None`. """ from typing import Any from prefect.engine.result_handlers import ResultHandler class ResultInterface: """ A necessary evil so that Results can store SafeResults and NoResults in its attributes without pickle recursion problems. """ def __eq__(self, other: Any) -> bool: if type(self) == type(other): eq = True for attr in self.__dict__: if attr.startswith("_"): continue eq &= getattr(self, attr, object()) == getattr(other, attr, object()) return eq return False def __repr__(self) -> str: val = self.value # type: ignore return "<{type}: {val}>".format(type=type(self).__name__, val=repr(val)) def to_result(self, result_handler: ResultHandler = None) -> "ResultInterface": """ If no result handler provided, returns self. If a ResultHandler is provided, however, it will become the new result handler for this result. Args: - result_handler (optional): an optional result handler to override the current handler Returns: - ResultInterface: a potentially new Result object """ if result_handler is not None: self.result_handler = result_handler return self def store_safe_value(self) -> None: """Performs no computation.""" class Result(ResultInterface): """ A representation of the result of a Prefect task; this class contains information about the value of a task's result, a result handler specifying how to serialize or store this value securely, and a `safe_value` attribute which holds information about the current "safe" representation of this result. 
Args: - value (Any): the value of the result - result_handler (ResultHandler, optional): the result handler to use when storing / serializing this result's value; required if you intend on persisting this result in some way """ def __init__(self, value: Any, result_handler: ResultHandler = None): self.value = value self.safe_value = NoResult # type: SafeResult self.result_handler = result_handler # type: ignore def store_safe_value(self) -> None: """ Populate the `safe_value` attribute with a `SafeResult` using the result handler """ # don't bother with `None` values if self.value is None: return if self.safe_value == NoResult: assert isinstance( self.result_handler, ResultHandler ), "Result has no ResultHandler" # mypy assert value = self.result_handler.write(self.value) self.safe_value = SafeResult( value=value, result_handler=self.result_handler ) class SafeResult(ResultInterface): """ A _safe_ representation of the result of a Prefect task; this class contains information about the serialized value of a task's result, and a result handler specifying how to deserialize this value Args: - value (Any): the safe represenation of a value - result_handler (ResultHandler): the result handler to use when reading this result's value """ def __init__(self, value: Any, result_handler: ResultHandler): self.value = value self.result_handler = result_handler @property def safe_value(self) -> "SafeResult": return self def to_result(self, result_handler: ResultHandler = None) -> "ResultInterface": """ Read the value of this result using the result handler and return a fully hydrated Result. If a new ResultHandler is provided, it will instead be used to read the underlying value and the `result_handler` attribute of this result will be reset accordingly. Args: - result_handler (optional): an optional result handler to override the current handler Returns: - ResultInterface: a potentially new Result object """ if result_handler is not None: self.result_handler = result_handler value = self.result_handler.read(self.value) res = Result(value=value, result_handler=self.result_handler) res.safe_value = self return res class NoResultType(SafeResult): """ A `SafeResult` subclass representing the _absence_ of computation / output. A `NoResult` object returns itself for its `value` and its `safe_value`. """ def __init__(self) -> None: super().__init__(value=None, result_handler=ResultHandler()) def __eq__(self, other: Any) -> bool: if type(self) == type(other): return True else: return False def __repr__(self) -> str: return "<No result>" def __str__(self) -> str: return "NoResult" def to_result(self, result_handler: ResultHandler = None) -> "ResultInterface": """ Performs no computation and returns self. Args: - result_handler (optional): a passthrough for interface compatibility """ return self NoResult = NoResultType()
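# Hedged usage sketch (added for illustration, not part of the original module).
# It demonstrates the value / safe_value round trip described in the module
# docstring. JSONResultHandler is assumed to be one of the concrete handlers
# available in prefect.engine.result_handlers alongside the base ResultHandler.
if __name__ == "__main__":
    from prefect.engine.result_handlers import JSONResultHandler

    res = Result(value={"answer": 42}, result_handler=JSONResultHandler())
    assert res.safe_value is NoResult         # nothing has been written yet
    res.store_safe_value()                    # serialises the value via the handler
    print(res.safe_value.value)               # the handled (JSON string) representation
    rehydrated = res.safe_value.to_result()   # read it back into a full Result
    print(rehydrated.value)                   # {'answer': 42}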
nilq/baby-python
python
import numpy import pytest from pauxy.systems.ueg import UEG from pauxy.estimators.ueg import fock_ueg, local_energy_ueg from pauxy.estimators.greens_function import gab from pauxy.utils.testing import get_random_wavefunction from pauxy.utils.misc import timeit @pytest.mark.unit def test_fock_build(): sys = UEG({'rs': 2.0, 'ecut': 2, 'nup': 7, 'ndown': 7, 'thermal': True}) numpy.random.seed(7) psi = get_random_wavefunction(sys.nelec, sys.nbasis).real trial = numpy.eye(sys.nbasis, sys.nelec[0]) G = numpy.array([gab(psi[:,:sys.nup], psi[:,:sys.nup]), gab(psi[:,sys.nup:], psi[:,sys.nup:])]).astype(numpy.complex128) nb = sys.nbasis # from pyscf import gto, scf, ao2mo # mol = gto.M() # mol.nelec = sys.nelec # mf = scf.UHF(mol) # U = sys.compute_real_transformation() # h1_8 = numpy.dot(U.conj().T, numpy.dot(sys.H1[0], U)) # mf.get_hcore = lambda *args: h1_8 # mf.get_ovlp = lambda *args: numpy.eye(nb) # mf._eri = sys.eri_8() # mf._eri = ao2mo.restore(8, eri_8, nb) # veff = mf.get_veff(dm=dm) eris = sys.eri_4() F = fock_ueg(sys, G) vj = numpy.einsum('pqrs,xqp->xrs', eris, G) vk = numpy.einsum('pqrs,xqr->xps', eris, G) fock = numpy.zeros((2,33,33), dtype=numpy.complex128) fock[0] = sys.H1[0] + vj[0] + vj[1] - vk[0] fock[1] = sys.H1[1] + vj[0] + vj[1] - vk[1] assert numpy.linalg.norm(fock - F) == pytest.approx(0.0) @pytest.mark.unit def test_build_J(): sys = UEG({'rs': 2.0, 'ecut': 2.0, 'nup': 7, 'ndown': 7, 'thermal': True}) Gkpq = numpy.zeros((2,len(sys.qvecs)), dtype=numpy.complex128) Gpmq = numpy.zeros((2,len(sys.qvecs)), dtype=numpy.complex128) psi = get_random_wavefunction(sys.nelec, sys.nbasis).real trial = numpy.eye(sys.nbasis, sys.nelec[0]) G = numpy.array([gab(psi[:,:sys.nup], psi[:,:sys.nup]), gab(psi[:,sys.nup:], psi[:,sys.nup:])]) from pauxy.estimators.ueg import coulomb_greens_function for s in [0,1]: coulomb_greens_function(len(sys.qvecs), sys.ikpq_i, sys.ikpq_kpq, sys.ipmq_i, sys.ipmq_pmq, Gkpq[s], Gpmq[s], G[s]) from pauxy.estimators.ueg import build_J J1 = timeit(build_J)(sys, Gpmq, Gkpq) from pauxy.estimators.ueg_kernels import build_J_opt J2 = timeit(build_J_opt)(len(sys.qvecs), sys.vqvec, sys.vol, sys.nbasis, sys.ikpq_i, sys.ikpq_kpq, sys.ipmq_i, sys.ipmq_pmq, Gkpq, Gpmq) assert numpy.linalg.norm(J1-J2) == pytest.approx(0.0) @pytest.mark.unit def test_build_K(): sys = UEG({'rs': 2.0, 'ecut': 2.0, 'nup': 7, 'ndown': 7, 'thermal': True}) Gkpq = numpy.zeros((2,len(sys.qvecs)), dtype=numpy.complex128) Gpmq = numpy.zeros((2,len(sys.qvecs)), dtype=numpy.complex128) psi = get_random_wavefunction(sys.nelec, sys.nbasis).real trial = numpy.eye(sys.nbasis, sys.nelec[0]) G = numpy.array([gab(psi[:,:sys.nup], psi[:,:sys.nup]), gab(psi[:,sys.nup:], psi[:,sys.nup:])]).astype(numpy.complex128) from pauxy.estimators.ueg import build_K from pauxy.estimators.ueg_kernels import build_K_opt K1 = timeit(build_K)(sys, G) K2 = timeit(build_K_opt)(len(sys.qvecs), sys.vqvec, sys.vol, sys.nbasis, sys.ikpq_i, sys.ikpq_kpq, sys.ipmq_i, sys.ipmq_pmq, G) assert numpy.linalg.norm(K1-K2) == pytest.approx(0.0)
nilq/baby-python
python
import subprocess

host = ["www.google.com", "192.0.0.25"]
rounds = 32

ping = subprocess.Popen(
    ["ping", "-c", str(rounds), host[1]],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE
)

out, error = ping.communicate()
print "Out : %s" % out

import re
matcher = re.compile("rtt min/avg/max/mdev = (\d+.\d+)/(\d+.\d+)/(\d+.\d+)/(\d+.\d+)")
values = matcher.search(out).groups()

print "Output : %s" % out
print "Min : %s" % values[0]
print "Average: %s" % values[1]
print "Maximum: %s" % values[2]
print "MDeviation: %s" % values[3]
nilq/baby-python
python
#!/usr/bin/env python3
# This script is used to avoid issues with `xcopy.exe` under Windows Server 2016 (https://github.com/moby/moby/issues/38425)
import glob, os, shutil, sys

# If the destination is an existing directory then expand wildcards in the source
destination = sys.argv[2]
if os.path.isdir(destination) == True:
    sources = glob.glob(sys.argv[1])
else:
    sources = [sys.argv[1]]

# Copy each of our source files/directories
for source in sources:
    if os.path.isdir(source):
        dest = os.path.join(destination, os.path.basename(source))
        shutil.copytree(source, dest)
    else:
        shutil.copy2(source, destination)
    print('Copied {} to {}.'.format(source, destination), file=sys.stderr)
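# Hedged usage note (added for illustration): the script takes a source path,
# optionally containing wildcards, and a destination, mirroring a plain xcopy call.
# The paths and the file name "copy_helper.py" are made-up examples, not values
# used by any particular build.
#
#   python3 copy_helper.py "C:\build\artifacts\*.dll" "C:\deploy\bin"
#   python3 copy_helper.py src_dir dest_dir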
nilq/baby-python
python
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase


class TicketTests(APITestCase):

    def setUp(self):
        """
        Configurations to be made available before each individual test case
        inheriting from this class.
        """
        url = reverse('account-registration')
        data = {
            "username": "Adenike",
            "email": "adenike@gmagil.com",
            "password": "dayo"
        }
        self.response = self.client.post(url, data, format='json')

        url = reverse('create-flight')
        data = {
            "flight_type": "economy",
            "to_location": "Abuja",
            "from_location": "Lagos",
            "departure_date": "2019-08-22T14:47:05Z",
            "return_date": "2019-08-27T14:47:05Z",
            "total_seats": 50,
            "available_seats": 37,
        }
        token = 'Bearer ' + self.response['Authorization']
        self.client.post(url, data, HTTP_AUTHORIZATION=token, format='json')

        url = '/ticket/flight/13/'
        data = {
            'cost': 67
        }
        self.client.post(url, data, HTTP_AUTHORIZATION=token, format='json')

    def test_ticket_is_created_successfully(self):
        """
        Ensure a ticket is successfully created
        """
        url = '/ticket/flight/12/'
        data = {"ticket_class": "BS", "cost": 0}
        token = 'Bearer ' + self.response['Authorization']
        response = self.client.post(url, data, HTTP_AUTHORIZATION=token, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

    def test_all_tickets_created_successfully(self):
        """
        Ensure all tickets are gotten
        """
        url = '/ticket/'
        token = 'Bearer ' + self.response['Authorization']
        response = self.client.get(url, HTTP_AUTHORIZATION=token, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
nilq/baby-python
python
from interface import create_new_user
import unittest
from passlock import User, Credentials


class TestClass(unittest.TestCase):
    '''
    A Test class that defines test cases for the user behaviour
    '''

    def setUp(self):
        '''
        Set up method that runs before each test case
        '''
        self.new_user = User('mark', 'mark002.')
        return super().setUp()

    def test_init(self):
        '''
        Test case that checks if the object has been initialized correctly
        '''
        self.assertEqual(self.new_user.username, 'mark')
        self.assertEqual(self.new_user.password, 'mark002.')

    def test_save_user(self):
        '''
        Test case that checks if the user object is saved in the user list
        '''
        self.new_user.save_user()
        self.assertEqual(len(User.user_list), 1)


class TestCredentials(unittest.TestCase):
    '''
    A Test class that defines test cases for the credentials
    '''

    def setUp(self):
        '''
        Method that runs before each test case
        '''
        self.new_credentials = Credentials('Gmail', 'mark_kk', 'mark002.')

    def tearDown(self):
        '''
        Method that cleans up after each test case has run
        '''
        Credentials.Credentials_list = []

    def test_init(self):
        '''
        Test case that checks if credentials instances have been initialized correctly
        '''
        self.assertEqual(self.new_credentials.account, 'Gmail')
        self.assertEqual(self.new_credentials.userName, 'mark_kk')
        self.assertEqual(self.new_credentials.password, 'mark002.')

    def test_save_credentials(self):
        '''
        Test case that checks if a credentials object is saved in the credentials list
        '''
        self.new_credentials.save_details()
        self.assertEqual(len(Credentials.Credentials_list), 1)

    def test_save_many_accounts(self):
        '''
        Test that checks if several credentials can be saved in the credentials list
        '''
        self.new_credentials.save_details()
        test_credentials = Credentials('mark', 'mark002', 'markoo2')
        test_credentials.save_details()
        self.assertEqual(len(Credentials.Credentials_list), 2)


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
import unittest import numpy as np import theano import theano.tensor as T from daps.model import weigthed_binary_crossentropy class test_loss_functions(unittest.TestCase): def test_weigthed_binary_crossentropy(self): w0_val, w1_val = 0.5, 1.0 x_val, y_val = np.random.rand(5, 3), np.random.randint(0, 2, (5, 3)) expected_val = -(w1_val * y_val * np.log(x_val) + w0_val * (1 - y_val) * np.log(1 - x_val)) w0, w1 = T.constant(w0_val), T.constant(w1_val) x, y = T.matrix('pred'), T.matrix('true') loss = weigthed_binary_crossentropy(x, y, w0, w1) f = theano.function([x, y], loss, allow_input_downcast=True) np.testing.assert_array_almost_equal(expected_val, f(x_val, y_val))
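

# A minimal NumPy reference of the loss under test, mirroring the expected_val
# computation above. It only illustrates the intended math; it is not the
# daps.model implementation itself.
def _reference_weighted_binary_crossentropy(pred, true, w0, w1):
    # w1 scales the positive-class term, w0 scales the negative-class term.
    return -(w1 * true * np.log(pred) + w0 * (1 - true) * np.log(1 - pred))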
nilq/baby-python
python
''' Filters that operate on ImageStim inputs. ''' import numpy as np from PIL import Image from PIL import ImageFilter as PillowFilter from pliers.stimuli.image import ImageStim from .base import Filter class ImageFilter(Filter): ''' Base class for all ImageFilters. ''' _input_type = ImageStim class ImageCroppingFilter(ImageFilter): ''' Crops an image. Args: box (tuple): a 4-length tuple containing the left, upper, right, and lower coordinates for the desired region of the image. If none is specified, crops out black borders from the image. ''' _log_attributes = ('box',) VERSION = '1.0' def __init__(self, box=None): self.box = box super().__init__() def _filter(self, stim): if self.box: x0, y0, x1, y1 = self.box else: pillow_img = Image.fromarray(stim.data) x0, y0, x1, y1 = pillow_img.getbbox() new_img = stim.data[y0:y1, x0:x1] return ImageStim(stim.filename, data=new_img) class ImageResizingFilter(ImageFilter): ''' Resizes an image, while optionally maintaining aspect ratio. Args: size (tuple of two ints): new size of the image. maintain_aspect_ratio (boolean): if true, resize the image while maintaining aspect ratio, and pad the rest with zero values. Otherwise, potentially distort the image during resizing to fit the new size. resample str: resampling method. One of 'nearest', 'bilinear', 'bicubic', 'lanczos', 'box', and 'hamming'. See https://pillow.readthedocs.io/en/5.1.x/handbook/concepts.html#concept-filters for more information. ''' _log_attributes = ('size', 'maintain_aspect_ratio', 'resample') VERSION = '1.0' def __init__(self, size, maintain_aspect_ratio=False, resample='bicubic'): self.size = size self.maintain_aspect_ratio = maintain_aspect_ratio resampling_mapping = { 'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC, 'lanczos': Image.LANCZOS, 'box': Image.BOX, 'hamming': Image.HAMMING, } if resample.lower() not in resampling_mapping.keys(): raise ValueError( "Unknown resampling method '{}'. Allowed values are '{}'" .format(resample, "', '".join(resampling_mapping.keys()))) self.resample = resampling_mapping[resample] super().__init__() def _filter(self, stim): pillow_img = Image.fromarray(stim.data) if not self.maintain_aspect_ratio: new_img = np.array( pillow_img.resize(self.size, resample=self.resample)) else: # Resize the image to the requested size in one of the dimensions. # We then create a black image of the requested size and paste the # resized image into the middle of this new image. The effect is # that there is a black border on the top and bottom or the left # and right of the resized image. orig_size = pillow_img.size ratio = max(self.size) / max(orig_size) inter_size = (np.array(orig_size) * ratio).astype(np.int32) inter_img = pillow_img.resize(inter_size, resample=self.resample) new_img = Image.new('RGB', self.size) upper_left = ( (self.size[0] - inter_size[0]) // 2, (self.size[1] - inter_size[1]) // 2) new_img.paste(inter_img, box=upper_left) new_img = np.array(new_img) return ImageStim(stim.filename, data=new_img) class PillowImageFilter(ImageFilter): ''' Uses the ImageFilter module from PIL to run a pre-defined image enhancement filter on an ImageStim. Sample of available filters: BLUR, CONTOUR, DETAIL, EDGE_ENHANCE, EDGE_ENHANCE_MORE, EMBOSS, FIND_EDGES, SMOOTH, SMOOTH_MORE, SHARPEN Args: image_filter (str or type or ImageFilter): specific name or type of the filter to be used, with supporting *args and **kwargs. 
            Alternatively, an instance of PIL's ImageFilter.Filter can be
            passed directly.
        args, kwargs: Optional positional and keyword arguments passed onto
            the pillow ImageFilter initializer.
    '''

    _log_attributes = ('filter',)

    def __init__(self, image_filter=None, *args, **kwargs):
        if image_filter is None:
            pillow_url = ("http://pillow.readthedocs.io/en/3.4.x/reference/"
                          "ImageFilter.html#filters")
            raise ValueError("Must enter a valid filter to use. See %s "
                             "for a list of valid PIL filters." % pillow_url)

        if isinstance(image_filter, type):
            image_filter = image_filter(*args, **kwargs)

        if isinstance(image_filter, PillowFilter.Filter):
            self.filter = image_filter
        elif isinstance(image_filter, str):
            self.filter = getattr(PillowFilter, image_filter)(*args, **kwargs)
        else:
            raise ValueError("Must provide an image_filter as a string, type, "
                             "or ImageFilter object.")

        super().__init__()

    def _filter(self, stim):
        pillow_img = Image.fromarray(stim.data)
        new_img = np.array(pillow_img.filter(self.filter))
        return ImageStim(stim.filename, data=new_img)
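

# Hedged usage sketch: chains the filters above on a synthetic image. It assumes
# the standard pliers Transformer.transform() interface and that ImageStim
# accepts a raw array via its `data` keyword; neither is confirmed by this file.
if __name__ == '__main__':
    _img = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
    _stim = ImageStim(data=_img)
    _resized = ImageResizingFilter(size=(32, 32),
                                   maintain_aspect_ratio=True).transform(_stim)
    _blurred = PillowImageFilter('BLUR').transform(_resized)
    print(_blurred.data.shape)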
nilq/baby-python
python
#
# @lc app=leetcode.cn id=206 lang=python3
#
# [206] Reverse Linked List
#
# https://leetcode-cn.com/problems/reverse-linked-list/description/
#
# algorithms
# Easy (58.01%)
# Total Accepted:    38.9K
# Total Submissions: 66.5K
# Testcase Example:  '[1,2,3,4,5]'
#
# Reverse a singly linked list.
#
# Example:
#
# Input: 1->2->3->4->5->NULL
# Output: 5->4->3->2->1->NULL
#
# Follow-up:
# A linked list can be reversed either iteratively or recursively. Could you
# implement both approaches?
#
#
# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None

class Solution1:
    def reverseList(self, head: ListNode) -> ListNode:
        # Iterative: walk the list once, re-pointing each node at its predecessor.
        node, last = head, None
        while node:
            tmp_last = last
            last = node
            tmp_node_next = node.next
            last.next = tmp_last
            node = tmp_node_next
        return last

class Solution2:
    def reverseList(self, head: ListNode) -> ListNode:
        # Recursive: carry the already-reversed prefix in `last`.
        last = self._reverseList(head, None)
        return last

    def _reverseList(self, node, last):
        if not node:
            return last
        next_node = node.next
        node.next = last
        last = node
        return self._reverseList(next_node, last)

class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        # Compact iterative version using tuple assignment.
        node, last = head, None
        while node:
            last, last.next, node = node, last, node.next
        return last
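
# A small local harness (not part of the LeetCode submission). The annotations
# above reference ListNode, which LeetCode injects at runtime; to run this file
# standalone, first uncomment/define the ListNode class above.
#
# def build(values):
#     head = None
#     for v in reversed(values):
#         node = ListNode(v)
#         node.next = head
#         head = node
#     return head
#
# def to_list(node):
#     out = []
#     while node:
#         out.append(node.val)
#         node = node.next
#     return out
#
# for cls in (Solution1, Solution2, Solution):
#     assert to_list(cls().reverseList(build([1, 2, 3, 4, 5]))) == [5, 4, 3, 2, 1]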
nilq/baby-python
python
# encoding: utf-8 """Test utility functions.""" from unittest import TestCase import os from viltolyckor.utils import parse_result_page from requests.exceptions import HTTPError DATA_DIR = "tests/data" class TestUtils(TestCase): def setUp(self): pass def test_parse_result_page(self): file_path = os.path.join(DATA_DIR, "result_page.html") with open(file_path) as f: content = f.read() data = [x for x in parse_result_page(content)] assert len(data) == 13 * 14 result = data[0] assert "year" in result assert "viltslag" in result assert "month" in result assert "value" in result assert isinstance(result["value"], int)
nilq/baby-python
python
# -*- coding: utf-8 -*- """ *** Same as its parent apart that text baselines are reflected as a LineString (instead of its centroid) DU task for ABP Table: doing jointly row BIO and near horizontal cuts SIO block2line edges do not cross another block. The cut are based on baselines of text blocks, with some positive or negative inclination. - the labels of cuts are SIO Copyright Naver Labs Europe(C) 2018 JL Meunier Developed for the EU project READ. The READ project has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No 674943. """ import sys, os import math try: #to ease the use without proper Python installation import TranskribusDU_version except ImportError: sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) ) import TranskribusDU_version TranskribusDU_version from common.trace import traceln from tasks import _exit from tasks.DU_CRF_Task import DU_CRF_Task from tasks.DU_Table.DU_ABPTableSkewed import GraphSkewedCut, main from tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator import SkewedCutAnnotator from tasks.DU_Table.DU_ABPTableSkewed_txtBIO_sepSIO_line import DU_ABPTableSkewedRowCutLine from tasks.DU_Table.DU_ABPTableSkewed_txtBIOH_sepSIO_line import DU_ABPTableSkewedRowCutLine_BIOH # ---------------------------------------------------------------------------- if __name__ == "__main__": version = "v.01" usage, description, parser = DU_CRF_Task.getBasicTrnTstRunOptionParser(sys.argv[0], version) # parser.add_option("--annotate", dest='bAnnotate', action="store_true",default=False, help="Annotate the textlines with BIES labels") #FOR GCN # parser.add_option("--revertEdges", dest='bRevertEdges', action="store_true", help="Revert the direction of the edges") parser.add_option("--detail", dest='bDetailedReport', action="store_true", default=False,help="Display detailed reporting (score per document)") parser.add_option("--baseline", dest='bBaseline', action="store_true", default=False, help="report baseline method") parser.add_option("--line_see_line", dest='iLineVisibility', action="store", type=int, default=GraphSkewedCut.iLineVisibility, help="seeline2line: how far in pixel can a line see another cut line?") parser.add_option("--block_see_line", dest='iBlockVisibility', action="store", type=int, default=GraphSkewedCut.iBlockVisibility, help="seeblock2line: how far in pixel can a block see a cut line?") parser.add_option("--height", dest="fCutHeight", default=GraphSkewedCut.fCutHeight , action="store", type=float, help="Minimal height of a cut") # parser.add_option("--cut-above", dest='bCutAbove', action="store_true", default=False # ,help="Each object defines one or several cuts above it (instead of below as by default)") parser.add_option("--angle", dest='lsAngle' , action="store", type="string", default="-1,0,+1" ,help="Allowed cutting angles, in degree, comma-separated") parser.add_option("--graph", dest='bGraph', action="store_true", help="Store the graph in the XML for displaying it") parser.add_option("--bioh", "--BIOH", dest='bBIOH', action="store_true", help="Text are categorised along BIOH instead of BIO") parser.add_option("--text", "--txt", dest='bTxt', action="store_true", help="Use textual features.") # --- #parse the command line (options, args) = parser.parse_args() options.bCutAbove = True # Forcing this! 
if options.bBIOH: DU_CLASS = DU_ABPTableSkewedRowCutLine_BIOH else: DU_CLASS = DU_ABPTableSkewedRowCutLine if options.bGraph: import os.path # hack DU_CLASS.bCutAbove = options.bCutAbove traceln("\t%s.bCutAbove=" % DU_CLASS.__name__, DU_CLASS.bCutAbove) DU_CLASS.lRadAngle = [math.radians(v) for v in [float(s) for s in options.lsAngle.split(",")]] traceln("\t%s.lRadAngle=" % DU_CLASS.__name__, DU_CLASS.lRadAngle) for sInputFilename in args: sp, sf = os.path.split(sInputFilename) sOutFilename = os.path.join(sp, "graph-" + sf) doer = DU_CLASS("debug", "." , iBlockVisibility=options.iBlockVisibility , iLineVisibility=options.iLineVisibility , fCutHeight=options.fCutHeight , bCutAbove=options.bCutAbove , lRadAngle=[math.radians(float(s)) for s in options.lsAngle.split(",")] , bTxt=options.bTxt) o = doer.cGraphClass() o.parseDocFile(sInputFilename, 9) o.addEdgeToDoc() print('Graph edges added to %s'%sOutFilename) o.doc.write(sOutFilename, encoding='utf-8',pretty_print=True,xml_declaration=True) SkewedCutAnnotator.gtStatReport() exit(0) # --- try: sModelDir, sModelName = args except Exception as e: traceln("Specify a model folder and a model name!") _exit(usage, 1, e) main(DU_CLASS, sModelDir, sModelName, options)
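
# Hypothetical command lines (the script filename, input paths and option
# spellings below are placeholders, not confirmed by this file):
#
#   # dump an annotated graph next to each input file for visual inspection
#   python DU_Table_skewed.py --graph --bioh --angle "-1,0,+1" page_0001.mpxml
#
#   # normal operation: the two positional arguments are a model folder and a
#   # model name (see the `sModelDir, sModelName = args` unpacking above)
#   python DU_Table_skewed.py <model_dir> <model_name> [training/testing options]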
nilq/baby-python
python
import pytest from pybatfish.client.session import Session from pybatfish.datamodel import PathConstraints, HeaderConstraints from test_suite.sot_utils import (SoT, BLOCKED_PREFIXES, SNAPSHOT_NODES_SPEC, OPEN_CLIENT_PORTS) @pytest.mark.network_independent def test_no_forwarding_loops(bf: Session) -> None: """Check that there are no forwarding loops in the network.""" looping_flows = bf.q.detectLoops().answer().frame() assert looping_flows.empty, \ "Found flows that loop: {}".format(looping_flows.to_dict(orient="records")) @pytest.mark.network_independent def test_subnet_multipath_consistency(bf: Session) -> None: """ Check that all flows between all pairs are multipath consistent. Searches across all flows between subnets that are treated differently (i.e., dropped versus forwarded) by different paths in the network and returns example flows. """ multipath_inconsistent_flows = bf.q.subnetMultipathConsistency().answer().frame() assert multipath_inconsistent_flows.empty, \ "Found flows that are multipath inconsistent: {}".format(multipath_inconsistent_flows.to_dict(orient="records")) def test_public_services(bf: Session, sot: SoT) -> None: """Check that all public services are accessible from the Internet.""" for service in sot.public_services: failed_flows = bf.q.reachability( pathConstraints=PathConstraints(startLocation="internet"), headers=HeaderConstraints( srcIps='0.0.0.0/0 \\ ({})'.format(",".join(BLOCKED_PREFIXES)), srcPorts=OPEN_CLIENT_PORTS, dstIps=",".join(service["ips"]), applications=",".join(service["applications"])), actions="failure").answer().frame() assert failed_flows.empty, \ "Some flows to public service '{}' fail: {}".format(service["description"], failed_flows["Flow"]) def test_private_services(bf: Session, sot: SoT) -> None: """Check that all private services are inaccessible from the Internet.""" for service in sot.private_services: allowed_flows = bf.q.reachability( pathConstraints=PathConstraints(startLocation="internet"), headers=HeaderConstraints( dstIps=",".join(service["ips"]), applications=",".join(service["applications"])), actions="success").answer().frame() assert allowed_flows.empty, \ "Some traffic to private service {} is allowed: {}".format(service["description"], allowed_flows["Flow"]) def test_external_services(bf: Session, sot: SoT) -> None: """Check that all external services are accessible from all leaf routers.""" for service in sot.external_services: failed_flows = bf.q.reachability( pathConstraints=PathConstraints(startLocation="/leaf.*/"), headers=HeaderConstraints( dstIps=",".join(service["ips"]), applications=",".join(service["applications"])), actions="failure").answer().frame() assert failed_flows.empty, \ "Some flows to external service {} fail: {}".format(service["description"], failed_flows["Flow"]) def test_all_svi_prefixes_are_on_all_leafs(bf: Session, sot: SoT): """Check that all SVI prefixes are on all leafs.""" all_leafs = set(sot.inventory.get_groups_dict()['leaf']) # for each prefix set on each vlan interface for svi_prefixes in bf.q.interfaceProperties(interfaces="/vlan.*/").answer().frame()['All_Prefixes']: for prefix in svi_prefixes: # each vlan prefix should be present on each leaf leafs_with_prefix = set(bf.q.routes(nodes="/leaf.*/", network=prefix).answer().frame()["Node"].unique()) assert all_leafs == leafs_with_prefix def test_default_route_presence(bf: Session, sot: SoT): """Check that all routers have the default route.""" all_nodes = {host.get_name() for host in sot.inventory.get_hosts()} nodes_with_default = 
set(bf.q.routes(nodes=SNAPSHOT_NODES_SPEC, network="0.0.0.0/0").answer().frame()["Node"].unique()) assert all_nodes == nodes_with_default
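

# Hedged sketch of the pytest fixtures these tests assume. The real `bf` and
# `sot` fixtures presumably live in a conftest.py; the snapshot path, Batfish
# host and SoT constructor below are assumptions, not taken from the project.
#
# import pytest
# from pybatfish.client.session import Session
# from test_suite.sot_utils import SoT
#
# @pytest.fixture(scope="session")
# def bf():
#     session = Session(host="localhost")  # assumes a locally running Batfish service
#     session.init_snapshot("snapshots/current", name="current", overwrite=True)
#     return session
#
# @pytest.fixture(scope="session")
# def sot():
#     return SoT("sot.yml")  # constructor signature is assumed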
nilq/baby-python
python
"""Externalized strings for better structure and easier localization""" setup_greeting = """Dwarf - First run configuration Insert your bot's token, or enter 'cancel' to cancel the setup:""" not_a_token = "Invalid input. Restart Dwarf and repeat the configuration process." choose_prefix = """Choose a prefix. A prefix is what you type before a command. A typical prefix would be the exclamation mark. Can be multiple characters. You will be able to change it later and add more of them. Choose your prefix:""" confirm_prefix = """Are you sure you want {0} as your prefix? You will be able to issue commands like this: {0}help Type yes to confirm or no to change it""" no_prefix_set = "No prefix set. Defaulting to !" setup_finished = """ The configuration is done. Do not exit this session to keep your bot online. All commands will have to be issued via Discord, this session will now be read only. Press enter to continue""" logging_into_discord = "Logging into Discord..." invalid_credentials = """Invalid login credentials. If they worked before Discord might be having temporary technical issues. In this case, press enter and try again later. Otherwise you can type 'reset' to delete the current configuration and redo the setup process again the next start. > """ keep_updated = "Make sure to keep Dwarf updated by using the {}update command." official_server = "Official server: {}" invite_link = "https://discord.gg/rAHwvyE" bot_is_online = "{} is now online." connected_to = "Connected to:" connected_to_servers = "{} servers" connected_to_channels = "{} channels" connected_to_users = "{} users" prefix_singular = "Prefix" prefix_plural = "Prefixes" use_this_url = "Use this URL to bring your bot to a server:" update_the_api = """\nYou are using an outdated discord.py.\n Update using pip3 install -U discord.py""" command_not_found = "No command called {} found." command_disabled = "That command is disabled." exception_in_command = "Exception in command '{}'" error_in_command = "Error in command '{}' - {}: {}" not_available_in_dm = "That command is not available in DMs." command_has_no_subcommands = "Command {0.name} has no subcommands." group_help = "{} command group" owner_recognized = "{} has been recognized and set as owner." user_registered = """{}, thanks for using my commands! I just registered you in my database so you can use all my features. I hope that's okay for you. If it isn't, please use the `unregister` command. That will remove all of the data I store about you. The only thing I will still keep is your ID so I don't forget that you don't want data about you to be stored. Keep in mind that if I'm not allowed to store data about you, you won't be able to use many of my commands. If you ever change your mind about this, use the `register` command. Whatever your decision looks like, I wish you lots of fun on Discord."""
nilq/baby-python
python
#coding=utf-8 from django.conf.urls import patterns, url, include from cmdb.views import contract urlpatterns = patterns('', url(r'^$', contract.list_contract, name='contract_index'), url(r'add/$', contract.add_contract, name='add_contract'), url(r'del/(?P<contract_id>\d+)/$', contract.del_contract, name='del_contract'), url(r'(?P<contract_id>\d+)/$', contract.edit_contract, name='edit_contract'), url(r'list/$', contract.list_contract, name='list_contract'), )
nilq/baby-python
python