hexsha
stringlengths
40
40
size
int64
2
1.02M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
245
max_stars_repo_name
stringlengths
6
130
max_stars_repo_head_hexsha
stringlengths
40
40
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
245
max_issues_repo_name
stringlengths
6
130
max_issues_repo_head_hexsha
stringlengths
40
40
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
245
max_forks_repo_name
stringlengths
6
130
max_forks_repo_head_hexsha
stringlengths
40
40
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
2
1.02M
avg_line_length
float64
1
417k
max_line_length
int64
1
987k
alphanum_fraction
float64
0
1
content_no_comment
stringlengths
0
1.01M
is_comment_constant_removed
bool
1 class
is_sharp_comment_removed
bool
1 class
f7135806589d0c1076d6e28256a88b5306cc2b5d
31,697
py
Python
metasmoke.py
mehrdad-shokri/SmokeDetector
683d4b3f2d5a4c80fd2831bc64f2fe37836eb879
[ "Apache-2.0", "MIT" ]
null
null
null
metasmoke.py
mehrdad-shokri/SmokeDetector
683d4b3f2d5a4c80fd2831bc64f2fe37836eb879
[ "Apache-2.0", "MIT" ]
null
null
null
metasmoke.py
mehrdad-shokri/SmokeDetector
683d4b3f2d5a4c80fd2831bc64f2fe37836eb879
[ "Apache-2.0", "MIT" ]
null
null
null
# coding=utf-8 import json import requests import importlib # for .reload() from globalvars import GlobalVars import threading # noinspection PyPackageRequirements import websocket try: from collections.abc import Iterable except ImportError: from collections import Iterable from datetime import datetime, timedelta from glob import glob from regex import sub import sys import traceback import time import os import subprocess as sp import datahandling import parsing import apigetpost import spamhandling import classes import chatcommunicate from helpers import log, exit_mode, only_blacklists_changed, \ only_modules_changed, blacklist_integrity_check, reload_modules, log_exception from gitmanager import GitManager import findspam from socketscience import SocketScience import metasmoke_cache MS_WEBSOCKET_LONG_INTERVAL = 60 MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL = 5 MAX_FAILURES = 10 # Preservative, 10 errors = MS down NO_ACTIVITY_PINGS_TO_REBOOT = 4 NO_ACTIVITY_PINGS_TO_STANDBY = 5 # This is effectively disabled NO_ACTIVITY_PINGS_TO_REPORT = 3 # noinspection PyClassHasNoInit,PyBroadException,PyUnresolvedReferences,PyProtectedMember class Metasmoke: status_pings_since_scan_activity = 0 scan_stat_snapshot = None class AutoSwitch: """ Automatically switch metasmoke status """ MAX_FAILURES = 10 # More than 10 failures == ms down MAX_SUCCESSES = 1 # More than 1 success == ms up ping_failure_counter = 0 # Negative values indicate consecutive successes autoswitch_is_on = True rw_lock = threading.Lock() @staticmethod def ping_failed(): """ Indicate a metasmoke status ping connection failure """ with Metasmoke.AutoSwitch.rw_lock: if Metasmoke.AutoSwitch.ping_failure_counter < 0: # Consecutive counter. Switch sign. Metasmoke.AutoSwitch.ping_failure_counter = 0 Metasmoke.AutoSwitch.ping_failure_counter += 1 current_counter = Metasmoke.AutoSwitch.ping_failure_counter current_auto = Metasmoke.AutoSwitch.autoswitch_is_on # MAX_FAILURES is constant so no lock. 
if current_counter > Metasmoke.AutoSwitch.MAX_FAILURES and\ GlobalVars.MSStatus.is_up() and current_auto: log("warning", "Last {} connection(s) to metasmoke failed".format(current_counter) + " Setting metasmoke status to down.") chatcommunicate.tell_rooms_with("debug", "**Warning**: {}: ".format(GlobalVars.location) + "Last {} connection(s) to metasmoke".format(current_counter) + " failed. Setting metasmoke status to **down**.") Metasmoke.set_ms_down(tell=False) @staticmethod def ping_succeeded(): """ Indicate a metasmoke status ping connection success """ with Metasmoke.AutoSwitch.rw_lock: if Metasmoke.AutoSwitch.ping_failure_counter > 0: # Consecutive counter. Switch sign. Metasmoke.AutoSwitch.ping_failure_counter = 0 Metasmoke.AutoSwitch.ping_failure_counter -= 1 # Negative values for success current_counter = -Metasmoke.AutoSwitch.ping_failure_counter current_auto = Metasmoke.AutoSwitch.autoswitch_is_on # MAX_SUCCESSES is constant so no lock. if current_counter > Metasmoke.AutoSwitch.MAX_SUCCESSES and\ GlobalVars.MSStatus.is_down() and current_auto: # Why use warning? Because some action may be needed if people don't think metasmoke is up. log("warning", "Last {} connection(s) to metasmoke succeeded".format(current_counter) + " Setting metasmoke status to up.") chatcommunicate.tell_rooms_with("debug", "**Notice**: {}: ".format(GlobalVars.location) + "Last {} connection(s) to metasmoke".format(current_counter) + " succeeded. Setting metasmoke status to **up**.") Metasmoke.set_ms_up(tell=False) @staticmethod def enable_autoswitch(to_enable): """ Enable or disable auto status switch """ switch_auto_msg = "" with Metasmoke.AutoSwitch.rw_lock: if Metasmoke.AutoSwitch.autoswitch_is_on is not to_enable: # Log and post chat message only if there really is a change. 
switch_auto_msg = "Metasmoke status autoswitch is now {}abled.".format("en" if to_enable else "dis") Metasmoke.AutoSwitch.autoswitch_is_on = to_enable if switch_auto_msg: log("info", switch_auto_msg) chatcommunicate.tell_rooms_with("debug", switch_auto_msg) @staticmethod def get_ping_failure(): """ Get ping failure count. Negative number is ping success count. """ with Metasmoke.AutoSwitch.rw_lock: return Metasmoke.AutoSwitch.ping_failure_counter @staticmethod def reset_switch(): """ Reset class Metasmoke.AutoSwitch to default values """ with Metasmoke.AutoSwitch.rw_lock: Metasmoke.AutoSwitch.ping_failure_counter = 0 Metasmoke.AutoSwitch.autoswitch_is_on = True @staticmethod def set_ms_up(tell=True): """ Switch metasmoke status to up """ # We must first set metasmoke to up, then say that metasmoke is up, not the other way around. ms_msg = "" if GlobalVars.MSStatus.is_down(): ms_msg = "Metasmoke status: set to up." GlobalVars.MSStatus.set_up() if ms_msg: log("info", ms_msg) if tell: chatcommunicate.tell_rooms_with("debug", "{}: {}".format(GlobalVars.location, ms_msg)) @staticmethod def set_ms_down(tell=True): """ Switch metasmoke status to down """ ms_msg = "" if GlobalVars.MSStatus.is_up(): ms_msg = "Metasmoke status: set to down." 
GlobalVars.MSStatus.set_down() if ms_msg: log("info", ms_msg) if tell: chatcommunicate.tell_rooms_with("debug", "{}: {}".format(GlobalVars.location, ms_msg)) @staticmethod def connect_websocket(): GlobalVars.metasmoke_ws = websocket.create_connection(GlobalVars.metasmoke_ws_host, origin=GlobalVars.metasmoke_host) payload = json.dumps({"command": "subscribe", "identifier": "{\"channel\":\"SmokeDetectorChannel\"," "\"key\":\"" + GlobalVars.metasmoke_key + "\"}"}) GlobalVars.metasmoke_ws.send(payload) GlobalVars.metasmoke_ws.settimeout(10) @staticmethod def init_websocket(): has_succeeded = False failed_connection_attempts = 0 while GlobalVars.metasmoke_key and GlobalVars.metasmoke_ws_host: try: Metasmoke.connect_websocket() has_succeeded = True while True: a = GlobalVars.metasmoke_ws.recv() try: data = json.loads(a) Metasmoke.handle_websocket_data(data) GlobalVars.MSStatus.succeeded() failed_connection_attempts = 0 except ConnectionError: raise except Exception as e: log('error', '{}: {}'.format(type(e).__name__, e)) log_exception(*sys.exc_info()) GlobalVars.MSStatus.failed() Metasmoke.connect_websocket() except Exception: GlobalVars.MSStatus.failed() log('error', "Couldn't bind to MS websocket") if not has_succeeded: failed_connection_attempts += 1 if failed_connection_attempts == MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL: chatcommunicate.tell_rooms_with("debug", "Cannot initiate MS websocket." " Continuing to retry at longer intervals.") log('warning', "Cannot initiate MS websocket." " Continuing to retry at longer intervals.") if failed_connection_attempts >= MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL: time.sleep(MS_WEBSOCKET_LONG_INTERVAL) else: # Wait and hopefully network issues will be solved time.sleep(10) else: time.sleep(10) @staticmethod def handle_websocket_data(data): if "message" not in data: if "type" in data and data['type'] == "reject_subscription": log('error', "MS WebSocket subscription was rejected. 
Check your MS key.") raise ConnectionError("MS WebSocket connection rejected") return message = data['message'] if not isinstance(message, Iterable): return if "message" in message: from_ms = message['message'] if (from_ms.startswith("[ [charcoal-se.github.io](https://github.com/Charcoal-SE/charcoal-se.github.io) ]" " continuous-integration/travis-ci/push")): from_ms = from_ms.replace(": ", ", or the [SD wiki](//git.io/vyDZv)" " ([history](//github.com/Charcoal-SE/SmokeDetector/wiki/_history)): ", 1) from_ms = from_ms.replace("https:", "") chatcommunicate.tell_rooms_with("metasmoke", from_ms) elif "autoflag_fp" in message: event = message["autoflag_fp"] chatcommunicate.tell_rooms(event["message"], ("debug", "site-" + event["site"]), ("no-site-" + event["site"],), notify_site="/autoflag_fp") elif "exit" in message: os._exit(message["exit"]) elif "blacklist" in message: ids = (message['blacklist']['uid'], message['blacklist']['site']) datahandling.add_blacklisted_user(ids, "metasmoke", message['blacklist']['post']) datahandling.last_feedbacked = (ids, time.time() + 60) elif "unblacklist" in message: ids = (message['unblacklist']['uid'], message['unblacklist']['site']) datahandling.remove_blacklisted_user(ids) elif "naa" in message: post_site_id = parsing.fetch_post_id_and_site_from_url(message["naa"]["post_link"]) datahandling.add_ignored_post(post_site_id[0:2]) elif "fp" in message: post_site_id = parsing.fetch_post_id_and_site_from_url(message["fp"]["post_link"]) datahandling.add_false_positive(post_site_id[0:2]) elif "report" in message: import chatcommands # Do it here chatcommands.report_posts([message["report"]["post_link"]], message["report"]["user"], True, "the metasmoke API") elif "deploy_updated" in message: return # Disabled sha = message["deploy_updated"]["head_commit"]["id"] if sha != os.popen('git log -1 --pretty="%H"').read(): if "autopull" in message["deploy_updated"]["head_commit"]["message"]: if only_blacklists_changed(GitManager.get_remote_diff()): 
commit_md = "[`{0}`](https://github.com/{1}/commit/{0})" \ .format(sha[:7], GlobalVars.bot_repo_slug) integrity = blacklist_integrity_check() if len(integrity) == 0: # No issues GitManager.pull_remote() findspam.reload_blacklists() chatcommunicate.tell_rooms_with("debug", "No code modified in {0}, only blacklists" " reloaded.".format(commit_md)) else: integrity.append("please fix before pulling.") chatcommunicate.tell_rooms_with("debug", ", ".join(integrity)) elif "commit_status" in message: c = message["commit_status"] sha = c["commit_sha"][:7] recent_commits = sp.check_output(["git", "log", "-50", "--pretty=%H"]).decode('utf-8').strip().split('\n') if c["commit_sha"] in recent_commits: return # Same rev, or earlier rev (e.g. when watching things faster than CI completes), nothing to do if c["status"] == "success": if "autopull" in c["commit_message"] or c["commit_message"].startswith("!") or \ c["commit_message"].startswith("Auto "): s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/" \ "commit/{commit_sha}) succeeded. 
Message contains 'autopull', pulling...".format( ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha) remote_diff = GitManager.get_remote_diff() if only_blacklists_changed(remote_diff): GitManager.pull_remote() if not GlobalVars.on_branch: # Restart if HEAD detached log('warning', "Pulling remote with HEAD detached, checkout deploy", f=True) exit_mode("checkout_deploy") GlobalVars.reload() findspam.FindSpam.reload_blacklists() chatcommunicate.tell_rooms_with('debug', GlobalVars.s_norestart_blacklists) elif only_modules_changed(remote_diff): GitManager.pull_remote() if not GlobalVars.on_branch: # Restart if HEAD detached log('warning', "Pulling remote with HEAD detached, checkout deploy", f=True) exit_mode("checkout_deploy") GlobalVars.reload() reload_modules() chatcommunicate.tell_rooms_with('debug', GlobalVars.s_norestart_findspam) else: chatcommunicate.tell_rooms_with('debug', s, notify_site="/ci") exit_mode("pull_update") else: s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/commit/{commit_sha}) " \ "succeeded.".format(ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha) chatcommunicate.tell_rooms_with("debug", s, notify_site="/ci") elif c["status"] == "failure": s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/commit/{commit_sha}) " \ "failed.".format(ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha) chatcommunicate.tell_rooms_with("debug", s, notify_site="/ci") elif "everything_is_broken" in message: if message["everything_is_broken"] is True: exit_mode("shutdown") elif "domain_whitelist" in message: if message["domain_whitelist"] == "refresh": metasmoke_cache.MetasmokeCache.delete('whitelisted-domains') @staticmethod def send_stats_on_post(title, link, reasons, body, markdown, username, user_link, why, owner_rep, post_score, up_vote_count, down_vote_count): if GlobalVars.metasmoke_host is None: log('info', 'Attempted to send stats but metasmoke_host is undefined. 
Ignoring.') return elif GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke down, not sending stats.") return metasmoke_key = GlobalVars.metasmoke_key try: if len(why) > 4096: why = why[:2048] + ' ... ' + why[-2043:] # Basic maths post = {'title': title, 'link': link, 'reasons': reasons, 'body': body, 'markdown': markdown, 'username': username, 'user_link': user_link, 'why': why, 'user_reputation': owner_rep, 'score': post_score, 'upvote_count': up_vote_count, 'downvote_count': down_vote_count} # Remove None values (if they somehow manage to get through) post = {k: v for k, v in post.items() if v} payload = {'post': post, 'key': metasmoke_key} headers = {'Content-type': 'application/json'} Metasmoke.post("/posts.json", data=json.dumps(payload), headers=headers) except Exception as e: log('error', e) @staticmethod def send_feedback_for_post(post_link, feedback_type, user_name, user_id, chat_host): if GlobalVars.metasmoke_host is None: log('info', 'Received chat feedback but metasmoke_host is undefined. Ignoring.') return elif GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, not sending feedback") return metasmoke_key = GlobalVars.metasmoke_key try: payload = { 'feedback': { 'user_name': user_name, 'chat_user_id': user_id, 'chat_host': chat_host, 'feedback_type': feedback_type, 'post_link': post_link }, 'key': metasmoke_key } headers = {'Content-type': 'application/json'} Metasmoke.post("/feedbacks.json", data=json.dumps(payload), headers=headers) except Exception as e: log('error', e) @staticmethod def send_deletion_stats_for_post(post_link, is_deleted): if GlobalVars.metasmoke_host is None: log('info', 'Attempted to send deletion data but metasmoke_host is undefined. 
Ignoring.') return elif GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, not sending deletion stats") return metasmoke_key = GlobalVars.metasmoke_key try: payload = { 'deletion_log': { 'is_deleted': is_deleted, 'post_link': post_link }, 'key': metasmoke_key } headers = {'Content-type': 'application/json'} Metasmoke.post("/deletion_logs.json", data=json.dumps(payload), headers=headers) except Exception as e: log('error', e) @staticmethod def send_status_ping(): if GlobalVars.metasmoke_host is None: log('info', 'Attempted to send status ping but metasmoke_host is undefined. Not sent.') return elif GlobalVars.MSStatus.is_down(): payload = { "location": GlobalVars.location, "timestamp": time.time() } SocketScience.send(payload) metasmoke_key = GlobalVars.metasmoke_key try: payload = { 'location': GlobalVars.location, 'key': metasmoke_key, 'standby': GlobalVars.standby_mode or GlobalVars.no_se_activity_scan } headers = {'content-type': 'application/json'} response = Metasmoke.post("/status-update.json", data=json.dumps(payload), headers=headers, ignore_down=True) try: response = response.json() if response.get('pull_update', False): log('info', "Received pull command from MS ping response") exit_mode("pull_update") if ('failover' in response and GlobalVars.standby_mode and not GlobalVars.no_se_activity_scan): # If we're not scanning, then we don't want to become officially active due to failover. if response['failover']: GlobalVars.standby_mode = False chatcommunicate.tell_rooms_with("debug", GlobalVars.location + " received failover signal.", notify_site="/failover") if response.get('standby', False): chatcommunicate.tell_rooms_with("debug", GlobalVars.location + " entering metasmoke-forced standby.") time.sleep(2) exit_mode("standby") if response.get('shutdown', False): exit_mode("shutdown") except Exception: # TODO: What could happen here? 
pass except Exception as e: log('error', e) @staticmethod def update_code_privileged_users_list(): if GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, can't update blacklist manager privilege list") return payload = {'key': GlobalVars.metasmoke_key} headers = {'Content-type': 'application/json'} try: response = Metasmoke.get("/api/users/code_privileged", data=json.dumps(payload), headers=headers).json()['items'] except Exception as e: log('error', e) return GlobalVars.code_privileged_users = set() for id in response["stackexchange_chat_ids"]: GlobalVars.code_privileged_users.add(("stackexchange.com", id)) for id in response["meta_stackexchange_chat_ids"]: GlobalVars.code_privileged_users.add(("meta.stackexchange.com", id)) for id in response["stackoverflow_chat_ids"]: GlobalVars.code_privileged_users.add(("stackoverflow.com", id)) @staticmethod def determine_if_autoflagged(post_url): """ Given the URL for a post, determine whether or not it has been autoflagged. """ payload = { 'key': GlobalVars.metasmoke_key, 'filter': 'GFGJGHFMHGOLMMJMJJJGHIGOMKFKKILF', # id and autoflagged 'urls': post_url } try: response = Metasmoke.get("/api/v2.0/posts/urls", params=payload).json() except Exception as e: log('error', e) return False, [] # The first report of a URL is the only one that will be autoflagged. MS responses to the # /posts/urls endpoint have the oldest report last. 
if len(response["items"]) > 0 and response["items"][-1]["autoflagged"]: # get flagger names id = str(response["items"][-1]["id"]) payload = {'key': GlobalVars.metasmoke_key} flags = Metasmoke.get("/api/v2.0/posts/" + id + "/flags", params=payload).json() if len(flags["items"]) > 0: return True, [user["username"] for user in flags["items"][0]["autoflagged"]["users"]] return False, [] @staticmethod def stop_autoflagging(): payload = {'key': GlobalVars.metasmoke_key} headers = {'Content-type': 'application/json'} Metasmoke.post("/flagging/smokey_disable", data=json.dumps(payload), headers=headers) @staticmethod def send_statistics(): if GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, not sending statistics") return # Get current apiquota from globalvars GlobalVars.apiquota_rw_lock.acquire() current_apiquota = GlobalVars.apiquota GlobalVars.apiquota_rw_lock.release() posts_scanned, scan_time, posts_per_second = GlobalVars.PostScanStat.get_stat() payload = {'key': GlobalVars.metasmoke_key, 'statistic': {'posts_scanned': posts_scanned, 'api_quota': current_apiquota}} if posts_per_second: # Send scan rate as well, if applicable. 
payload['statistic']['post_scan_rate'] = posts_per_second GlobalVars.PostScanStat.reset_stat() headers = {'Content-type': 'application/json'} if GlobalVars.metasmoke_host is not None: log('info', 'Sent statistics to metasmoke: ', payload['statistic']) Metasmoke.post("/statistics.json", data=json.dumps(payload), headers=headers) @staticmethod def post_auto_comment(msg, user, url=None, ids=None): if not GlobalVars.metasmoke_key: return response = None if url is not None: params = {"key": GlobalVars.metasmoke_key, "urls": url, "filter": "GFGJGHFJNFGNHKNIKHGGOMILHKLJIFFN"} response = Metasmoke.get("/api/v2.0/posts/urls", params=params).json() elif ids is not None: post_id, site = ids site = parsing.api_parameter_from_link(site) params = {"key": GlobalVars.metasmoke_key, "filter": "GFGJGHFJNFGNHKNIKHGGOMILHKLJIFFN"} try: response = Metasmoke.get("/api/v2.0/posts/uid/{}/{}".format(site, post_id), params=params).json() except AttributeError: response = None if response and "items" in response and len(response["items"]) > 0: ms_id = response["items"][0]["id"] params = {"key": GlobalVars.metasmoke_key, "text": msg[:1].upper() + msg[1:], # Capitalise the first letter of the comment "chat_user_id": user.id, "chat_host": user._client.host} Metasmoke.post("/api/v2.0/comments/post/{}".format(ms_id), params=params) @staticmethod def get_post_bodies_from_ms(post_url): if not GlobalVars.metasmoke_key: return None payload = { 'key': GlobalVars.metasmoke_key, 'filter': 'GHOOIJGNLKHIIOIKGILKIJGHFMNKKGFJ', # posts.id, posts.body, posts.created_at 'urls': parsing.to_protocol_relative(post_url) } try: response = Metasmoke.get('/api/v2.0/posts/urls', params=payload).json() except AttributeError: return None except Exception as e: log('error', '{}: {}'.format(type(e).__name__, e)) log_exception(*sys.exc_info()) exception_only = ''.join(traceback.format_exception_only(type(e), e)).strip() chatcommunicate.tell_rooms_with("debug", "{}: In getting MS post information, recovered from `{}`" 
.format(GlobalVars.location, exception_only)) return None return response['items'] @staticmethod def get_reason_weights(): if not GlobalVars.metasmoke_key: return None payload = { 'key': GlobalVars.metasmoke_key, 'per_page': 100, 'page': 1, } items = [] try: while True: response = Metasmoke.get('/api/v2.0/reasons', params=payload).json() items.extend(response['items']) if not response['has_more']: break payload['page'] += 1 except AttributeError: return None return items # Some sniffy stuff @staticmethod def request_sender(method): def func(url, *args, ignore_down=False, **kwargs): if not GlobalVars.metasmoke_host or (GlobalVars.MSStatus.is_down() and not ignore_down): return None if 'timeout' not in kwargs: kwargs['timeout'] = 10.000 # Don't throttle by MS response = None # Should return None upon failure, if any try: response = method(GlobalVars.metasmoke_host + url, *args, **kwargs) except Exception: GlobalVars.MSStatus.failed() if ignore_down: # Means that this is a status ping Metasmoke.AutoSwitch.ping_failed() # No need to log here because it's re-raised raise # Maintain minimal difference to the original get/post methods else: GlobalVars.MSStatus.succeeded() if ignore_down: # Means that this is a status ping Metasmoke.AutoSwitch.ping_succeeded() return response return func get = request_sender.__func__(requests.get) post = request_sender.__func__(requests.post) @staticmethod def send_status_ping_and_verify_scanning_if_active(): def reboot_or_standby(action): error_message = "There's been no scan activity for {} status pings. Going to {}." \ .format(Metasmoke.status_pings_since_scan_activity, action) log('error', error_message) chatcommunicate.tell_rooms_with("debug", error_message) if action == "standby": GlobalVars.standby_mode = True # Let MS know immediately, to lessen potential wait time (e.g. if we fail to reboot). 
Metasmoke.send_status_ping() time.sleep(8) exit_mode(action) in_standby_mode = GlobalVars.standby_mode or GlobalVars.no_se_activity_scan if not in_standby_mode: # This is the active instance, so should be scanning. If it's not scanning, then report or go to standby. if GlobalVars.PostScanStat.get_stat() == Metasmoke.scan_stat_snapshot: # There's been no actvity since the last ping. Metasmoke.status_pings_since_scan_activity += 1 if Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_REBOOT: # Assume something is very wrong. Report to debug rooms and go into standby mode. reboot_or_standby("reboot") elif Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_STANDBY: # Assume something is very wrong. Report to debug rooms and go into standby mode. reboot_or_standby("standby") elif Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_REPORT: # Something might be wrong. Let people in debug rooms know. status_message = "There's been no scan activity for {} status pings. There may be a problem." \ .format(Metasmoke.status_pings_since_scan_activity) log('warning', status_message) chatcommunicate.tell_rooms_with("debug", status_message) else: Metasmoke.status_pings_since_scan_activity = 0 Metasmoke.scan_stat_snapshot = GlobalVars.PostScanStat.get_stat() Metasmoke.send_status_ping()
46.004354
120
0.571884
import json import requests import importlib from globalvars import GlobalVars import threading import websocket try: from collections.abc import Iterable except ImportError: from collections import Iterable from datetime import datetime, timedelta from glob import glob from regex import sub import sys import traceback import time import os import subprocess as sp import datahandling import parsing import apigetpost import spamhandling import classes import chatcommunicate from helpers import log, exit_mode, only_blacklists_changed, \ only_modules_changed, blacklist_integrity_check, reload_modules, log_exception from gitmanager import GitManager import findspam from socketscience import SocketScience import metasmoke_cache MS_WEBSOCKET_LONG_INTERVAL = 60 MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL = 5 MAX_FAILURES = 10 NO_ACTIVITY_PINGS_TO_REBOOT = 4 NO_ACTIVITY_PINGS_TO_STANDBY = 5 NO_ACTIVITY_PINGS_TO_REPORT = 3 class Metasmoke: status_pings_since_scan_activity = 0 scan_stat_snapshot = None class AutoSwitch: MAX_FAILURES = 10 MAX_SUCCESSES = 1 ping_failure_counter = 0 autoswitch_is_on = True rw_lock = threading.Lock() @staticmethod def ping_failed(): with Metasmoke.AutoSwitch.rw_lock: if Metasmoke.AutoSwitch.ping_failure_counter < 0: Metasmoke.AutoSwitch.ping_failure_counter = 0 Metasmoke.AutoSwitch.ping_failure_counter += 1 current_counter = Metasmoke.AutoSwitch.ping_failure_counter current_auto = Metasmoke.AutoSwitch.autoswitch_is_on if current_counter > Metasmoke.AutoSwitch.MAX_FAILURES and\ GlobalVars.MSStatus.is_up() and current_auto: log("warning", "Last {} connection(s) to metasmoke failed".format(current_counter) + " Setting metasmoke status to down.") chatcommunicate.tell_rooms_with("debug", "**Warning**: {}: ".format(GlobalVars.location) + "Last {} connection(s) to metasmoke".format(current_counter) + " failed. 
Setting metasmoke status to **down**.") Metasmoke.set_ms_down(tell=False) @staticmethod def ping_succeeded(): with Metasmoke.AutoSwitch.rw_lock: if Metasmoke.AutoSwitch.ping_failure_counter > 0: Metasmoke.AutoSwitch.ping_failure_counter = 0 Metasmoke.AutoSwitch.ping_failure_counter -= 1 current_counter = -Metasmoke.AutoSwitch.ping_failure_counter current_auto = Metasmoke.AutoSwitch.autoswitch_is_on if current_counter > Metasmoke.AutoSwitch.MAX_SUCCESSES and\ GlobalVars.MSStatus.is_down() and current_auto: log("warning", "Last {} connection(s) to metasmoke succeeded".format(current_counter) + " Setting metasmoke status to up.") chatcommunicate.tell_rooms_with("debug", "**Notice**: {}: ".format(GlobalVars.location) + "Last {} connection(s) to metasmoke".format(current_counter) + " succeeded. Setting metasmoke status to **up**.") Metasmoke.set_ms_up(tell=False) @staticmethod def enable_autoswitch(to_enable): switch_auto_msg = "" with Metasmoke.AutoSwitch.rw_lock: if Metasmoke.AutoSwitch.autoswitch_is_on is not to_enable: # Log and post chat message only if there really is a change. switch_auto_msg = "Metasmoke status autoswitch is now {}abled.".format("en" if to_enable else "dis") Metasmoke.AutoSwitch.autoswitch_is_on = to_enable if switch_auto_msg: log("info", switch_auto_msg) chatcommunicate.tell_rooms_with("debug", switch_auto_msg) @staticmethod def get_ping_failure(): with Metasmoke.AutoSwitch.rw_lock: return Metasmoke.AutoSwitch.ping_failure_counter @staticmethod def reset_switch(): with Metasmoke.AutoSwitch.rw_lock: Metasmoke.AutoSwitch.ping_failure_counter = 0 Metasmoke.AutoSwitch.autoswitch_is_on = True @staticmethod def set_ms_up(tell=True): # We must first set metasmoke to up, then say that metasmoke is up, not the other way around. ms_msg = "" if GlobalVars.MSStatus.is_down(): ms_msg = "Metasmoke status: set to up." 
GlobalVars.MSStatus.set_up() if ms_msg: log("info", ms_msg) if tell: chatcommunicate.tell_rooms_with("debug", "{}: {}".format(GlobalVars.location, ms_msg)) @staticmethod def set_ms_down(tell=True): ms_msg = "" if GlobalVars.MSStatus.is_up(): ms_msg = "Metasmoke status: set to down." GlobalVars.MSStatus.set_down() if ms_msg: log("info", ms_msg) if tell: chatcommunicate.tell_rooms_with("debug", "{}: {}".format(GlobalVars.location, ms_msg)) @staticmethod def connect_websocket(): GlobalVars.metasmoke_ws = websocket.create_connection(GlobalVars.metasmoke_ws_host, origin=GlobalVars.metasmoke_host) payload = json.dumps({"command": "subscribe", "identifier": "{\"channel\":\"SmokeDetectorChannel\"," "\"key\":\"" + GlobalVars.metasmoke_key + "\"}"}) GlobalVars.metasmoke_ws.send(payload) GlobalVars.metasmoke_ws.settimeout(10) @staticmethod def init_websocket(): has_succeeded = False failed_connection_attempts = 0 while GlobalVars.metasmoke_key and GlobalVars.metasmoke_ws_host: try: Metasmoke.connect_websocket() has_succeeded = True while True: a = GlobalVars.metasmoke_ws.recv() try: data = json.loads(a) Metasmoke.handle_websocket_data(data) GlobalVars.MSStatus.succeeded() failed_connection_attempts = 0 except ConnectionError: raise except Exception as e: log('error', '{}: {}'.format(type(e).__name__, e)) log_exception(*sys.exc_info()) GlobalVars.MSStatus.failed() Metasmoke.connect_websocket() except Exception: GlobalVars.MSStatus.failed() log('error', "Couldn't bind to MS websocket") if not has_succeeded: failed_connection_attempts += 1 if failed_connection_attempts == MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL: chatcommunicate.tell_rooms_with("debug", "Cannot initiate MS websocket." " Continuing to retry at longer intervals.") log('warning', "Cannot initiate MS websocket." 
" Continuing to retry at longer intervals.") if failed_connection_attempts >= MAX_MS_WEBSOCKET_RETRIES_TO_LONG_INTERVAL: time.sleep(MS_WEBSOCKET_LONG_INTERVAL) else: time.sleep(10) else: time.sleep(10) @staticmethod def handle_websocket_data(data): if "message" not in data: if "type" in data and data['type'] == "reject_subscription": log('error', "MS WebSocket subscription was rejected. Check your MS key.") raise ConnectionError("MS WebSocket connection rejected") return message = data['message'] if not isinstance(message, Iterable): return if "message" in message: from_ms = message['message'] if (from_ms.startswith("[ [charcoal-se.github.io](https://github.com/Charcoal-SE/charcoal-se.github.io) ]" " continuous-integration/travis-ci/push")): from_ms = from_ms.replace(": ", ", or the [SD wiki](//git.io/vyDZv)" " ([history](//github.com/Charcoal-SE/SmokeDetector/wiki/_history)): ", 1) from_ms = from_ms.replace("https:", "") chatcommunicate.tell_rooms_with("metasmoke", from_ms) elif "autoflag_fp" in message: event = message["autoflag_fp"] chatcommunicate.tell_rooms(event["message"], ("debug", "site-" + event["site"]), ("no-site-" + event["site"],), notify_site="/autoflag_fp") elif "exit" in message: os._exit(message["exit"]) elif "blacklist" in message: ids = (message['blacklist']['uid'], message['blacklist']['site']) datahandling.add_blacklisted_user(ids, "metasmoke", message['blacklist']['post']) datahandling.last_feedbacked = (ids, time.time() + 60) elif "unblacklist" in message: ids = (message['unblacklist']['uid'], message['unblacklist']['site']) datahandling.remove_blacklisted_user(ids) elif "naa" in message: post_site_id = parsing.fetch_post_id_and_site_from_url(message["naa"]["post_link"]) datahandling.add_ignored_post(post_site_id[0:2]) elif "fp" in message: post_site_id = parsing.fetch_post_id_and_site_from_url(message["fp"]["post_link"]) datahandling.add_false_positive(post_site_id[0:2]) elif "report" in message: import chatcommands 
chatcommands.report_posts([message["report"]["post_link"]], message["report"]["user"], True, "the metasmoke API") elif "deploy_updated" in message: return sha = message["deploy_updated"]["head_commit"]["id"] if sha != os.popen('git log -1 --pretty="%H"').read(): if "autopull" in message["deploy_updated"]["head_commit"]["message"]: if only_blacklists_changed(GitManager.get_remote_diff()): commit_md = "[`{0}`](https://github.com/{1}/commit/{0})" \ .format(sha[:7], GlobalVars.bot_repo_slug) integrity = blacklist_integrity_check() if len(integrity) == 0: GitManager.pull_remote() findspam.reload_blacklists() chatcommunicate.tell_rooms_with("debug", "No code modified in {0}, only blacklists" " reloaded.".format(commit_md)) else: integrity.append("please fix before pulling.") chatcommunicate.tell_rooms_with("debug", ", ".join(integrity)) elif "commit_status" in message: c = message["commit_status"] sha = c["commit_sha"][:7] recent_commits = sp.check_output(["git", "log", "-50", "--pretty=%H"]).decode('utf-8').strip().split('\n') if c["commit_sha"] in recent_commits: return if c["status"] == "success": if "autopull" in c["commit_message"] or c["commit_message"].startswith("!") or \ c["commit_message"].startswith("Auto "): s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/" \ "commit/{commit_sha}) succeeded. 
Message contains 'autopull', pulling...".format( ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha) remote_diff = GitManager.get_remote_diff() if only_blacklists_changed(remote_diff): GitManager.pull_remote() if not GlobalVars.on_branch: log('warning', "Pulling remote with HEAD detached, checkout deploy", f=True) exit_mode("checkout_deploy") GlobalVars.reload() findspam.FindSpam.reload_blacklists() chatcommunicate.tell_rooms_with('debug', GlobalVars.s_norestart_blacklists) elif only_modules_changed(remote_diff): GitManager.pull_remote() if not GlobalVars.on_branch: log('warning', "Pulling remote with HEAD detached, checkout deploy", f=True) exit_mode("checkout_deploy") GlobalVars.reload() reload_modules() chatcommunicate.tell_rooms_with('debug', GlobalVars.s_norestart_findspam) else: chatcommunicate.tell_rooms_with('debug', s, notify_site="/ci") exit_mode("pull_update") else: s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/commit/{commit_sha}) " \ "succeeded.".format(ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha) chatcommunicate.tell_rooms_with("debug", s, notify_site="/ci") elif c["status"] == "failure": s = "[CI]({ci_link}) on [`{commit_sha}`](https://github.com/{repo}/commit/{commit_sha}) " \ "failed.".format(ci_link=c["ci_url"], repo=GlobalVars.bot_repo_slug, commit_sha=sha) chatcommunicate.tell_rooms_with("debug", s, notify_site="/ci") elif "everything_is_broken" in message: if message["everything_is_broken"] is True: exit_mode("shutdown") elif "domain_whitelist" in message: if message["domain_whitelist"] == "refresh": metasmoke_cache.MetasmokeCache.delete('whitelisted-domains') @staticmethod def send_stats_on_post(title, link, reasons, body, markdown, username, user_link, why, owner_rep, post_score, up_vote_count, down_vote_count): if GlobalVars.metasmoke_host is None: log('info', 'Attempted to send stats but metasmoke_host is undefined. 
Ignoring.') return elif GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke down, not sending stats.") return metasmoke_key = GlobalVars.metasmoke_key try: if len(why) > 4096: why = why[:2048] + ' ... ' + why[-2043:] post = {'title': title, 'link': link, 'reasons': reasons, 'body': body, 'markdown': markdown, 'username': username, 'user_link': user_link, 'why': why, 'user_reputation': owner_rep, 'score': post_score, 'upvote_count': up_vote_count, 'downvote_count': down_vote_count} post = {k: v for k, v in post.items() if v} payload = {'post': post, 'key': metasmoke_key} headers = {'Content-type': 'application/json'} Metasmoke.post("/posts.json", data=json.dumps(payload), headers=headers) except Exception as e: log('error', e) @staticmethod def send_feedback_for_post(post_link, feedback_type, user_name, user_id, chat_host): if GlobalVars.metasmoke_host is None: log('info', 'Received chat feedback but metasmoke_host is undefined. Ignoring.') return elif GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, not sending feedback") return metasmoke_key = GlobalVars.metasmoke_key try: payload = { 'feedback': { 'user_name': user_name, 'chat_user_id': user_id, 'chat_host': chat_host, 'feedback_type': feedback_type, 'post_link': post_link }, 'key': metasmoke_key } headers = {'Content-type': 'application/json'} Metasmoke.post("/feedbacks.json", data=json.dumps(payload), headers=headers) except Exception as e: log('error', e) @staticmethod def send_deletion_stats_for_post(post_link, is_deleted): if GlobalVars.metasmoke_host is None: log('info', 'Attempted to send deletion data but metasmoke_host is undefined. 
Ignoring.') return elif GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, not sending deletion stats") return metasmoke_key = GlobalVars.metasmoke_key try: payload = { 'deletion_log': { 'is_deleted': is_deleted, 'post_link': post_link }, 'key': metasmoke_key } headers = {'Content-type': 'application/json'} Metasmoke.post("/deletion_logs.json", data=json.dumps(payload), headers=headers) except Exception as e: log('error', e) @staticmethod def send_status_ping(): if GlobalVars.metasmoke_host is None: log('info', 'Attempted to send status ping but metasmoke_host is undefined. Not sent.') return elif GlobalVars.MSStatus.is_down(): payload = { "location": GlobalVars.location, "timestamp": time.time() } SocketScience.send(payload) metasmoke_key = GlobalVars.metasmoke_key try: payload = { 'location': GlobalVars.location, 'key': metasmoke_key, 'standby': GlobalVars.standby_mode or GlobalVars.no_se_activity_scan } headers = {'content-type': 'application/json'} response = Metasmoke.post("/status-update.json", data=json.dumps(payload), headers=headers, ignore_down=True) try: response = response.json() if response.get('pull_update', False): log('info', "Received pull command from MS ping response") exit_mode("pull_update") if ('failover' in response and GlobalVars.standby_mode and not GlobalVars.no_se_activity_scan): if response['failover']: GlobalVars.standby_mode = False chatcommunicate.tell_rooms_with("debug", GlobalVars.location + " received failover signal.", notify_site="/failover") if response.get('standby', False): chatcommunicate.tell_rooms_with("debug", GlobalVars.location + " entering metasmoke-forced standby.") time.sleep(2) exit_mode("standby") if response.get('shutdown', False): exit_mode("shutdown") except Exception: pass except Exception as e: log('error', e) @staticmethod def update_code_privileged_users_list(): if GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, can't update blacklist manager privilege list") return payload = 
{'key': GlobalVars.metasmoke_key} headers = {'Content-type': 'application/json'} try: response = Metasmoke.get("/api/users/code_privileged", data=json.dumps(payload), headers=headers).json()['items'] except Exception as e: log('error', e) return GlobalVars.code_privileged_users = set() for id in response["stackexchange_chat_ids"]: GlobalVars.code_privileged_users.add(("stackexchange.com", id)) for id in response["meta_stackexchange_chat_ids"]: GlobalVars.code_privileged_users.add(("meta.stackexchange.com", id)) for id in response["stackoverflow_chat_ids"]: GlobalVars.code_privileged_users.add(("stackoverflow.com", id)) @staticmethod def determine_if_autoflagged(post_url): payload = { 'key': GlobalVars.metasmoke_key, 'filter': 'GFGJGHFMHGOLMMJMJJJGHIGOMKFKKILF', # id and autoflagged 'urls': post_url } try: response = Metasmoke.get("/api/v2.0/posts/urls", params=payload).json() except Exception as e: log('error', e) return False, [] # The first report of a URL is the only one that will be autoflagged. MS responses to the # /posts/urls endpoint have the oldest report last. 
if len(response["items"]) > 0 and response["items"][-1]["autoflagged"]: # get flagger names id = str(response["items"][-1]["id"]) payload = {'key': GlobalVars.metasmoke_key} flags = Metasmoke.get("/api/v2.0/posts/" + id + "/flags", params=payload).json() if len(flags["items"]) > 0: return True, [user["username"] for user in flags["items"][0]["autoflagged"]["users"]] return False, [] @staticmethod def stop_autoflagging(): payload = {'key': GlobalVars.metasmoke_key} headers = {'Content-type': 'application/json'} Metasmoke.post("/flagging/smokey_disable", data=json.dumps(payload), headers=headers) @staticmethod def send_statistics(): if GlobalVars.MSStatus.is_down(): log('warning', "Metasmoke is down, not sending statistics") return # Get current apiquota from globalvars GlobalVars.apiquota_rw_lock.acquire() current_apiquota = GlobalVars.apiquota GlobalVars.apiquota_rw_lock.release() posts_scanned, scan_time, posts_per_second = GlobalVars.PostScanStat.get_stat() payload = {'key': GlobalVars.metasmoke_key, 'statistic': {'posts_scanned': posts_scanned, 'api_quota': current_apiquota}} if posts_per_second: # Send scan rate as well, if applicable. 
payload['statistic']['post_scan_rate'] = posts_per_second GlobalVars.PostScanStat.reset_stat() headers = {'Content-type': 'application/json'} if GlobalVars.metasmoke_host is not None: log('info', 'Sent statistics to metasmoke: ', payload['statistic']) Metasmoke.post("/statistics.json", data=json.dumps(payload), headers=headers) @staticmethod def post_auto_comment(msg, user, url=None, ids=None): if not GlobalVars.metasmoke_key: return response = None if url is not None: params = {"key": GlobalVars.metasmoke_key, "urls": url, "filter": "GFGJGHFJNFGNHKNIKHGGOMILHKLJIFFN"} response = Metasmoke.get("/api/v2.0/posts/urls", params=params).json() elif ids is not None: post_id, site = ids site = parsing.api_parameter_from_link(site) params = {"key": GlobalVars.metasmoke_key, "filter": "GFGJGHFJNFGNHKNIKHGGOMILHKLJIFFN"} try: response = Metasmoke.get("/api/v2.0/posts/uid/{}/{}".format(site, post_id), params=params).json() except AttributeError: response = None if response and "items" in response and len(response["items"]) > 0: ms_id = response["items"][0]["id"] params = {"key": GlobalVars.metasmoke_key, "text": msg[:1].upper() + msg[1:], # Capitalise the first letter of the comment "chat_user_id": user.id, "chat_host": user._client.host} Metasmoke.post("/api/v2.0/comments/post/{}".format(ms_id), params=params) @staticmethod def get_post_bodies_from_ms(post_url): if not GlobalVars.metasmoke_key: return None payload = { 'key': GlobalVars.metasmoke_key, 'filter': 'GHOOIJGNLKHIIOIKGILKIJGHFMNKKGFJ', # posts.id, posts.body, posts.created_at 'urls': parsing.to_protocol_relative(post_url) } try: response = Metasmoke.get('/api/v2.0/posts/urls', params=payload).json() except AttributeError: return None except Exception as e: log('error', '{}: {}'.format(type(e).__name__, e)) log_exception(*sys.exc_info()) exception_only = ''.join(traceback.format_exception_only(type(e), e)).strip() chatcommunicate.tell_rooms_with("debug", "{}: In getting MS post information, recovered from `{}`" 
.format(GlobalVars.location, exception_only)) return None return response['items'] @staticmethod def get_reason_weights(): if not GlobalVars.metasmoke_key: return None payload = { 'key': GlobalVars.metasmoke_key, 'per_page': 100, 'page': 1, } items = [] try: while True: response = Metasmoke.get('/api/v2.0/reasons', params=payload).json() items.extend(response['items']) if not response['has_more']: break payload['page'] += 1 except AttributeError: return None return items # Some sniffy stuff @staticmethod def request_sender(method): def func(url, *args, ignore_down=False, **kwargs): if not GlobalVars.metasmoke_host or (GlobalVars.MSStatus.is_down() and not ignore_down): return None if 'timeout' not in kwargs: kwargs['timeout'] = 10.000 # Don't throttle by MS response = None try: response = method(GlobalVars.metasmoke_host + url, *args, **kwargs) except Exception: GlobalVars.MSStatus.failed() if ignore_down: Metasmoke.AutoSwitch.ping_failed() raise # Maintain minimal difference to the original get/post methods else: GlobalVars.MSStatus.succeeded() if ignore_down: # Means that this is a status ping Metasmoke.AutoSwitch.ping_succeeded() return response return func get = request_sender.__func__(requests.get) post = request_sender.__func__(requests.post) @staticmethod def send_status_ping_and_verify_scanning_if_active(): def reboot_or_standby(action): error_message = "There's been no scan activity for {} status pings. Going to {}." \ .format(Metasmoke.status_pings_since_scan_activity, action) log('error', error_message) chatcommunicate.tell_rooms_with("debug", error_message) if action == "standby": GlobalVars.standby_mode = True Metasmoke.send_status_ping() time.sleep(8) exit_mode(action) in_standby_mode = GlobalVars.standby_mode or GlobalVars.no_se_activity_scan if not in_standby_mode: if GlobalVars.PostScanStat.get_stat() == Metasmoke.scan_stat_snapshot: # There's been no actvity since the last ping. 
Metasmoke.status_pings_since_scan_activity += 1 if Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_REBOOT: reboot_or_standby("reboot") elif Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_STANDBY: reboot_or_standby("standby") elif Metasmoke.status_pings_since_scan_activity >= NO_ACTIVITY_PINGS_TO_REPORT: status_message = "There's been no scan activity for {} status pings. There may be a problem." \ .format(Metasmoke.status_pings_since_scan_activity) log('warning', status_message) chatcommunicate.tell_rooms_with("debug", status_message) else: Metasmoke.status_pings_since_scan_activity = 0 Metasmoke.scan_stat_snapshot = GlobalVars.PostScanStat.get_stat() Metasmoke.send_status_ping()
true
true
f713581c9c1bd2636d6a37b696cabacf9b938f7f
1,557
py
Python
gbdxtools/images/dem_image.py
mail4y/gbdxtools
62111e1db65a35e39c8bbda040cd63272aac714f
[ "MIT" ]
null
null
null
gbdxtools/images/dem_image.py
mail4y/gbdxtools
62111e1db65a35e39c8bbda040cd63272aac714f
[ "MIT" ]
null
null
null
gbdxtools/images/dem_image.py
mail4y/gbdxtools
62111e1db65a35e39c8bbda040cd63272aac714f
[ "MIT" ]
null
null
null
from gbdxtools.images.base import RDABaseImage from gbdxtools.images.drivers import RDADaskImageDriver from gbdxtools.rda.util import reproject_params from gbdxtools.rda.interface import RDA rda = RDA() from shapely.geometry import box class DemDriver(RDADaskImageDriver): image_option_support = ["proj", "bbox"] __image_option_defaults__ = {"bbox": None} class DemImage(RDABaseImage): ''' Image class for Digital Elevation Model (DEM) data from the NED/SRTM dataset. This class has no Catalog IDs and is created by passing an AOI. It shares most of the same methods as CatalogImage objects. Args: aoi (list): list of coordinate in BBOX format proj (str): (optional) EPSG string of projection reproject to. Native projection is "EPSG:4326" (WGS84) Example: >>> dem = DemImage(aoi=[5.279, 60.358, 5.402, 60.419])''' __Driver__ = DemDriver __rda_id__ = "dgdem-v20180406-DEFLATED-ca4649c5acb" def __post_new_hook__(self, **kwargs): self = self.aoi(**kwargs) if self.rda.metadata["image"]["minX"] == -1: return self[:, :, 1:-1] return self @classmethod def _build_graph(cls, aoi, proj="EPSG:4326", **kwargs): wkt = box(*aoi).wkt dem = rda.GeospatialCrop(rda.IdahoRead(bucketName="idaho-dems-2018", imageId="dgdem-v20180406-DEFLATED-ca4649c5acb", objectStore="S3"), geospatialWKT=str(wkt)) if proj is not "EPSG:4326": dem = rda.Reproject(dem, **reproject_params(proj)) return dem
37.071429
167
0.674374
from gbdxtools.images.base import RDABaseImage from gbdxtools.images.drivers import RDADaskImageDriver from gbdxtools.rda.util import reproject_params from gbdxtools.rda.interface import RDA rda = RDA() from shapely.geometry import box class DemDriver(RDADaskImageDriver): image_option_support = ["proj", "bbox"] __image_option_defaults__ = {"bbox": None} class DemImage(RDABaseImage): __Driver__ = DemDriver __rda_id__ = "dgdem-v20180406-DEFLATED-ca4649c5acb" def __post_new_hook__(self, **kwargs): self = self.aoi(**kwargs) if self.rda.metadata["image"]["minX"] == -1: return self[:, :, 1:-1] return self @classmethod def _build_graph(cls, aoi, proj="EPSG:4326", **kwargs): wkt = box(*aoi).wkt dem = rda.GeospatialCrop(rda.IdahoRead(bucketName="idaho-dems-2018", imageId="dgdem-v20180406-DEFLATED-ca4649c5acb", objectStore="S3"), geospatialWKT=str(wkt)) if proj is not "EPSG:4326": dem = rda.Reproject(dem, **reproject_params(proj)) return dem
true
true
f7135845eae99a991a5c9495ba7a1ee36382feef
3,219
py
Python
accelbyte_py_sdk/api/lobby/models/model_create_topic_request.py
encyphered/accelbyte-python-sdk
09c1e989d7251de308150fdcd3119d662ca2d205
[ "MIT" ]
null
null
null
accelbyte_py_sdk/api/lobby/models/model_create_topic_request.py
encyphered/accelbyte-python-sdk
09c1e989d7251de308150fdcd3119d662ca2d205
[ "MIT" ]
null
null
null
accelbyte_py_sdk/api/lobby/models/model_create_topic_request.py
encyphered/accelbyte-python-sdk
09c1e989d7251de308150fdcd3119d662ca2d205
[ "MIT" ]
null
null
null
# Auto-generated at 2021-09-27T17:12:33.419763+08:00 # from: Justice Lobby Service (1.33.0) # Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved. # This is licensed software from AccelByte Inc, for limitations # and restrictions contact your company contract manager. # pylint: disable=duplicate-code # pylint: disable=line-too-long # pylint: disable=missing-function-docstring # pylint: disable=missing-module-docstring # pylint: disable=too-many-arguments # pylint: disable=too-many-branches # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-lines # pylint: disable=too-many-locals # pylint: disable=too-many-public-methods # pylint: disable=too-many-return-statements # pylint: disable=too-many-statements # pylint: disable=unused-import from __future__ import annotations from typing import Any, Dict, List, Optional, Tuple, Union from ....core import Model class ModelCreateTopicRequest(Model): """Model create topic request Properties: description: (description) REQUIRED str topic: (topic) REQUIRED str """ # region fields description: str # REQUIRED topic: str # REQUIRED # endregion fields # region with_x methods def with_description(self, value: str) -> ModelCreateTopicRequest: self.description = value return self def with_topic(self, value: str) -> ModelCreateTopicRequest: self.topic = value return self # endregion with_x methods # region to methods def to_dict(self, include_empty: bool = False) -> dict: result = {} if hasattr(self, "description") and self.description: result["description"] = str(self.description) elif include_empty: result["description"] = str() if hasattr(self, "topic") and self.topic: result["topic"] = str(self.topic) elif include_empty: result["topic"] = str() return result # endregion to methods # region static methods @classmethod def create( cls, description: str, topic: str, ) -> ModelCreateTopicRequest: instance = cls() instance.description = description instance.topic = topic return instance @classmethod def 
create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ModelCreateTopicRequest: instance = cls() if not dict_: return instance if "description" in dict_ and dict_["description"] is not None: instance.description = str(dict_["description"]) elif include_empty: instance.description = str() if "topic" in dict_ and dict_["topic"] is not None: instance.topic = str(dict_["topic"]) elif include_empty: instance.topic = str() return instance @staticmethod def get_field_info() -> Dict[str, str]: return { "description": "description", "topic": "topic", } # endregion static methods
29.805556
109
0.617272
from __future__ import annotations from typing import Any, Dict, List, Optional, Tuple, Union from ....core import Model class ModelCreateTopicRequest(Model): description: str topic: str def with_description(self, value: str) -> ModelCreateTopicRequest: self.description = value return self def with_topic(self, value: str) -> ModelCreateTopicRequest: self.topic = value return self def to_dict(self, include_empty: bool = False) -> dict: result = {} if hasattr(self, "description") and self.description: result["description"] = str(self.description) elif include_empty: result["description"] = str() if hasattr(self, "topic") and self.topic: result["topic"] = str(self.topic) elif include_empty: result["topic"] = str() return result @classmethod def create( cls, description: str, topic: str, ) -> ModelCreateTopicRequest: instance = cls() instance.description = description instance.topic = topic return instance @classmethod def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ModelCreateTopicRequest: instance = cls() if not dict_: return instance if "description" in dict_ and dict_["description"] is not None: instance.description = str(dict_["description"]) elif include_empty: instance.description = str() if "topic" in dict_ and dict_["topic"] is not None: instance.topic = str(dict_["topic"]) elif include_empty: instance.topic = str() return instance @staticmethod def get_field_info() -> Dict[str, str]: return { "description": "description", "topic": "topic", }
true
true
f71358df074d5103676d37572a4ddb4db93aa59a
2,659
py
Python
compare.py
smarsu/facenet
a0fa3ffe32e295b4cc980a4a178593cc7f1bad12
[ "MIT" ]
null
null
null
compare.py
smarsu/facenet
a0fa3ffe32e295b4cc980a4a178593cc7f1bad12
[ "MIT" ]
null
null
null
compare.py
smarsu/facenet
a0fa3ffe32e295b4cc980a4a178593cc7f1bad12
[ "MIT" ]
null
null
null
# -------------------------------------------------------- # SMNet FaceNet # Licensed under The MIT License [see LICENSE for details] # Copyright 2019 smarsu. All Rights Reserved. # -------------------------------------------------------- import os.path as osp import numpy as np from sklearn import metrics import matplotlib.pyplot as plt from euclidean import euclidean_distance EPS = 1e-12 def load_feature_map_from_txt(path_txt): """""" with open(path_txt, 'r') as fb: lines = fb.readlines() feature_map = {} for line in lines: line = line.strip().split() name = line[0] feature = [float(v) for v in line[1:]] feature_map[name] = np.array(feature, dtype=np.float64) return feature_map def load_pairs(pair_path): with open(pair_path, 'r') as fb: lfw_root = '/datasets/lfw_detected' lines = fb.readlines() pairs = [] for line in lines: fst, snd, match = line.strip().split() fst = osp.join(lfw_root, fst) snd = osp.join(lfw_root, snd) pairs.append([fst, snd, int(match)]) return pairs def l2_norm(x): """ Args: x: ndarray, [n, feature_len] """ x = np.array(x, dtype=np.float64) return x / (np.sqrt(np.sum(np.square(x), axis=-1, keepdims=True)) + EPS) def cosine_similarity(a, b): """ Args: a: ndarray, [feature_len] b: ndarray, [feature_len] """ a = np.array(a, dtype=np.float64) b = np.array(b, dtype=np.float64) return np.sum(a * b) def auc(scores, labels): """""" return metrics.roc_auc_score(labels, scores) def roc(scores, labels): """""" fpr, tpr, thresholds = metrics.roc_curve(labels, scores) plt.plot(fpr, tpr) plt.savefig('roc.png') def main(): feature_map = load_feature_map_from_txt('features.txt') feature_map = {k: l2_norm(v) for k, v in feature_map.items()} pairs = load_pairs('parsed_pair.txt') scores = [] labels = [] for fst, snd, match in pairs: labels.append(match) if fst not in feature_map: scores.append(1) print('WARNING: not found', fst) continue elif snd not in feature_map: scores.append(1) print('WARNING: not found', snd) continue score = 2 - 
euclidean_distance(feature_map[fst], feature_map[snd]) scores.append(score) print(scores) print(labels) print(min(scores)) print(max(scores)) print(auc(scores, labels)) roc(scores, labels) if __name__ == '__main__': main()
25.084906
76
0.563746
import os.path as osp import numpy as np from sklearn import metrics import matplotlib.pyplot as plt from euclidean import euclidean_distance EPS = 1e-12 def load_feature_map_from_txt(path_txt): with open(path_txt, 'r') as fb: lines = fb.readlines() feature_map = {} for line in lines: line = line.strip().split() name = line[0] feature = [float(v) for v in line[1:]] feature_map[name] = np.array(feature, dtype=np.float64) return feature_map def load_pairs(pair_path): with open(pair_path, 'r') as fb: lfw_root = '/datasets/lfw_detected' lines = fb.readlines() pairs = [] for line in lines: fst, snd, match = line.strip().split() fst = osp.join(lfw_root, fst) snd = osp.join(lfw_root, snd) pairs.append([fst, snd, int(match)]) return pairs def l2_norm(x): x = np.array(x, dtype=np.float64) return x / (np.sqrt(np.sum(np.square(x), axis=-1, keepdims=True)) + EPS) def cosine_similarity(a, b): a = np.array(a, dtype=np.float64) b = np.array(b, dtype=np.float64) return np.sum(a * b) def auc(scores, labels): return metrics.roc_auc_score(labels, scores) def roc(scores, labels): fpr, tpr, thresholds = metrics.roc_curve(labels, scores) plt.plot(fpr, tpr) plt.savefig('roc.png') def main(): feature_map = load_feature_map_from_txt('features.txt') feature_map = {k: l2_norm(v) for k, v in feature_map.items()} pairs = load_pairs('parsed_pair.txt') scores = [] labels = [] for fst, snd, match in pairs: labels.append(match) if fst not in feature_map: scores.append(1) print('WARNING: not found', fst) continue elif snd not in feature_map: scores.append(1) print('WARNING: not found', snd) continue score = 2 - euclidean_distance(feature_map[fst], feature_map[snd]) scores.append(score) print(scores) print(labels) print(min(scores)) print(max(scores)) print(auc(scores, labels)) roc(scores, labels) if __name__ == '__main__': main()
true
true
f7135937417269430f1b42738050726405fe330a
21,924
py
Python
python35/Lib/site-packages/sklearn/linear_model/base.py
Ombarus/python_env
4615976a51aa4f02206f5e03fc091b088d3273fc
[ "Apache-2.0" ]
null
null
null
python35/Lib/site-packages/sklearn/linear_model/base.py
Ombarus/python_env
4615976a51aa4f02206f5e03fc091b088d3273fc
[ "Apache-2.0" ]
null
null
null
python35/Lib/site-packages/sklearn/linear_model/base.py
Ombarus/python_env
4615976a51aa4f02206f5e03fc091b088d3273fc
[ "Apache-2.0" ]
1
2020-08-08T12:44:48.000Z
2020-08-08T12:44:48.000Z
""" Generalized Linear models. """ # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Vincent Michel <vincent.michel@inria.fr> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck # Maryan Morel <maryan.morel@polytechnique.edu> # Giorgio Patrini <giorgio.patrini@anu.edu.au> # License: BSD 3 clause from __future__ import division from abc import ABCMeta, abstractmethod import numbers import warnings import numpy as np import scipy.sparse as sp from scipy import linalg from scipy import sparse from ..externals import six from ..externals.joblib import Parallel, delayed from ..base import BaseEstimator, ClassifierMixin, RegressorMixin from ..utils import check_array, check_X_y, deprecated, as_float_array from ..utils.validation import FLOAT_DTYPES from ..utils import check_random_state from ..utils.extmath import safe_sparse_dot from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale from ..utils.fixes import sparse_lsqr from ..utils.seq_dataset import ArrayDataset, CSRDataset from ..utils.validation import check_is_fitted from ..exceptions import NotFittedError from ..preprocessing.data import normalize as f_normalize # TODO: bayesian_ridge_regression and bayesian_regression_ard # should be squashed into its respective objects. SPARSE_INTERCEPT_DECAY = 0.01 # For sparse data intercept updates are scaled by this decay factor to avoid # intercept oscillation. def make_dataset(X, y, sample_weight, random_state=None): """Create ``Dataset`` abstraction for sparse and dense inputs. This also returns the ``intercept_decay`` which is different for sparse datasets. 
""" rng = check_random_state(random_state) # seed should never be 0 in SequentialDataset seed = rng.randint(1, np.iinfo(np.int32).max) if sp.issparse(X): dataset = CSRDataset(X.data, X.indptr, X.indices, y, sample_weight, seed=seed) intercept_decay = SPARSE_INTERCEPT_DECAY else: dataset = ArrayDataset(X, y, sample_weight, seed=seed) intercept_decay = 1.0 return dataset, intercept_decay @deprecated("sparse_center_data was deprecated in version 0.18 and will be " "removed in 0.20. Use utilities in preprocessing.data instead") def sparse_center_data(X, y, fit_intercept, normalize=False): """ Compute information needed to center data to have mean zero along axis 0. Be aware that X will not be centered since it would break the sparsity, but will be normalized if asked so. """ if fit_intercept: # we might require not to change the csr matrix sometimes # store a copy if normalize is True. # Change dtype to float64 since mean_variance_axis accepts # it that way. if sp.isspmatrix(X) and X.getformat() == 'csr': X = sp.csr_matrix(X, copy=normalize, dtype=np.float64) else: X = sp.csc_matrix(X, copy=normalize, dtype=np.float64) X_offset, X_var = mean_variance_axis(X, axis=0) if normalize: # transform variance to std in-place X_var *= X.shape[0] X_std = np.sqrt(X_var, X_var) del X_var X_std[X_std == 0] = 1 inplace_column_scale(X, 1. / X_std) else: X_std = np.ones(X.shape[1]) y_offset = y.mean(axis=0) y = y - y_offset else: X_offset = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_offset, y_offset, X_std @deprecated("center_data was deprecated in version 0.18 and will be removed in " "0.20. Use utilities in preprocessing.data instead") def center_data(X, y, fit_intercept, normalize=False, copy=True, sample_weight=None): """ Centers data to have mean zero along axis 0. This is here because nearly all linear models will want their data to be centered. 
If sample_weight is not None, then the weighted mean of X and y is zero, and not the mean itself """ X = as_float_array(X, copy) if fit_intercept: if isinstance(sample_weight, numbers.Number): sample_weight = None if sp.issparse(X): X_offset = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) else: X_offset = np.average(X, axis=0, weights=sample_weight) X -= X_offset # XXX: currently scaled to variance=n_samples if normalize: X_std = np.sqrt(np.sum(X ** 2, axis=0)) X_std[X_std == 0] = 1 X /= X_std else: X_std = np.ones(X.shape[1]) y_offset = np.average(y, axis=0, weights=sample_weight) y = y - y_offset else: X_offset = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_offset, y_offset, X_std def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True, sample_weight=None, return_mean=False): """ Centers data to have mean zero along axis 0. If fit_intercept=False or if the X is a sparse matrix, no centering is done, but normalization can still be applied. The function returns the statistics necessary to reconstruct the input data, which are X_offset, y_offset, X_scale, such that the output X = (X - X_offset) / X_scale X_scale is the L2 norm of X - X_offset. If sample_weight is not None, then the weighted mean of X and y is zero, and not the mean itself. If return_mean=True, the mean, eventually weighted, is returned, independently of whether X was centered (option used for optimization with sparse data in coordinate_descend). This is here because nearly all linear models will want their data to be centered. 
""" if isinstance(sample_weight, numbers.Number): sample_weight = None X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'], dtype=FLOAT_DTYPES) if fit_intercept: if sp.issparse(X): X_offset, X_var = mean_variance_axis(X, axis=0) if not return_mean: X_offset = np.zeros(X.shape[1]) if normalize: # TODO: f_normalize could be used here as well but the function # inplace_csr_row_normalize_l2 must be changed such that it # can return also the norms computed internally # transform variance to norm in-place X_var *= X.shape[0] X_scale = np.sqrt(X_var, X_var) del X_var X_scale[X_scale == 0] = 1 inplace_column_scale(X, 1. / X_scale) else: X_scale = np.ones(X.shape[1]) else: X_offset = np.average(X, axis=0, weights=sample_weight) X -= X_offset if normalize: X, X_scale = f_normalize(X, axis=0, copy=False, return_norm=True) else: X_scale = np.ones(X.shape[1]) y_offset = np.average(y, axis=0, weights=sample_weight) y = y - y_offset else: X_offset = np.zeros(X.shape[1]) X_scale = np.ones(X.shape[1]) y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_offset, y_offset, X_scale # TODO: _rescale_data should be factored into _preprocess_data. # Currently, the fact that sag implements its own way to deal with # sample_weight makes the refactoring tricky. def _rescale_data(X, y, sample_weight): """Rescale data so as to support sample_weight""" n_samples = X.shape[0] sample_weight = sample_weight * np.ones(n_samples) sample_weight = np.sqrt(sample_weight) sw_matrix = sparse.dia_matrix((sample_weight, 0), shape=(n_samples, n_samples)) X = safe_sparse_dot(sw_matrix, X) y = safe_sparse_dot(sw_matrix, y) return X, y class LinearModel(six.with_metaclass(ABCMeta, BaseEstimator)): """Base class for Linear Models""" @abstractmethod def fit(self, X, y): """Fit model.""" @deprecated(" and will be removed in 0.19.") def decision_function(self, X): """Decision function of the linear model. 
Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Samples. Returns ------- C : array, shape = (n_samples,) Returns predicted values. """ return self._decision_function(X) def _decision_function(self, X): check_is_fitted(self, "coef_") X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ def predict(self, X): """Predict using the linear model Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Samples. Returns ------- C : array, shape = (n_samples,) Returns predicted values. """ return self._decision_function(X) _preprocess_data = staticmethod(_preprocess_data) def _set_intercept(self, X_offset, y_offset, X_scale): """Set the intercept_ """ if self.fit_intercept: self.coef_ = self.coef_ / X_scale self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T) else: self.intercept_ = 0. # XXX Should this derive from LinearModel? It should be a mixin, not an ABC. # Maybe the n_features checking can be moved to LinearModel. class LinearClassifierMixin(ClassifierMixin): """Mixin for linear classifiers. Handles prediction for sparse and dense X. """ def decision_function(self, X): """Predict confidence scores for samples. The confidence score for a sample is the signed distance of that sample to the hyperplane. Parameters ---------- X : {array-like, sparse matrix}, shape = (n_samples, n_features) Samples. Returns ------- array, shape=(n_samples,) if n_classes == 2 else (n_samples, n_classes) Confidence scores per (sample, class) combination. In the binary case, confidence score for self.classes_[1] where >0 means this class would be predicted. 
""" if not hasattr(self, 'coef_') or self.coef_ is None: raise NotFittedError("This %(name)s instance is not fitted " "yet" % {'name': type(self).__name__}) X = check_array(X, accept_sparse='csr') n_features = self.coef_.shape[1] if X.shape[1] != n_features: raise ValueError("X has %d features per sample; expecting %d" % (X.shape[1], n_features)) scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() if scores.shape[1] == 1 else scores def predict(self, X): """Predict class labels for samples in X. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Samples. Returns ------- C : array, shape = [n_samples] Predicted class label per sample. """ scores = self.decision_function(X) if len(scores.shape) == 1: indices = (scores > 0).astype(np.int) else: indices = scores.argmax(axis=1) return self.classes_[indices] def _predict_proba_lr(self, X): """Probability estimation for OvR logistic regression. Positive class probabilities are computed as 1. / (1. + np.exp(-self.decision_function(X))); multiclass is handled by normalizing that over all classes. """ prob = self.decision_function(X) prob *= -1 np.exp(prob, prob) prob += 1 np.reciprocal(prob, prob) if prob.ndim == 1: return np.vstack([1 - prob, prob]).T else: # OvR normalization, like LibLinear's predict_probability prob /= prob.sum(axis=1).reshape((prob.shape[0], -1)) return prob class SparseCoefMixin(object): """Mixin for converting coef_ to and from CSR format. L1-regularizing estimators should inherit this. """ def densify(self): """Convert coefficient matrix to dense array format. Converts the ``coef_`` member (back) to a numpy.ndarray. This is the default format of ``coef_`` and is required for fitting, so calling this method is only required on models that have previously been sparsified; otherwise, it is a no-op. Returns ------- self: estimator """ msg = "Estimator, %(name)s, must be fitted before densifying." 
check_is_fitted(self, "coef_", msg=msg) if sp.issparse(self.coef_): self.coef_ = self.coef_.toarray() return self def sparsify(self): """Convert coefficient matrix to sparse format. Converts the ``coef_`` member to a scipy.sparse matrix, which for L1-regularized models can be much more memory- and storage-efficient than the usual numpy.ndarray representation. The ``intercept_`` member is not converted. Notes ----- For non-sparse models, i.e. when there are not many zeros in ``coef_``, this may actually *increase* memory usage, so use this method with care. A rule of thumb is that the number of zero elements, which can be computed with ``(coef_ == 0).sum()``, must be more than 50% for this to provide significant benefits. After calling this method, further fitting with the partial_fit method (if any) will not work until you call densify. Returns ------- self: estimator """ msg = "Estimator, %(name)s, must be fitted before sparsifying." check_is_fitted(self, "coef_", msg=msg) self.coef_ = sp.csr_matrix(self.coef_) return self class LinearRegression(LinearModel, RegressorMixin): """ Ordinary least squares Linear Regression. Parameters ---------- fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. This parameter is ignored when `fit_intercept` is set to False. When the regressors are normalized, note that this makes the hyperparameters learnt more robust and almost independent of the number of samples. The same property is not valid for standardized data. However, if you wish to standardize, please use `preprocessing.StandardScaler` before calling `fit` on an estimator with `normalize=False`. copy_X : boolean, optional, default True If True, X will be copied; else, it may be overwritten. 
n_jobs : int, optional, default 1 The number of jobs to use for the computation. If -1 all CPUs are used. This will only provide speedup for n_targets > 1 and sufficient large problems. Attributes ---------- coef_ : array, shape (n_features, ) or (n_targets, n_features) Estimated coefficients for the linear regression problem. If multiple targets are passed during the fit (y 2D), this is a 2D array of shape (n_targets, n_features), while if only one target is passed, this is a 1D array of length n_features. residues_ : array, shape (n_targets,) or (1,) or empty Sum of residuals. Squared Euclidean 2-norm for each target passed during the fit. If the linear regression problem is under-determined (the number of linearly independent rows of the training matrix is less than its number of linearly independent columns), this is an empty array. If the target vector passed during the fit is 1-dimensional, this is a (1,) shape array. intercept_ : array Independent term in the linear model. Notes ----- From the implementation point of view, this is just plain Ordinary Least Squares (scipy.linalg.lstsq) wrapped as a predictor object. """ def __init__(self, fit_intercept=True, normalize=False, copy_X=True, n_jobs=1): self.fit_intercept = fit_intercept self.normalize = normalize self.copy_X = copy_X self.n_jobs = n_jobs @property @deprecated("``residues_`` is deprecated and will be removed in 0.19") def residues_(self): """Get the residues of the fitted model.""" return self._residues def fit(self, X, y, sample_weight=None): """ Fit linear model. Parameters ---------- X : numpy array or sparse matrix of shape [n_samples,n_features] Training data y : numpy array of shape [n_samples, n_targets] Target values sample_weight : numpy array of shape [n_samples] Individual weights for each sample .. versionadded:: 0.17 parameter *sample_weight* support to LinearRegression. Returns ------- self : returns an instance of self. 
""" n_jobs_ = self.n_jobs X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], y_numeric=True, multi_output=True) if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1: raise ValueError("Sample weights must be 1D array or scalar") X, y, X_offset, y_offset, X_scale = self._preprocess_data( X, y, fit_intercept=self.fit_intercept, normalize=self.normalize, copy=self.copy_X, sample_weight=sample_weight) if sample_weight is not None: # Sample weight can be implemented via a simple rescaling. X, y = _rescale_data(X, y, sample_weight) if sp.issparse(X): if y.ndim < 2: out = sparse_lsqr(X, y) self.coef_ = out[0] self._residues = out[3] else: # sparse_lstsq cannot handle y with shape (M, K) outs = Parallel(n_jobs=n_jobs_)( delayed(sparse_lsqr)(X, y[:, j].ravel()) for j in range(y.shape[1])) self.coef_ = np.vstack(out[0] for out in outs) self._residues = np.vstack(out[3] for out in outs) else: self.coef_, self._residues, self.rank_, self.singular_ = \ linalg.lstsq(X, y) self.coef_ = self.coef_.T if y.ndim == 1: self.coef_ = np.ravel(self.coef_) self._set_intercept(X_offset, y_offset, X_scale) return self def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy): """Aux function used at beginning of fit in linear models""" n_samples, n_features = X.shape if sparse.isspmatrix(X): precompute = False X, y, X_offset, y_offset, X_scale = _preprocess_data( X, y, fit_intercept=fit_intercept, normalize=normalize, return_mean=True) else: # copy was done in fit if necessary X, y, X_offset, y_offset, X_scale = _preprocess_data( X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy) if hasattr(precompute, '__array__') and ( fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or normalize and not np.allclose(X_scale, np.ones(n_features))): warnings.warn("Gram matrix was provided but X was centered" " to fit intercept, " "or X was normalized : recomputing Gram matrix.", UserWarning) # recompute Gram precompute = 'auto' Xy = None # 
precompute if n_samples > n_features if isinstance(precompute, six.string_types) and precompute == 'auto': precompute = (n_samples > n_features) if precompute is True: # make sure that the 'precompute' array is contiguous. precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order='C') np.dot(X.T, X, out=precompute) if not hasattr(precompute, '__array__'): Xy = None # cannot use Xy if precompute is not Gram if hasattr(precompute, '__array__') and Xy is None: common_dtype = np.find_common_type([X.dtype, y.dtype], []) if y.ndim == 1: # Xy is 1d, make sure it is contiguous. Xy = np.empty(shape=n_features, dtype=common_dtype, order='C') np.dot(X.T, y, out=Xy) else: # Make sure that Xy is always F contiguous even if X or y are not # contiguous: the goal is to make it fast to extract the data for a # specific target. n_targets = y.shape[1] Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order='F') np.dot(y.T, X, out=Xy.T) return X, y, X_offset, y_offset, X_scale, precompute, Xy
36.601002
80
0.616721
from __future__ import division from abc import ABCMeta, abstractmethod import numbers import warnings import numpy as np import scipy.sparse as sp from scipy import linalg from scipy import sparse from ..externals import six from ..externals.joblib import Parallel, delayed from ..base import BaseEstimator, ClassifierMixin, RegressorMixin from ..utils import check_array, check_X_y, deprecated, as_float_array from ..utils.validation import FLOAT_DTYPES from ..utils import check_random_state from ..utils.extmath import safe_sparse_dot from ..utils.sparsefuncs import mean_variance_axis, inplace_column_scale from ..utils.fixes import sparse_lsqr from ..utils.seq_dataset import ArrayDataset, CSRDataset from ..utils.validation import check_is_fitted from ..exceptions import NotFittedError from ..preprocessing.data import normalize as f_normalize SPARSE_INTERCEPT_DECAY = 0.01 def make_dataset(X, y, sample_weight, random_state=None): rng = check_random_state(random_state) seed = rng.randint(1, np.iinfo(np.int32).max) if sp.issparse(X): dataset = CSRDataset(X.data, X.indptr, X.indices, y, sample_weight, seed=seed) intercept_decay = SPARSE_INTERCEPT_DECAY else: dataset = ArrayDataset(X, y, sample_weight, seed=seed) intercept_decay = 1.0 return dataset, intercept_decay @deprecated("sparse_center_data was deprecated in version 0.18 and will be " "removed in 0.20. Use utilities in preprocessing.data instead") def sparse_center_data(X, y, fit_intercept, normalize=False): if fit_intercept: if sp.isspmatrix(X) and X.getformat() == 'csr': X = sp.csr_matrix(X, copy=normalize, dtype=np.float64) else: X = sp.csc_matrix(X, copy=normalize, dtype=np.float64) X_offset, X_var = mean_variance_axis(X, axis=0) if normalize: X_var *= X.shape[0] X_std = np.sqrt(X_var, X_var) del X_var X_std[X_std == 0] = 1 inplace_column_scale(X, 1. 
/ X_std) else: X_std = np.ones(X.shape[1]) y_offset = y.mean(axis=0) y = y - y_offset else: X_offset = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_offset, y_offset, X_std @deprecated("center_data was deprecated in version 0.18 and will be removed in " "0.20. Use utilities in preprocessing.data instead") def center_data(X, y, fit_intercept, normalize=False, copy=True, sample_weight=None): X = as_float_array(X, copy) if fit_intercept: if isinstance(sample_weight, numbers.Number): sample_weight = None if sp.issparse(X): X_offset = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) else: X_offset = np.average(X, axis=0, weights=sample_weight) X -= X_offset if normalize: X_std = np.sqrt(np.sum(X ** 2, axis=0)) X_std[X_std == 0] = 1 X /= X_std else: X_std = np.ones(X.shape[1]) y_offset = np.average(y, axis=0, weights=sample_weight) y = y - y_offset else: X_offset = np.zeros(X.shape[1]) X_std = np.ones(X.shape[1]) y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_offset, y_offset, X_std def _preprocess_data(X, y, fit_intercept, normalize=False, copy=True, sample_weight=None, return_mean=False): if isinstance(sample_weight, numbers.Number): sample_weight = None X = check_array(X, copy=copy, accept_sparse=['csr', 'csc'], dtype=FLOAT_DTYPES) if fit_intercept: if sp.issparse(X): X_offset, X_var = mean_variance_axis(X, axis=0) if not return_mean: X_offset = np.zeros(X.shape[1]) if normalize: X_var *= X.shape[0] X_scale = np.sqrt(X_var, X_var) del X_var X_scale[X_scale == 0] = 1 inplace_column_scale(X, 1. 
/ X_scale) else: X_scale = np.ones(X.shape[1]) else: X_offset = np.average(X, axis=0, weights=sample_weight) X -= X_offset if normalize: X, X_scale = f_normalize(X, axis=0, copy=False, return_norm=True) else: X_scale = np.ones(X.shape[1]) y_offset = np.average(y, axis=0, weights=sample_weight) y = y - y_offset else: X_offset = np.zeros(X.shape[1]) X_scale = np.ones(X.shape[1]) y_offset = 0. if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype) return X, y, X_offset, y_offset, X_scale def _rescale_data(X, y, sample_weight): n_samples = X.shape[0] sample_weight = sample_weight * np.ones(n_samples) sample_weight = np.sqrt(sample_weight) sw_matrix = sparse.dia_matrix((sample_weight, 0), shape=(n_samples, n_samples)) X = safe_sparse_dot(sw_matrix, X) y = safe_sparse_dot(sw_matrix, y) return X, y class LinearModel(six.with_metaclass(ABCMeta, BaseEstimator)): @abstractmethod def fit(self, X, y): @deprecated(" and will be removed in 0.19.") def decision_function(self, X): return self._decision_function(X) def _decision_function(self, X): check_is_fitted(self, "coef_") X = check_array(X, accept_sparse=['csr', 'csc', 'coo']) return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ def predict(self, X): return self._decision_function(X) _preprocess_data = staticmethod(_preprocess_data) def _set_intercept(self, X_offset, y_offset, X_scale): if self.fit_intercept: self.coef_ = self.coef_ / X_scale self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T) else: self.intercept_ = 0. 
class LinearClassifierMixin(ClassifierMixin): def decision_function(self, X): if not hasattr(self, 'coef_') or self.coef_ is None: raise NotFittedError("This %(name)s instance is not fitted " "yet" % {'name': type(self).__name__}) X = check_array(X, accept_sparse='csr') n_features = self.coef_.shape[1] if X.shape[1] != n_features: raise ValueError("X has %d features per sample; expecting %d" % (X.shape[1], n_features)) scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ return scores.ravel() if scores.shape[1] == 1 else scores def predict(self, X): scores = self.decision_function(X) if len(scores.shape) == 1: indices = (scores > 0).astype(np.int) else: indices = scores.argmax(axis=1) return self.classes_[indices] def _predict_proba_lr(self, X): prob = self.decision_function(X) prob *= -1 np.exp(prob, prob) prob += 1 np.reciprocal(prob, prob) if prob.ndim == 1: return np.vstack([1 - prob, prob]).T else: prob /= prob.sum(axis=1).reshape((prob.shape[0], -1)) return prob class SparseCoefMixin(object): def densify(self): msg = "Estimator, %(name)s, must be fitted before densifying." check_is_fitted(self, "coef_", msg=msg) if sp.issparse(self.coef_): self.coef_ = self.coef_.toarray() return self def sparsify(self): msg = "Estimator, %(name)s, must be fitted before sparsifying." 
check_is_fitted(self, "coef_", msg=msg) self.coef_ = sp.csr_matrix(self.coef_) return self class LinearRegression(LinearModel, RegressorMixin): def __init__(self, fit_intercept=True, normalize=False, copy_X=True, n_jobs=1): self.fit_intercept = fit_intercept self.normalize = normalize self.copy_X = copy_X self.n_jobs = n_jobs @property @deprecated("``residues_`` is deprecated and will be removed in 0.19") def residues_(self): return self._residues def fit(self, X, y, sample_weight=None): n_jobs_ = self.n_jobs X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], y_numeric=True, multi_output=True) if sample_weight is not None and np.atleast_1d(sample_weight).ndim > 1: raise ValueError("Sample weights must be 1D array or scalar") X, y, X_offset, y_offset, X_scale = self._preprocess_data( X, y, fit_intercept=self.fit_intercept, normalize=self.normalize, copy=self.copy_X, sample_weight=sample_weight) if sample_weight is not None: # Sample weight can be implemented via a simple rescaling. 
X, y = _rescale_data(X, y, sample_weight) if sp.issparse(X): if y.ndim < 2: out = sparse_lsqr(X, y) self.coef_ = out[0] self._residues = out[3] else: # sparse_lstsq cannot handle y with shape (M, K) outs = Parallel(n_jobs=n_jobs_)( delayed(sparse_lsqr)(X, y[:, j].ravel()) for j in range(y.shape[1])) self.coef_ = np.vstack(out[0] for out in outs) self._residues = np.vstack(out[3] for out in outs) else: self.coef_, self._residues, self.rank_, self.singular_ = \ linalg.lstsq(X, y) self.coef_ = self.coef_.T if y.ndim == 1: self.coef_ = np.ravel(self.coef_) self._set_intercept(X_offset, y_offset, X_scale) return self def _pre_fit(X, y, Xy, precompute, normalize, fit_intercept, copy): n_samples, n_features = X.shape if sparse.isspmatrix(X): precompute = False X, y, X_offset, y_offset, X_scale = _preprocess_data( X, y, fit_intercept=fit_intercept, normalize=normalize, return_mean=True) else: # copy was done in fit if necessary X, y, X_offset, y_offset, X_scale = _preprocess_data( X, y, fit_intercept=fit_intercept, normalize=normalize, copy=copy) if hasattr(precompute, '__array__') and ( fit_intercept and not np.allclose(X_offset, np.zeros(n_features)) or normalize and not np.allclose(X_scale, np.ones(n_features))): warnings.warn("Gram matrix was provided but X was centered" " to fit intercept, " "or X was normalized : recomputing Gram matrix.", UserWarning) # recompute Gram precompute = 'auto' Xy = None # precompute if n_samples > n_features if isinstance(precompute, six.string_types) and precompute == 'auto': precompute = (n_samples > n_features) if precompute is True: # make sure that the 'precompute' array is contiguous. 
precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order='C') np.dot(X.T, X, out=precompute) if not hasattr(precompute, '__array__'): Xy = None # cannot use Xy if precompute is not Gram if hasattr(precompute, '__array__') and Xy is None: common_dtype = np.find_common_type([X.dtype, y.dtype], []) if y.ndim == 1: # Xy is 1d, make sure it is contiguous. Xy = np.empty(shape=n_features, dtype=common_dtype, order='C') np.dot(X.T, y, out=Xy) else: # Make sure that Xy is always F contiguous even if X or y are not # contiguous: the goal is to make it fast to extract the data for a # specific target. n_targets = y.shape[1] Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order='F') np.dot(y.T, X, out=Xy.T) return X, y, X_offset, y_offset, X_scale, precompute, Xy
true
true
f71359ee2b05ef67367758b809320b3f7167d7e2
2,776
py
Python
Data_Loader.py
StephenLouis/ISIC_2019
340ece42915c770e68bc13da64698a7a8987420e
[ "Apache-2.0" ]
2
2020-04-24T06:11:39.000Z
2020-04-24T06:13:38.000Z
Data_Loader.py
StephenLouis/ISIC_2019
340ece42915c770e68bc13da64698a7a8987420e
[ "Apache-2.0" ]
null
null
null
Data_Loader.py
StephenLouis/ISIC_2019
340ece42915c770e68bc13da64698a7a8987420e
[ "Apache-2.0" ]
null
null
null
import os import torch import csv import numpy as np from torch.utils.data import Dataset from PIL import Image def split_csv(file): data = [] a_train_file = r'/home/huangyinyue/ISIC_2019/train.csv' a_test_file = r'/home/huangyinyue/ISIC_2019/test.csv' seed = 3 np.random.seed(seed) train_indices = np.random.choice(25331, 20265, replace=False) # 设置随机数生成从0-150中随机挑选120个随机数 test_indices = np.array(list(set(range(25331)) - set(train_indices))) # test_indices = np.random.choice(len(residue), 30, replace=False) # 如果训练集和测试集综合的数据加起来就是一整个数据集则不需要这个操作 with open(file)as afile: a_reader = csv.reader(afile) # 从原始数据集中将所有数据读取出来并保存到a_reader中 labels = next(a_reader) # 提取第一行设置为labels for row in a_reader: # 将a_reader中每一行的数据提取出来并保存到data的列表中 data.append(row) # 生成训练数据集 if not os.path.exists(a_train_file): with open(a_train_file, "w", newline='') as a_trian: writer = csv.writer(a_trian) writer.writerows([labels]) # 第一行为标签行 writer.writerows(np.array(data)[train_indices]) a_trian.close() # 生成测试数据集 if not os.path.exists(a_test_file): with open(a_test_file, "w", newline='')as a_test: writer = csv.writer(a_test) writer.writerows([labels]) # 第一行为标签行 writer.writerows(np.array(data)[test_indices]) a_test.close() def read_labels_csv(file,header=True): images = [] num_categories = 0 with open(file, 'r') as f: reader = csv.reader(f) rownum = 0 for row in reader: if header and rownum == 0: header = row else: if num_categories == 0: num_categories = len(row) - 1 name = row[0] labels = (np.asarray(row[1:num_categories + 1])).astype(np.float32) labels = torch.from_numpy(labels) item = (name, labels) images.append(item) rownum += 1 return images class ISICDataset(Dataset): def __init__(self,csv_file,image_path,transform=None): self.images = read_labels_csv(csv_file) self.root_dir = image_path self.transform = transform def __len__(self): return len(self.images) def __getitem__(self, index): image_name,target = self.images[index] # print(os.path.join(self.root_dir,image_name+'.jpg')) image = 
Image.open(os.path.join(self.root_dir,image_name+'.jpg')).convert('RGB') if self.transform is not None: image = self.transform(image) return image,target if __name__ == '__main__': split_csv(file=r"/home/huangyinyue/ISIC_2019/ISIC_2019_Training_GroundTruth.csv")
33.445783
107
0.622118
import os import torch import csv import numpy as np from torch.utils.data import Dataset from PIL import Image def split_csv(file): data = [] a_train_file = r'/home/huangyinyue/ISIC_2019/train.csv' a_test_file = r'/home/huangyinyue/ISIC_2019/test.csv' seed = 3 np.random.seed(seed) train_indices = np.random.choice(25331, 20265, replace=False) test_indices = np.array(list(set(range(25331)) - set(train_indices))) a_reader = csv.reader(afile) labels = next(a_reader) for row in a_reader: data.append(row) if not os.path.exists(a_train_file): with open(a_train_file, "w", newline='') as a_trian: writer = csv.writer(a_trian) writer.writerows([labels]) writer.writerows(np.array(data)[train_indices]) a_trian.close() if not os.path.exists(a_test_file): with open(a_test_file, "w", newline='')as a_test: writer = csv.writer(a_test) writer.writerows([labels]) writer.writerows(np.array(data)[test_indices]) a_test.close() def read_labels_csv(file,header=True): images = [] num_categories = 0 with open(file, 'r') as f: reader = csv.reader(f) rownum = 0 for row in reader: if header and rownum == 0: header = row else: if num_categories == 0: num_categories = len(row) - 1 name = row[0] labels = (np.asarray(row[1:num_categories + 1])).astype(np.float32) labels = torch.from_numpy(labels) item = (name, labels) images.append(item) rownum += 1 return images class ISICDataset(Dataset): def __init__(self,csv_file,image_path,transform=None): self.images = read_labels_csv(csv_file) self.root_dir = image_path self.transform = transform def __len__(self): return len(self.images) def __getitem__(self, index): image_name,target = self.images[index] image = Image.open(os.path.join(self.root_dir,image_name+'.jpg')).convert('RGB') if self.transform is not None: image = self.transform(image) return image,target if __name__ == '__main__': split_csv(file=r"/home/huangyinyue/ISIC_2019/ISIC_2019_Training_GroundTruth.csv")
true
true
f7135bd8e64dff9c17d511834a28f2d313ce5dfc
52
py
Python
ox_mon/misc/__init__.py
emin63/ox_mon
965a36c430950c47d3cce79486c1ab2cc5ee89a4
[ "MIT" ]
null
null
null
ox_mon/misc/__init__.py
emin63/ox_mon
965a36c430950c47d3cce79486c1ab2cc5ee89a4
[ "MIT" ]
null
null
null
ox_mon/misc/__init__.py
emin63/ox_mon
965a36c430950c47d3cce79486c1ab2cc5ee89a4
[ "MIT" ]
null
null
null
"""Package for useful ox_mon commands and tasks """
17.333333
47
0.730769
true
true
f7135c08808687352a851c163a3b0dd12fad5af9
608
py
Python
Python-Basics/12. While Exercise/05.Vending machine.py
Xamaneone/SoftUni-Intro
985fe3249cd2adf021c2003372e840219811d989
[ "MIT" ]
null
null
null
Python-Basics/12. While Exercise/05.Vending machine.py
Xamaneone/SoftUni-Intro
985fe3249cd2adf021c2003372e840219811d989
[ "MIT" ]
null
null
null
Python-Basics/12. While Exercise/05.Vending machine.py
Xamaneone/SoftUni-Intro
985fe3249cd2adf021c2003372e840219811d989
[ "MIT" ]
null
null
null
coins = 0 price = round(float(input()), 2) while price != 0: if price >= 2: price -= 2 coins += 1 elif price >= 1: price -= 1 coins += 1 elif price >= 0.50: price -= 0.50 coins += 1 elif price >= 0.20: price -= 0.20 coins += 1 elif price >= 0.10: price -= 0.10 coins += 1 elif price >= 0.05: price -= 0.05 coins += 1 elif price >= 0.02: price -= 0.02 coins += 1 elif price >= 0.01: price -= 0.01 coins += 1 price = round(price, 2) print (coins)
20.965517
32
0.430921
coins = 0 price = round(float(input()), 2) while price != 0: if price >= 2: price -= 2 coins += 1 elif price >= 1: price -= 1 coins += 1 elif price >= 0.50: price -= 0.50 coins += 1 elif price >= 0.20: price -= 0.20 coins += 1 elif price >= 0.10: price -= 0.10 coins += 1 elif price >= 0.05: price -= 0.05 coins += 1 elif price >= 0.02: price -= 0.02 coins += 1 elif price >= 0.01: price -= 0.01 coins += 1 price = round(price, 2) print (coins)
true
true
f7135c1083d5fe7e7d0e6035ed7772e899993282
2,640
py
Python
data preprocessing/MTCNN2.py
daniyaljamal/Personality-prediction-based-on-video-using-CNN
0f1052d09fe14c73e38ac529ad35e4e98a8d859e
[ "MIT" ]
null
null
null
data preprocessing/MTCNN2.py
daniyaljamal/Personality-prediction-based-on-video-using-CNN
0f1052d09fe14c73e38ac529ad35e4e98a8d859e
[ "MIT" ]
null
null
null
data preprocessing/MTCNN2.py
daniyaljamal/Personality-prediction-based-on-video-using-CNN
0f1052d09fe14c73e38ac529ad35e4e98a8d859e
[ "MIT" ]
null
null
null
# extract and plot each detected face in a photograph from facenet_pytorch import MTCNN from cv2 import cv2 from PIL import Image import numpy as np from matplotlib import pyplot as plt from tqdm.notebook import tqdm import os import tensorflow as tf from torchvision import models import torch from torchvision import transforms from pathlib import Path def getface_from_video(path): # Create face detector mtcnn = MTCNN(margin=20, post_process=False) # Load a video v_cap = cv2.VideoCapture(path) v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT)) # Loop through video, taking a handful of frames to form a batch frames = [] for i in tqdm(range(v_len)): # Load frame success = v_cap.grab() if i % 50 == 0: success, frame = v_cap.retrieve() else: continue if not success: continue # Add to batch frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frames.append(Image.fromarray(frame)) # Detect faces in batch try: faces = mtcnn(frames) for i in range(len(faces)): plt.imshow(faces[i].permute(1, 2, 0).int().numpy()) plt.axis('off') #plt.show() except: print("Error in detection") return plt dir(models) os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" TRAIN_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\train\\Extracted\\train videos\\" TEST_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\test\\Extracted\\test videos\\" VAL_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\validate\\Extracted\\validation videos\\" PIC_TRAIN_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\train\\Extracted\\face\\" PIC_TEST_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\test\\Extracted\\face\\" PIC_VAL_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 
(CVPR'17)\\validate\\Extracted\\face\\" train_videos = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR)] test_videos = [TEST_DIR+i for i in os.listdir(TEST_DIR)] val_videos = [VAL_DIR+i for i in os.listdir(VAL_DIR)] i=0 while (i<len(val_videos)): #print(train_videos[i]) fig = getface_from_video(val_videos[i]) fig.savefig(os.path.splitext(PIC_VAL_DIR+Path(val_videos[i]).name)[0] +".jpg", bbox_inches='tight') i+=1
36.666667
152
0.685227
from facenet_pytorch import MTCNN from cv2 import cv2 from PIL import Image import numpy as np from matplotlib import pyplot as plt from tqdm.notebook import tqdm import os import tensorflow as tf from torchvision import models import torch from torchvision import transforms from pathlib import Path def getface_from_video(path): mtcnn = MTCNN(margin=20, post_process=False) v_cap = cv2.VideoCapture(path) v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT)) frames = [] for i in tqdm(range(v_len)): success = v_cap.grab() if i % 50 == 0: success, frame = v_cap.retrieve() else: continue if not success: continue frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frames.append(Image.fromarray(frame)) try: faces = mtcnn(frames) for i in range(len(faces)): plt.imshow(faces[i].permute(1, 2, 0).int().numpy()) plt.axis('off') except: print("Error in detection") return plt dir(models) os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" TRAIN_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\train\\Extracted\\train videos\\" TEST_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\test\\Extracted\\test videos\\" VAL_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\validate\\Extracted\\validation videos\\" PIC_TRAIN_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\train\\Extracted\\face\\" PIC_TEST_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\test\\Extracted\\face\\" PIC_VAL_DIR = "E:\\F Y P\\Personality prediction using image processing\\datasets\\First Impressions V2 (CVPR'17)\\validate\\Extracted\\face\\" train_videos = [TRAIN_DIR+i for i in os.listdir(TRAIN_DIR)] test_videos = [TEST_DIR+i for i in os.listdir(TEST_DIR)] val_videos = [VAL_DIR+i for i in os.listdir(VAL_DIR)] i=0 while (i<len(val_videos)): fig = 
getface_from_video(val_videos[i]) fig.savefig(os.path.splitext(PIC_VAL_DIR+Path(val_videos[i]).name)[0] +".jpg", bbox_inches='tight') i+=1
true
true
f7135c821e3e46703d7054e31dfbf06db7506a95
1,281
py
Python
spire/runtime/daemon.py
siq/spire
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
[ "Linux-OpenIB" ]
null
null
null
spire/runtime/daemon.py
siq/spire
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
[ "Linux-OpenIB" ]
1
2016-09-15T16:19:27.000Z
2016-09-15T16:20:06.000Z
spire/runtime/daemon.py
siq/spire
6365590277e9a6bfb6e4e0df5b2b47dba0f71711
[ "Linux-OpenIB" ]
null
null
null
from scheme import * from scheme.supplemental import ObjectReference from spire.runtime.runtime import Runtime from spire.support.daemon import * SCHEMA = Structure({ 'detached': Boolean(default=True), 'gid': Text(nonnull=True), 'pidfile': Text(nonnull=True), 'uid': Text(nonnull=True), }) class Runtime(Runtime): def __init__(self, configuration=None, assembly=None, **params): super(Runtime, self).__init__(configuration, assembly) self.deploy() self.startup() configuration = self.configuration.get('daemon') or {} configuration = SCHEMA.process(configuration, serialized=True) for attr, value in params.iteritems(): if value is not None: configuration[attr] = value self.daemon = self.assembly.collate(Daemon, single=True) if not self.daemon: raise Exception('no daemon component') if configuration['detached']: detach_process() self.pidfile = None if 'pidfile' in configuration: self.pidfile = Pidfile(configuration['pidfile']) self.pidfile.write() if 'uid' in configuration: switch_user(configuration['uid'], configuration.get('gid')) self.daemon.run()
29.790698
71
0.638564
from scheme import * from scheme.supplemental import ObjectReference from spire.runtime.runtime import Runtime from spire.support.daemon import * SCHEMA = Structure({ 'detached': Boolean(default=True), 'gid': Text(nonnull=True), 'pidfile': Text(nonnull=True), 'uid': Text(nonnull=True), }) class Runtime(Runtime): def __init__(self, configuration=None, assembly=None, **params): super(Runtime, self).__init__(configuration, assembly) self.deploy() self.startup() configuration = self.configuration.get('daemon') or {} configuration = SCHEMA.process(configuration, serialized=True) for attr, value in params.iteritems(): if value is not None: configuration[attr] = value self.daemon = self.assembly.collate(Daemon, single=True) if not self.daemon: raise Exception('no daemon component') if configuration['detached']: detach_process() self.pidfile = None if 'pidfile' in configuration: self.pidfile = Pidfile(configuration['pidfile']) self.pidfile.write() if 'uid' in configuration: switch_user(configuration['uid'], configuration.get('gid')) self.daemon.run()
true
true
f7135ec9ab5137fd6e0ce8310a95924f23443122
1,146
py
Python
wavelet_compress.py
Igor-ID/Image-Compression
e54881b62f258260baa7036cdd3b264b0d8adf05
[ "MIT" ]
null
null
null
wavelet_compress.py
Igor-ID/Image-Compression
e54881b62f258260baa7036cdd3b264b0d8adf05
[ "MIT" ]
null
null
null
wavelet_compress.py
Igor-ID/Image-Compression
e54881b62f258260baa7036cdd3b264b0d8adf05
[ "MIT" ]
null
null
null
import pywt import matplotlib.pyplot as plt from matplotlib.image import imread import numpy as np """Image compression using discrete Wavelet transform.""" plt.rcParams['figure.figsize'] = [8, 8] plt.rcParams.update({'font.size': 18}) im = imread('data/dog.jpg') im_gray = np.mean(im, -1) # convert RGB to gray scale # Wavelet Compression n = 4 # Use Daubechies 1 wavelet family. w = 'db1' coeffs = pywt.wavedec2(im_gray, wavelet=w, level=n) coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs) Csort = np.sort(np.abs(coeff_arr.reshape(-1))) for keep in (0.1, 0.05, 0.01, 0.005): thresh = Csort[int(np.floor((1 - keep) * len(Csort)))] ind = np.abs(coeff_arr) > thresh Cfilt = coeff_arr * ind # Threshold small indices coeffs_filt = pywt.array_to_coeffs(Cfilt, coeff_slices, output_format='wavedec2') # Plot reconstruction Arecon = pywt.waverec2(coeffs_filt, wavelet=w) plt.figure() plt.imshow(Arecon.astype('uint8'), cmap='gray') plt.axis('off') plt.title('keep = ' + str(keep)) plt.show() # Conclusion. As we can see, image compression works batter when we using Wavelets in compare with FFT
30.157895
102
0.702443
import pywt import matplotlib.pyplot as plt from matplotlib.image import imread import numpy as np plt.rcParams['figure.figsize'] = [8, 8] plt.rcParams.update({'font.size': 18}) im = imread('data/dog.jpg') im_gray = np.mean(im, -1) n = 4 w = 'db1' coeffs = pywt.wavedec2(im_gray, wavelet=w, level=n) coeff_arr, coeff_slices = pywt.coeffs_to_array(coeffs) Csort = np.sort(np.abs(coeff_arr.reshape(-1))) for keep in (0.1, 0.05, 0.01, 0.005): thresh = Csort[int(np.floor((1 - keep) * len(Csort)))] ind = np.abs(coeff_arr) > thresh Cfilt = coeff_arr * ind coeffs_filt = pywt.array_to_coeffs(Cfilt, coeff_slices, output_format='wavedec2') Arecon = pywt.waverec2(coeffs_filt, wavelet=w) plt.figure() plt.imshow(Arecon.astype('uint8'), cmap='gray') plt.axis('off') plt.title('keep = ' + str(keep)) plt.show()
true
true
f7135fe25e44d7942b0a84914465cbd8f57575a7
786
py
Python
keepr/tests/test_cli.py
Geek-ubaid/ShopKeepr
09c7e9b0a232e3c0e52869a76e48ce2f153bb6d5
[ "MIT" ]
10
2019-10-25T18:48:13.000Z
2019-12-24T12:41:54.000Z
keepr/tests/test_cli.py
Geek-ubaid/ShopKeepr
09c7e9b0a232e3c0e52869a76e48ce2f153bb6d5
[ "MIT" ]
12
2019-10-25T17:26:56.000Z
2020-05-28T17:17:07.000Z
keepr/tests/test_cli.py
Geek-ubaid/ShopKeepr
09c7e9b0a232e3c0e52869a76e48ce2f153bb6d5
[ "MIT" ]
1
2020-05-19T23:19:50.000Z
2020-05-19T23:19:50.000Z
from keepr.__main__ import run_application from click.testing import CliRunner import sys sys.path.append('..') def test_install_package(): runner = CliRunner() result = runner.invoke(run_application, ['install', 'click']) assert result.exit_code == 0 def test_install_package_req(): runner = CliRunner() result = runner.invoke( run_application, [ 'install', '-r', 'requirements_test.txt']) assert result.exit_code == 0 def test_uninstall_package(): runner = CliRunner() result = runner.invoke(run_application, ['uninstall', 'click']) assert result.exit_code == 0 def test_update_package(): runner = CliRunner() result = runner.invoke(run_application, ['install', '-u', 'click']) assert result.exit_code == 0
24.5625
71
0.680662
from keepr.__main__ import run_application from click.testing import CliRunner import sys sys.path.append('..') def test_install_package(): runner = CliRunner() result = runner.invoke(run_application, ['install', 'click']) assert result.exit_code == 0 def test_install_package_req(): runner = CliRunner() result = runner.invoke( run_application, [ 'install', '-r', 'requirements_test.txt']) assert result.exit_code == 0 def test_uninstall_package(): runner = CliRunner() result = runner.invoke(run_application, ['uninstall', 'click']) assert result.exit_code == 0 def test_update_package(): runner = CliRunner() result = runner.invoke(run_application, ['install', '-u', 'click']) assert result.exit_code == 0
true
true
f71361d02421cf51f190bfe7f9563b3fbcb8760a
335
py
Python
cci.py
oren0e/check-crypto-investments
19cb05d76452d5ce5439e8b2ef3655e50c27ec94
[ "MIT" ]
1
2021-03-28T13:21:02.000Z
2021-03-28T13:21:02.000Z
cci.py
oren0e/check-crypto-investments
19cb05d76452d5ce5439e8b2ef3655e50c27ec94
[ "MIT" ]
null
null
null
cci.py
oren0e/check-crypto-investments
19cb05d76452d5ce5439e8b2ef3655e50c27ec94
[ "MIT" ]
null
null
null
import click from backend.bots import CCIBot, CGroupBot @click.command() @click.argument("bot_name", nargs=1) def cci(bot_name): if bot_name == "cci_bot": CCIBot().run() elif bot_name == "cgroup_bot": CGroupBot().run() else: click.echo("No such bot yet...") if __name__ == '__main__': cci()
18.611111
42
0.614925
import click from backend.bots import CCIBot, CGroupBot @click.command() @click.argument("bot_name", nargs=1) def cci(bot_name): if bot_name == "cci_bot": CCIBot().run() elif bot_name == "cgroup_bot": CGroupBot().run() else: click.echo("No such bot yet...") if __name__ == '__main__': cci()
true
true
f71361fcf73b8b6a9b8d5f7a349d6cfe6f0d0144
1,139
py
Python
tests/__init__.py
GregShiner/aiobungie
c938f2570718d35525b57e68ef4fe81d7495e117
[ "MIT" ]
36
2021-07-09T19:26:18.000Z
2022-03-26T09:12:43.000Z
tests/__init__.py
GregShiner/aiobungie
c938f2570718d35525b57e68ef4fe81d7495e117
[ "MIT" ]
149
2021-07-13T21:46:21.000Z
2022-03-29T12:21:38.000Z
tests/__init__.py
GregShiner/aiobungie
c938f2570718d35525b57e68ef4fe81d7495e117
[ "MIT" ]
4
2021-07-21T05:33:11.000Z
2022-02-19T11:15:31.000Z
# MIT License # # Copyright (c) 2020 - Present nxtlo # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """Aiobungie unit tests."""
47.458333
80
0.772608
true
true
f71362440659e8e224c53e9f126cbeed6adf1674
1,970
py
Python
examples/Python/Utility/file.py
martinruenz/Open3D
30983e89956dcd233531870ca20e87e6769ba903
[ "MIT" ]
3
2018-04-24T21:17:24.000Z
2018-06-02T12:44:06.000Z
examples/Python/Utility/file.py
martinruenz/Open3D
30983e89956dcd233531870ca20e87e6769ba903
[ "MIT" ]
null
null
null
examples/Python/Utility/file.py
martinruenz/Open3D
30983e89956dcd233531870ca20e87e6769ba903
[ "MIT" ]
1
2020-03-31T14:30:40.000Z
2020-03-31T14:30:40.000Z
# Open3D: www.open3d.org # The MIT License (MIT) # See license file or visit www.open3d.org for details # examples/Python/Utility/file.py from os import listdir, makedirs from os.path import exists, isfile, join, splitext import shutil import re def sorted_alphanum(file_list_ordered): convert = lambda text: int(text) if text.isdigit() else text alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)] return sorted(file_list_ordered, key=alphanum_key) def get_file_list(path, extension=None): if extension is None: file_list = [path + f for f in listdir(path) if isfile(join(path, f))] else: file_list = [path + f for f in listdir(path) if isfile(join(path, f)) and splitext(f)[1] == extension] file_list = sorted_alphanum(file_list) return file_list def add_if_exists(path_dataset, folder_names): for folder_name in folder_names: if exists(join(path_dataset, folder_name)): path = join(path_dataset, folder_name) return path def get_rgbd_folders(path_dataset): path_color = add_if_exists(path_dataset, ["image/", "rgb/", "color/"]) path_depth = join(path_dataset, "depth/") return path_color, path_depth def get_rgbd_file_lists(path_dataset): path_color, path_depth = get_rgbd_folders(path_dataset) color_files = get_file_list(path_color, ".jpg") + \ get_file_list(path_color, ".png") depth_files = get_file_list(path_depth, ".png") return color_files, depth_files def make_clean_folder(path_folder): if not exists(path_folder): makedirs(path_folder) else: shutil.rmtree(path_folder) makedirs(path_folder) def check_folder_structure(path_dataset): path_color, path_depth = get_rgbd_folders(path_dataset) assert exists(path_depth), \ "Path %s is not exist!" % path_depth assert exists(path_color), \ "Path %s is not exist!" % path_color
30.78125
78
0.694924
from os import listdir, makedirs from os.path import exists, isfile, join, splitext import shutil import re def sorted_alphanum(file_list_ordered): convert = lambda text: int(text) if text.isdigit() else text alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)] return sorted(file_list_ordered, key=alphanum_key) def get_file_list(path, extension=None): if extension is None: file_list = [path + f for f in listdir(path) if isfile(join(path, f))] else: file_list = [path + f for f in listdir(path) if isfile(join(path, f)) and splitext(f)[1] == extension] file_list = sorted_alphanum(file_list) return file_list def add_if_exists(path_dataset, folder_names): for folder_name in folder_names: if exists(join(path_dataset, folder_name)): path = join(path_dataset, folder_name) return path def get_rgbd_folders(path_dataset): path_color = add_if_exists(path_dataset, ["image/", "rgb/", "color/"]) path_depth = join(path_dataset, "depth/") return path_color, path_depth def get_rgbd_file_lists(path_dataset): path_color, path_depth = get_rgbd_folders(path_dataset) color_files = get_file_list(path_color, ".jpg") + \ get_file_list(path_color, ".png") depth_files = get_file_list(path_depth, ".png") return color_files, depth_files def make_clean_folder(path_folder): if not exists(path_folder): makedirs(path_folder) else: shutil.rmtree(path_folder) makedirs(path_folder) def check_folder_structure(path_dataset): path_color, path_depth = get_rgbd_folders(path_dataset) assert exists(path_depth), \ "Path %s is not exist!" % path_depth assert exists(path_color), \ "Path %s is not exist!" % path_color
true
true
f71362b0a4e90908b800515208bd4b73487ecd9e
1,823
py
Python
RiotGames/API/Match.py
Timohiho/RiotGames
f75256cca1b5c224393dca99296a6163b70b335f
[ "MIT" ]
2
2021-05-05T12:33:51.000Z
2021-12-15T13:08:44.000Z
RiotGames/API/Match.py
Timohiho/RiotGames
f75256cca1b5c224393dca99296a6163b70b335f
[ "MIT" ]
null
null
null
RiotGames/API/Match.py
Timohiho/RiotGames
f75256cca1b5c224393dca99296a6163b70b335f
[ "MIT" ]
null
null
null
# Copyright (c) 2021. # The copyright lies with Timo Hirsch-Hoffmann, the further use is only permitted with reference to source import urllib.request from RiotGames.API.RiotApi import RiotApi class Match(RiotApi): __timeline_by_match_id_url: str = "https://{}.api.riotgames.com/lol/match/v4/timelines/by-match/{}?api_key={}" def __init__(self, apikey: str): """ :param apikey: """ super().__init__(apikey) self.__super = super() def by_id(self, match_id: int, region: str): """ Special Function still in development https://developer.riotgames.com/apis#match-v4/GET_getMatchlist TODO :param match_id: :param region: :return: """ pass def matchlist_by_account_id(self, account_id: str, begin_time: int = None, end_time: int = None, begin_index: int = None, end_index: int = None, champions: list = None, queue: list = None, season: list = None): """ Special Function still in development https://developer.riotgames.com/apis#match-v4/GET_getMatchlist TODO format url :param account_id: encrypted account id :param begin_time: :param end_time: :param begin_index: :param end_index: :param champions: :param queue: :param season: :return: """ pass def timeline_by_match_id(self, match_id: int, region: str) -> dict: """ :param match_id: :param region: :return: """ return eval(bytes( urllib.request.urlopen( self.__timeline_by_match_id_url.format(region, match_id, super()._get_key())).read()).decode())
28.936508
114
0.580362
import urllib.request from RiotGames.API.RiotApi import RiotApi class Match(RiotApi): __timeline_by_match_id_url: str = "https://{}.api.riotgames.com/lol/match/v4/timelines/by-match/{}?api_key={}" def __init__(self, apikey: str): super().__init__(apikey) self.__super = super() def by_id(self, match_id: int, region: str): pass def matchlist_by_account_id(self, account_id: str, begin_time: int = None, end_time: int = None, begin_index: int = None, end_index: int = None, champions: list = None, queue: list = None, season: list = None): pass def timeline_by_match_id(self, match_id: int, region: str) -> dict: return eval(bytes( urllib.request.urlopen( self.__timeline_by_match_id_url.format(region, match_id, super()._get_key())).read()).decode())
true
true
f71364a4d58f9ac4ab6679957580fe8d4a36271d
506
py
Python
modules/ai-codes/modules/knn/src/iris-v1.py
drigols/Studies
9c293156935b491ded24be6b511daac67fd43538
[ "MIT" ]
1
2020-09-06T22:17:19.000Z
2020-09-06T22:17:19.000Z
modules/ai-codes/modules/knn/src/iris-v1.py
drigols/Studies
9c293156935b491ded24be6b511daac67fd43538
[ "MIT" ]
null
null
null
modules/ai-codes/modules/knn/src/iris-v1.py
drigols/Studies
9c293156935b491ded24be6b511daac67fd43538
[ "MIT" ]
null
null
null
######################################################## # Rodrigo Leite - drigols # # Last update: 31/10/2021 # ######################################################## from sklearn.datasets import load_iris import pandas as pd iris = load_iris() x = pd.DataFrame(iris.data, columns=[iris.feature_names]) y = pd.Series(iris.target) print("Load Iris dataset dimensions: {0}".format(x.shape)) print("Load Iris dataset features:\n", x.head(10))
29.764706
58
0.478261
true
true
f71364d313a7e53a9208f401358f30cc8bff56e4
312
py
Python
abesit/urls.py
ravigoel08/tetrahedron18
06da927732a3c15e174acb1b4832f984dad6260e
[ "MIT" ]
null
null
null
abesit/urls.py
ravigoel08/tetrahedron18
06da927732a3c15e174acb1b4832f984dad6260e
[ "MIT" ]
4
2020-02-11T23:20:04.000Z
2021-06-10T23:44:02.000Z
abesit/urls.py
ravigoel08/tetrahedron18
06da927732a3c15e174acb1b4832f984dad6260e
[ "MIT" ]
1
2018-10-19T11:35:23.000Z
2018-10-19T11:35:23.000Z
from . import views from django.urls import path urlpatterns = [ path('',views.Home,name="Home"), path('index/',views.index,name="index"), path('registered/',views.registered,name="registered"), path('exportmeout/',views.export,name="export"), # path('',views.Registered,name="registered") ]
28.363636
59
0.676282
from . import views from django.urls import path urlpatterns = [ path('',views.Home,name="Home"), path('index/',views.index,name="index"), path('registered/',views.registered,name="registered"), path('exportmeout/',views.export,name="export"), ]
true
true
f71364fa0d5b808b314ebc8bbb334f13964857db
12,817
py
Python
flask/camera.py
tableClothed/face-filters
8b236643b4e22a925df6a1c299f3887fdedb3e8e
[ "MIT" ]
1
2021-09-08T07:11:36.000Z
2021-09-08T07:11:36.000Z
flask/camera.py
tableClothed/face-filters
8b236643b4e22a925df6a1c299f3887fdedb3e8e
[ "MIT" ]
null
null
null
flask/camera.py
tableClothed/face-filters
8b236643b4e22a925df6a1c299f3887fdedb3e8e
[ "MIT" ]
1
2020-07-24T01:18:37.000Z
2020-07-24T01:18:37.000Z
import cv2 import numpy as np import dlib from imutils import face_utils, translate class Camera(object): def __init__(self): self.camera = cv2.VideoCapture(0) p = "../data/shape_predictor_68_face_landmarks.dat" self.detector = dlib.get_frontal_face_detector() self.predictor = dlib.shape_predictor(p) self.effect = "contours" def __del__(self): self.camera.release() def return_jpg(self, frame): ret, jpeg = cv2.imencode('.jpeg', frame) return jpeg.tobytes() def return_effect(self): if self.effect == "contours": frame = self.effect_canny() elif self.effect == "baby": frame = self.effect_baby_face() elif self.effect == "blurr": frame = self.effect_bluring_face() elif self.effect == "cartoon": frame = self.effect_cartoon() elif self.effect == "doggy": frame = self.effect_dog_face() elif self.effect == "large": frame = self.effect_enlarged() elif self.effect == "mirrors": frame = self.effect_mirror() elif self.effect == "triangle": frame = self.effect_delaunay_triangle() elif self.effect == "glasses": frame = self.effect_glasses() return frame # --------------- # BABY FACE # --------------- def effect_baby_face(self): ret, frame = self.camera.read() if not ret: return False offset = 4 scale = 1.3 frame_2 = frame.copy() mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) mask = np.zeros(frame.shape, frame.dtype) eye_mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) eye_mask = np.zeros(frame.shape, frame.dtype) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) rects = self.detector(gray, 0) for rect in rects: shape = self.predictor(gray, rect) shape = face_utils.shape_to_np(shape) l_eye, r_eye = shape[36:42], shape[42:48] (lx, ly, lw, lh) = cv2.boundingRect(l_eye) (rx, ry, rw, rh) = cv2.boundingRect(r_eye) l_eye = frame[ly-offset:ly+lh+offset, lx-offset:lx+lw+offset] r_eye = frame[ry-offset:ry+rh+offset, rx-offset:rx+rw+offset] center_ly = lx + int(lw / 2) center_lx = ly + int(lh / 2) + 20 center_ry = rx + int(rw / 2) center_rx = ry + int(rh / 2) + 20 mouth = shape[48:69] (mx, my, 
mw, mh) = cv2.boundingRect(mouth) mouth = frame[my-offset:my+mh+offset, mx-offset:mx+mw+offset] center_my = mx + int(mw / 2) center_mx = my + int(mh / 2) ly_scaled = int((l_eye.shape[1]*scale)/2) lx_scaled = int((l_eye.shape[0]*scale)/2) ry_scaled = int((r_eye.shape[1]*scale)/2) rx_scaled = int((r_eye.shape[0]*scale)/2) l_eye = cv2.resize(l_eye, (ly_scaled*2, lx_scaled*2), interpolation = cv2.INTER_AREA) r_eye = cv2.resize(r_eye, (ry_scaled*2, rx_scaled*2), interpolation = cv2.INTER_AREA) frame[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = l_eye mask[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = 255 frame[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = r_eye mask[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = 255 final_center_x = int(np.mean([center_lx, center_rx])) final_center_y = int(np.mean([center_ly, center_ry])) frame = cv2.seamlessClone(frame, frame_2, mask, (final_center_y, final_center_x), cv2.NORMAL_CLONE) return self.return_jpg(frame) # ------------------ # ENLARGED EYES # ------------------ def effect_enlarged(self): offset = 4 scale = 2 ret, frame = self.camera.read() if not ret: return False frame_2 = frame.copy() mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) mask = np.zeros(frame.shape, frame.dtype) l_eye, r_eye = 0, 0 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) rects = self.detector(gray, 0) for rect in rects: shape = self.predictor(gray, rect) shape = face_utils.shape_to_np(shape) l_eye, r_eye = shape[36:42], shape[42:48] (lx, ly, lw, lh) = cv2.boundingRect(l_eye) (rx, ry, rw, rh) = cv2.boundingRect(r_eye) l_eye = frame[ly-offset:ly+lh+offset, lx-offset:lx+lw+offset] r_eye = frame[ry-offset:ry+rh+offset, rx-offset:rx+rw+offset] center_ly = lx + int(lw / 2) center_lx = ly + int(lh / 2) + 20 center_ry = rx + int(rw / 2) center_rx = ry + int(rh / 2) + 20 mouth = shape[48:69] (mx, my, mw, mh) = 
cv2.boundingRect(mouth) mouth = frame[my-offset:my+mh+offset, mx-offset:mx+mw+offset] center_my = mx + int(mw / 2) center_mx = my + int(mh / 2) ly_scaled = int((l_eye.shape[1]*1.7)/2) lx_scaled = int((l_eye.shape[0]*1.7)/2) ry_scaled = int((r_eye.shape[1]*1.7)/2) rx_scaled = int((r_eye.shape[0]*1.7)/2) l_eye = cv2.resize(l_eye, (ly_scaled*2, lx_scaled*2), interpolation = cv2.INTER_AREA) r_eye = cv2.resize(r_eye, (ry_scaled*2, rx_scaled*2), interpolation = cv2.INTER_AREA) my_scaled = int((mouth.shape[1]*scale)/2) mx_scaled = int((mouth.shape[0]*scale)/2) mouth = cv2.resize(mouth, (my_scaled*2, mx_scaled*2), interpolation = cv2.INTER_AREA) frame[center_mx-mx_scaled:center_mx+mx_scaled, center_my-my_scaled:center_my+my_scaled] = mouth mask[center_mx-mx_scaled:center_mx+mx_scaled, center_my-my_scaled:center_my+my_scaled] = 255 frame[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = l_eye mask[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = 255 frame[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = r_eye mask[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = 255 final_center_x = int(np.mean([center_lx, center_mx, center_rx])) final_center_y = int(np.mean([center_ly, center_my, center_ry])) frame = cv2.seamlessClone(frame, frame_2, mask, (final_center_y, final_center_x), cv2.NORMAL_CLONE) return self.return_jpg(frame) # ------------------ # BLURRING FACE # ------------------ def effect_bluring_face(self): ret, frame = self.camera.read() if not ret: return False face = 0 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) rects = self.detector(gray, 0) for rect in rects: shape = self.predictor(gray, rect) shape = face_utils.shape_to_np(shape) (x, y, w, h) = face_utils.rect_to_bb(rect) face = frame[y:y+h, x:x+w] face = blurr_face(face) face = pixel_face(face) frame[y:y+h, x:x+w] = face return self.return_jpg(frame) # ------------------------ 
# DELAUNAY TRIANGLE # ------------------------ def effect_delaunay_triangle(self): ret, frame = self.camera.read() if not ret: return False jaw = [0, 17] r_eyebrow, l_eyebrow = [18, 22], [23, 27] nose = [28, 36] r_eye, l_eye = [37, 42], [43, 48] mouth = [49, 68] gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) mask = np.zeros_like(gray) faces = self.detector(gray, 0) for face in faces: landmark = self.predictor(gray, face) landmark_points = [] for n in range(68): x = landmark.part(n).x y = landmark.part(n).y landmark_points.append((x, y)) points = np.array(landmark_points, np.int32) convexhull = cv2.convexHull(points) cv2.fillConvexPoly(mask, convexhull, 255) face = cv2.bitwise_and(frame, frame, mask=mask) gray = delaunay_traingle(convexhull, landmark_points, gray, landmark_points) return self.return_jpg(gray) # -------------- # DOG FACE # -------------- def effect_dog_face(self): ret, frame = self.camera.read() if not ret: return False dog_nose = cv2.imread("../images/nose.png", -1) dog_ears = cv2.imread("../images/ears.png", -1) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) rects = self.detector(gray, 0) for rect in rects: shape = self.predictor(gray, rect) shape = face_utils.shape_to_np(shape) ears_width = int(abs(shape[0][0] - shape[16][0]) * 1.5) ears_height = int(ears_width * 0.4) ears_x = int((shape[22][0] + shape[23][0])/2) ears_y = shape[20][1] - 50 half_width = int(ears_width/2.0) half_height = int(ears_height/2.0) y1, y2 = ears_y - half_height, ears_y + half_height x1, x2 = ears_x - half_width, ears_x + half_width dog_ears = cv2.resize(dog_ears, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA) alpha_s = dog_ears[:, :, 3] / 255.0 alpha_l = 1.0 - alpha_s for c in range(0, 3): frame[y1:y2, x1:x2, c] = (alpha_s * dog_ears[:, :, c] + alpha_l * frame[y1:y2, x1:x2, c]) nose_width = int(abs(shape[36][0] - shape[32][0]) * 1.7) nose_height = int(nose_width * 0.7) (nose_x, nose_y) = shape[30] half_width = int(nose_width/2.0) half_height = 
int(nose_height/2.0) y1, y2 = nose_y - half_height, nose_y + half_height x1, x2 = nose_x - half_width, nose_x + half_width dog_nose = cv2.resize(dog_nose, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA) alpha_s = dog_nose[:, :, 3] / 255.0 alpha_l = 1.0 - alpha_s for c in range(0, 3): frame[y1:y2, x1:x2, c] = (alpha_s * dog_nose[:, :, c] + alpha_l * frame[y1:y2, x1:x2, c]) return self.return_jpg(frame) # ----------------- # FUNNY GLASSES # ----------------- def effect_glasses(self): ret, frame = self.camera.read() if not ret: return False glasses = cv2.imread("../images/glasses.png", -1) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) rects = self.detector(gray, 0) for rect in rects: shape = self.predictor(gray, rect) shape = face_utils.shape_to_np(shape) glasses_width = int(abs(shape[36][0] - shape[32][0]) * 4) glasses_height = int(glasses_width * 0.7) (glasses_x, glasses_y) = shape[30] glasses_y -= 20 half_width = int(glasses_width/2.0) half_height = int(glasses_height/2.0) y1, y2 = glasses_y - half_height, glasses_y + half_height x1, x2 = glasses_x - half_width, glasses_x + half_width glasses = cv2.resize(glasses, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA) alpha_s = glasses[:, :, 3] / 255.0 alpha_l = 1.0 - alpha_s for c in range(0, 3): frame[y1:y2, x1:x2, c] = (alpha_s * glasses[:, :, c] + alpha_l * frame[y1:y2, x1:x2, c]) return self.return_jpg(frame) # ---------------------- # CARTOON-ISH # ---------------------- def effect_cartoon(self): ret, frame = self.camera.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.medianBlur(gray, 5) edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 6) color = cv2.bilateralFilter(frame, 9, 150, 0.25) cartoon = cv2.bitwise_and(color, color, mask=edges) return self.return_jpg(cartoon) # ------------ # CANNY # ------------ def effect_canny(self): ret, frame = self.camera.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) blurred = 
cv2.GaussianBlur(gray, (3, 3), 0) median = np.median(blurred) l_edge = int(max(0, 0.77 * median)) u_edge = int(max(0, 1.33 * median)) canny = cv2.Canny(blurred, l_edge, u_edge) return self.return_jpg(canny) # ------------ # MIRRORS # ------------ def effect_mirror(self): ret, frame = self.camera.read() split = frame.shape[1] // 2 one_half = frame[:, :split, :] sec_half = cv2.flip(one_half, 1) frame = np.hstack((one_half, sec_half)) return self.return_jpg(frame) # --------------------- # ADDITIONAL FUNCTIONS # --------------------- def blurr_face(image): (h, w) = image.shape[:2] kernel_w = int(w/3.0) kernel_h = int(h/3.0) if kernel_w % 2 == 0: kernel_w -= 1 else: kernel_w = 5 if kernel_h % 2 == 0: kernel_h -= 1 else: kernel_h = 5 img = cv2.GaussianBlur(image, (kernel_w, kernel_h), 0) return img def pixel_face(image): blocks = 16 (h, w) = image.shape[:2] xSteps = np.linspace(0, w, blocks+1, dtype="int") ySteps = np.linspace(0, h, blocks+1, dtype="int") for i in range(1, len(ySteps)): for j in range(1, len(xSteps)): startX = xSteps[j - 1] startY = ySteps[i - 1] endX = xSteps[j] endY = ySteps[i] roi = image[startY:endY, startX:endX] (B, G, R) = [int(x) for x in cv2.mean(roi)[:3]] cv2.rectangle(image, (startX, startY), (endX, endY), (B, G, R), -1) return image def delaunay_traingle(convexHull, points, frame, landmark_points): rect = cv2.boundingRect(convexHull) subdiv = cv2.Subdiv2D(rect) subdiv.insert(landmark_points) triangles = subdiv.getTriangleList() triangles = np.array(triangles, dtype=np.int32) for t in triangles: A, B, C = (t[0], t[1]), (t[2], t[3]), (t[4], t[5]) cv2.line(frame, A, B, (255, 255, 255), 1, cv2.LINE_AA, 0) cv2.line(frame, B, C, (255, 255, 255), 1, cv2.LINE_AA, 0) cv2.line(frame, A, C, (255, 255, 255), 1, cv2.LINE_AA, 0) return frame
26.983158
102
0.640556
import cv2 import numpy as np import dlib from imutils import face_utils, translate class Camera(object): def __init__(self): self.camera = cv2.VideoCapture(0) p = "../data/shape_predictor_68_face_landmarks.dat" self.detector = dlib.get_frontal_face_detector() self.predictor = dlib.shape_predictor(p) self.effect = "contours" def __del__(self): self.camera.release() def return_jpg(self, frame): ret, jpeg = cv2.imencode('.jpeg', frame) return jpeg.tobytes() def return_effect(self): if self.effect == "contours": frame = self.effect_canny() elif self.effect == "baby": frame = self.effect_baby_face() elif self.effect == "blurr": frame = self.effect_bluring_face() elif self.effect == "cartoon": frame = self.effect_cartoon() elif self.effect == "doggy": frame = self.effect_dog_face() elif self.effect == "large": frame = self.effect_enlarged() elif self.effect == "mirrors": frame = self.effect_mirror() elif self.effect == "triangle": frame = self.effect_delaunay_triangle() elif self.effect == "glasses": frame = self.effect_glasses() return frame def effect_baby_face(self): ret, frame = self.camera.read() if not ret: return False offset = 4 scale = 1.3 frame_2 = frame.copy() mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) mask = np.zeros(frame.shape, frame.dtype) eye_mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) eye_mask = np.zeros(frame.shape, frame.dtype) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) rects = self.detector(gray, 0) for rect in rects: shape = self.predictor(gray, rect) shape = face_utils.shape_to_np(shape) l_eye, r_eye = shape[36:42], shape[42:48] (lx, ly, lw, lh) = cv2.boundingRect(l_eye) (rx, ry, rw, rh) = cv2.boundingRect(r_eye) l_eye = frame[ly-offset:ly+lh+offset, lx-offset:lx+lw+offset] r_eye = frame[ry-offset:ry+rh+offset, rx-offset:rx+rw+offset] center_ly = lx + int(lw / 2) center_lx = ly + int(lh / 2) + 20 center_ry = rx + int(rw / 2) center_rx = ry + int(rh / 2) + 20 mouth = shape[48:69] (mx, my, mw, mh) = cv2.boundingRect(mouth) mouth = 
frame[my-offset:my+mh+offset, mx-offset:mx+mw+offset] center_my = mx + int(mw / 2) center_mx = my + int(mh / 2) ly_scaled = int((l_eye.shape[1]*scale)/2) lx_scaled = int((l_eye.shape[0]*scale)/2) ry_scaled = int((r_eye.shape[1]*scale)/2) rx_scaled = int((r_eye.shape[0]*scale)/2) l_eye = cv2.resize(l_eye, (ly_scaled*2, lx_scaled*2), interpolation = cv2.INTER_AREA) r_eye = cv2.resize(r_eye, (ry_scaled*2, rx_scaled*2), interpolation = cv2.INTER_AREA) frame[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = l_eye mask[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = 255 frame[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = r_eye mask[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = 255 final_center_x = int(np.mean([center_lx, center_rx])) final_center_y = int(np.mean([center_ly, center_ry])) frame = cv2.seamlessClone(frame, frame_2, mask, (final_center_y, final_center_x), cv2.NORMAL_CLONE) return self.return_jpg(frame) def effect_enlarged(self): offset = 4 scale = 2 ret, frame = self.camera.read() if not ret: return False frame_2 = frame.copy() mask = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) mask = np.zeros(frame.shape, frame.dtype) l_eye, r_eye = 0, 0 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) rects = self.detector(gray, 0) for rect in rects: shape = self.predictor(gray, rect) shape = face_utils.shape_to_np(shape) l_eye, r_eye = shape[36:42], shape[42:48] (lx, ly, lw, lh) = cv2.boundingRect(l_eye) (rx, ry, rw, rh) = cv2.boundingRect(r_eye) l_eye = frame[ly-offset:ly+lh+offset, lx-offset:lx+lw+offset] r_eye = frame[ry-offset:ry+rh+offset, rx-offset:rx+rw+offset] center_ly = lx + int(lw / 2) center_lx = ly + int(lh / 2) + 20 center_ry = rx + int(rw / 2) center_rx = ry + int(rh / 2) + 20 mouth = shape[48:69] (mx, my, mw, mh) = cv2.boundingRect(mouth) mouth = frame[my-offset:my+mh+offset, mx-offset:mx+mw+offset] center_my = mx + 
int(mw / 2) center_mx = my + int(mh / 2) ly_scaled = int((l_eye.shape[1]*1.7)/2) lx_scaled = int((l_eye.shape[0]*1.7)/2) ry_scaled = int((r_eye.shape[1]*1.7)/2) rx_scaled = int((r_eye.shape[0]*1.7)/2) l_eye = cv2.resize(l_eye, (ly_scaled*2, lx_scaled*2), interpolation = cv2.INTER_AREA) r_eye = cv2.resize(r_eye, (ry_scaled*2, rx_scaled*2), interpolation = cv2.INTER_AREA) my_scaled = int((mouth.shape[1]*scale)/2) mx_scaled = int((mouth.shape[0]*scale)/2) mouth = cv2.resize(mouth, (my_scaled*2, mx_scaled*2), interpolation = cv2.INTER_AREA) frame[center_mx-mx_scaled:center_mx+mx_scaled, center_my-my_scaled:center_my+my_scaled] = mouth mask[center_mx-mx_scaled:center_mx+mx_scaled, center_my-my_scaled:center_my+my_scaled] = 255 frame[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = l_eye mask[center_lx-lx_scaled:center_lx+lx_scaled, center_ly-ly_scaled:center_ly+ly_scaled] = 255 frame[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = r_eye mask[center_rx-rx_scaled:center_rx+rx_scaled, center_ry-ry_scaled:center_ry+ry_scaled] = 255 final_center_x = int(np.mean([center_lx, center_mx, center_rx])) final_center_y = int(np.mean([center_ly, center_my, center_ry])) frame = cv2.seamlessClone(frame, frame_2, mask, (final_center_y, final_center_x), cv2.NORMAL_CLONE) return self.return_jpg(frame) def effect_bluring_face(self): ret, frame = self.camera.read() if not ret: return False face = 0 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) rects = self.detector(gray, 0) for rect in rects: shape = self.predictor(gray, rect) shape = face_utils.shape_to_np(shape) (x, y, w, h) = face_utils.rect_to_bb(rect) face = frame[y:y+h, x:x+w] face = blurr_face(face) face = pixel_face(face) frame[y:y+h, x:x+w] = face return self.return_jpg(frame) def effect_delaunay_triangle(self): ret, frame = self.camera.read() if not ret: return False jaw = [0, 17] r_eyebrow, l_eyebrow = [18, 22], [23, 27] nose = [28, 36] r_eye, l_eye = [37, 
42], [43, 48] mouth = [49, 68] gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) mask = np.zeros_like(gray) faces = self.detector(gray, 0) for face in faces: landmark = self.predictor(gray, face) landmark_points = [] for n in range(68): x = landmark.part(n).x y = landmark.part(n).y landmark_points.append((x, y)) points = np.array(landmark_points, np.int32) convexhull = cv2.convexHull(points) cv2.fillConvexPoly(mask, convexhull, 255) face = cv2.bitwise_and(frame, frame, mask=mask) gray = delaunay_traingle(convexhull, landmark_points, gray, landmark_points) return self.return_jpg(gray) def effect_dog_face(self): ret, frame = self.camera.read() if not ret: return False dog_nose = cv2.imread("../images/nose.png", -1) dog_ears = cv2.imread("../images/ears.png", -1) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) rects = self.detector(gray, 0) for rect in rects: shape = self.predictor(gray, rect) shape = face_utils.shape_to_np(shape) ears_width = int(abs(shape[0][0] - shape[16][0]) * 1.5) ears_height = int(ears_width * 0.4) ears_x = int((shape[22][0] + shape[23][0])/2) ears_y = shape[20][1] - 50 half_width = int(ears_width/2.0) half_height = int(ears_height/2.0) y1, y2 = ears_y - half_height, ears_y + half_height x1, x2 = ears_x - half_width, ears_x + half_width dog_ears = cv2.resize(dog_ears, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA) alpha_s = dog_ears[:, :, 3] / 255.0 alpha_l = 1.0 - alpha_s for c in range(0, 3): frame[y1:y2, x1:x2, c] = (alpha_s * dog_ears[:, :, c] + alpha_l * frame[y1:y2, x1:x2, c]) nose_width = int(abs(shape[36][0] - shape[32][0]) * 1.7) nose_height = int(nose_width * 0.7) (nose_x, nose_y) = shape[30] half_width = int(nose_width/2.0) half_height = int(nose_height/2.0) y1, y2 = nose_y - half_height, nose_y + half_height x1, x2 = nose_x - half_width, nose_x + half_width dog_nose = cv2.resize(dog_nose, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA) alpha_s = dog_nose[:, :, 3] / 255.0 alpha_l = 1.0 - alpha_s for c in 
range(0, 3): frame[y1:y2, x1:x2, c] = (alpha_s * dog_nose[:, :, c] + alpha_l * frame[y1:y2, x1:x2, c]) return self.return_jpg(frame) def effect_glasses(self): ret, frame = self.camera.read() if not ret: return False glasses = cv2.imread("../images/glasses.png", -1) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) rects = self.detector(gray, 0) for rect in rects: shape = self.predictor(gray, rect) shape = face_utils.shape_to_np(shape) glasses_width = int(abs(shape[36][0] - shape[32][0]) * 4) glasses_height = int(glasses_width * 0.7) (glasses_x, glasses_y) = shape[30] glasses_y -= 20 half_width = int(glasses_width/2.0) half_height = int(glasses_height/2.0) y1, y2 = glasses_y - half_height, glasses_y + half_height x1, x2 = glasses_x - half_width, glasses_x + half_width glasses = cv2.resize(glasses, (half_width*2, half_height*2), interpolation = cv2.INTER_AREA) alpha_s = glasses[:, :, 3] / 255.0 alpha_l = 1.0 - alpha_s for c in range(0, 3): frame[y1:y2, x1:x2, c] = (alpha_s * glasses[:, :, c] + alpha_l * frame[y1:y2, x1:x2, c]) return self.return_jpg(frame) def effect_cartoon(self): ret, frame = self.camera.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.medianBlur(gray, 5) edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 6) color = cv2.bilateralFilter(frame, 9, 150, 0.25) cartoon = cv2.bitwise_and(color, color, mask=edges) return self.return_jpg(cartoon) def effect_canny(self): ret, frame = self.camera.read() gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) blurred = cv2.GaussianBlur(gray, (3, 3), 0) median = np.median(blurred) l_edge = int(max(0, 0.77 * median)) u_edge = int(max(0, 1.33 * median)) canny = cv2.Canny(blurred, l_edge, u_edge) return self.return_jpg(canny) def effect_mirror(self): ret, frame = self.camera.read() split = frame.shape[1] // 2 one_half = frame[:, :split, :] sec_half = cv2.flip(one_half, 1) frame = np.hstack((one_half, sec_half)) return self.return_jpg(frame) def blurr_face(image): (h, w) 
= image.shape[:2] kernel_w = int(w/3.0) kernel_h = int(h/3.0) if kernel_w % 2 == 0: kernel_w -= 1 else: kernel_w = 5 if kernel_h % 2 == 0: kernel_h -= 1 else: kernel_h = 5 img = cv2.GaussianBlur(image, (kernel_w, kernel_h), 0) return img def pixel_face(image): blocks = 16 (h, w) = image.shape[:2] xSteps = np.linspace(0, w, blocks+1, dtype="int") ySteps = np.linspace(0, h, blocks+1, dtype="int") for i in range(1, len(ySteps)): for j in range(1, len(xSteps)): startX = xSteps[j - 1] startY = ySteps[i - 1] endX = xSteps[j] endY = ySteps[i] roi = image[startY:endY, startX:endX] (B, G, R) = [int(x) for x in cv2.mean(roi)[:3]] cv2.rectangle(image, (startX, startY), (endX, endY), (B, G, R), -1) return image def delaunay_traingle(convexHull, points, frame, landmark_points): rect = cv2.boundingRect(convexHull) subdiv = cv2.Subdiv2D(rect) subdiv.insert(landmark_points) triangles = subdiv.getTriangleList() triangles = np.array(triangles, dtype=np.int32) for t in triangles: A, B, C = (t[0], t[1]), (t[2], t[3]), (t[4], t[5]) cv2.line(frame, A, B, (255, 255, 255), 1, cv2.LINE_AA, 0) cv2.line(frame, B, C, (255, 255, 255), 1, cv2.LINE_AA, 0) cv2.line(frame, A, C, (255, 255, 255), 1, cv2.LINE_AA, 0) return frame
true
true
f7136501de57c4039a29d7b880a710979c3ee30a
3,125
py
Python
api_reface.py
andrewBatutin/emotional-ui-bot
efd429ac31ea707df0296648a82838568a938f46
[ "MIT" ]
null
null
null
api_reface.py
andrewBatutin/emotional-ui-bot
efd429ac31ea707df0296648a82838568a938f46
[ "MIT" ]
null
null
null
api_reface.py
andrewBatutin/emotional-ui-bot
efd429ac31ea707df0296648a82838568a938f46
[ "MIT" ]
null
null
null
import requests url = 'https://api.wildwildhack.ai/api/signedurl?extension=jpg' headers = {"Authorization": "Bearer 211acc3c360b4dccaffefbab0b14d0c4"} auth_token = '211acc3c360b4dccaffefbab0b14d0c4' json_headers = { 'authorization': f'Bearer {auth_token}', 'content-type': 'application/json', } signed_url_first = requests.post(url, json={"extension": "jpg"}, headers=headers).json() image_url_first = signed_url_first['url'] # "https://storage.googleapis.com/prod-reflect-videos/data/images/f590a9cb-172f-4fb0-8021-914e7afaa48d.jpg?GoogleAccessId=prod-images-admin@reface-prod-stage.iam.gserviceaccount.com&Expires=1631376256&Signature=0WBqKY1pfnU3oPP8AooDiMgmY9VPBi3LBVlrg%2BO9VGnoxytzX87dz%2FPS2mksb5GqHPVzWAsiIQBdGPPE2O1wUjCHOuH8gUpl5spgJnFPZGX2LlYx%2FxDyLcKOHpJ%2BrIcWNdaUMxlz%2B4K%2F2gHyUmd5bh5VdkodlPxmy59P5t3iIC8xBalu8fHxxPNBrftCKiF%2B6giAoe3l39MMkDBGyQi3yKs2xFHVj9pqcgAw0Ja5xcBpqxBAw0xS81L4efl%2Fe%2B1csanIMOvBRuGYiXHkTvhwu%2BRf2oMXr5L%2FPMakO0ElTxpKEH4%2BciIGbX6PrFzVYG4IGhsAsYemJShy5bFbnVRNVw==" print(image_url_first) img_put_headers = { 'content-type': 'image/jpeg', } # files = {'media': open('files/photo_2021-09-11_17-53-41.jpg', 'rb')} with open('/Users/admin/Documents/RF_hckt/t_bot/files/photo_2021-09-11_17-53-41.jpg', 'rb') as f: img_data = f.read() res = requests.put(image_url_first, headers=img_put_headers, data=img_data) img_path = res.url.split('?')[0] img_put_headers = { 'Content-Type': 'application/json', 'accept': 'application/json', 'authorization': f'Bearer {auth_token}', } res = requests.post('https://api.wildwildhack.ai/api/face', headers=img_put_headers, json={ 'path': 'https://storage.googleapis.com/prod-reflect-videos/data/images/81f2f429-2cca-464f-98a4-f5ae2c1e57e0.jpg'}) video_json_headers = { 'authorization': f'Bearer {auth_token}' } signed_video_url_first = requests.post(url, json={"extension": "mp4"}, headers=video_json_headers).json() video_url_first = signed_video_url_first['url'] print(video_url_first) video_put_headers = { 'content-type': 
'video/mp4', } # files = {'media': open('files/photo_2021-09-11_17-53-41.jpg', 'rb')} with open('files/IMG_5344.mp4', 'rb') as f: video_data = f.read() res = requests.put(video_url_first, headers=video_put_headers, data=video_data) video_path = res.url.split('?')[0] print(video_path) video_put_headers = { 'Content-Type': 'application/json', 'accept': 'application/json', 'authorization': f'Bearer {auth_token}', } res = requests.post('https://api.wildwildhack.ai/api/video', headers=video_put_headers, json={ 'path': 'https://storage.googleapis.com/prod-reflect-videos/data/inputs/b894f8dc-df16-4e24-bf76-63446fb01ebd.mp4'}) print(res) #wait res = requests.get('https://api.wildwildhack.ai/api/video/b894f8dc-df16-4e24-bf76-63446fb01ebd', headers=headers) print(res) #swap res = requests.post('https://api.wildwildhack.ai/api/swap-video', headers=headers, json={ "video_id": "b894f8dc-df16-4e24-bf76-63446fb01ebd", "facemapping":{"47f3b946-1358-4e28-b20b-c01e4b84750b":["e7c64439-bc38-4a50-9937-cf0d215e4c69"]}}) print(res)
38.580247
580
0.7568
import requests url = 'https://api.wildwildhack.ai/api/signedurl?extension=jpg' headers = {"Authorization": "Bearer 211acc3c360b4dccaffefbab0b14d0c4"} auth_token = '211acc3c360b4dccaffefbab0b14d0c4' json_headers = { 'authorization': f'Bearer {auth_token}', 'content-type': 'application/json', } signed_url_first = requests.post(url, json={"extension": "jpg"}, headers=headers).json() image_url_first = signed_url_first['url'] print(image_url_first) img_put_headers = { 'content-type': 'image/jpeg', } with open('/Users/admin/Documents/RF_hckt/t_bot/files/photo_2021-09-11_17-53-41.jpg', 'rb') as f: img_data = f.read() res = requests.put(image_url_first, headers=img_put_headers, data=img_data) img_path = res.url.split('?')[0] img_put_headers = { 'Content-Type': 'application/json', 'accept': 'application/json', 'authorization': f'Bearer {auth_token}', } res = requests.post('https://api.wildwildhack.ai/api/face', headers=img_put_headers, json={ 'path': 'https://storage.googleapis.com/prod-reflect-videos/data/images/81f2f429-2cca-464f-98a4-f5ae2c1e57e0.jpg'}) video_json_headers = { 'authorization': f'Bearer {auth_token}' } signed_video_url_first = requests.post(url, json={"extension": "mp4"}, headers=video_json_headers).json() video_url_first = signed_video_url_first['url'] print(video_url_first) video_put_headers = { 'content-type': 'video/mp4', } with open('files/IMG_5344.mp4', 'rb') as f: video_data = f.read() res = requests.put(video_url_first, headers=video_put_headers, data=video_data) video_path = res.url.split('?')[0] print(video_path) video_put_headers = { 'Content-Type': 'application/json', 'accept': 'application/json', 'authorization': f'Bearer {auth_token}', } res = requests.post('https://api.wildwildhack.ai/api/video', headers=video_put_headers, json={ 'path': 'https://storage.googleapis.com/prod-reflect-videos/data/inputs/b894f8dc-df16-4e24-bf76-63446fb01ebd.mp4'}) print(res) res = 
requests.get('https://api.wildwildhack.ai/api/video/b894f8dc-df16-4e24-bf76-63446fb01ebd', headers=headers) print(res) res = requests.post('https://api.wildwildhack.ai/api/swap-video', headers=headers, json={ "video_id": "b894f8dc-df16-4e24-bf76-63446fb01ebd", "facemapping":{"47f3b946-1358-4e28-b20b-c01e4b84750b":["e7c64439-bc38-4a50-9937-cf0d215e4c69"]}}) print(res)
true
true
f713651c03224ed89fcb1b8c38fddd4caabb17ed
507
py
Python
solutions/solution121.py
Satily/leetcode_python_solution
3f05fff7758d650469862bc28df9e4aa7b1d3203
[ "MIT" ]
3
2018-11-22T10:31:09.000Z
2019-05-05T15:53:48.000Z
solutions/solution121.py
Satily/leetcode_python_solution
3f05fff7758d650469862bc28df9e4aa7b1d3203
[ "MIT" ]
null
null
null
solutions/solution121.py
Satily/leetcode_python_solution
3f05fff7758d650469862bc28df9e4aa7b1d3203
[ "MIT" ]
null
null
null
class Solution: def maxProfit(self, prices): """ :type prices: List[int] :rtype: int """ if len(prices) == 0: return 0 min_price = prices[0] result = 0 for price in prices[1:]: result = max(price - min_price, result) min_price = min(price, min_price) return result if __name__ == "__main__": print(Solution().maxProfit([7, 1, 5, 3, 6, 4])) print(Solution().maxProfit([7, 6, 4, 3, 1]))
25.35
51
0.510848
class Solution: def maxProfit(self, prices): if len(prices) == 0: return 0 min_price = prices[0] result = 0 for price in prices[1:]: result = max(price - min_price, result) min_price = min(price, min_price) return result if __name__ == "__main__": print(Solution().maxProfit([7, 1, 5, 3, 6, 4])) print(Solution().maxProfit([7, 6, 4, 3, 1]))
true
true
f713659e60c2fed0cbbd86a1377c4de7d0d73233
9,413
py
Python
src/pyquickhelper/sphinxext/revealjs/directives.py
janjagusch/pyquickhelper
d42e1579ea20f5add9a9cd2b6d2d0a3533aee40b
[ "MIT" ]
18
2015-11-10T08:09:23.000Z
2022-02-16T11:46:45.000Z
src/pyquickhelper/sphinxext/revealjs/directives.py
janjagusch/pyquickhelper
d42e1579ea20f5add9a9cd2b6d2d0a3533aee40b
[ "MIT" ]
321
2015-06-14T21:34:28.000Z
2021-11-28T17:10:03.000Z
src/pyquickhelper/sphinxext/revealjs/directives.py
janjagusch/pyquickhelper
d42e1579ea20f5add9a9cd2b6d2d0a3533aee40b
[ "MIT" ]
10
2015-06-20T01:35:00.000Z
2022-01-19T15:54:32.000Z
# -*- coding: utf-8 -*- """ sphinxjp.themes.revealjs.directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :author: tell-k <ffk2005@gmail.com> :copyright: tell-k. All Rights Reserved. """ from docutils import nodes from docutils.parsers.rst import directives from docutils.parsers.rst.roles import set_classes from docutils.parsers.rst import Directive from . import compat __docformat__ = 'reStructuredText' class revealjs(nodes.General, nodes.Element): """ node for revealjs """ class rv_code(nodes.General, nodes.Element): """ node for revealjs code section """ class rv_small(nodes.General, nodes.Element): """ node for revealjs small text section """ class rv_note(nodes.General, nodes.Element): """ node for revealjs presentation note """ def heading(argument): """ directives choices for heading tag """ return directives.choice(argument, ('h1', 'h2', 'h3', 'h4', 'h5', 'h6')) class RevealjsDirective(Directive): """ Reveal.JS slide entry """ has_content = True required_arguments = 0 optional_arguments = 100 final_argument_whitespace = False option_spec = { 'id': directives.unchanged, 'class': directives.class_option, 'noheading': directives.flag, 'title-heading': heading, 'subtitle': directives.unchanged, 'subtitle-heading': directives.unchanged, 'data-autoslide': directives.unchanged, 'data-markdown': directives.unchanged, 'data-transition': directives.unchanged, 'data-transition-speed': directives.unchanged, 'data-background': directives.unchanged, 'data-background-repeat': directives.unchanged, 'data-background-size': directives.unchanged, 'data-background-transition': directives.unchanged, 'data-state': directives.unchanged, 'data-separator': directives.unchanged, 'data-separator-vertical': directives.unchanged, 'data-separator-notes': directives.unchanged, 'data-charset': directives.unchanged, } node_class = revealjs def run(self): """ build revealjs node """ set_classes(self.options) text = '\n'.join(self.content) node = self.node_class(text, **self.options) 
self.add_name(node) if "data-markdown" not in self.options: self.state.nested_parse(self.content, self.content_offset, node) if self.arguments: node['title'] = " ".join(self.arguments) node['noheading'] = ('noheading' in self.options) options_list = ( 'id', 'title-heading', 'subtitle-heading', 'data-autoslide', 'data-transition', 'data-transition-speed', 'data-background', 'data-background-repeat', 'data-background-size', 'data-background-transition', 'data-state', 'data-markdown', 'data-separator', 'data-separator-vertical', 'data-separator-notes', 'data-charset', ) for option in options_list: if option in self.options: node[option] = self.options.get(option) return [node] class RvSmallDirective(Directive): """ Create small text tag. """ has_content = True required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False option_spec = { 'class': directives.class_option, } node_class = rv_small def run(self): """ build rv_small node """ set_classes(self.options) self.assert_has_content() text = '\n'.join(self.content) node = self.node_class(text, **self.options) self.add_name(node) self.state.nested_parse(self.content, self.content_offset, node) return [node] class RvNoteDirective(Directive): """ Directive for a notes tag. 
""" has_content = True required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False option_spec = { 'class': directives.class_option, } node_class = rv_note def run(self): """ build rv_note node """ set_classes(self.options) self.assert_has_content() text = '\n'.join(self.content) node = self.node_class(text, **self.options) self.add_name(node) self.state.nested_parse(self.content, self.content_offset, node) return [node] class RvCodeDirective(Directive): """ Directive for a code block with highlight.js """ has_content = True required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False option_spec = { 'id': directives.unchanged, 'class': directives.class_option, } node_class = rv_code def run(self): """ build rv_code node """ set_classes(self.options) self.assert_has_content() node = self.node_class('\n'.join(self.content), **self.options) return [node] def visit_revealjs(self, node): """ build start tag for revealjs """ section_attr = {} markdown_headings = {"h1": "#", "h2": "##", "h3": "###", "h4": "####", "h5": "#####", "h6": "######"} if node.get("id"): section_attr.update({"ids": [node.get("id")]}) attr_list = ( 'data-autoslide', 'data-transition', 'data-transition-speed', 'data-background', 'data-background-repeat', 'data-background-size', 'data-background-transition', 'data-state', 'data-markdown', 'data-separator', 'data-separator-vertical', 'data-separator-notes', 'data-charset', ) for attr in attr_list: if node.get(attr) is not None: section_attr.update({attr: node.get(attr)}) title = None if node.get("title") and not node.get('noheading'): title = node.get("title") title_heading = node.get('title-heading', 'h2') subtitle = node.get("subtitle") subtitle_heading = node.get('subtitle-heading', 'h3') if node.get("data-markdown") is not None: title_base = compat.text("%(heading)s %(title)s \n") title_text = None if title: title_text = title_base % dict( heading=markdown_headings.get(title_heading), title=title ) 
subtitle_text = None if subtitle: subtitle_text = title_base % dict( heading=markdown_headings.get(subtitle_heading), title=subtitle ) else: title_base = compat.text("<%(heading)s>%(title)s</%(heading)s>\n") title_text = None if title: title_text = title_base % dict( title=title, heading=title_heading) subtitle_text = None if subtitle: subtitle_text = title_base % dict( title=subtitle, heading=subtitle_heading) if node.get("data-markdown") is not None: self.body.append(self.starttag(node, 'section', **section_attr)) if node.get("data-markdown") == compat.text(""): self.body.append("<script type='text/template'>\n") if title_text: self.body.append(title_text) if subtitle_text: self.body.append(subtitle_text) self.body.append(node.rawsource) self.body.append("</script>\n") else: self.body.append(self.starttag(node, 'section', **section_attr)) if title_text: self.body.append(title_text) if subtitle_text: self.body.append(subtitle_text) self.set_first_last(node) def depart_revealjs(self, node=None): """ build end tag for revealjs """ self.body.append('</section>\n') def visit_rv_code(self, node): """ build start tag for rv_code """ self.body.append(self.starttag(node, 'pre')) self.body.append("<code data-trim contenteditable>") self.body.append(compat.escape_html(node.rawsource)) def depart_rv_code(self, node=None): """ build end tag for rv_code """ self.body.append("</code>") self.body.append("</pre>\n") def visit_rv_small(self, node): """ build start tag for rv_small """ self.body.append(self.starttag(node, 'small')) self.set_first_last(node) def depart_rv_small(self, node=None): """ build end tag for rv_small""" self.body.append("</small>\n") def visit_rv_note(self, node): """ build start tag for rv_note """ self.body.append(self.starttag(node, 'aside', **{'class': 'notes'})) self.set_first_last(node) def depart_rv_note(self, node=None): """ build end tag for rv_note """ self.body.append("</aside>\n") def setup(app): """Initialize """ app.add_node(revealjs, 
html=(visit_revealjs, depart_revealjs)) app.add_node(rv_code, html=(visit_rv_code, depart_rv_code)) app.add_node(rv_note, html=(visit_rv_note, depart_rv_note)) app.add_node(rv_small, html=(visit_rv_small, depart_rv_small)) app.add_directive('revealjs', RevealjsDirective) app.add_directive('rv_code', RvCodeDirective) app.add_directive('rv_note', RvNoteDirective) app.add_directive('rv_small', RvSmallDirective) return app
29.052469
76
0.605014
from docutils import nodes from docutils.parsers.rst import directives from docutils.parsers.rst.roles import set_classes from docutils.parsers.rst import Directive from . import compat __docformat__ = 'reStructuredText' class revealjs(nodes.General, nodes.Element): class rv_code(nodes.General, nodes.Element): class rv_small(nodes.General, nodes.Element): class rv_note(nodes.General, nodes.Element): def heading(argument): return directives.choice(argument, ('h1', 'h2', 'h3', 'h4', 'h5', 'h6')) class RevealjsDirective(Directive): has_content = True required_arguments = 0 optional_arguments = 100 final_argument_whitespace = False option_spec = { 'id': directives.unchanged, 'class': directives.class_option, 'noheading': directives.flag, 'title-heading': heading, 'subtitle': directives.unchanged, 'subtitle-heading': directives.unchanged, 'data-autoslide': directives.unchanged, 'data-markdown': directives.unchanged, 'data-transition': directives.unchanged, 'data-transition-speed': directives.unchanged, 'data-background': directives.unchanged, 'data-background-repeat': directives.unchanged, 'data-background-size': directives.unchanged, 'data-background-transition': directives.unchanged, 'data-state': directives.unchanged, 'data-separator': directives.unchanged, 'data-separator-vertical': directives.unchanged, 'data-separator-notes': directives.unchanged, 'data-charset': directives.unchanged, } node_class = revealjs def run(self): set_classes(self.options) text = '\n'.join(self.content) node = self.node_class(text, **self.options) self.add_name(node) if "data-markdown" not in self.options: self.state.nested_parse(self.content, self.content_offset, node) if self.arguments: node['title'] = " ".join(self.arguments) node['noheading'] = ('noheading' in self.options) options_list = ( 'id', 'title-heading', 'subtitle-heading', 'data-autoslide', 'data-transition', 'data-transition-speed', 'data-background', 'data-background-repeat', 'data-background-size', 
'data-background-transition', 'data-state', 'data-markdown', 'data-separator', 'data-separator-vertical', 'data-separator-notes', 'data-charset', ) for option in options_list: if option in self.options: node[option] = self.options.get(option) return [node] class RvSmallDirective(Directive): has_content = True required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False option_spec = { 'class': directives.class_option, } node_class = rv_small def run(self): set_classes(self.options) self.assert_has_content() text = '\n'.join(self.content) node = self.node_class(text, **self.options) self.add_name(node) self.state.nested_parse(self.content, self.content_offset, node) return [node] class RvNoteDirective(Directive): has_content = True required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False option_spec = { 'class': directives.class_option, } node_class = rv_note def run(self): set_classes(self.options) self.assert_has_content() text = '\n'.join(self.content) node = self.node_class(text, **self.options) self.add_name(node) self.state.nested_parse(self.content, self.content_offset, node) return [node] class RvCodeDirective(Directive): has_content = True required_arguments = 0 optional_arguments = 0 final_argument_whitespace = False option_spec = { 'id': directives.unchanged, 'class': directives.class_option, } node_class = rv_code def run(self): set_classes(self.options) self.assert_has_content() node = self.node_class('\n'.join(self.content), **self.options) return [node] def visit_revealjs(self, node): section_attr = {} markdown_headings = {"h1": "#", "h2": "##", "h3": "###", "h4": "####", "h5": "#####", "h6": "######"} if node.get("id"): section_attr.update({"ids": [node.get("id")]}) attr_list = ( 'data-autoslide', 'data-transition', 'data-transition-speed', 'data-background', 'data-background-repeat', 'data-background-size', 'data-background-transition', 'data-state', 'data-markdown', 'data-separator', 
'data-separator-vertical', 'data-separator-notes', 'data-charset', ) for attr in attr_list: if node.get(attr) is not None: section_attr.update({attr: node.get(attr)}) title = None if node.get("title") and not node.get('noheading'): title = node.get("title") title_heading = node.get('title-heading', 'h2') subtitle = node.get("subtitle") subtitle_heading = node.get('subtitle-heading', 'h3') if node.get("data-markdown") is not None: title_base = compat.text("%(heading)s %(title)s \n") title_text = None if title: title_text = title_base % dict( heading=markdown_headings.get(title_heading), title=title ) subtitle_text = None if subtitle: subtitle_text = title_base % dict( heading=markdown_headings.get(subtitle_heading), title=subtitle ) else: title_base = compat.text("<%(heading)s>%(title)s</%(heading)s>\n") title_text = None if title: title_text = title_base % dict( title=title, heading=title_heading) subtitle_text = None if subtitle: subtitle_text = title_base % dict( title=subtitle, heading=subtitle_heading) if node.get("data-markdown") is not None: self.body.append(self.starttag(node, 'section', **section_attr)) if node.get("data-markdown") == compat.text(""): self.body.append("<script type='text/template'>\n") if title_text: self.body.append(title_text) if subtitle_text: self.body.append(subtitle_text) self.body.append(node.rawsource) self.body.append("</script>\n") else: self.body.append(self.starttag(node, 'section', **section_attr)) if title_text: self.body.append(title_text) if subtitle_text: self.body.append(subtitle_text) self.set_first_last(node) def depart_revealjs(self, node=None): self.body.append('</section>\n') def visit_rv_code(self, node): self.body.append(self.starttag(node, 'pre')) self.body.append("<code data-trim contenteditable>") self.body.append(compat.escape_html(node.rawsource)) def depart_rv_code(self, node=None): self.body.append("</code>") self.body.append("</pre>\n") def visit_rv_small(self, node): self.body.append(self.starttag(node, 
'small')) self.set_first_last(node) def depart_rv_small(self, node=None): self.body.append("</small>\n") def visit_rv_note(self, node): self.body.append(self.starttag(node, 'aside', **{'class': 'notes'})) self.set_first_last(node) def depart_rv_note(self, node=None): self.body.append("</aside>\n") def setup(app): app.add_node(revealjs, html=(visit_revealjs, depart_revealjs)) app.add_node(rv_code, html=(visit_rv_code, depart_rv_code)) app.add_node(rv_note, html=(visit_rv_note, depart_rv_note)) app.add_node(rv_small, html=(visit_rv_small, depart_rv_small)) app.add_directive('revealjs', RevealjsDirective) app.add_directive('rv_code', RvCodeDirective) app.add_directive('rv_note', RvNoteDirective) app.add_directive('rv_small', RvSmallDirective) return app
true
true
f713678e890e9c07893c1fa2762e7b73dc4d8fbd
1,323
py
Python
src/rollit/runtime/base.py
russells-crockpot/roll-with-it
09b386553a151e19194f48b9384f97953a90953a
[ "MIT" ]
2
2020-08-28T13:27:37.000Z
2020-09-28T03:36:44.000Z
src/rollit/runtime/base.py
russells-crockpot/rollit
09b386553a151e19194f48b9384f97953a90953a
[ "MIT" ]
null
null
null
src/rollit/runtime/base.py
russells-crockpot/rollit
09b386553a151e19194f48b9384f97953a90953a
[ "MIT" ]
null
null
null
""" """ import contextvars from ..langref import ATOM_TYPES __all__ = ['is_atom', 'context'] _CURRENT_CONTEXT = contextvars.ContextVar('current-runtime-context', default=None) def is_atom(value): """ """ return isinstance(value, ATOM_TYPES) class _CurrentContextProxy: __slots__ = () __getattr__ = lambda s, n: getattr(_CURRENT_CONTEXT.get(), n) __setattr__ = lambda s, n, v: setattr(_CURRENT_CONTEXT.get(), n, v) __delattr__ = lambda s, n: delattr(_CURRENT_CONTEXT.get(), n) __getitem__ = lambda s, n: _CURRENT_CONTEXT.get().__getitem__(n) __setitem__ = lambda s, n, v: _CURRENT_CONTEXT.get().__setitem__(n, v) __delitem__ = lambda s, n: _CURRENT_CONTEXT.get().__delitem__(n) __enter__ = lambda s: _CURRENT_CONTEXT.get().__enter__() __exit__ = lambda s: _CURRENT_CONTEXT.get().__exit__() __contains__ = lambda s, n: _CURRENT_CONTEXT.get().__contains__(n) __dir__ = lambda s: dir(_CURRENT_CONTEXT.get()) __call__ = lambda s, v: _CURRENT_CONTEXT.get()(v) __str__ = lambda s: _CURRENT_CONTEXT.get().__str__() __repr__ = lambda s: _CURRENT_CONTEXT.get().__repr__() __bool__ = lambda s: _CURRENT_CONTEXT.get() is not None context = _CurrentContextProxy() """The current :class:`~rollit.runtime.core.RuntimeContext`. """ del _CurrentContextProxy
32.268293
82
0.708239
import contextvars from ..langref import ATOM_TYPES __all__ = ['is_atom', 'context'] _CURRENT_CONTEXT = contextvars.ContextVar('current-runtime-context', default=None) def is_atom(value): return isinstance(value, ATOM_TYPES) class _CurrentContextProxy: __slots__ = () __getattr__ = lambda s, n: getattr(_CURRENT_CONTEXT.get(), n) __setattr__ = lambda s, n, v: setattr(_CURRENT_CONTEXT.get(), n, v) __delattr__ = lambda s, n: delattr(_CURRENT_CONTEXT.get(), n) __getitem__ = lambda s, n: _CURRENT_CONTEXT.get().__getitem__(n) __setitem__ = lambda s, n, v: _CURRENT_CONTEXT.get().__setitem__(n, v) __delitem__ = lambda s, n: _CURRENT_CONTEXT.get().__delitem__(n) __enter__ = lambda s: _CURRENT_CONTEXT.get().__enter__() __exit__ = lambda s: _CURRENT_CONTEXT.get().__exit__() __contains__ = lambda s, n: _CURRENT_CONTEXT.get().__contains__(n) __dir__ = lambda s: dir(_CURRENT_CONTEXT.get()) __call__ = lambda s, v: _CURRENT_CONTEXT.get()(v) __str__ = lambda s: _CURRENT_CONTEXT.get().__str__() __repr__ = lambda s: _CURRENT_CONTEXT.get().__repr__() __bool__ = lambda s: _CURRENT_CONTEXT.get() is not None context = _CurrentContextProxy() del _CurrentContextProxy
true
true
f713684d37c44543e335be7022c376236a889bc2
23,150
py
Python
mindhome_alpha/erpnext/erpnext_integrations/doctype/amazon_mws_settings/amazon_mws_api.py
Mindhome/field_service
3aea428815147903eb9af1d0c1b4b9fc7faed057
[ "MIT" ]
1
2021-04-29T14:55:29.000Z
2021-04-29T14:55:29.000Z
mindhome_alpha/erpnext/erpnext_integrations/doctype/amazon_mws_settings/amazon_mws_api.py
Mindhome/field_service
3aea428815147903eb9af1d0c1b4b9fc7faed057
[ "MIT" ]
null
null
null
mindhome_alpha/erpnext/erpnext_integrations/doctype/amazon_mws_settings/amazon_mws_api.py
Mindhome/field_service
3aea428815147903eb9af1d0c1b4b9fc7faed057
[ "MIT" ]
1
2021-04-29T14:39:01.000Z
2021-04-29T14:39:01.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Basic interface to Amazon MWS # Based on http://code.google.com/p/amazon-mws-python # Extended to include finances object from __future__ import unicode_literals import urllib import hashlib import hmac import base64 import six from erpnext.erpnext_integrations.doctype.amazon_mws_settings import xml_utils import re try: from xml.etree.ElementTree import ParseError as XMLError except ImportError: from xml.parsers.expat import ExpatError as XMLError from time import strftime, gmtime from requests import request from requests.exceptions import HTTPError __all__ = [ 'Feeds', 'Inventory', 'MWSError', 'Reports', 'Orders', 'Products', 'Recommendations', 'Sellers', 'Finances' ] # See https://images-na.ssl-images-amazon.com/images/G/01/mwsportal/doc/en_US/bde/MWSDeveloperGuide._V357736853_.pdf page 8 # for a list of the end points and marketplace IDs MARKETPLACES = { "CA": "https://mws.amazonservices.ca", #A2EUQ1WTGCTBG2 "US": "https://mws.amazonservices.com", #ATVPDKIKX0DER", "DE": "https://mws-eu.amazonservices.com", #A1PA6795UKMFR9 "ES": "https://mws-eu.amazonservices.com", #A1RKKUPIHCS9HS "FR": "https://mws-eu.amazonservices.com", #A13V1IB3VIYZZH "IN": "https://mws.amazonservices.in", #A21TJRUUN4KGV "IT": "https://mws-eu.amazonservices.com", #APJ6JRA9NG5V4 "UK": "https://mws-eu.amazonservices.com", #A1F83G8C2ARO7P "JP": "https://mws.amazonservices.jp", #A1VC38T7YXB528 "CN": "https://mws.amazonservices.com.cn", #AAHKV2X7AFYLW "AE": " https://mws.amazonservices.ae", #A2VIGQ35RCS4UG "MX": "https://mws.amazonservices.com.mx", #A1AM78C64UM0Y8 "BR": "https://mws.amazonservices.com", #A2Q3Y263D00KWC } class MWSError(Exception): """ Main MWS Exception class """ # Allows quick access to the response object. # Do not rely on this attribute, always check if its not None. 
response = None def calc_md5(string): """Calculates the MD5 encryption for the given string """ md = hashlib.md5() md.update(string) return base64.encodestring(md.digest()).strip('\n') if six.PY2 \ else base64.encodebytes(md.digest()).decode().strip() def remove_empty(d): """ Helper function that removes all keys from a dictionary (d), that have an empty value. """ for key in list(d): if not d[key]: del d[key] return d def remove_namespace(xml): xml = xml.decode('utf-8') regex = re.compile(' xmlns(:ns2)?="[^"]+"|(ns2:)|(xml:)') return regex.sub('', xml) class DictWrapper(object): def __init__(self, xml, rootkey=None): self.original = xml self._rootkey = rootkey self._mydict = xml_utils.xml2dict().fromstring(remove_namespace(xml)) self._response_dict = self._mydict.get(list(self._mydict)[0], self._mydict) @property def parsed(self): if self._rootkey: return self._response_dict.get(self._rootkey) else: return self._response_dict class DataWrapper(object): """ Text wrapper in charge of validating the hash sent by Amazon. """ def __init__(self, data, header): self.original = data if 'content-md5' in header: hash_ = calc_md5(self.original) if header['content-md5'] != hash_: raise MWSError("Wrong Contentlength, maybe amazon error...") @property def parsed(self): return self.original class MWS(object): """ Base Amazon API class """ # This is used to post/get to the different uris used by amazon per api # ie. /Orders/2011-01-01 # All subclasses must define their own URI only if needed URI = "/" # The API version varies in most amazon APIs VERSION = "2009-01-01" # There seem to be some xml namespace issues. therefore every api subclass # is recommended to define its namespace, so that it can be referenced # like so AmazonAPISubclass.NS. # For more information see http://stackoverflow.com/a/8719461/389453 NS = '' # Some APIs are available only to either a "Merchant" or "Seller" # the type of account needs to be sent in every call to the amazon MWS. 
# This constant defines the exact name of the parameter Amazon expects # for the specific API being used. # All subclasses need to define this if they require another account type # like "Merchant" in which case you define it like so. # ACCOUNT_TYPE = "Merchant" # Which is the name of the parameter for that specific account type. ACCOUNT_TYPE = "SellerId" def __init__(self, access_key, secret_key, account_id, region='US', domain='', uri="", version=""): self.access_key = access_key self.secret_key = secret_key self.account_id = account_id self.version = version or self.VERSION self.uri = uri or self.URI if domain: self.domain = domain elif region in MARKETPLACES: self.domain = MARKETPLACES[region] else: error_msg = "Incorrect region supplied ('%(region)s'). Must be one of the following: %(marketplaces)s" % { "marketplaces" : ', '.join(MARKETPLACES.keys()), "region" : region, } raise MWSError(error_msg) def make_request(self, extra_data, method="GET", **kwargs): """Make request to Amazon MWS API with these parameters """ # Remove all keys with an empty value because # Amazon's MWS does not allow such a thing. extra_data = remove_empty(extra_data) params = { 'AWSAccessKeyId': self.access_key, self.ACCOUNT_TYPE: self.account_id, 'SignatureVersion': '2', 'Timestamp': self.get_timestamp(), 'Version': self.version, 'SignatureMethod': 'HmacSHA256', } params.update(extra_data) quote = urllib.quote if six.PY2 else urllib.parse.quote request_description = '&'.join(['%s=%s' % (k, quote(params[k], safe='-_.~')) for k in sorted(params)]) signature = self.calc_signature(method, request_description) url = '%s%s?%s&Signature=%s' % (self.domain, self.uri, request_description, quote(signature)) headers = {'User-Agent': 'python-amazon-mws/0.0.1 (Language=Python)'} headers.update(kwargs.get('extra_headers', {})) try: # Some might wonder as to why i don't pass the params dict as the params argument to request. 
# My answer is, here i have to get the url parsed string of params in order to sign it, so # if i pass the params dict as params to request, request will repeat that step because it will need # to convert the dict to a url parsed string, so why do it twice if i can just pass the full url :). response = request(method, url, data=kwargs.get('body', ''), headers=headers) response.raise_for_status() # When retrieving data from the response object, # be aware that response.content returns the content in bytes while response.text calls # response.content and converts it to unicode. data = response.content # I do not check the headers to decide which content structure to server simply because sometimes # Amazon's MWS API returns XML error responses with "text/plain" as the Content-Type. try: parsed_response = DictWrapper(data, extra_data.get("Action") + "Result") except XMLError: parsed_response = DataWrapper(data, response.headers) except HTTPError as e: error = MWSError(str(e)) error.response = e.response raise error # Store the response object in the parsed_response for quick access parsed_response.response = response return parsed_response def get_service_status(self): """ Returns a GREEN, GREEN_I, YELLOW or RED status. Depending on the status/availability of the API its being called from. """ return self.make_request(extra_data=dict(Action='GetServiceStatus')) def calc_signature(self, method, request_description): """Calculate MWS signature to interface with Amazon """ sig_data = method + '\n' + self.domain.replace('https://', '').lower() + '\n' + self.uri + '\n' + request_description sig_data = sig_data.encode('utf-8') secret_key = self.secret_key.encode('utf-8') digest = hmac.new(secret_key, sig_data, hashlib.sha256).digest() return base64.b64encode(digest).decode('utf-8') def get_timestamp(self): """ Returns the current timestamp in proper format. 
""" return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime()) def enumerate_param(self, param, values): """ Builds a dictionary of an enumerated parameter. Takes any iterable and returns a dictionary. ie. enumerate_param('MarketplaceIdList.Id', (123, 345, 4343)) returns { MarketplaceIdList.Id.1: 123, MarketplaceIdList.Id.2: 345, MarketplaceIdList.Id.3: 4343 } """ params = {} if values is not None: if not param.endswith('.'): param = "%s." % param for num, value in enumerate(values): params['%s%d' % (param, (num + 1))] = value return params class Feeds(MWS): """ Amazon MWS Feeds API """ ACCOUNT_TYPE = "Merchant" def submit_feed(self, feed, feed_type, marketplaceids=None, content_type="text/xml", purge='false'): """ Uploads a feed ( xml or .tsv ) to the seller's inventory. Can be used for creating/updating products on Amazon. """ data = dict(Action='SubmitFeed', FeedType=feed_type, PurgeAndReplace=purge) data.update(self.enumerate_param('MarketplaceIdList.Id.', marketplaceids)) md = calc_md5(feed) return self.make_request(data, method="POST", body=feed, extra_headers={'Content-MD5': md, 'Content-Type': content_type}) def get_feed_submission_list(self, feedids=None, max_count=None, feedtypes=None, processingstatuses=None, fromdate=None, todate=None): """ Returns a list of all feed submissions submitted in the previous 90 days. That match the query parameters. 
""" data = dict(Action='GetFeedSubmissionList', MaxCount=max_count, SubmittedFromDate=fromdate, SubmittedToDate=todate,) data.update(self.enumerate_param('FeedSubmissionIdList.Id', feedids)) data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes)) data.update(self.enumerate_param('FeedProcessingStatusList.Status.', processingstatuses)) return self.make_request(data) def get_submission_list_by_next_token(self, token): data = dict(Action='GetFeedSubmissionListByNextToken', NextToken=token) return self.make_request(data) def get_feed_submission_count(self, feedtypes=None, processingstatuses=None, fromdate=None, todate=None): data = dict(Action='GetFeedSubmissionCount', SubmittedFromDate=fromdate, SubmittedToDate=todate) data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes)) data.update(self.enumerate_param('FeedProcessingStatusList.Status.', processingstatuses)) return self.make_request(data) def cancel_feed_submissions(self, feedids=None, feedtypes=None, fromdate=None, todate=None): data = dict(Action='CancelFeedSubmissions', SubmittedFromDate=fromdate, SubmittedToDate=todate) data.update(self.enumerate_param('FeedSubmissionIdList.Id.', feedids)) data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes)) return self.make_request(data) def get_feed_submission_result(self, feedid): data = dict(Action='GetFeedSubmissionResult', FeedSubmissionId=feedid) return self.make_request(data) class Reports(MWS): """ Amazon MWS Reports API """ ACCOUNT_TYPE = "Merchant" ## REPORTS ### def get_report(self, report_id): data = dict(Action='GetReport', ReportId=report_id) return self.make_request(data) def get_report_count(self, report_types=(), acknowledged=None, fromdate=None, todate=None): data = dict(Action='GetReportCount', Acknowledged=acknowledged, AvailableFromDate=fromdate, AvailableToDate=todate) data.update(self.enumerate_param('ReportTypeList.Type.', report_types)) return self.make_request(data) def get_report_list(self, requestids=(), 
max_count=None, types=(), acknowledged=None, fromdate=None, todate=None): data = dict(Action='GetReportList', Acknowledged=acknowledged, AvailableFromDate=fromdate, AvailableToDate=todate, MaxCount=max_count) data.update(self.enumerate_param('ReportRequestIdList.Id.', requestids)) data.update(self.enumerate_param('ReportTypeList.Type.', types)) return self.make_request(data) def get_report_list_by_next_token(self, token): data = dict(Action='GetReportListByNextToken', NextToken=token) return self.make_request(data) def get_report_request_count(self, report_types=(), processingstatuses=(), fromdate=None, todate=None): data = dict(Action='GetReportRequestCount', RequestedFromDate=fromdate, RequestedToDate=todate) data.update(self.enumerate_param('ReportTypeList.Type.', report_types)) data.update(self.enumerate_param('ReportProcessingStatusList.Status.', processingstatuses)) return self.make_request(data) def get_report_request_list(self, requestids=(), types=(), processingstatuses=(), max_count=None, fromdate=None, todate=None): data = dict(Action='GetReportRequestList', MaxCount=max_count, RequestedFromDate=fromdate, RequestedToDate=todate) data.update(self.enumerate_param('ReportRequestIdList.Id.', requestids)) data.update(self.enumerate_param('ReportTypeList.Type.', types)) data.update(self.enumerate_param('ReportProcessingStatusList.Status.', processingstatuses)) return self.make_request(data) def get_report_request_list_by_next_token(self, token): data = dict(Action='GetReportRequestListByNextToken', NextToken=token) return self.make_request(data) def request_report(self, report_type, start_date=None, end_date=None, marketplaceids=()): data = dict(Action='RequestReport', ReportType=report_type, StartDate=start_date, EndDate=end_date) data.update(self.enumerate_param('MarketplaceIdList.Id.', marketplaceids)) return self.make_request(data) ### ReportSchedule ### def get_report_schedule_list(self, types=()): data = dict(Action='GetReportScheduleList') 
data.update(self.enumerate_param('ReportTypeList.Type.', types)) return self.make_request(data) def get_report_schedule_count(self, types=()): data = dict(Action='GetReportScheduleCount') data.update(self.enumerate_param('ReportTypeList.Type.', types)) return self.make_request(data) class Orders(MWS): """ Amazon Orders API """ URI = "/Orders/2013-09-01" VERSION = "2013-09-01" NS = '{https://mws.amazonservices.com/Orders/2011-01-01}' def list_orders(self, marketplaceids, created_after=None, created_before=None, lastupdatedafter=None, lastupdatedbefore=None, orderstatus=(), fulfillment_channels=(), payment_methods=(), buyer_email=None, seller_orderid=None, max_results='100'): data = dict(Action='ListOrders', CreatedAfter=created_after, CreatedBefore=created_before, LastUpdatedAfter=lastupdatedafter, LastUpdatedBefore=lastupdatedbefore, BuyerEmail=buyer_email, SellerOrderId=seller_orderid, MaxResultsPerPage=max_results, ) data.update(self.enumerate_param('OrderStatus.Status.', orderstatus)) data.update(self.enumerate_param('MarketplaceId.Id.', marketplaceids)) data.update(self.enumerate_param('FulfillmentChannel.Channel.', fulfillment_channels)) data.update(self.enumerate_param('PaymentMethod.Method.', payment_methods)) return self.make_request(data) def list_orders_by_next_token(self, token): data = dict(Action='ListOrdersByNextToken', NextToken=token) return self.make_request(data) def get_order(self, amazon_order_ids): data = dict(Action='GetOrder') data.update(self.enumerate_param('AmazonOrderId.Id.', amazon_order_ids)) return self.make_request(data) def list_order_items(self, amazon_order_id): data = dict(Action='ListOrderItems', AmazonOrderId=amazon_order_id) return self.make_request(data) def list_order_items_by_next_token(self, token): data = dict(Action='ListOrderItemsByNextToken', NextToken=token) return self.make_request(data) class Products(MWS): """ Amazon MWS Products API """ URI = '/Products/2011-10-01' VERSION = '2011-10-01' NS = 
'{http://mws.amazonservices.com/schema/Products/2011-10-01}' def list_matching_products(self, marketplaceid, query, contextid=None): """ Returns a list of products and their attributes, ordered by relevancy, based on a search query that you specify. Your search query can be a phrase that describes the product or it can be a product identifier such as a UPC, EAN, ISBN, or JAN. """ data = dict(Action='ListMatchingProducts', MarketplaceId=marketplaceid, Query=query, QueryContextId=contextid) return self.make_request(data) def get_matching_product(self, marketplaceid, asins): """ Returns a list of products and their attributes, based on a list of ASIN values that you specify. """ data = dict(Action='GetMatchingProduct', MarketplaceId=marketplaceid) data.update(self.enumerate_param('ASINList.ASIN.', asins)) return self.make_request(data) def get_matching_product_for_id(self, marketplaceid, type, id): """ Returns a list of products and their attributes, based on a list of product identifier values (asin, sellersku, upc, ean, isbn and JAN) Added in Fourth Release, API version 2011-10-01 """ data = dict(Action='GetMatchingProductForId', MarketplaceId=marketplaceid, IdType=type) data.update(self.enumerate_param('IdList.Id', id)) return self.make_request(data) def get_competitive_pricing_for_sku(self, marketplaceid, skus): """ Returns the current competitive pricing of a product, based on the SellerSKU and MarketplaceId that you specify. """ data = dict(Action='GetCompetitivePricingForSKU', MarketplaceId=marketplaceid) data.update(self.enumerate_param('SellerSKUList.SellerSKU.', skus)) return self.make_request(data) def get_competitive_pricing_for_asin(self, marketplaceid, asins): """ Returns the current competitive pricing of a product, based on the ASIN and MarketplaceId that you specify. 
""" data = dict(Action='GetCompetitivePricingForASIN', MarketplaceId=marketplaceid) data.update(self.enumerate_param('ASINList.ASIN.', asins)) return self.make_request(data) def get_lowest_offer_listings_for_sku(self, marketplaceid, skus, condition="Any", excludeme="False"): data = dict(Action='GetLowestOfferListingsForSKU', MarketplaceId=marketplaceid, ItemCondition=condition, ExcludeMe=excludeme) data.update(self.enumerate_param('SellerSKUList.SellerSKU.', skus)) return self.make_request(data) def get_lowest_offer_listings_for_asin(self, marketplaceid, asins, condition="Any", excludeme="False"): data = dict(Action='GetLowestOfferListingsForASIN', MarketplaceId=marketplaceid, ItemCondition=condition, ExcludeMe=excludeme) data.update(self.enumerate_param('ASINList.ASIN.', asins)) return self.make_request(data) def get_product_categories_for_sku(self, marketplaceid, sku): data = dict(Action='GetProductCategoriesForSKU', MarketplaceId=marketplaceid, SellerSKU=sku) return self.make_request(data) def get_product_categories_for_asin(self, marketplaceid, asin): data = dict(Action='GetProductCategoriesForASIN', MarketplaceId=marketplaceid, ASIN=asin) return self.make_request(data) def get_my_price_for_sku(self, marketplaceid, skus, condition=None): data = dict(Action='GetMyPriceForSKU', MarketplaceId=marketplaceid, ItemCondition=condition) data.update(self.enumerate_param('SellerSKUList.SellerSKU.', skus)) return self.make_request(data) def get_my_price_for_asin(self, marketplaceid, asins, condition=None): data = dict(Action='GetMyPriceForASIN', MarketplaceId=marketplaceid, ItemCondition=condition) data.update(self.enumerate_param('ASINList.ASIN.', asins)) return self.make_request(data) class Sellers(MWS): """ Amazon MWS Sellers API """ URI = '/Sellers/2011-07-01' VERSION = '2011-07-01' NS = '{http://mws.amazonservices.com/schema/Sellers/2011-07-01}' def list_marketplace_participations(self): """ Returns a list of marketplaces a seller can participate in and a list of 
participations that include seller-specific information in that marketplace. The operation returns only those marketplaces where the seller's account is in an active state. """ data = dict(Action='ListMarketplaceParticipations') return self.make_request(data) def list_marketplace_participations_by_next_token(self, token): """ Takes a "NextToken" and returns the same information as "list_marketplace_participations". Based on the "NextToken". """ data = dict(Action='ListMarketplaceParticipations', NextToken=token) return self.make_request(data) #### Fulfillment APIs #### class InboundShipments(MWS): URI = "/FulfillmentInboundShipment/2010-10-01" VERSION = '2010-10-01' # To be completed class Inventory(MWS): """ Amazon MWS Inventory Fulfillment API """ URI = '/FulfillmentInventory/2010-10-01' VERSION = '2010-10-01' NS = "{http://mws.amazonaws.com/FulfillmentInventory/2010-10-01}" def list_inventory_supply(self, skus=(), datetime=None, response_group='Basic'): """ Returns information on available inventory """ data = dict(Action='ListInventorySupply', QueryStartDateTime=datetime, ResponseGroup=response_group, ) data.update(self.enumerate_param('SellerSkus.member.', skus)) return self.make_request(data, "POST") def list_inventory_supply_by_next_token(self, token): data = dict(Action='ListInventorySupplyByNextToken', NextToken=token) return self.make_request(data, "POST") class OutboundShipments(MWS): URI = "/FulfillmentOutboundShipment/2010-10-01" VERSION = "2010-10-01" # To be completed class Recommendations(MWS): """ Amazon MWS Recommendations API """ URI = '/Recommendations/2013-04-01' VERSION = '2013-04-01' NS = "{https://mws.amazonservices.com/Recommendations/2013-04-01}" def get_last_updated_time_for_recommendations(self, marketplaceid): """ Checks whether there are active recommendations for each category for the given marketplace, and if there are, returns the time when recommendations were last updated for each category. 
""" data = dict(Action='GetLastUpdatedTimeForRecommendations', MarketplaceId=marketplaceid) return self.make_request(data, "POST") def list_recommendations(self, marketplaceid, recommendationcategory=None): """ Returns your active recommendations for a specific category or for all categories for a specific marketplace. """ data = dict(Action="ListRecommendations", MarketplaceId=marketplaceid, RecommendationCategory=recommendationcategory) return self.make_request(data, "POST") def list_recommendations_by_next_token(self, token): """ Returns the next page of recommendations using the NextToken parameter. """ data = dict(Action="ListRecommendationsByNextToken", NextToken=token) return self.make_request(data, "POST") class Finances(MWS): """ Amazon Finances API""" URI = '/Finances/2015-05-01' VERSION = '2015-05-01' NS = "{https://mws.amazonservices.com/Finances/2015-05-01}" def list_financial_events(self , posted_after=None, posted_before=None, amazon_order_id=None, max_results='100'): data = dict(Action='ListFinancialEvents', PostedAfter=posted_after, PostedBefore=posted_before, AmazonOrderId=amazon_order_id, MaxResultsPerPage=max_results, ) return self.make_request(data)
35.451761
123
0.742073
from __future__ import unicode_literals import urllib import hashlib import hmac import base64 import six from erpnext.erpnext_integrations.doctype.amazon_mws_settings import xml_utils import re try: from xml.etree.ElementTree import ParseError as XMLError except ImportError: from xml.parsers.expat import ExpatError as XMLError from time import strftime, gmtime from requests import request from requests.exceptions import HTTPError __all__ = [ 'Feeds', 'Inventory', 'MWSError', 'Reports', 'Orders', 'Products', 'Recommendations', 'Sellers', 'Finances' ] MARKETPLACES = { "CA": "https://mws.amazonservices.ca", "US": "https://mws.amazonservices.com", "DE": "https://mws-eu.amazonservices.com", #A1PA6795UKMFR9 "ES": "https://mws-eu.amazonservices.com", #A1RKKUPIHCS9HS "FR": "https://mws-eu.amazonservices.com", #A13V1IB3VIYZZH "IN": "https://mws.amazonservices.in", #A21TJRUUN4KGV "IT": "https://mws-eu.amazonservices.com", #APJ6JRA9NG5V4 "UK": "https://mws-eu.amazonservices.com", #A1F83G8C2ARO7P "JP": "https://mws.amazonservices.jp", #A1VC38T7YXB528 "CN": "https://mws.amazonservices.com.cn", #AAHKV2X7AFYLW "AE": " https://mws.amazonservices.ae", #A2VIGQ35RCS4UG "MX": "https://mws.amazonservices.com.mx", #A1AM78C64UM0Y8 "BR": "https://mws.amazonservices.com", #A2Q3Y263D00KWC } class MWSError(Exception): # Allows quick access to the response object. # Do not rely on this attribute, always check if its not None. 
response = None def calc_md5(string): md = hashlib.md5() md.update(string) return base64.encodestring(md.digest()).strip('\n') if six.PY2 \ else base64.encodebytes(md.digest()).decode().strip() def remove_empty(d): for key in list(d): if not d[key]: del d[key] return d def remove_namespace(xml): xml = xml.decode('utf-8') regex = re.compile(' xmlns(:ns2)?="[^"]+"|(ns2:)|(xml:)') return regex.sub('', xml) class DictWrapper(object): def __init__(self, xml, rootkey=None): self.original = xml self._rootkey = rootkey self._mydict = xml_utils.xml2dict().fromstring(remove_namespace(xml)) self._response_dict = self._mydict.get(list(self._mydict)[0], self._mydict) @property def parsed(self): if self._rootkey: return self._response_dict.get(self._rootkey) else: return self._response_dict class DataWrapper(object): def __init__(self, data, header): self.original = data if 'content-md5' in header: hash_ = calc_md5(self.original) if header['content-md5'] != hash_: raise MWSError("Wrong Contentlength, maybe amazon error...") @property def parsed(self): return self.original class MWS(object): URI = "/" VERSION = "2009-01-01" NS = '' ACCOUNT_TYPE = "SellerId" def __init__(self, access_key, secret_key, account_id, region='US', domain='', uri="", version=""): self.access_key = access_key self.secret_key = secret_key self.account_id = account_id self.version = version or self.VERSION self.uri = uri or self.URI if domain: self.domain = domain elif region in MARKETPLACES: self.domain = MARKETPLACES[region] else: error_msg = "Incorrect region supplied ('%(region)s'). 
Must be one of the following: %(marketplaces)s" % { "marketplaces" : ', '.join(MARKETPLACES.keys()), "region" : region, } raise MWSError(error_msg) def make_request(self, extra_data, method="GET", **kwargs): extra_data = remove_empty(extra_data) params = { 'AWSAccessKeyId': self.access_key, self.ACCOUNT_TYPE: self.account_id, 'SignatureVersion': '2', 'Timestamp': self.get_timestamp(), 'Version': self.version, 'SignatureMethod': 'HmacSHA256', } params.update(extra_data) quote = urllib.quote if six.PY2 else urllib.parse.quote request_description = '&'.join(['%s=%s' % (k, quote(params[k], safe='-_.~')) for k in sorted(params)]) signature = self.calc_signature(method, request_description) url = '%s%s?%s&Signature=%s' % (self.domain, self.uri, request_description, quote(signature)) headers = {'User-Agent': 'python-amazon-mws/0.0.1 (Language=Python)'} headers.update(kwargs.get('extra_headers', {})) try: # Some might wonder as to why i don't pass the params dict as the params argument to request. 
response = request(method, url, data=kwargs.get('body', ''), headers=headers) response.raise_for_status() data = response.content try: parsed_response = DictWrapper(data, extra_data.get("Action") + "Result") except XMLError: parsed_response = DataWrapper(data, response.headers) except HTTPError as e: error = MWSError(str(e)) error.response = e.response raise error # Store the response object in the parsed_response for quick access parsed_response.response = response return parsed_response def get_service_status(self): return self.make_request(extra_data=dict(Action='GetServiceStatus')) def calc_signature(self, method, request_description): sig_data = method + '\n' + self.domain.replace('https://', '').lower() + '\n' + self.uri + '\n' + request_description sig_data = sig_data.encode('utf-8') secret_key = self.secret_key.encode('utf-8') digest = hmac.new(secret_key, sig_data, hashlib.sha256).digest() return base64.b64encode(digest).decode('utf-8') def get_timestamp(self): return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime()) def enumerate_param(self, param, values): params = {} if values is not None: if not param.endswith('.'): param = "%s." 
% param for num, value in enumerate(values): params['%s%d' % (param, (num + 1))] = value return params class Feeds(MWS): ACCOUNT_TYPE = "Merchant" def submit_feed(self, feed, feed_type, marketplaceids=None, content_type="text/xml", purge='false'): data = dict(Action='SubmitFeed', FeedType=feed_type, PurgeAndReplace=purge) data.update(self.enumerate_param('MarketplaceIdList.Id.', marketplaceids)) md = calc_md5(feed) return self.make_request(data, method="POST", body=feed, extra_headers={'Content-MD5': md, 'Content-Type': content_type}) def get_feed_submission_list(self, feedids=None, max_count=None, feedtypes=None, processingstatuses=None, fromdate=None, todate=None): data = dict(Action='GetFeedSubmissionList', MaxCount=max_count, SubmittedFromDate=fromdate, SubmittedToDate=todate,) data.update(self.enumerate_param('FeedSubmissionIdList.Id', feedids)) data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes)) data.update(self.enumerate_param('FeedProcessingStatusList.Status.', processingstatuses)) return self.make_request(data) def get_submission_list_by_next_token(self, token): data = dict(Action='GetFeedSubmissionListByNextToken', NextToken=token) return self.make_request(data) def get_feed_submission_count(self, feedtypes=None, processingstatuses=None, fromdate=None, todate=None): data = dict(Action='GetFeedSubmissionCount', SubmittedFromDate=fromdate, SubmittedToDate=todate) data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes)) data.update(self.enumerate_param('FeedProcessingStatusList.Status.', processingstatuses)) return self.make_request(data) def cancel_feed_submissions(self, feedids=None, feedtypes=None, fromdate=None, todate=None): data = dict(Action='CancelFeedSubmissions', SubmittedFromDate=fromdate, SubmittedToDate=todate) data.update(self.enumerate_param('FeedSubmissionIdList.Id.', feedids)) data.update(self.enumerate_param('FeedTypeList.Type.', feedtypes)) return self.make_request(data) def get_feed_submission_result(self, 
feedid): data = dict(Action='GetFeedSubmissionResult', FeedSubmissionId=feedid) return self.make_request(data) class Reports(MWS): ACCOUNT_TYPE = "Merchant" ## REPORTS ### def get_report(self, report_id): data = dict(Action='GetReport', ReportId=report_id) return self.make_request(data) def get_report_count(self, report_types=(), acknowledged=None, fromdate=None, todate=None): data = dict(Action='GetReportCount', Acknowledged=acknowledged, AvailableFromDate=fromdate, AvailableToDate=todate) data.update(self.enumerate_param('ReportTypeList.Type.', report_types)) return self.make_request(data) def get_report_list(self, requestids=(), max_count=None, types=(), acknowledged=None, fromdate=None, todate=None): data = dict(Action='GetReportList', Acknowledged=acknowledged, AvailableFromDate=fromdate, AvailableToDate=todate, MaxCount=max_count) data.update(self.enumerate_param('ReportRequestIdList.Id.', requestids)) data.update(self.enumerate_param('ReportTypeList.Type.', types)) return self.make_request(data) def get_report_list_by_next_token(self, token): data = dict(Action='GetReportListByNextToken', NextToken=token) return self.make_request(data) def get_report_request_count(self, report_types=(), processingstatuses=(), fromdate=None, todate=None): data = dict(Action='GetReportRequestCount', RequestedFromDate=fromdate, RequestedToDate=todate) data.update(self.enumerate_param('ReportTypeList.Type.', report_types)) data.update(self.enumerate_param('ReportProcessingStatusList.Status.', processingstatuses)) return self.make_request(data) def get_report_request_list(self, requestids=(), types=(), processingstatuses=(), max_count=None, fromdate=None, todate=None): data = dict(Action='GetReportRequestList', MaxCount=max_count, RequestedFromDate=fromdate, RequestedToDate=todate) data.update(self.enumerate_param('ReportRequestIdList.Id.', requestids)) data.update(self.enumerate_param('ReportTypeList.Type.', types)) 
data.update(self.enumerate_param('ReportProcessingStatusList.Status.', processingstatuses)) return self.make_request(data) def get_report_request_list_by_next_token(self, token): data = dict(Action='GetReportRequestListByNextToken', NextToken=token) return self.make_request(data) def request_report(self, report_type, start_date=None, end_date=None, marketplaceids=()): data = dict(Action='RequestReport', ReportType=report_type, StartDate=start_date, EndDate=end_date) data.update(self.enumerate_param('MarketplaceIdList.Id.', marketplaceids)) return self.make_request(data) ### ReportSchedule ### def get_report_schedule_list(self, types=()): data = dict(Action='GetReportScheduleList') data.update(self.enumerate_param('ReportTypeList.Type.', types)) return self.make_request(data) def get_report_schedule_count(self, types=()): data = dict(Action='GetReportScheduleCount') data.update(self.enumerate_param('ReportTypeList.Type.', types)) return self.make_request(data) class Orders(MWS): URI = "/Orders/2013-09-01" VERSION = "2013-09-01" NS = '{https://mws.amazonservices.com/Orders/2011-01-01}' def list_orders(self, marketplaceids, created_after=None, created_before=None, lastupdatedafter=None, lastupdatedbefore=None, orderstatus=(), fulfillment_channels=(), payment_methods=(), buyer_email=None, seller_orderid=None, max_results='100'): data = dict(Action='ListOrders', CreatedAfter=created_after, CreatedBefore=created_before, LastUpdatedAfter=lastupdatedafter, LastUpdatedBefore=lastupdatedbefore, BuyerEmail=buyer_email, SellerOrderId=seller_orderid, MaxResultsPerPage=max_results, ) data.update(self.enumerate_param('OrderStatus.Status.', orderstatus)) data.update(self.enumerate_param('MarketplaceId.Id.', marketplaceids)) data.update(self.enumerate_param('FulfillmentChannel.Channel.', fulfillment_channels)) data.update(self.enumerate_param('PaymentMethod.Method.', payment_methods)) return self.make_request(data) def list_orders_by_next_token(self, token): data = 
dict(Action='ListOrdersByNextToken', NextToken=token) return self.make_request(data) def get_order(self, amazon_order_ids): data = dict(Action='GetOrder') data.update(self.enumerate_param('AmazonOrderId.Id.', amazon_order_ids)) return self.make_request(data) def list_order_items(self, amazon_order_id): data = dict(Action='ListOrderItems', AmazonOrderId=amazon_order_id) return self.make_request(data) def list_order_items_by_next_token(self, token): data = dict(Action='ListOrderItemsByNextToken', NextToken=token) return self.make_request(data) class Products(MWS): URI = '/Products/2011-10-01' VERSION = '2011-10-01' NS = '{http://mws.amazonservices.com/schema/Products/2011-10-01}' def list_matching_products(self, marketplaceid, query, contextid=None): data = dict(Action='ListMatchingProducts', MarketplaceId=marketplaceid, Query=query, QueryContextId=contextid) return self.make_request(data) def get_matching_product(self, marketplaceid, asins): data = dict(Action='GetMatchingProduct', MarketplaceId=marketplaceid) data.update(self.enumerate_param('ASINList.ASIN.', asins)) return self.make_request(data) def get_matching_product_for_id(self, marketplaceid, type, id): data = dict(Action='GetMatchingProductForId', MarketplaceId=marketplaceid, IdType=type) data.update(self.enumerate_param('IdList.Id', id)) return self.make_request(data) def get_competitive_pricing_for_sku(self, marketplaceid, skus): data = dict(Action='GetCompetitivePricingForSKU', MarketplaceId=marketplaceid) data.update(self.enumerate_param('SellerSKUList.SellerSKU.', skus)) return self.make_request(data) def get_competitive_pricing_for_asin(self, marketplaceid, asins): data = dict(Action='GetCompetitivePricingForASIN', MarketplaceId=marketplaceid) data.update(self.enumerate_param('ASINList.ASIN.', asins)) return self.make_request(data) def get_lowest_offer_listings_for_sku(self, marketplaceid, skus, condition="Any", excludeme="False"): data = dict(Action='GetLowestOfferListingsForSKU', 
MarketplaceId=marketplaceid, ItemCondition=condition, ExcludeMe=excludeme) data.update(self.enumerate_param('SellerSKUList.SellerSKU.', skus)) return self.make_request(data) def get_lowest_offer_listings_for_asin(self, marketplaceid, asins, condition="Any", excludeme="False"): data = dict(Action='GetLowestOfferListingsForASIN', MarketplaceId=marketplaceid, ItemCondition=condition, ExcludeMe=excludeme) data.update(self.enumerate_param('ASINList.ASIN.', asins)) return self.make_request(data) def get_product_categories_for_sku(self, marketplaceid, sku): data = dict(Action='GetProductCategoriesForSKU', MarketplaceId=marketplaceid, SellerSKU=sku) return self.make_request(data) def get_product_categories_for_asin(self, marketplaceid, asin): data = dict(Action='GetProductCategoriesForASIN', MarketplaceId=marketplaceid, ASIN=asin) return self.make_request(data) def get_my_price_for_sku(self, marketplaceid, skus, condition=None): data = dict(Action='GetMyPriceForSKU', MarketplaceId=marketplaceid, ItemCondition=condition) data.update(self.enumerate_param('SellerSKUList.SellerSKU.', skus)) return self.make_request(data) def get_my_price_for_asin(self, marketplaceid, asins, condition=None): data = dict(Action='GetMyPriceForASIN', MarketplaceId=marketplaceid, ItemCondition=condition) data.update(self.enumerate_param('ASINList.ASIN.', asins)) return self.make_request(data) class Sellers(MWS): URI = '/Sellers/2011-07-01' VERSION = '2011-07-01' NS = '{http://mws.amazonservices.com/schema/Sellers/2011-07-01}' def list_marketplace_participations(self): data = dict(Action='ListMarketplaceParticipations') return self.make_request(data) def list_marketplace_participations_by_next_token(self, token): data = dict(Action='ListMarketplaceParticipations', NextToken=token) return self.make_request(data) #### Fulfillment APIs #### class InboundShipments(MWS): URI = "/FulfillmentInboundShipment/2010-10-01" VERSION = '2010-10-01' # To be completed class Inventory(MWS): URI = 
'/FulfillmentInventory/2010-10-01' VERSION = '2010-10-01' NS = "{http://mws.amazonaws.com/FulfillmentInventory/2010-10-01}" def list_inventory_supply(self, skus=(), datetime=None, response_group='Basic'): data = dict(Action='ListInventorySupply', QueryStartDateTime=datetime, ResponseGroup=response_group, ) data.update(self.enumerate_param('SellerSkus.member.', skus)) return self.make_request(data, "POST") def list_inventory_supply_by_next_token(self, token): data = dict(Action='ListInventorySupplyByNextToken', NextToken=token) return self.make_request(data, "POST") class OutboundShipments(MWS): URI = "/FulfillmentOutboundShipment/2010-10-01" VERSION = "2010-10-01" # To be completed class Recommendations(MWS): URI = '/Recommendations/2013-04-01' VERSION = '2013-04-01' NS = "{https://mws.amazonservices.com/Recommendations/2013-04-01}" def get_last_updated_time_for_recommendations(self, marketplaceid): data = dict(Action='GetLastUpdatedTimeForRecommendations', MarketplaceId=marketplaceid) return self.make_request(data, "POST") def list_recommendations(self, marketplaceid, recommendationcategory=None): data = dict(Action="ListRecommendations", MarketplaceId=marketplaceid, RecommendationCategory=recommendationcategory) return self.make_request(data, "POST") def list_recommendations_by_next_token(self, token): data = dict(Action="ListRecommendationsByNextToken", NextToken=token) return self.make_request(data, "POST") class Finances(MWS): URI = '/Finances/2015-05-01' VERSION = '2015-05-01' NS = "{https://mws.amazonservices.com/Finances/2015-05-01}" def list_financial_events(self , posted_after=None, posted_before=None, amazon_order_id=None, max_results='100'): data = dict(Action='ListFinancialEvents', PostedAfter=posted_after, PostedBefore=posted_before, AmazonOrderId=amazon_order_id, MaxResultsPerPage=max_results, ) return self.make_request(data)
true
true
f713697ec665439fe91bca9ca26c5120fda30287
13,019
py
Python
dali/test/python/test_operator_gaussian_blur.py
RudyVenguswamy/DALI
1456689cbb06a6d6f2c46c3fd231d1c296808e00
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
dali/test/python/test_operator_gaussian_blur.py
RudyVenguswamy/DALI
1456689cbb06a6d6f2c46c3fd231d1c296808e00
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
dali/test/python/test_operator_gaussian_blur.py
RudyVenguswamy/DALI
1456689cbb06a6d6f2c46c3fd231d1c296808e00
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from nvidia.dali.pipeline import Pipeline import nvidia.dali.types as types import nvidia.dali.fn as fn import numpy as np import cv2 from scipy.ndimage import convolve1d import os from nose.tools import raises from nose.plugins.attrib import attr from test_utils import get_dali_extra_path, check_batch, compare_pipelines, RandomlyShapedDataIterator, dali_type data_root = get_dali_extra_path() images_dir = os.path.join(data_root, 'db', 'single', 'jpeg') test_iters = 4 shape_layout_axes_cases = [((20, 20, 30, 3), "DHWC", 3), ((20, 20, 30), "", 3), ((20, 30, 3), "HWC", 2), ((20, 30), "HW", 2), ((3, 30, 20), "CWH", 2), ((5, 20, 30, 3), "FHWC", 2), ((5, 10, 10, 7, 3), "FDHWC", 3), ((5, 3, 20, 30), "FCHW", 2), ((3, 5, 10, 10, 7), "CFDHW", 3)] def to_batch(tl, batch_size): return [np.array(tl[i]) for i in range(batch_size)] def to_cv_sigma(sigma, axes=2): if sigma is None: return (0,) * axes elif isinstance(sigma, (int, float)): return (sigma,) * axes elif (isinstance(sigma, np.ndarray) and len(sigma.shape) == 0): return (float(sigma),) * axes elif len(sigma) == 1: return (sigma[0],) * axes return tuple(reversed(sigma)) def to_cv_win_size(window_size, axes=2, sigma=None): if window_size is None: # when using cv2.getGaussianKernel we need to always provide window size if sigma is not None: sigma = to_cv_sigma(sigma, axes) return tuple([int(3 * s + 0.5) * 2 + 1 for s in sigma]) 
return (0,) * axes elif isinstance(window_size, int): return (int(window_size),) * axes elif (isinstance(window_size, np.ndarray) and len(window_size.shape) == 0): return (int(window_size),) * axes elif len(window_size) == 1: return (int(window_size[0]),) * axes # OpenCV shape is the other way round: (width, height) return tuple(int(x) for x in reversed(window_size)) def gaussian_cv(image, sigma, window_size): sigma_x, sigma_y = to_cv_sigma(sigma) window_size_cv = to_cv_win_size(window_size) # compute on floats and round like a sane person (in mathematically complicit way) blurred = cv2.GaussianBlur(np.float32(image), window_size_cv, sigmaX=sigma_x, sigmaY=sigma_y) return np.uint8(blurred + 0.5) def gaussian_baseline(image, sigma, window_size, axes=2, skip_axes=0, dtype=np.uint8): sigma_xyz = to_cv_sigma(sigma, axes) win_xyz = to_cv_win_size(window_size, axes, sigma) filters = [cv2.getGaussianKernel(win_xyz[i], sigma_xyz[i]) for i in range(axes)] filters = [np.float32(f).squeeze() for f in filters] filters.reverse() for i in reversed(range(axes)): axis = i + skip_axes image = convolve1d(np.float32(image), filters[i], axis, mode="mirror") if dtype == np.float32: return image else: return dtype(image + 0.5) def get_gaussian_pipe(batch_size, sigma, window_size, op_type): pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0) with pipe: input, _ = fn.file_reader(file_root=images_dir, shard_id=0, num_shards=1) decoded = fn.image_decoder(input, device="cpu", output_type=types.RGB) if op_type == "gpu": decoded = decoded.gpu() blurred = fn.gaussian_blur(decoded, device=op_type, sigma=sigma, window_size=window_size) pipe.set_outputs(blurred, decoded) return pipe def check_gaussian_blur(batch_size, sigma, window_size, op_type="cpu"): pipe = get_gaussian_pipe(batch_size, sigma, window_size, op_type) pipe.build() for _ in range(test_iters): result, input = pipe.run() if op_type == "gpu": result = result.as_cpu() input = input.as_cpu() input = to_batch(input, 
batch_size) baseline_cv = [gaussian_cv(img, sigma, window_size) for img in input] check_batch(result, baseline_cv, batch_size, max_allowed_error=1, expected_layout="HWC") def test_image_gaussian_blur(): for dev in ["cpu", "gpu"]: for sigma in [1.0]: for window_size in [3, 5, None]: if sigma is None and window_size is None: continue yield check_gaussian_blur, 10, sigma, window_size, dev # OpenCv uses fixed values for small windows that are different that Gaussian funcion yield check_gaussian_blur, 10, None, 11, dev @attr('slow') def test_image_gaussian_blur_slow(): for dev in ["cpu", "gpu"]: for sigma in [1.0, [1.0, 2.0]]: for window_size in [3, 5, [7, 5], [5, 9], None]: if sigma is None and window_size is None: continue yield check_gaussian_blur, 10, sigma, window_size, dev # OpenCv uses fixed values for small windows that are different that Gaussian funcion for window_size in [15, [17, 31]]: yield check_gaussian_blur, 10, None, window_size, dev def check_gaussian_blur_cpu_gpu(batch_size, sigma, window_size): cpu_pipe = get_gaussian_pipe(batch_size, sigma, window_size, "cpu") gpu_pipe = get_gaussian_pipe(batch_size, sigma, window_size, "gpu") compare_pipelines(cpu_pipe, gpu_pipe, batch_size, 16, max_allowed_error=1) def test_gaussian_blur_cpu_gpu(): for window_size in [5, [7, 13]]: yield check_gaussian_blur_cpu_gpu, 10, None, window_size @attr('slow') def test_gaussian_blur_cpu_gpu_slow(): for sigma in [1.0, [1.0, 2.0], None]: for window_size in [3, 5, [7, 5], [5, 9], 11, 15, 31, None]: if sigma is None and window_size is None: continue yield check_gaussian_blur_cpu_gpu, 10, sigma, window_size def count_skip_axes(layout): if layout.startswith("FC") or layout.startswith("CF"): return 2 elif layout.startswith("F") or layout.startswith("C"): return 1 else: return 0 def check_generic_gaussian_blur( batch_size, sigma, window_size, shape, layout, axes, op_type="cpu", in_dtype=np.uint8, out_dtype=types.NO_TYPE): pipe = Pipeline(batch_size=batch_size, num_threads=4, 
device_id=0) data = RandomlyShapedDataIterator(batch_size, max_shape=shape, dtype=in_dtype) # Extract the numpy type from DALI, we can have float32 or the same as input if out_dtype == types.NO_TYPE: result_type = in_dtype elif dali_type(in_dtype) == out_dtype: result_type = in_dtype else: result_type = np.float32 with pipe: input = fn.external_source(data, layout=layout) if op_type == "gpu": input = input.gpu() blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma, window_size=window_size, dtype=out_dtype) pipe.set_outputs(blurred, input) pipe.build() for _ in range(test_iters): result, input = pipe.run() if op_type == "gpu": result = result.as_cpu() input = input.as_cpu() input = to_batch(input, batch_size) skip_axes = count_skip_axes(layout) baseline = [ gaussian_baseline(img, sigma, window_size, axes, skip_axes, dtype=result_type) for img in input] max_error = 1 if result_type != np.float32 else 1e-04 check_batch(result, baseline, batch_size, max_allowed_error=max_error, expected_layout=layout) # Generate tests for single or per-axis sigma and window_size arguments def generate_generic_cases(dev, t_in, t_out): for shape, layout, axes in shape_layout_axes_cases: for sigma in [1.0, [1.0, 2.0, 3.0]]: for window_size in [3, 5, [7, 5, 9], [3, 5, 9], None]: if isinstance(sigma, list): sigma = sigma[0:axes] if isinstance(window_size, list): window_size = window_size[0:axes] yield check_generic_gaussian_blur, 10, sigma, window_size, shape, layout, axes, dev, t_in, t_out for window_size in [11, 15]: yield check_generic_gaussian_blur, 10, None, window_size, shape, layout, axes, dev, t_in, t_out def test_generic_gaussian_blur(): for dev in ["cpu", "gpu"]: for (t_in, t_out) in [(np.uint8, types.NO_TYPE), (np.float32, types.FLOAT), (np.uint8, types.FLOAT)]: yield from generate_generic_cases(dev, t_in, t_out) @attr('slow') def test_generic_gaussian_blur_slow(): for dev in ["cpu", "gpu"]: for t_in in [np.uint8, np.int32, np.float32]: for t_out in [types.NO_TYPE, 
types.FLOAT, dali_type(t_in)]: yield from generate_generic_cases(dev, t_in, t_out) def check_per_sample_gaussian_blur( batch_size, sigma_dim, window_size_dim, shape, layout, axes, op_type="cpu"): pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0) data = RandomlyShapedDataIterator(batch_size, max_shape=shape) with pipe: if sigma_dim is not None: sigma = fn.random.uniform(range=[0.5, 3], shape=[sigma_dim]) sigma_arg = sigma else: # placeholder, so we can return something sigma = fn.coin_flip(probability=0) sigma_arg = None if window_size_dim is not None: window_radius = fn.random.uniform(range=[5, 10], shape=[window_size_dim]) window_size = fn.cast(window_radius, dtype=types.INT32) * 2 + 1 window_arg = window_size else: window_size = fn.coin_flip(probability=0) window_arg = None input = fn.external_source(data, layout=layout) if op_type == "gpu": input = input.gpu() blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma_arg, window_size=window_arg) pipe.set_outputs(blurred, input, sigma, window_size) pipe.build() for _ in range(test_iters): result, input, sigma, window_size = pipe.run() if op_type == "gpu": result = result.as_cpu() input = input.as_cpu() input = to_batch(input, batch_size) sigma = to_batch(sigma, batch_size) window_size = to_batch(window_size, batch_size) baseline = [] for i in range(batch_size): sigma_arg = sigma[i] if sigma is not None else None window_arg = window_size[i] if window_size_dim is not None else None skip_axes = count_skip_axes(layout) baseline.append(gaussian_baseline(input[i], sigma_arg, window_arg, axes, skip_axes)) check_batch(result, baseline, batch_size, max_allowed_error=1, expected_layout=layout) # TODO(klecki): consider checking mixed ArgumentInput/Scalar value cases def test_per_sample_gaussian_blur(): for dev in ["cpu", "gpu"]: for shape, layout, axes in shape_layout_axes_cases: for sigma_dim in [None, 1, axes]: for window_size_dim in [None, 1, axes]: if sigma_dim is None and window_size_dim is None: 
continue yield check_per_sample_gaussian_blur, 10, sigma_dim, window_size_dim, shape, layout, axes, dev @raises(RuntimeError) def check_fail_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type, in_dtype=np.uint8, out_dtype=types.NO_TYPE): check_generic_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type, in_dtype, out_dtype) def test_fail_gaussian_blur(): for dev in ["cpu", "gpu"]: # Check layout and channel placement errors for shape, layout, axes in [((20, 20, 30, 3), "DHCW", 3), ((5, 20, 30, 3), "HFWC", 2), ((5, 10, 10, 10, 7, 3), "FWXYZC", 4), ((5, 3, 20, 3, 30), "FCHCW", 2), ((5, 3, 20, 3, 30), "FCCHW", 2)]: yield check_fail_gaussian_blur, 10, 1.0, 11, shape, layout, axes, dev # Negative, disallowed or both unspecified values of sigma and window size yield check_fail_gaussian_blur, 10, 0.0, 0, (100, 20, 3), "HWC", 3, dev yield check_fail_gaussian_blur, 10, -1.0, 0, (100, 20, 3), "HWC", 3, dev yield check_fail_gaussian_blur, 10, 0.0, -11, (100, 20, 3), "HWC", 3, dev yield check_fail_gaussian_blur, 10, 0.0, 2, (100, 20, 3), "HWC", 3, dev
42.132686
135
0.637837
from nvidia.dali.pipeline import Pipeline import nvidia.dali.types as types import nvidia.dali.fn as fn import numpy as np import cv2 from scipy.ndimage import convolve1d import os from nose.tools import raises from nose.plugins.attrib import attr from test_utils import get_dali_extra_path, check_batch, compare_pipelines, RandomlyShapedDataIterator, dali_type data_root = get_dali_extra_path() images_dir = os.path.join(data_root, 'db', 'single', 'jpeg') test_iters = 4 shape_layout_axes_cases = [((20, 20, 30, 3), "DHWC", 3), ((20, 20, 30), "", 3), ((20, 30, 3), "HWC", 2), ((20, 30), "HW", 2), ((3, 30, 20), "CWH", 2), ((5, 20, 30, 3), "FHWC", 2), ((5, 10, 10, 7, 3), "FDHWC", 3), ((5, 3, 20, 30), "FCHW", 2), ((3, 5, 10, 10, 7), "CFDHW", 3)] def to_batch(tl, batch_size): return [np.array(tl[i]) for i in range(batch_size)] def to_cv_sigma(sigma, axes=2): if sigma is None: return (0,) * axes elif isinstance(sigma, (int, float)): return (sigma,) * axes elif (isinstance(sigma, np.ndarray) and len(sigma.shape) == 0): return (float(sigma),) * axes elif len(sigma) == 1: return (sigma[0],) * axes return tuple(reversed(sigma)) def to_cv_win_size(window_size, axes=2, sigma=None): if window_size is None: if sigma is not None: sigma = to_cv_sigma(sigma, axes) return tuple([int(3 * s + 0.5) * 2 + 1 for s in sigma]) return (0,) * axes elif isinstance(window_size, int): return (int(window_size),) * axes elif (isinstance(window_size, np.ndarray) and len(window_size.shape) == 0): return (int(window_size),) * axes elif len(window_size) == 1: return (int(window_size[0]),) * axes return tuple(int(x) for x in reversed(window_size)) def gaussian_cv(image, sigma, window_size): sigma_x, sigma_y = to_cv_sigma(sigma) window_size_cv = to_cv_win_size(window_size) blurred = cv2.GaussianBlur(np.float32(image), window_size_cv, sigmaX=sigma_x, sigmaY=sigma_y) return np.uint8(blurred + 0.5) def gaussian_baseline(image, sigma, window_size, axes=2, skip_axes=0, dtype=np.uint8): sigma_xyz = 
to_cv_sigma(sigma, axes) win_xyz = to_cv_win_size(window_size, axes, sigma) filters = [cv2.getGaussianKernel(win_xyz[i], sigma_xyz[i]) for i in range(axes)] filters = [np.float32(f).squeeze() for f in filters] filters.reverse() for i in reversed(range(axes)): axis = i + skip_axes image = convolve1d(np.float32(image), filters[i], axis, mode="mirror") if dtype == np.float32: return image else: return dtype(image + 0.5) def get_gaussian_pipe(batch_size, sigma, window_size, op_type): pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0) with pipe: input, _ = fn.file_reader(file_root=images_dir, shard_id=0, num_shards=1) decoded = fn.image_decoder(input, device="cpu", output_type=types.RGB) if op_type == "gpu": decoded = decoded.gpu() blurred = fn.gaussian_blur(decoded, device=op_type, sigma=sigma, window_size=window_size) pipe.set_outputs(blurred, decoded) return pipe def check_gaussian_blur(batch_size, sigma, window_size, op_type="cpu"): pipe = get_gaussian_pipe(batch_size, sigma, window_size, op_type) pipe.build() for _ in range(test_iters): result, input = pipe.run() if op_type == "gpu": result = result.as_cpu() input = input.as_cpu() input = to_batch(input, batch_size) baseline_cv = [gaussian_cv(img, sigma, window_size) for img in input] check_batch(result, baseline_cv, batch_size, max_allowed_error=1, expected_layout="HWC") def test_image_gaussian_blur(): for dev in ["cpu", "gpu"]: for sigma in [1.0]: for window_size in [3, 5, None]: if sigma is None and window_size is None: continue yield check_gaussian_blur, 10, sigma, window_size, dev yield check_gaussian_blur, 10, None, 11, dev @attr('slow') def test_image_gaussian_blur_slow(): for dev in ["cpu", "gpu"]: for sigma in [1.0, [1.0, 2.0]]: for window_size in [3, 5, [7, 5], [5, 9], None]: if sigma is None and window_size is None: continue yield check_gaussian_blur, 10, sigma, window_size, dev for window_size in [15, [17, 31]]: yield check_gaussian_blur, 10, None, window_size, dev def 
check_gaussian_blur_cpu_gpu(batch_size, sigma, window_size): cpu_pipe = get_gaussian_pipe(batch_size, sigma, window_size, "cpu") gpu_pipe = get_gaussian_pipe(batch_size, sigma, window_size, "gpu") compare_pipelines(cpu_pipe, gpu_pipe, batch_size, 16, max_allowed_error=1) def test_gaussian_blur_cpu_gpu(): for window_size in [5, [7, 13]]: yield check_gaussian_blur_cpu_gpu, 10, None, window_size @attr('slow') def test_gaussian_blur_cpu_gpu_slow(): for sigma in [1.0, [1.0, 2.0], None]: for window_size in [3, 5, [7, 5], [5, 9], 11, 15, 31, None]: if sigma is None and window_size is None: continue yield check_gaussian_blur_cpu_gpu, 10, sigma, window_size def count_skip_axes(layout): if layout.startswith("FC") or layout.startswith("CF"): return 2 elif layout.startswith("F") or layout.startswith("C"): return 1 else: return 0 def check_generic_gaussian_blur( batch_size, sigma, window_size, shape, layout, axes, op_type="cpu", in_dtype=np.uint8, out_dtype=types.NO_TYPE): pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0) data = RandomlyShapedDataIterator(batch_size, max_shape=shape, dtype=in_dtype) if out_dtype == types.NO_TYPE: result_type = in_dtype elif dali_type(in_dtype) == out_dtype: result_type = in_dtype else: result_type = np.float32 with pipe: input = fn.external_source(data, layout=layout) if op_type == "gpu": input = input.gpu() blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma, window_size=window_size, dtype=out_dtype) pipe.set_outputs(blurred, input) pipe.build() for _ in range(test_iters): result, input = pipe.run() if op_type == "gpu": result = result.as_cpu() input = input.as_cpu() input = to_batch(input, batch_size) skip_axes = count_skip_axes(layout) baseline = [ gaussian_baseline(img, sigma, window_size, axes, skip_axes, dtype=result_type) for img in input] max_error = 1 if result_type != np.float32 else 1e-04 check_batch(result, baseline, batch_size, max_allowed_error=max_error, expected_layout=layout) def 
generate_generic_cases(dev, t_in, t_out): for shape, layout, axes in shape_layout_axes_cases: for sigma in [1.0, [1.0, 2.0, 3.0]]: for window_size in [3, 5, [7, 5, 9], [3, 5, 9], None]: if isinstance(sigma, list): sigma = sigma[0:axes] if isinstance(window_size, list): window_size = window_size[0:axes] yield check_generic_gaussian_blur, 10, sigma, window_size, shape, layout, axes, dev, t_in, t_out for window_size in [11, 15]: yield check_generic_gaussian_blur, 10, None, window_size, shape, layout, axes, dev, t_in, t_out def test_generic_gaussian_blur(): for dev in ["cpu", "gpu"]: for (t_in, t_out) in [(np.uint8, types.NO_TYPE), (np.float32, types.FLOAT), (np.uint8, types.FLOAT)]: yield from generate_generic_cases(dev, t_in, t_out) @attr('slow') def test_generic_gaussian_blur_slow(): for dev in ["cpu", "gpu"]: for t_in in [np.uint8, np.int32, np.float32]: for t_out in [types.NO_TYPE, types.FLOAT, dali_type(t_in)]: yield from generate_generic_cases(dev, t_in, t_out) def check_per_sample_gaussian_blur( batch_size, sigma_dim, window_size_dim, shape, layout, axes, op_type="cpu"): pipe = Pipeline(batch_size=batch_size, num_threads=4, device_id=0) data = RandomlyShapedDataIterator(batch_size, max_shape=shape) with pipe: if sigma_dim is not None: sigma = fn.random.uniform(range=[0.5, 3], shape=[sigma_dim]) sigma_arg = sigma else: sigma = fn.coin_flip(probability=0) sigma_arg = None if window_size_dim is not None: window_radius = fn.random.uniform(range=[5, 10], shape=[window_size_dim]) window_size = fn.cast(window_radius, dtype=types.INT32) * 2 + 1 window_arg = window_size else: window_size = fn.coin_flip(probability=0) window_arg = None input = fn.external_source(data, layout=layout) if op_type == "gpu": input = input.gpu() blurred = fn.gaussian_blur(input, device=op_type, sigma=sigma_arg, window_size=window_arg) pipe.set_outputs(blurred, input, sigma, window_size) pipe.build() for _ in range(test_iters): result, input, sigma, window_size = pipe.run() if op_type == "gpu": 
result = result.as_cpu() input = input.as_cpu() input = to_batch(input, batch_size) sigma = to_batch(sigma, batch_size) window_size = to_batch(window_size, batch_size) baseline = [] for i in range(batch_size): sigma_arg = sigma[i] if sigma is not None else None window_arg = window_size[i] if window_size_dim is not None else None skip_axes = count_skip_axes(layout) baseline.append(gaussian_baseline(input[i], sigma_arg, window_arg, axes, skip_axes)) check_batch(result, baseline, batch_size, max_allowed_error=1, expected_layout=layout) def test_per_sample_gaussian_blur(): for dev in ["cpu", "gpu"]: for shape, layout, axes in shape_layout_axes_cases: for sigma_dim in [None, 1, axes]: for window_size_dim in [None, 1, axes]: if sigma_dim is None and window_size_dim is None: continue yield check_per_sample_gaussian_blur, 10, sigma_dim, window_size_dim, shape, layout, axes, dev @raises(RuntimeError) def check_fail_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type, in_dtype=np.uint8, out_dtype=types.NO_TYPE): check_generic_gaussian_blur(batch_size, sigma, window_size, shape, layout, axes, op_type, in_dtype, out_dtype) def test_fail_gaussian_blur(): for dev in ["cpu", "gpu"]: for shape, layout, axes in [((20, 20, 30, 3), "DHCW", 3), ((5, 20, 30, 3), "HFWC", 2), ((5, 10, 10, 10, 7, 3), "FWXYZC", 4), ((5, 3, 20, 3, 30), "FCHCW", 2), ((5, 3, 20, 3, 30), "FCCHW", 2)]: yield check_fail_gaussian_blur, 10, 1.0, 11, shape, layout, axes, dev yield check_fail_gaussian_blur, 10, 0.0, 0, (100, 20, 3), "HWC", 3, dev yield check_fail_gaussian_blur, 10, -1.0, 0, (100, 20, 3), "HWC", 3, dev yield check_fail_gaussian_blur, 10, 0.0, -11, (100, 20, 3), "HWC", 3, dev yield check_fail_gaussian_blur, 10, 0.0, 2, (100, 20, 3), "HWC", 3, dev
true
true
f71369f35fdbde0279d19ddb91e01971bbafc6cb
73,791
py
Python
python/pyarrow/tests/test_convert_pandas.py
stephenpascoe/arrow
3efd08f0cbaa40d0d3a329b8613fb80ac022b985
[ "Apache-2.0" ]
null
null
null
python/pyarrow/tests/test_convert_pandas.py
stephenpascoe/arrow
3efd08f0cbaa40d0d3a329b8613fb80ac022b985
[ "Apache-2.0" ]
null
null
null
python/pyarrow/tests/test_convert_pandas.py
stephenpascoe/arrow
3efd08f0cbaa40d0d3a329b8613fb80ac022b985
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import decimal import json from collections import OrderedDict from datetime import date, datetime, time, timedelta import numpy as np import numpy.testing as npt import pandas as pd import pandas.util.testing as tm import pytest import pyarrow as pa import pyarrow.types as patypes from pyarrow.compat import PY2 from .pandas_examples import dataframe_with_arrays, dataframe_with_lists def _alltypes_example(size=100): return pd.DataFrame({ 'uint8': np.arange(size, dtype=np.uint8), 'uint16': np.arange(size, dtype=np.uint16), 'uint32': np.arange(size, dtype=np.uint32), 'uint64': np.arange(size, dtype=np.uint64), 'int8': np.arange(size, dtype=np.int16), 'int16': np.arange(size, dtype=np.int16), 'int32': np.arange(size, dtype=np.int32), 'int64': np.arange(size, dtype=np.int64), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, # TODO(wesm): Pandas only support ns resolution, Arrow supports s, ms, # us, ns 'datetime': np.arange("2016-01-01T00:00:00.001", size, dtype='datetime64[ms]'), 'str': [str(x) for x in range(size)], 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None], 'empty_str': [''] 
* size }) def _check_pandas_roundtrip(df, expected=None, use_threads=False, expected_schema=None, check_dtype=True, schema=None, preserve_index=False, as_batch=False): klass = pa.RecordBatch if as_batch else pa.Table table = klass.from_pandas(df, schema=schema, preserve_index=preserve_index, nthreads=2 if use_threads else 1) result = table.to_pandas(use_threads=use_threads) if expected_schema: # all occurences of _check_pandas_roundtrip passes expected_schema # without the pandas generated key-value metadata, so we need to # add it before checking schema equality expected_schema = expected_schema.add_metadata(table.schema.metadata) assert table.schema.equals(expected_schema) if expected is None: expected = df tm.assert_frame_equal(result, expected, check_dtype=check_dtype, check_index_type=('equiv' if preserve_index else False)) def _check_series_roundtrip(s, type_=None, expected_pa_type=None): arr = pa.array(s, from_pandas=True, type=type_) if type_ is not None and expected_pa_type is None: expected_pa_type = type_ if expected_pa_type is not None: assert arr.type == expected_pa_type result = pd.Series(arr.to_pandas(), name=s.name) if patypes.is_timestamp(arr.type) and arr.type.tz is not None: result = (result.dt.tz_localize('utc') .dt.tz_convert(arr.type.tz)) tm.assert_series_equal(s, result) def _check_array_roundtrip(values, expected=None, mask=None, type=None): arr = pa.array(values, from_pandas=True, mask=mask, type=type) result = arr.to_pandas() values_nulls = pd.isnull(values) if mask is None: assert arr.null_count == values_nulls.sum() else: assert arr.null_count == (mask | values_nulls).sum() if mask is None: tm.assert_series_equal(pd.Series(result), pd.Series(values), check_names=False) else: expected = pd.Series(np.ma.masked_array(values, mask=mask)) tm.assert_series_equal(pd.Series(result), expected, check_names=False) def _check_array_from_pandas_roundtrip(np_array): arr = pa.array(np_array, from_pandas=True) result = arr.to_pandas() 
npt.assert_array_equal(result, np_array) class TestConvertMetadata(object): """ Conversion tests for Pandas metadata & indices. """ def test_non_string_columns(self): df = pd.DataFrame({0: [1, 2, 3]}) table = pa.Table.from_pandas(df) assert table.column(0).name == '0' def test_from_pandas_with_columns(self): df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]}) table = pa.Table.from_pandas(df, columns=[0, 1]) expected = pa.Table.from_pandas(df[[0, 1]]) assert expected.equals(table) record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1]) record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]]) assert record_batch_expected.equals(record_batch_table) def test_column_index_names_are_preserved(self): df = pd.DataFrame({'data': [1, 2, 3]}) df.columns.names = ['a'] _check_pandas_roundtrip(df, preserve_index=True) def test_multiindex_columns(self): columns = pd.MultiIndex.from_arrays([ ['one', 'two'], ['X', 'Y'] ]) df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns) _check_pandas_roundtrip(df, preserve_index=True) def test_multiindex_columns_with_dtypes(self): columns = pd.MultiIndex.from_arrays( [ ['one', 'two'], pd.DatetimeIndex(['2017-08-01', '2017-08-02']), ], names=['level_1', 'level_2'], ) df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns) _check_pandas_roundtrip(df, preserve_index=True) def test_multiindex_columns_unicode(self): columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']]) df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns) _check_pandas_roundtrip(df, preserve_index=True) def test_integer_index_column(self): df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')]) _check_pandas_roundtrip(df, preserve_index=True) def test_index_metadata_field_name(self): # test None case, and strangely named non-index columns df = pd.DataFrame( [(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)], index=pd.MultiIndex.from_arrays( [['c', 'b', 'a'], [3, 2, 1]], names=[None, 'foo'] ), columns=['a', None, 
'__index_level_0__'], ) t = pa.Table.from_pandas(df, preserve_index=True) raw_metadata = t.schema.metadata js = json.loads(raw_metadata[b'pandas'].decode('utf8')) col1, col2, col3, idx0, foo = js['columns'] assert col1['name'] == 'a' assert col1['name'] == col1['field_name'] assert col2['name'] is None assert col2['field_name'] == 'None' assert col3['name'] == '__index_level_0__' assert col3['name'] == col3['field_name'] idx0_name, foo_name = js['index_columns'] assert idx0_name == '__index_level_0__' assert idx0['field_name'] == idx0_name assert idx0['name'] is None assert foo_name == 'foo' assert foo['field_name'] == foo_name assert foo['name'] == foo_name def test_categorical_column_index(self): df = pd.DataFrame( [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)], columns=pd.Index(list('def'), dtype='category') ) t = pa.Table.from_pandas(df, preserve_index=True) raw_metadata = t.schema.metadata js = json.loads(raw_metadata[b'pandas'].decode('utf8')) column_indexes, = js['column_indexes'] assert column_indexes['name'] is None assert column_indexes['pandas_type'] == 'categorical' assert column_indexes['numpy_type'] == 'int8' md = column_indexes['metadata'] assert md['num_categories'] == 3 assert md['ordered'] is False def test_string_column_index(self): df = pd.DataFrame( [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)], columns=pd.Index(list('def'), name='stringz') ) t = pa.Table.from_pandas(df, preserve_index=True) raw_metadata = t.schema.metadata js = json.loads(raw_metadata[b'pandas'].decode('utf8')) column_indexes, = js['column_indexes'] assert column_indexes['name'] == 'stringz' assert column_indexes['name'] == column_indexes['field_name'] assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode') assert column_indexes['numpy_type'] == 'object' md = column_indexes['metadata'] if not PY2: assert len(md) == 1 assert md['encoding'] == 'UTF-8' else: assert md is None or 'encoding' not in md def test_datetimetz_column_index(self): df = pd.DataFrame( [(1, 
'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)], columns=pd.date_range( start='2017-01-01', periods=3, tz='America/New_York' ) ) t = pa.Table.from_pandas(df, preserve_index=True) raw_metadata = t.schema.metadata js = json.loads(raw_metadata[b'pandas'].decode('utf8')) column_indexes, = js['column_indexes'] assert column_indexes['name'] is None assert column_indexes['pandas_type'] == 'datetimetz' assert column_indexes['numpy_type'] == 'datetime64[ns]' md = column_indexes['metadata'] assert md['timezone'] == 'America/New_York' def test_datetimetz_row_index(self): df = pd.DataFrame({ 'a': pd.date_range( start='2017-01-01', periods=3, tz='America/New_York' ) }) df = df.set_index('a') _check_pandas_roundtrip(df, preserve_index=True) def test_categorical_row_index(self): df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}) df['a'] = df.a.astype('category') df = df.set_index('a') _check_pandas_roundtrip(df, preserve_index=True) def test_duplicate_column_names_does_not_crash(self): df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa')) with pytest.raises(ValueError): pa.Table.from_pandas(df) def test_dictionary_indices_boundscheck(self): # ARROW-1658. No validation of indices leads to segfaults in pandas indices = [[0, 1], [0, -1]] for inds in indices: arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False) batch = pa.RecordBatch.from_arrays([arr], ['foo']) table = pa.Table.from_batches([batch, batch, batch]) with pytest.raises(pa.ArrowInvalid): arr.to_pandas() with pytest.raises(pa.ArrowInvalid): table.to_pandas() def test_unicode_with_unicode_column_and_index(self): df = pd.DataFrame({u'あ': [u'い']}, index=[u'う']) _check_pandas_roundtrip(df, preserve_index=True) def test_mixed_unicode_column_names(self): df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う']) # TODO(phillipc): Should this raise? 
with pytest.raises(AssertionError): _check_pandas_roundtrip(df, preserve_index=True) def test_binary_column_name(self): column_data = [u'い'] key = u'あ'.encode('utf8') data = {key: column_data} df = pd.DataFrame(data) # we can't use _check_pandas_roundtrip here because our metdata # is always decoded as utf8: even if binary goes in, utf8 comes out t = pa.Table.from_pandas(df, preserve_index=True) df2 = t.to_pandas() assert df.values[0] == df2.values[0] assert df.index.values[0] == df2.index.values[0] assert df.columns[0] == key def test_multiindex_duplicate_values(self): num_rows = 3 numbers = list(range(num_rows)) index = pd.MultiIndex.from_arrays( [['foo', 'foo', 'bar'], numbers], names=['foobar', 'some_numbers'], ) df = pd.DataFrame({'numbers': numbers}, index=index) table = pa.Table.from_pandas(df) result_df = table.to_pandas() tm.assert_frame_equal(result_df, df) def test_metadata_with_mixed_types(self): df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']}) table = pa.Table.from_pandas(df) metadata = table.schema.metadata assert b'mixed' not in metadata[b'pandas'] js = json.loads(metadata[b'pandas'].decode('utf8')) data_column = js['columns'][0] assert data_column['pandas_type'] == 'bytes' assert data_column['numpy_type'] == 'object' def test_list_metadata(self): df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]}) schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))]) table = pa.Table.from_pandas(df, schema=schema) metadata = table.schema.metadata assert b'mixed' not in metadata[b'pandas'] js = json.loads(metadata[b'pandas'].decode('utf8')) data_column = js['columns'][0] assert data_column['pandas_type'] == 'list[int64]' assert data_column['numpy_type'] == 'object' def test_decimal_metadata(self): expected = pd.DataFrame({ 'decimals': [ decimal.Decimal('394092382910493.12341234678'), -decimal.Decimal('314292388910493.12343437128'), ] }) table = pa.Table.from_pandas(expected) metadata = table.schema.metadata assert b'mixed' not in 
metadata[b'pandas'] js = json.loads(metadata[b'pandas'].decode('utf8')) data_column = js['columns'][0] assert data_column['pandas_type'] == 'decimal' assert data_column['numpy_type'] == 'object' assert data_column['metadata'] == {'precision': 26, 'scale': 11} def test_table_column_subset_metadata(self): # ARROW-1883 df = pd.DataFrame({ 'a': [1, 2, 3], 'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')}) table = pa.Table.from_pandas(df) table_subset = table.remove_column(1) result = table_subset.to_pandas() tm.assert_frame_equal(result, df[['a']]) table_subset2 = table_subset.remove_column(1) result = table_subset2.to_pandas() tm.assert_frame_equal(result, df[['a']]) # non-default index for index in [ pd.Index(['a', 'b', 'c'], name='index'), pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]: df = pd.DataFrame({'a': [1, 2, 3], 'b': [.1, .2, .3]}, index=index) table = pa.Table.from_pandas(df) table_subset = table.remove_column(1) result = table_subset.to_pandas() tm.assert_frame_equal(result, df[['a']]) table_subset2 = table_subset.remove_column(1) result = table_subset2.to_pandas() tm.assert_frame_equal(result, df[['a']].reset_index(drop=True)) def test_empty_list_metadata(self): # Create table with array of empty lists, forced to have type # list(string) in pyarrow c1 = [["test"], ["a", "b"], None] c2 = [[], [], []] arrays = OrderedDict([ ('c1', pa.array(c1, type=pa.list_(pa.string()))), ('c2', pa.array(c2, type=pa.list_(pa.string()))), ]) rb = pa.RecordBatch.from_arrays( list(arrays.values()), list(arrays.keys()) ) tbl = pa.Table.from_batches([rb]) # First roundtrip changes schema, because pandas cannot preserve the # type of empty lists df = tbl.to_pandas() tbl2 = pa.Table.from_pandas(df, preserve_index=True) md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8')) # Second roundtrip df2 = tbl2.to_pandas() expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)])) tm.assert_frame_equal(df2, expected) assert md2['columns'] == 
[ { 'name': 'c1', 'field_name': 'c1', 'metadata': None, 'numpy_type': 'object', 'pandas_type': 'list[unicode]', }, { 'name': 'c2', 'field_name': 'c2', 'metadata': None, 'numpy_type': 'object', 'pandas_type': 'list[empty]', }, { 'name': None, 'field_name': '__index_level_0__', 'metadata': None, 'numpy_type': 'int64', 'pandas_type': 'int64', } ] class TestConvertPrimitiveTypes(object): """ Conversion tests for primitive (e.g. numeric) types. """ def test_float_no_nulls(self): data = {} fields = [] dtypes = [('f2', pa.float16()), ('f4', pa.float32()), ('f8', pa.float64())] num_values = 100 for numpy_dtype, arrow_dtype in dtypes: values = np.random.randn(num_values) data[numpy_dtype] = values.astype(numpy_dtype) fields.append(pa.field(numpy_dtype, arrow_dtype)) df = pd.DataFrame(data) schema = pa.schema(fields) _check_pandas_roundtrip(df, expected_schema=schema) def test_float_nulls(self): num_values = 100 null_mask = np.random.randint(0, 10, size=num_values) < 3 dtypes = [('f2', pa.float16()), ('f4', pa.float32()), ('f8', pa.float64())] names = ['f2', 'f4', 'f8'] expected_cols = [] arrays = [] fields = [] for name, arrow_dtype in dtypes: values = np.random.randn(num_values).astype(name) arr = pa.array(values, from_pandas=True, mask=null_mask) arrays.append(arr) fields.append(pa.field(name, arrow_dtype)) values[null_mask] = np.nan expected_cols.append(values) ex_frame = pd.DataFrame(dict(zip(names, expected_cols)), columns=names) table = pa.Table.from_arrays(arrays, names) assert table.schema.equals(pa.schema(fields)) result = table.to_pandas() tm.assert_frame_equal(result, ex_frame) def test_float_nulls_to_ints(self): # ARROW-2135 df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]}) schema = pa.schema([pa.field("a", pa.int16(), nullable=True)]) table = pa.Table.from_pandas(df, schema=schema) assert table[0].to_pylist() == [1, 2, None] tm.assert_frame_equal(df, table.to_pandas()) def test_integer_no_nulls(self): data = OrderedDict() fields = [] numpy_dtypes = [ ('i1', 
pa.int8()), ('i2', pa.int16()), ('i4', pa.int32()), ('i8', pa.int64()), ('u1', pa.uint8()), ('u2', pa.uint16()), ('u4', pa.uint32()), ('u8', pa.uint64()), ('longlong', pa.int64()), ('ulonglong', pa.uint64()) ] num_values = 100 for dtype, arrow_dtype in numpy_dtypes: info = np.iinfo(dtype) values = np.random.randint(max(info.min, np.iinfo(np.int_).min), min(info.max, np.iinfo(np.int_).max), size=num_values) data[dtype] = values.astype(dtype) fields.append(pa.field(dtype, arrow_dtype)) df = pd.DataFrame(data) schema = pa.schema(fields) _check_pandas_roundtrip(df, expected_schema=schema) def test_all_integer_types(self): # Test all Numpy integer aliases data = OrderedDict() numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc', 'int_', 'uint', 'longlong', 'ulonglong'] for dtype in numpy_dtypes: data[dtype] = np.arange(12, dtype=dtype) df = pd.DataFrame(data) _check_pandas_roundtrip(df) def test_integer_with_nulls(self): # pandas requires upcast to float dtype int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'] num_values = 100 null_mask = np.random.randint(0, 10, size=num_values) < 3 expected_cols = [] arrays = [] for name in int_dtypes: values = np.random.randint(0, 100, size=num_values) arr = pa.array(values, mask=null_mask) arrays.append(arr) expected = values.astype('f8') expected[null_mask] = np.nan expected_cols.append(expected) ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)), columns=int_dtypes) table = pa.Table.from_arrays(arrays, int_dtypes) result = table.to_pandas() tm.assert_frame_equal(result, ex_frame) def test_array_from_pandas_type_cast(self): arr = np.arange(10, dtype='int64') target_type = pa.int8() result = pa.array(arr, type=target_type) expected = pa.array(arr.astype('int8')) assert result.equals(expected) def test_boolean_no_nulls(self): num_values = 100 np.random.seed(0) df = pd.DataFrame({'bools': np.random.randn(num_values) > 0}) field = pa.field('bools', 
pa.bool_()) schema = pa.schema([field]) _check_pandas_roundtrip(df, expected_schema=schema) def test_boolean_nulls(self): # pandas requires upcast to object dtype num_values = 100 np.random.seed(0) mask = np.random.randint(0, 10, size=num_values) < 3 values = np.random.randint(0, 10, size=num_values) < 5 arr = pa.array(values, mask=mask) expected = values.astype(object) expected[mask] = None field = pa.field('bools', pa.bool_()) schema = pa.schema([field]) ex_frame = pd.DataFrame({'bools': expected}) table = pa.Table.from_arrays([arr], ['bools']) assert table.schema.equals(schema) result = table.to_pandas() tm.assert_frame_equal(result, ex_frame) def test_float_object_nulls(self): arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object) df = pd.DataFrame({'floats': arr}) expected = pd.DataFrame({'floats': pd.to_numeric(arr)}) field = pa.field('floats', pa.float64()) schema = pa.schema([field]) _check_pandas_roundtrip(df, expected=expected, expected_schema=schema) def test_int_object_nulls(self): arr = np.array([None, 1, np.int64(3)] * 5, dtype=object) df = pd.DataFrame({'ints': arr}) expected = pd.DataFrame({'ints': pd.to_numeric(arr)}) field = pa.field('ints', pa.int64()) schema = pa.schema([field]) _check_pandas_roundtrip(df, expected=expected, expected_schema=schema) def test_boolean_object_nulls(self): arr = np.array([False, None, True] * 100, dtype=object) df = pd.DataFrame({'bools': arr}) field = pa.field('bools', pa.bool_()) schema = pa.schema([field]) _check_pandas_roundtrip(df, expected_schema=schema) def test_all_nulls_cast_numeric(self): arr = np.array([None], dtype=object) def _check_type(t): a2 = pa.array(arr, type=t) assert a2.type == t assert a2[0].as_py() is None _check_type(pa.int32()) _check_type(pa.float64()) def test_half_floats_from_numpy(self): arr = np.array([1.5, np.nan], dtype=np.float16) a = pa.array(arr, type=pa.float16()) x, y = a.to_pylist() assert isinstance(x, np.float16) assert x == 1.5 assert isinstance(y, np.float16) assert 
np.isnan(y) a = pa.array(arr, type=pa.float16(), from_pandas=True) x, y = a.to_pylist() assert isinstance(x, np.float16) assert x == 1.5 assert y is None @pytest.mark.parametrize('dtype', ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']) def test_array_integer_object_nulls_option(dtype): num_values = 100 null_mask = np.random.randint(0, 10, size=num_values) < 3 values = np.random.randint(0, 100, size=num_values, dtype=dtype) array = pa.array(values, mask=null_mask) if null_mask.any(): expected = values.astype('O') expected[null_mask] = None else: expected = values result = array.to_pandas(integer_object_nulls=True) np.testing.assert_equal(result, expected) @pytest.mark.parametrize('dtype', ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']) def test_table_integer_object_nulls_option(dtype): num_values = 100 null_mask = np.random.randint(0, 10, size=num_values) < 3 values = np.random.randint(0, 100, size=num_values, dtype=dtype) array = pa.array(values, mask=null_mask) if null_mask.any(): expected = values.astype('O') expected[null_mask] = None else: expected = values expected = pd.DataFrame({dtype: expected}) table = pa.Table.from_arrays([array], [dtype]) result = table.to_pandas(integer_object_nulls=True) tm.assert_frame_equal(result, expected) class TestConvertDateTimeLikeTypes(object): """ Conversion tests for datetime- and timestamp-like types (date64, etc.). 
""" def test_timestamps_notimezone_no_nulls(self): df = pd.DataFrame({ 'datetime64': np.array([ '2007-07-13T01:23:34.123456789', '2006-01-13T12:34:56.432539784', '2010-08-13T05:46:57.437699912'], dtype='datetime64[ns]') }) field = pa.field('datetime64', pa.timestamp('ns')) schema = pa.schema([field]) _check_pandas_roundtrip( df, expected_schema=schema, ) def test_timestamps_notimezone_nulls(self): df = pd.DataFrame({ 'datetime64': np.array([ '2007-07-13T01:23:34.123456789', None, '2010-08-13T05:46:57.437699912'], dtype='datetime64[ns]') }) field = pa.field('datetime64', pa.timestamp('ns')) schema = pa.schema([field]) _check_pandas_roundtrip( df, expected_schema=schema, ) def test_timestamps_with_timezone(self): df = pd.DataFrame({ 'datetime64': np.array([ '2007-07-13T01:23:34.123', '2006-01-13T12:34:56.432', '2010-08-13T05:46:57.437'], dtype='datetime64[ms]') }) df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern') .to_frame()) _check_pandas_roundtrip(df) _check_series_roundtrip(df['datetime64']) # drop-in a null and ns instead of ms df = pd.DataFrame({ 'datetime64': np.array([ '2007-07-13T01:23:34.123456789', None, '2006-01-13T12:34:56.432539784', '2010-08-13T05:46:57.437699912'], dtype='datetime64[ns]') }) df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern') .to_frame()) _check_pandas_roundtrip(df) def test_python_datetime(self): # ARROW-2106 date_array = [datetime.today() + timedelta(days=x) for x in range(10)] df = pd.DataFrame({ 'datetime': pd.Series(date_array, dtype=object) }) table = pa.Table.from_pandas(df) assert isinstance(table[0].data.chunk(0), pa.TimestampArray) result = table.to_pandas() expected_df = pd.DataFrame({ 'datetime': date_array }) tm.assert_frame_equal(expected_df, result) def test_python_datetime_subclass(self): class MyDatetime(datetime): # see https://github.com/pandas-dev/pandas/issues/21142 nanosecond = 0.0 date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)] df = pd.DataFrame({"datetime": pd.Series(date_array, 
dtype=object)}) table = pa.Table.from_pandas(df) assert isinstance(table[0].data.chunk(0), pa.TimestampArray) result = table.to_pandas() expected_df = pd.DataFrame({"datetime": date_array}) # https://github.com/pandas-dev/pandas/issues/21142 expected_df["datetime"] = pd.to_datetime(expected_df["datetime"]) tm.assert_frame_equal(expected_df, result) def test_python_date_subclass(self): class MyDate(date): pass date_array = [MyDate(2000, 1, 1)] df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)}) table = pa.Table.from_pandas(df) assert isinstance(table[0].data.chunk(0), pa.Date32Array) result = table.to_pandas() expected_df = pd.DataFrame( {"date": np.array(["2000-01-01"], dtype="datetime64[ns]")} ) tm.assert_frame_equal(expected_df, result) def test_datetime64_to_date32(self): # ARROW-1718 arr = pa.array([date(2017, 10, 23), None]) c = pa.Column.from_array("d", arr) s = c.to_pandas() arr2 = pa.Array.from_pandas(s, type=pa.date32()) assert arr2.equals(arr.cast('date32')) @pytest.mark.parametrize('mask', [ None, np.ones(3), np.array([True, False, False]), ]) def test_pandas_datetime_to_date64(self, mask): s = pd.to_datetime([ '2018-05-10T00:00:00', '2018-05-11T00:00:00', '2018-05-12T00:00:00', ]) arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask) data = np.array([ date(2018, 5, 10), date(2018, 5, 11), date(2018, 5, 12) ]) expected = pa.array(data, mask=mask, type=pa.date64()) assert arr.equals(expected) @pytest.mark.parametrize('mask', [ None, np.ones(3), np.array([True, False, False]) ]) def test_pandas_datetime_to_date64_failures(self, mask): s = pd.to_datetime([ '2018-05-10T10:24:01', '2018-05-11T10:24:01', '2018-05-12T10:24:01', ]) expected_msg = 'Timestamp value had non-zero intraday milliseconds' with pytest.raises(pa.ArrowInvalid, match=expected_msg): pa.Array.from_pandas(s, type=pa.date64(), mask=mask) def test_date_infer(self): df = pd.DataFrame({ 'date': [date(2000, 1, 1), None, date(1970, 1, 1), date(2040, 2, 26)]}) table = 
pa.Table.from_pandas(df, preserve_index=False) field = pa.field('date', pa.date32()) # schema's metadata is generated by from_pandas conversion expected_schema = pa.schema([field], metadata=table.schema.metadata) assert table.schema.equals(expected_schema) result = table.to_pandas() expected = df.copy() expected['date'] = pd.to_datetime(df['date']) tm.assert_frame_equal(result, expected) def test_date_mask(self): arr = np.array([date(2017, 4, 3), date(2017, 4, 4)], dtype='datetime64[D]') mask = [True, False] result = pa.array(arr, mask=np.array(mask)) expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]') expected = pa.array(expected, from_pandas=True) assert expected.equals(result) def test_date_objects_typed(self): arr = np.array([ date(2017, 4, 3), None, date(2017, 4, 4), date(2017, 4, 5)], dtype=object) arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32') arr_i8 = arr_i4.astype('int64') * 86400000 mask = np.array([False, True, False, False]) t32 = pa.date32() t64 = pa.date64() a32 = pa.array(arr, type=t32) a64 = pa.array(arr, type=t64) a32_expected = pa.array(arr_i4, mask=mask, type=t32) a64_expected = pa.array(arr_i8, mask=mask, type=t64) assert a32.equals(a32_expected) assert a64.equals(a64_expected) # Test converting back to pandas colnames = ['date32', 'date64'] table = pa.Table.from_arrays([a32, a64], colnames) table_pandas = table.to_pandas() ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04', '2017-04-05'], dtype='datetime64[D]') .astype('datetime64[ns]')) ex_values[1] = pd.NaT.value expected_pandas = pd.DataFrame({'date32': ex_values, 'date64': ex_values}, columns=colnames) tm.assert_frame_equal(table_pandas, expected_pandas) def test_dates_from_integers(self): t1 = pa.date32() t2 = pa.date64() arr = np.array([17259, 17260, 17261], dtype='int32') arr2 = arr.astype('int64') * 86400000 a1 = pa.array(arr, type=t1) a2 = pa.array(arr2, type=t2) expected = date(2017, 4, 3) assert a1[0].as_py() == expected assert a2[0].as_py() 
== expected @pytest.mark.xfail(reason="not supported ATM", raises=NotImplementedError) def test_timedelta(self): # TODO(jreback): Pandas only support ns resolution # Arrow supports ??? for resolution df = pd.DataFrame({ 'timedelta': np.arange(start=0, stop=3 * 86400000, step=86400000, dtype='timedelta64[ms]') }) pa.Table.from_pandas(df) def test_pytime_from_pandas(self): pytimes = [time(1, 2, 3, 1356), time(4, 5, 6, 1356)] # microseconds t1 = pa.time64('us') aobjs = np.array(pytimes + [None], dtype=object) parr = pa.array(aobjs) assert parr.type == t1 assert parr[0].as_py() == pytimes[0] assert parr[1].as_py() == pytimes[1] assert parr[2] is pa.NA # DataFrame df = pd.DataFrame({'times': aobjs}) batch = pa.RecordBatch.from_pandas(df) assert batch[0].equals(parr) # Test ndarray of int64 values arr = np.array([_pytime_to_micros(v) for v in pytimes], dtype='int64') a1 = pa.array(arr, type=pa.time64('us')) assert a1[0].as_py() == pytimes[0] a2 = pa.array(arr * 1000, type=pa.time64('ns')) assert a2[0].as_py() == pytimes[0] a3 = pa.array((arr / 1000).astype('i4'), type=pa.time32('ms')) assert a3[0].as_py() == pytimes[0].replace(microsecond=1000) a4 = pa.array((arr / 1000000).astype('i4'), type=pa.time32('s')) assert a4[0].as_py() == pytimes[0].replace(microsecond=0) def test_arrow_time_to_pandas(self): pytimes = [time(1, 2, 3, 1356), time(4, 5, 6, 1356), time(0, 0, 0)] expected = np.array(pytimes[:2] + [None]) expected_ms = np.array([x.replace(microsecond=1000) for x in pytimes[:2]] + [None]) expected_s = np.array([x.replace(microsecond=0) for x in pytimes[:2]] + [None]) arr = np.array([_pytime_to_micros(v) for v in pytimes], dtype='int64') arr = np.array([_pytime_to_micros(v) for v in pytimes], dtype='int64') null_mask = np.array([False, False, True], dtype=bool) a1 = pa.array(arr, mask=null_mask, type=pa.time64('us')) a2 = pa.array(arr * 1000, mask=null_mask, type=pa.time64('ns')) a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask, type=pa.time32('ms')) a4 = 
pa.array((arr / 1000000).astype('i4'), mask=null_mask, type=pa.time32('s')) names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]'] batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names) arr = a1.to_pandas() assert (arr == expected).all() arr = a2.to_pandas() assert (arr == expected).all() arr = a3.to_pandas() assert (arr == expected_ms).all() arr = a4.to_pandas() assert (arr == expected_s).all() df = batch.to_pandas() expected_df = pd.DataFrame({'time64[us]': expected, 'time64[ns]': expected, 'time32[ms]': expected_ms, 'time32[s]': expected_s}, columns=names) tm.assert_frame_equal(df, expected_df) def test_numpy_datetime64_columns(self): datetime64_ns = np.array([ '2007-07-13T01:23:34.123456789', None, '2006-01-13T12:34:56.432539784', '2010-08-13T05:46:57.437699912'], dtype='datetime64[ns]') _check_array_from_pandas_roundtrip(datetime64_ns) datetime64_us = np.array([ '2007-07-13T01:23:34.123456', None, '2006-01-13T12:34:56.432539', '2010-08-13T05:46:57.437699'], dtype='datetime64[us]') _check_array_from_pandas_roundtrip(datetime64_us) datetime64_ms = np.array([ '2007-07-13T01:23:34.123', None, '2006-01-13T12:34:56.432', '2010-08-13T05:46:57.437'], dtype='datetime64[ms]') _check_array_from_pandas_roundtrip(datetime64_ms) datetime64_s = np.array([ '2007-07-13T01:23:34', None, '2006-01-13T12:34:56', '2010-08-13T05:46:57'], dtype='datetime64[s]') _check_array_from_pandas_roundtrip(datetime64_s) def test_numpy_datetime64_day_unit(self): datetime64_d = np.array([ '2007-07-13', None, '2006-01-15', '2010-08-19'], dtype='datetime64[D]') _check_array_from_pandas_roundtrip(datetime64_d) def test_array_from_pandas_date_with_mask(self): m = np.array([True, False, True]) data = pd.Series([ date(1990, 1, 1), date(1991, 1, 1), date(1992, 1, 1) ]) result = pa.Array.from_pandas(data, mask=m) expected = pd.Series([None, date(1991, 1, 1), None]) assert pa.Array.from_pandas(expected).equals(result) def test_fixed_offset_timezone(self): df = pd.DataFrame({ 'a': [ 
pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT ] }) _check_pandas_roundtrip(df) _check_serialize_components_roundtrip(df) class TestConvertStringLikeTypes(object): """ Conversion tests for string and binary types. """ def test_unicode(self): repeats = 1000 values = [u'foo', None, u'bar', u'mañana', np.nan] df = pd.DataFrame({'strings': values * repeats}) field = pa.field('strings', pa.string()) schema = pa.schema([field]) _check_pandas_roundtrip(df, expected_schema=schema) def test_bytes_to_binary(self): values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan] df = pd.DataFrame({'strings': values}) table = pa.Table.from_pandas(df) assert table[0].type == pa.binary() values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan] expected = pd.DataFrame({'strings': values2}) _check_pandas_roundtrip(df, expected) @pytest.mark.large_memory def test_bytes_exceed_2gb(self): v1 = b'x' * 100000000 v2 = b'x' * 147483646 # ARROW-2227, hit exactly 2GB on the nose df = pd.DataFrame({ 'strings': [v1] * 20 + [v2] + ['x'] * 20 }) arr = pa.array(df['strings']) assert isinstance(arr, pa.ChunkedArray) assert arr.num_chunks == 2 arr = None table = pa.Table.from_pandas(df) assert table[0].data.num_chunks == 2 def test_fixed_size_bytes(self): values = [b'foo', None, bytearray(b'bar'), None, None, b'hey'] df = pd.DataFrame({'strings': values}) schema = pa.schema([pa.field('strings', pa.binary(3))]) table = pa.Table.from_pandas(df, schema=schema) assert table.schema[0].type == schema[0].type assert table.schema[0].name == schema[0].name result = table.to_pandas() tm.assert_frame_equal(result, df) def test_fixed_size_bytes_does_not_accept_varying_lengths(self): values = [b'foo', None, b'ba', None, None, b'hey'] df = pd.DataFrame({'strings': values}) schema = pa.schema([pa.field('strings', pa.binary(3))]) with pytest.raises(pa.ArrowInvalid): pa.Table.from_pandas(df, schema=schema) def test_variable_size_bytes(self): s = pd.Series([b'123', b'', b'a', None]) 
_check_series_roundtrip(s, type_=pa.binary()) def test_binary_from_bytearray(self): s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'), None]) # Explicitly set type _check_series_roundtrip(s, type_=pa.binary()) # Infer type from bytearrays _check_series_roundtrip(s, expected_pa_type=pa.binary()) def test_table_empty_str(self): values = ['', '', '', '', ''] df = pd.DataFrame({'strings': values}) field = pa.field('strings', pa.string()) schema = pa.schema([field]) table = pa.Table.from_pandas(df, schema=schema) result1 = table.to_pandas(strings_to_categorical=False) expected1 = pd.DataFrame({'strings': values}) tm.assert_frame_equal(result1, expected1, check_dtype=True) result2 = table.to_pandas(strings_to_categorical=True) expected2 = pd.DataFrame({'strings': pd.Categorical(values)}) tm.assert_frame_equal(result2, expected2, check_dtype=True) def test_selective_categoricals(self): values = ['', '', '', '', ''] df = pd.DataFrame({'strings': values}) field = pa.field('strings', pa.string()) schema = pa.schema([field]) table = pa.Table.from_pandas(df, schema=schema) expected_str = pd.DataFrame({'strings': values}) expected_cat = pd.DataFrame({'strings': pd.Categorical(values)}) result1 = table.to_pandas(categories=['strings']) tm.assert_frame_equal(result1, expected_cat, check_dtype=True) result2 = table.to_pandas(categories=[]) tm.assert_frame_equal(result2, expected_str, check_dtype=True) result3 = table.to_pandas(categories=('strings',)) tm.assert_frame_equal(result3, expected_cat, check_dtype=True) result4 = table.to_pandas(categories=tuple()) tm.assert_frame_equal(result4, expected_str, check_dtype=True) def test_table_str_to_categorical_without_na(self): values = ['a', 'a', 'b', 'b', 'c'] df = pd.DataFrame({'strings': values}) field = pa.field('strings', pa.string()) schema = pa.schema([field]) table = pa.Table.from_pandas(df, schema=schema) result = table.to_pandas(strings_to_categorical=True) expected = pd.DataFrame({'strings': 
pd.Categorical(values)}) tm.assert_frame_equal(result, expected, check_dtype=True) with pytest.raises(pa.ArrowInvalid): table.to_pandas(strings_to_categorical=True, zero_copy_only=True) def test_table_str_to_categorical_with_na(self): values = [None, 'a', 'b', np.nan] df = pd.DataFrame({'strings': values}) field = pa.field('strings', pa.string()) schema = pa.schema([field]) table = pa.Table.from_pandas(df, schema=schema) result = table.to_pandas(strings_to_categorical=True) expected = pd.DataFrame({'strings': pd.Categorical(values)}) tm.assert_frame_equal(result, expected, check_dtype=True) with pytest.raises(pa.ArrowInvalid): table.to_pandas(strings_to_categorical=True, zero_copy_only=True) # Regression test for ARROW-2101 def test_array_of_bytes_to_strings(self): converted = pa.array(np.array([b'x'], dtype=object), pa.string()) assert converted.type == pa.string() # Make sure that if an ndarray of bytes is passed to the array # constructor and the type is string, it will fail if those bytes # cannot be converted to utf-8 def test_array_of_bytes_to_strings_bad_data(self): with pytest.raises( pa.lib.ArrowInvalid, match=("'(utf8|utf-8)' codec can't decode byte 0x80 " "in position 0: invalid start byte")): pa.array(np.array([b'\x80\x81'], dtype=object), pa.string()) def test_numpy_string_array_to_fixed_size_binary(self): arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3') converted = pa.array(arr, type=pa.binary(3)) expected = pa.array(list(arr), type=pa.binary(3)) assert converted.equals(expected) mask = np.array([True, False, True]) converted = pa.array(arr, type=pa.binary(3), mask=mask) expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3)) assert converted.equals(expected) with pytest.raises(pa.lib.ArrowInvalid, match='Got bytestring of length 3 \(expected 4\)'): arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3') pa.array(arr, type=pa.binary(4)) with pytest.raises(pa.lib.ArrowInvalid, match='Got bytestring of length 12 \(expected 3\)'): arr = 
np.array([b'foo', b'bar', b'baz'], dtype='|U3') pa.array(arr, type=pa.binary(3)) class TestConvertDecimalTypes(object): """ Conversion test for decimal types. """ decimal32 = [ decimal.Decimal('-1234.123'), decimal.Decimal('1234.439') ] decimal64 = [ decimal.Decimal('-129934.123331'), decimal.Decimal('129534.123731') ] decimal128 = [ decimal.Decimal('394092382910493.12341234678'), decimal.Decimal('-314292388910493.12343437128') ] @pytest.mark.parametrize(('values', 'expected_type'), [ pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'), pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'), pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128') ]) def test_decimal_from_pandas(self, values, expected_type): expected = pd.DataFrame({'decimals': values}) table = pa.Table.from_pandas(expected, preserve_index=False) field = pa.field('decimals', expected_type) # schema's metadata is generated by from_pandas conversion expected_schema = pa.schema([field], metadata=table.schema.metadata) assert table.schema.equals(expected_schema) @pytest.mark.parametrize('values', [ pytest.param(decimal32, id='decimal32'), pytest.param(decimal64, id='decimal64'), pytest.param(decimal128, id='decimal128') ]) def test_decimal_to_pandas(self, values): expected = pd.DataFrame({'decimals': values}) converted = pa.Table.from_pandas(expected) df = converted.to_pandas() tm.assert_frame_equal(df, expected) def test_decimal_fails_with_truncation(self): data1 = [decimal.Decimal('1.234')] type1 = pa.decimal128(10, 2) with pytest.raises(pa.ArrowInvalid): pa.array(data1, type=type1) data2 = [decimal.Decimal('1.2345')] type2 = pa.decimal128(10, 3) with pytest.raises(pa.ArrowInvalid): pa.array(data2, type=type2) def test_decimal_with_different_precisions(self): data = [ decimal.Decimal('0.01'), decimal.Decimal('0.001'), ] series = pd.Series(data) array = pa.array(series) assert array.to_pylist() == data assert array.type == pa.decimal128(3, 3) array = pa.array(data, 
type=pa.decimal128(12, 5)) expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')] assert array.to_pylist() == expected def test_decimal_with_None_explicit_type(self): series = pd.Series([decimal.Decimal('3.14'), None]) _check_series_roundtrip(series, type_=pa.decimal128(12, 5)) # Test that having all None values still produces decimal array series = pd.Series([None] * 2) _check_series_roundtrip(series, type_=pa.decimal128(12, 5)) def test_decimal_with_None_infer_type(self): series = pd.Series([decimal.Decimal('3.14'), None]) _check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2)) class TestListTypes(object): """ Conversion tests for list<> types. """ def test_column_of_arrays(self): df, schema = dataframe_with_arrays() _check_pandas_roundtrip(df, schema=schema, expected_schema=schema) table = pa.Table.from_pandas(df, schema=schema, preserve_index=False) # schema's metadata is generated by from_pandas conversion expected_schema = schema.add_metadata(table.schema.metadata) assert table.schema.equals(expected_schema) for column in df.columns: field = schema.field_by_name(column) _check_array_roundtrip(df[column], type=field.type) def test_column_of_arrays_to_py(self): # Test regression in ARROW-1199 not caught in above test dtype = 'i1' arr = np.array([ np.arange(10, dtype=dtype), np.arange(5, dtype=dtype), None, np.arange(1, dtype=dtype) ]) type_ = pa.list_(pa.int8()) parr = pa.array(arr, type=type_) assert parr[0].as_py() == list(range(10)) assert parr[1].as_py() == list(range(5)) assert parr[2].as_py() is None assert parr[3].as_py() == [0] def test_column_of_lists(self): df, schema = dataframe_with_lists() _check_pandas_roundtrip(df, schema=schema, expected_schema=schema) table = pa.Table.from_pandas(df, schema=schema, preserve_index=False) # schema's metadata is generated by from_pandas conversion expected_schema = schema.add_metadata(table.schema.metadata) assert table.schema.equals(expected_schema) for column in df.columns: field = 
schema.field_by_name(column) _check_array_roundtrip(df[column], type=field.type) def test_column_of_lists_first_empty(self): # ARROW-2124 num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]] series = pd.Series([np.array(s, dtype=float) for s in num_lists]) arr = pa.array(series) result = pd.Series(arr.to_pandas()) tm.assert_series_equal(result, series) def test_column_of_lists_chunked(self): # ARROW-1357 df = pd.DataFrame({ 'lists': np.array([ [1, 2], None, [2, 3], [4, 5], [6, 7], [8, 9] ], dtype=object) }) schema = pa.schema([ pa.field('lists', pa.list_(pa.int64())) ]) t1 = pa.Table.from_pandas(df[:2], schema=schema) t2 = pa.Table.from_pandas(df[2:], schema=schema) table = pa.concat_tables([t1, t2]) result = table.to_pandas() tm.assert_frame_equal(result, df) def test_column_of_lists_chunked2(self): data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11], [12, 13], [14, 15], [16, 17]] data2 = [[8, 9], [18, 19]] a1 = pa.array(data1) a2 = pa.array(data2) t1 = pa.Table.from_arrays([a1], names=['a']) t2 = pa.Table.from_arrays([a2], names=['a']) concatenated = pa.concat_tables([t1, t2]) result = concatenated.to_pandas() expected = pd.DataFrame({'a': data1 + data2}) tm.assert_frame_equal(result, expected) def test_column_of_lists_strided(self): df, schema = dataframe_with_lists() df = pd.concat([df] * 6, ignore_index=True) arr = df['int64'].values[::3] assert arr.strides[0] != 8 _check_array_roundtrip(arr) def test_nested_lists_all_none(self): data = np.array([[None, None], None], dtype=object) arr = pa.array(data) expected = pa.array(list(data)) assert arr.equals(expected) assert arr.type == pa.list_(pa.null()) data2 = np.array([None, None, [None, None], np.array([None, None], dtype=object)], dtype=object) arr = pa.array(data2) expected = pa.array([None, None, [None, None], [None, None]]) assert arr.equals(expected) def test_nested_lists_all_empty(self): # ARROW-2128 data = pd.Series([[], [], []]) arr = pa.array(data) expected = pa.array(list(data)) assert arr.equals(expected) 
assert arr.type == pa.list_(pa.null()) def test_nested_smaller_ints(self): # ARROW-1345, ARROW-2008, there were some type inference bugs happening # before data = pd.Series([np.array([1, 2, 3], dtype='i1'), None]) result = pa.array(data) result2 = pa.array(data.values) expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8())) assert result.equals(expected) assert result2.equals(expected) data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None]) result3 = pa.array(data3) expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32())) assert result3.equals(expected3) def test_infer_lists(self): data = OrderedDict([ ('nan_ints', [[None, 1], [2, 3]]), ('ints', [[0, 1], [2, 3]]), ('strs', [[None, u'b'], [u'c', u'd']]), ('nested_strs', [[[None, u'b'], [u'c', u'd']], None]) ]) df = pd.DataFrame(data) expected_schema = pa.schema([ pa.field('nan_ints', pa.list_(pa.int64())), pa.field('ints', pa.list_(pa.int64())), pa.field('strs', pa.list_(pa.string())), pa.field('nested_strs', pa.list_(pa.list_(pa.string()))) ]) _check_pandas_roundtrip(df, expected_schema=expected_schema) def test_infer_numpy_array(self): data = OrderedDict([ ('ints', [ np.array([0, 1], dtype=np.int64), np.array([2, 3], dtype=np.int64) ]) ]) df = pd.DataFrame(data) expected_schema = pa.schema([ pa.field('ints', pa.list_(pa.int64())) ]) _check_pandas_roundtrip(df, expected_schema=expected_schema) @pytest.mark.parametrize('t,data,expected', [ ( pa.int64, [[1, 2], [3], None], [None, [3], None] ), ( pa.string, [[u'aaa', u'bb'], [u'c'], None], [None, [u'c'], None] ), ( pa.null, [[None, None], [None], None], [None, [None], None] ) ]) def test_array_from_pandas_typed_array_with_mask(self, t, data, expected): m = np.array([True, False, True]) s = pd.Series(data) result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t())) assert pa.Array.from_pandas(expected, type=pa.list_(t())).equals(result) def test_empty_list_roundtrip(self): empty_list_array = np.empty((3,), dtype=object) 
empty_list_array.fill([]) df = pd.DataFrame({'a': np.array(['1', '2', '3']), 'b': empty_list_array}) tbl = pa.Table.from_pandas(df) result = tbl.to_pandas() tm.assert_frame_equal(result, df) def test_array_from_nested_arrays(self): df, schema = dataframe_with_arrays() for field in schema: arr = df[field.name].values expected = pa.array(list(arr), type=field.type) result = pa.array(arr) assert result.type == field.type # == list<scalar> assert result.equals(expected) class TestConvertStructTypes(object): """ Conversion tests for struct types. """ def test_to_pandas(self): ints = pa.array([None, 2, 3], type=pa.int64()) strs = pa.array([u'a', None, u'c'], type=pa.string()) bools = pa.array([True, False, None], type=pa.bool_()) arr = pa.StructArray.from_arrays( [ints, strs, bools], ['ints', 'strs', 'bools']) expected = pd.Series([ {'ints': None, 'strs': u'a', 'bools': True}, {'ints': 2, 'strs': None, 'bools': False}, {'ints': 3, 'strs': u'c', 'bools': None}, ]) series = pd.Series(arr.to_pandas()) tm.assert_series_equal(series, expected) def test_from_numpy(self): dt = np.dtype([('x', np.int32), (('y_title', 'y'), np.bool_)]) ty = pa.struct([pa.field('x', pa.int32()), pa.field('y', pa.bool_())]) data = np.array([], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [] data = np.array([(42, True), (43, False)], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [{'x': 42, 'y': True}, {'x': 43, 'y': False}] # With mask arr = pa.array(data, mask=np.bool_([False, True]), type=ty) assert arr.to_pylist() == [{'x': 42, 'y': True}, None] # Trivial struct type dt = np.dtype([]) ty = pa.struct([]) data = np.array([], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [] data = np.array([(), ()], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [{}, {}] def test_from_numpy_nested(self): dt = np.dtype([('x', np.dtype([('xx', np.int8), ('yy', np.bool_)])), ('y', np.int16)]) ty = pa.struct([pa.field('x', 
pa.struct([pa.field('xx', pa.int8()), pa.field('yy', pa.bool_())])), pa.field('y', pa.int16())]) data = np.array([], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [] data = np.array([((1, True), 2), ((3, False), 4)], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [{'x': {'xx': 1, 'yy': True}, 'y': 2}, {'x': {'xx': 3, 'yy': False}, 'y': 4}] @pytest.mark.large_memory def test_from_numpy_large(self): # Exercise rechunking + nulls target_size = 3 * 1024**3 # 4GB dt = np.dtype([('x', np.float64), ('y', 'object')]) bs = 65536 - dt.itemsize block = b'.' * bs n = target_size // (bs + dt.itemsize) data = np.zeros(n, dtype=dt) data['x'] = np.random.random_sample(n) data['y'] = block # Add implicit nulls data['x'][data['x'] < 0.2] = np.nan ty = pa.struct([pa.field('x', pa.float64()), pa.field('y', pa.binary(bs))]) arr = pa.array(data, type=ty, from_pandas=True) assert arr.num_chunks == 2 def iter_chunked_array(arr): for chunk in arr.iterchunks(): for item in chunk: yield item def check(arr, data, mask=None): assert len(arr) == len(data) xs = data['x'] ys = data['y'] for i, obj in enumerate(iter_chunked_array(arr)): try: d = obj.as_py() if mask is not None and mask[i]: assert d is None else: x = xs[i] if np.isnan(x): assert d['x'] is None else: assert d['x'] == x assert d['y'] == ys[i] except Exception: print("Failed at index", i) raise check(arr, data) del arr # Now with explicit mask mask = np.random.random_sample(n) < 0.2 arr = pa.array(data, type=ty, mask=mask, from_pandas=True) assert arr.num_chunks == 2 check(arr, data, mask) del arr def test_from_numpy_bad_input(self): ty = pa.struct([pa.field('x', pa.int32()), pa.field('y', pa.bool_())]) dt = np.dtype([('x', np.int32), ('z', np.bool_)]) data = np.array([], dtype=dt) with pytest.raises(TypeError, match="Missing field 'y'"): pa.array(data, type=ty) data = np.int32([]) with pytest.raises(TypeError, match="Expected struct array"): pa.array(data, type=ty) class 
TestZeroCopyConversion(object): """ Tests that zero-copy conversion works with some types. """ def test_zero_copy_success(self): result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True) npt.assert_array_equal(result, [0, 1, 2]) def test_zero_copy_dictionaries(self): arr = pa.DictionaryArray.from_arrays( np.array([0, 0]), np.array([5])) result = arr.to_pandas(zero_copy_only=True) values = pd.Categorical([5, 5]) tm.assert_series_equal(pd.Series(result), pd.Series(values), check_names=False) def check_zero_copy_failure(self, arr): with pytest.raises(pa.ArrowInvalid): arr.to_pandas(zero_copy_only=True) def test_zero_copy_failure_on_object_types(self): self.check_zero_copy_failure(pa.array(['A', 'B', 'C'])) def test_zero_copy_failure_with_int_when_nulls(self): self.check_zero_copy_failure(pa.array([0, 1, None])) def test_zero_copy_failure_with_float_when_nulls(self): self.check_zero_copy_failure(pa.array([0.0, 1.0, None])) def test_zero_copy_failure_on_bool_types(self): self.check_zero_copy_failure(pa.array([True, False])) def test_zero_copy_failure_on_list_types(self): arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64())) self.check_zero_copy_failure(arr) def test_zero_copy_failure_on_timestamp_types(self): arr = np.array(['2007-07-13'], dtype='datetime64[ns]') self.check_zero_copy_failure(pa.array(arr)) class TestConvertMisc(object): """ Miscellaneous conversion tests. 
""" type_pairs = [ (np.int8, pa.int8()), (np.int16, pa.int16()), (np.int32, pa.int32()), (np.int64, pa.int64()), (np.uint8, pa.uint8()), (np.uint16, pa.uint16()), (np.uint32, pa.uint32()), (np.uint64, pa.uint64()), (np.float16, pa.float16()), (np.float32, pa.float32()), (np.float64, pa.float64()), # XXX unsupported # (np.dtype([('a', 'i2')]), pa.struct([pa.field('a', pa.int16())])), (np.object, pa.string()), (np.object, pa.binary()), (np.object, pa.binary(10)), (np.object, pa.list_(pa.int64())), ] def test_all_none_objects(self): df = pd.DataFrame({'a': [None, None, None]}) _check_pandas_roundtrip(df) def test_all_none_category(self): df = pd.DataFrame({'a': [None, None, None]}) df['a'] = df['a'].astype('category') _check_pandas_roundtrip(df) def test_empty_arrays(self): for dtype, pa_type in self.type_pairs: arr = np.array([], dtype=dtype) _check_array_roundtrip(arr, type=pa_type) def test_threaded_conversion(self): df = _alltypes_example() _check_pandas_roundtrip(df, use_threads=True) _check_pandas_roundtrip(df, use_threads=True, as_batch=True) def test_category(self): repeats = 5 v1 = ['foo', None, 'bar', 'qux', np.nan] v2 = [4, 5, 6, 7, 8] v3 = [b'foo', None, b'bar', b'qux', np.nan] df = pd.DataFrame({'cat_strings': pd.Categorical(v1 * repeats), 'cat_ints': pd.Categorical(v2 * repeats), 'cat_binary': pd.Categorical(v3 * repeats), 'cat_strings_ordered': pd.Categorical( v1 * repeats, categories=['bar', 'qux', 'foo'], ordered=True), 'ints': v2 * repeats, 'ints2': v2 * repeats, 'strings': v1 * repeats, 'strings2': v1 * repeats, 'strings3': v3 * repeats}) _check_pandas_roundtrip(df) arrays = [ pd.Categorical(v1 * repeats), pd.Categorical(v2 * repeats), pd.Categorical(v3 * repeats) ] for values in arrays: _check_array_roundtrip(values) def test_empty_category(self): # ARROW-2443 df = pd.DataFrame({'cat': pd.Categorical([])}) _check_pandas_roundtrip(df) def test_mixed_types_fails(self): data = pd.DataFrame({'a': ['a', 1, 2.0]}) with pytest.raises(pa.ArrowTypeError): 
pa.Table.from_pandas(data) data = pd.DataFrame({'a': [1, True]}) with pytest.raises(pa.ArrowTypeError): pa.Table.from_pandas(data) def test_strided_data_import(self): cases = [] columns = ['a', 'b', 'c'] N, K = 100, 3 random_numbers = np.random.randn(N, K).copy() * 100 numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'f4', 'f8'] for type_name in numeric_dtypes: cases.append(random_numbers.astype(type_name)) # strings cases.append(np.array([tm.rands(10) for i in range(N * K)], dtype=object) .reshape(N, K).copy()) # booleans boolean_objects = (np.array([True, False, True] * N, dtype=object) .reshape(N, K).copy()) # add some nulls, so dtype comes back as objects boolean_objects[5] = None cases.append(boolean_objects) cases.append(np.arange("2016-01-01T00:00:00.001", N * K, dtype='datetime64[ms]') .reshape(N, K).copy()) strided_mask = (random_numbers > 0).astype(bool)[:, 0] for case in cases: df = pd.DataFrame(case, columns=columns) col = df['a'] _check_pandas_roundtrip(df) _check_array_roundtrip(col) _check_array_roundtrip(col, mask=strided_mask) def test_all_nones(self): def _check_series(s): converted = pa.array(s) assert isinstance(converted, pa.NullArray) assert len(converted) == 3 assert converted.null_count == 3 assert converted[0] is pa.NA _check_series(pd.Series([None] * 3, dtype=object)) _check_series(pd.Series([np.nan] * 3, dtype=object)) _check_series(pd.Series([np.sqrt(-1)] * 3, dtype=object)) def test_partial_schema(self): data = OrderedDict([ ('a', [0, 1, 2, 3, 4]), ('b', np.array([-10, -5, 0, 5, 10], dtype=np.int32)), ('c', [-10, -5, 0, 5, 10]) ]) df = pd.DataFrame(data) partial_schema = pa.schema([ pa.field('a', pa.int64()), pa.field('b', pa.int32()) ]) expected_schema = pa.schema([ pa.field('a', pa.int64()), pa.field('b', pa.int32()), pa.field('c', pa.int64()) ]) _check_pandas_roundtrip(df, schema=partial_schema, expected_schema=expected_schema) def test_table_batch_empty_dataframe(self): df = pd.DataFrame({}) 
_check_pandas_roundtrip(df) _check_pandas_roundtrip(df, as_batch=True) df2 = pd.DataFrame({}, index=[0, 1, 2]) _check_pandas_roundtrip(df2, preserve_index=True) _check_pandas_roundtrip(df2, as_batch=True, preserve_index=True) def test_convert_empty_table(self): arr = pa.array([], type=pa.int64()) tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=np.int64)) arr = pa.array([], type=pa.string()) tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object)) arr = pa.array([], type=pa.list_(pa.int64())) tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object)) arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())])) tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object)) def test_non_natural_stride(self): """ ARROW-2172: converting from a Numpy array with a stride that's not a multiple of itemsize. """ dtype = np.dtype([('x', np.int32), ('y', np.int16)]) data = np.array([(42, -1), (-43, 2)], dtype=dtype) assert data.strides == (6,) arr = pa.array(data['x'], type=pa.int32()) assert arr.to_pylist() == [42, -43] arr = pa.array(data['y'], type=pa.int16()) assert arr.to_pylist() == [-1, 2] def test_mixed_integer_columns(self): row = [[], []] df = pd.DataFrame(data=[row], columns=['foo', 123]) expected_df = pd.DataFrame(data=[row], columns=['foo', '123']) _check_pandas_roundtrip(df, expected=expected_df, preserve_index=True) def _fully_loaded_dataframe_example(): from distutils.version import LooseVersion index = pd.MultiIndex.from_arrays([ pd.date_range('2000-01-01', periods=5).repeat(2), np.tile(np.array(['foo', 'bar'], dtype=object), 5) ]) c1 = pd.date_range('2000-01-01', periods=10) data = { 0: c1, 1: c1.tz_localize('utc'), 2: c1.tz_localize('US/Eastern'), 3: c1[::2].tz_localize('utc').repeat(2).astype('category'), 4: ['foo', 'bar'] * 5, 5: pd.Series(['foo', 'bar'] * 5).astype('category').values, 6: [True, False] * 5, 7: np.random.randn(10), 8: np.random.randint(0, 100, size=10), 9: pd.period_range('2013', periods=10, freq='M') 
} if LooseVersion(pd.__version__) >= '0.21': # There is an issue with pickling IntervalIndex in pandas 0.20.x data[10] = pd.interval_range(start=1, freq=1, periods=10) return pd.DataFrame(data, index=index) @pytest.mark.parametrize('columns', ([b'foo'], ['foo'])) def test_roundtrip_with_bytes_unicode(columns): df = pd.DataFrame(columns=columns) table1 = pa.Table.from_pandas(df) table2 = pa.Table.from_pandas(table1.to_pandas()) assert table1.equals(table2) assert table1.schema.equals(table2.schema) assert table1.schema.metadata == table2.schema.metadata def _check_serialize_components_roundtrip(df): ctx = pa.default_serialization_context() components = ctx.serialize(df).to_components() deserialized = ctx.deserialize_components(components) tm.assert_frame_equal(df, deserialized) def test_serialize_deserialize_pandas(): # ARROW-1784, serialize and deserialize DataFrame by decomposing # BlockManager df = _fully_loaded_dataframe_example() _check_serialize_components_roundtrip(df) def _pytime_from_micros(val): microseconds = val % 1000000 val //= 1000000 seconds = val % 60 val //= 60 minutes = val % 60 hours = val // 60 return time(hours, minutes, seconds, microseconds) def _pytime_to_micros(pytime): return (pytime.hour * 3600000000 + pytime.minute * 60000000 + pytime.second * 1000000 + pytime.microsecond)
35.459395
79
0.562467
import decimal import json from collections import OrderedDict from datetime import date, datetime, time, timedelta import numpy as np import numpy.testing as npt import pandas as pd import pandas.util.testing as tm import pytest import pyarrow as pa import pyarrow.types as patypes from pyarrow.compat import PY2 from .pandas_examples import dataframe_with_arrays, dataframe_with_lists def _alltypes_example(size=100): return pd.DataFrame({ 'uint8': np.arange(size, dtype=np.uint8), 'uint16': np.arange(size, dtype=np.uint16), 'uint32': np.arange(size, dtype=np.uint32), 'uint64': np.arange(size, dtype=np.uint64), 'int8': np.arange(size, dtype=np.int16), 'int16': np.arange(size, dtype=np.int16), 'int32': np.arange(size, dtype=np.int32), 'int64': np.arange(size, dtype=np.int64), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, 'datetime': np.arange("2016-01-01T00:00:00.001", size, dtype='datetime64[ms]'), 'str': [str(x) for x in range(size)], 'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None], 'empty_str': [''] * size }) def _check_pandas_roundtrip(df, expected=None, use_threads=False, expected_schema=None, check_dtype=True, schema=None, preserve_index=False, as_batch=False): klass = pa.RecordBatch if as_batch else pa.Table table = klass.from_pandas(df, schema=schema, preserve_index=preserve_index, nthreads=2 if use_threads else 1) result = table.to_pandas(use_threads=use_threads) if expected_schema: expected_schema = expected_schema.add_metadata(table.schema.metadata) assert table.schema.equals(expected_schema) if expected is None: expected = df tm.assert_frame_equal(result, expected, check_dtype=check_dtype, check_index_type=('equiv' if preserve_index else False)) def _check_series_roundtrip(s, type_=None, expected_pa_type=None): arr = pa.array(s, from_pandas=True, type=type_) if type_ is not None and expected_pa_type is None: expected_pa_type = type_ if expected_pa_type is 
not None: assert arr.type == expected_pa_type result = pd.Series(arr.to_pandas(), name=s.name) if patypes.is_timestamp(arr.type) and arr.type.tz is not None: result = (result.dt.tz_localize('utc') .dt.tz_convert(arr.type.tz)) tm.assert_series_equal(s, result) def _check_array_roundtrip(values, expected=None, mask=None, type=None): arr = pa.array(values, from_pandas=True, mask=mask, type=type) result = arr.to_pandas() values_nulls = pd.isnull(values) if mask is None: assert arr.null_count == values_nulls.sum() else: assert arr.null_count == (mask | values_nulls).sum() if mask is None: tm.assert_series_equal(pd.Series(result), pd.Series(values), check_names=False) else: expected = pd.Series(np.ma.masked_array(values, mask=mask)) tm.assert_series_equal(pd.Series(result), expected, check_names=False) def _check_array_from_pandas_roundtrip(np_array): arr = pa.array(np_array, from_pandas=True) result = arr.to_pandas() npt.assert_array_equal(result, np_array) class TestConvertMetadata(object): def test_non_string_columns(self): df = pd.DataFrame({0: [1, 2, 3]}) table = pa.Table.from_pandas(df) assert table.column(0).name == '0' def test_from_pandas_with_columns(self): df = pd.DataFrame({0: [1, 2, 3], 1: [1, 3, 3], 2: [2, 4, 5]}) table = pa.Table.from_pandas(df, columns=[0, 1]) expected = pa.Table.from_pandas(df[[0, 1]]) assert expected.equals(table) record_batch_table = pa.RecordBatch.from_pandas(df, columns=[0, 1]) record_batch_expected = pa.RecordBatch.from_pandas(df[[0, 1]]) assert record_batch_expected.equals(record_batch_table) def test_column_index_names_are_preserved(self): df = pd.DataFrame({'data': [1, 2, 3]}) df.columns.names = ['a'] _check_pandas_roundtrip(df, preserve_index=True) def test_multiindex_columns(self): columns = pd.MultiIndex.from_arrays([ ['one', 'two'], ['X', 'Y'] ]) df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns) _check_pandas_roundtrip(df, preserve_index=True) def test_multiindex_columns_with_dtypes(self): columns = 
pd.MultiIndex.from_arrays( [ ['one', 'two'], pd.DatetimeIndex(['2017-08-01', '2017-08-02']), ], names=['level_1', 'level_2'], ) df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns) _check_pandas_roundtrip(df, preserve_index=True) def test_multiindex_columns_unicode(self): columns = pd.MultiIndex.from_arrays([[u'あ', u'い'], ['X', 'Y']]) df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')], columns=columns) _check_pandas_roundtrip(df, preserve_index=True) def test_integer_index_column(self): df = pd.DataFrame([(1, 'a'), (2, 'b'), (3, 'c')]) _check_pandas_roundtrip(df, preserve_index=True) def test_index_metadata_field_name(self): df = pd.DataFrame( [(1, 'a', 3.1), (2, 'b', 2.2), (3, 'c', 1.3)], index=pd.MultiIndex.from_arrays( [['c', 'b', 'a'], [3, 2, 1]], names=[None, 'foo'] ), columns=['a', None, '__index_level_0__'], ) t = pa.Table.from_pandas(df, preserve_index=True) raw_metadata = t.schema.metadata js = json.loads(raw_metadata[b'pandas'].decode('utf8')) col1, col2, col3, idx0, foo = js['columns'] assert col1['name'] == 'a' assert col1['name'] == col1['field_name'] assert col2['name'] is None assert col2['field_name'] == 'None' assert col3['name'] == '__index_level_0__' assert col3['name'] == col3['field_name'] idx0_name, foo_name = js['index_columns'] assert idx0_name == '__index_level_0__' assert idx0['field_name'] == idx0_name assert idx0['name'] is None assert foo_name == 'foo' assert foo['field_name'] == foo_name assert foo['name'] == foo_name def test_categorical_column_index(self): df = pd.DataFrame( [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)], columns=pd.Index(list('def'), dtype='category') ) t = pa.Table.from_pandas(df, preserve_index=True) raw_metadata = t.schema.metadata js = json.loads(raw_metadata[b'pandas'].decode('utf8')) column_indexes, = js['column_indexes'] assert column_indexes['name'] is None assert column_indexes['pandas_type'] == 'categorical' assert column_indexes['numpy_type'] == 'int8' md = column_indexes['metadata'] assert 
md['num_categories'] == 3 assert md['ordered'] is False def test_string_column_index(self): df = pd.DataFrame( [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)], columns=pd.Index(list('def'), name='stringz') ) t = pa.Table.from_pandas(df, preserve_index=True) raw_metadata = t.schema.metadata js = json.loads(raw_metadata[b'pandas'].decode('utf8')) column_indexes, = js['column_indexes'] assert column_indexes['name'] == 'stringz' assert column_indexes['name'] == column_indexes['field_name'] assert column_indexes['pandas_type'] == ('bytes' if PY2 else 'unicode') assert column_indexes['numpy_type'] == 'object' md = column_indexes['metadata'] if not PY2: assert len(md) == 1 assert md['encoding'] == 'UTF-8' else: assert md is None or 'encoding' not in md def test_datetimetz_column_index(self): df = pd.DataFrame( [(1, 'a', 2.0), (2, 'b', 3.0), (3, 'c', 4.0)], columns=pd.date_range( start='2017-01-01', periods=3, tz='America/New_York' ) ) t = pa.Table.from_pandas(df, preserve_index=True) raw_metadata = t.schema.metadata js = json.loads(raw_metadata[b'pandas'].decode('utf8')) column_indexes, = js['column_indexes'] assert column_indexes['name'] is None assert column_indexes['pandas_type'] == 'datetimetz' assert column_indexes['numpy_type'] == 'datetime64[ns]' md = column_indexes['metadata'] assert md['timezone'] == 'America/New_York' def test_datetimetz_row_index(self): df = pd.DataFrame({ 'a': pd.date_range( start='2017-01-01', periods=3, tz='America/New_York' ) }) df = df.set_index('a') _check_pandas_roundtrip(df, preserve_index=True) def test_categorical_row_index(self): df = pd.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}) df['a'] = df.a.astype('category') df = df.set_index('a') _check_pandas_roundtrip(df, preserve_index=True) def test_duplicate_column_names_does_not_crash(self): df = pd.DataFrame([(1, 'a'), (2, 'b')], columns=list('aa')) with pytest.raises(ValueError): pa.Table.from_pandas(df) def test_dictionary_indices_boundscheck(self): indices = [[0, 1], [0, -1]] for inds 
in indices: arr = pa.DictionaryArray.from_arrays(inds, ['a'], safe=False) batch = pa.RecordBatch.from_arrays([arr], ['foo']) table = pa.Table.from_batches([batch, batch, batch]) with pytest.raises(pa.ArrowInvalid): arr.to_pandas() with pytest.raises(pa.ArrowInvalid): table.to_pandas() def test_unicode_with_unicode_column_and_index(self): df = pd.DataFrame({u'あ': [u'い']}, index=[u'う']) _check_pandas_roundtrip(df, preserve_index=True) def test_mixed_unicode_column_names(self): df = pd.DataFrame({u'あ': [u'い'], b'a': 1}, index=[u'う']) with pytest.raises(AssertionError): _check_pandas_roundtrip(df, preserve_index=True) def test_binary_column_name(self): column_data = [u'い'] key = u'あ'.encode('utf8') data = {key: column_data} df = pd.DataFrame(data) # is always decoded as utf8: even if binary goes in, utf8 comes out t = pa.Table.from_pandas(df, preserve_index=True) df2 = t.to_pandas() assert df.values[0] == df2.values[0] assert df.index.values[0] == df2.index.values[0] assert df.columns[0] == key def test_multiindex_duplicate_values(self): num_rows = 3 numbers = list(range(num_rows)) index = pd.MultiIndex.from_arrays( [['foo', 'foo', 'bar'], numbers], names=['foobar', 'some_numbers'], ) df = pd.DataFrame({'numbers': numbers}, index=index) table = pa.Table.from_pandas(df) result_df = table.to_pandas() tm.assert_frame_equal(result_df, df) def test_metadata_with_mixed_types(self): df = pd.DataFrame({'data': [b'some_bytes', u'some_unicode']}) table = pa.Table.from_pandas(df) metadata = table.schema.metadata assert b'mixed' not in metadata[b'pandas'] js = json.loads(metadata[b'pandas'].decode('utf8')) data_column = js['columns'][0] assert data_column['pandas_type'] == 'bytes' assert data_column['numpy_type'] == 'object' def test_list_metadata(self): df = pd.DataFrame({'data': [[1], [2, 3, 4], [5] * 7]}) schema = pa.schema([pa.field('data', type=pa.list_(pa.int64()))]) table = pa.Table.from_pandas(df, schema=schema) metadata = table.schema.metadata assert b'mixed' not in 
metadata[b'pandas'] js = json.loads(metadata[b'pandas'].decode('utf8')) data_column = js['columns'][0] assert data_column['pandas_type'] == 'list[int64]' assert data_column['numpy_type'] == 'object' def test_decimal_metadata(self): expected = pd.DataFrame({ 'decimals': [ decimal.Decimal('394092382910493.12341234678'), -decimal.Decimal('314292388910493.12343437128'), ] }) table = pa.Table.from_pandas(expected) metadata = table.schema.metadata assert b'mixed' not in metadata[b'pandas'] js = json.loads(metadata[b'pandas'].decode('utf8')) data_column = js['columns'][0] assert data_column['pandas_type'] == 'decimal' assert data_column['numpy_type'] == 'object' assert data_column['metadata'] == {'precision': 26, 'scale': 11} def test_table_column_subset_metadata(self): # ARROW-1883 df = pd.DataFrame({ 'a': [1, 2, 3], 'b': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')}) table = pa.Table.from_pandas(df) table_subset = table.remove_column(1) result = table_subset.to_pandas() tm.assert_frame_equal(result, df[['a']]) table_subset2 = table_subset.remove_column(1) result = table_subset2.to_pandas() tm.assert_frame_equal(result, df[['a']]) # non-default index for index in [ pd.Index(['a', 'b', 'c'], name='index'), pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')]: df = pd.DataFrame({'a': [1, 2, 3], 'b': [.1, .2, .3]}, index=index) table = pa.Table.from_pandas(df) table_subset = table.remove_column(1) result = table_subset.to_pandas() tm.assert_frame_equal(result, df[['a']]) table_subset2 = table_subset.remove_column(1) result = table_subset2.to_pandas() tm.assert_frame_equal(result, df[['a']].reset_index(drop=True)) def test_empty_list_metadata(self): # Create table with array of empty lists, forced to have type # list(string) in pyarrow c1 = [["test"], ["a", "b"], None] c2 = [[], [], []] arrays = OrderedDict([ ('c1', pa.array(c1, type=pa.list_(pa.string()))), ('c2', pa.array(c2, type=pa.list_(pa.string()))), ]) rb = pa.RecordBatch.from_arrays( 
list(arrays.values()), list(arrays.keys()) ) tbl = pa.Table.from_batches([rb]) # First roundtrip changes schema, because pandas cannot preserve the # type of empty lists df = tbl.to_pandas() tbl2 = pa.Table.from_pandas(df, preserve_index=True) md2 = json.loads(tbl2.schema.metadata[b'pandas'].decode('utf8')) # Second roundtrip df2 = tbl2.to_pandas() expected = pd.DataFrame(OrderedDict([('c1', c1), ('c2', c2)])) tm.assert_frame_equal(df2, expected) assert md2['columns'] == [ { 'name': 'c1', 'field_name': 'c1', 'metadata': None, 'numpy_type': 'object', 'pandas_type': 'list[unicode]', }, { 'name': 'c2', 'field_name': 'c2', 'metadata': None, 'numpy_type': 'object', 'pandas_type': 'list[empty]', }, { 'name': None, 'field_name': '__index_level_0__', 'metadata': None, 'numpy_type': 'int64', 'pandas_type': 'int64', } ] class TestConvertPrimitiveTypes(object): def test_float_no_nulls(self): data = {} fields = [] dtypes = [('f2', pa.float16()), ('f4', pa.float32()), ('f8', pa.float64())] num_values = 100 for numpy_dtype, arrow_dtype in dtypes: values = np.random.randn(num_values) data[numpy_dtype] = values.astype(numpy_dtype) fields.append(pa.field(numpy_dtype, arrow_dtype)) df = pd.DataFrame(data) schema = pa.schema(fields) _check_pandas_roundtrip(df, expected_schema=schema) def test_float_nulls(self): num_values = 100 null_mask = np.random.randint(0, 10, size=num_values) < 3 dtypes = [('f2', pa.float16()), ('f4', pa.float32()), ('f8', pa.float64())] names = ['f2', 'f4', 'f8'] expected_cols = [] arrays = [] fields = [] for name, arrow_dtype in dtypes: values = np.random.randn(num_values).astype(name) arr = pa.array(values, from_pandas=True, mask=null_mask) arrays.append(arr) fields.append(pa.field(name, arrow_dtype)) values[null_mask] = np.nan expected_cols.append(values) ex_frame = pd.DataFrame(dict(zip(names, expected_cols)), columns=names) table = pa.Table.from_arrays(arrays, names) assert table.schema.equals(pa.schema(fields)) result = table.to_pandas() 
tm.assert_frame_equal(result, ex_frame) def test_float_nulls_to_ints(self): # ARROW-2135 df = pd.DataFrame({"a": [1.0, 2.0, pd.np.NaN]}) schema = pa.schema([pa.field("a", pa.int16(), nullable=True)]) table = pa.Table.from_pandas(df, schema=schema) assert table[0].to_pylist() == [1, 2, None] tm.assert_frame_equal(df, table.to_pandas()) def test_integer_no_nulls(self): data = OrderedDict() fields = [] numpy_dtypes = [ ('i1', pa.int8()), ('i2', pa.int16()), ('i4', pa.int32()), ('i8', pa.int64()), ('u1', pa.uint8()), ('u2', pa.uint16()), ('u4', pa.uint32()), ('u8', pa.uint64()), ('longlong', pa.int64()), ('ulonglong', pa.uint64()) ] num_values = 100 for dtype, arrow_dtype in numpy_dtypes: info = np.iinfo(dtype) values = np.random.randint(max(info.min, np.iinfo(np.int_).min), min(info.max, np.iinfo(np.int_).max), size=num_values) data[dtype] = values.astype(dtype) fields.append(pa.field(dtype, arrow_dtype)) df = pd.DataFrame(data) schema = pa.schema(fields) _check_pandas_roundtrip(df, expected_schema=schema) def test_all_integer_types(self): # Test all Numpy integer aliases data = OrderedDict() numpy_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'byte', 'ubyte', 'short', 'ushort', 'intc', 'uintc', 'int_', 'uint', 'longlong', 'ulonglong'] for dtype in numpy_dtypes: data[dtype] = np.arange(12, dtype=dtype) df = pd.DataFrame(data) _check_pandas_roundtrip(df) def test_integer_with_nulls(self): # pandas requires upcast to float dtype int_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8'] num_values = 100 null_mask = np.random.randint(0, 10, size=num_values) < 3 expected_cols = [] arrays = [] for name in int_dtypes: values = np.random.randint(0, 100, size=num_values) arr = pa.array(values, mask=null_mask) arrays.append(arr) expected = values.astype('f8') expected[null_mask] = np.nan expected_cols.append(expected) ex_frame = pd.DataFrame(dict(zip(int_dtypes, expected_cols)), columns=int_dtypes) table = pa.Table.from_arrays(arrays, int_dtypes) result = 
table.to_pandas() tm.assert_frame_equal(result, ex_frame) def test_array_from_pandas_type_cast(self): arr = np.arange(10, dtype='int64') target_type = pa.int8() result = pa.array(arr, type=target_type) expected = pa.array(arr.astype('int8')) assert result.equals(expected) def test_boolean_no_nulls(self): num_values = 100 np.random.seed(0) df = pd.DataFrame({'bools': np.random.randn(num_values) > 0}) field = pa.field('bools', pa.bool_()) schema = pa.schema([field]) _check_pandas_roundtrip(df, expected_schema=schema) def test_boolean_nulls(self): # pandas requires upcast to object dtype num_values = 100 np.random.seed(0) mask = np.random.randint(0, 10, size=num_values) < 3 values = np.random.randint(0, 10, size=num_values) < 5 arr = pa.array(values, mask=mask) expected = values.astype(object) expected[mask] = None field = pa.field('bools', pa.bool_()) schema = pa.schema([field]) ex_frame = pd.DataFrame({'bools': expected}) table = pa.Table.from_arrays([arr], ['bools']) assert table.schema.equals(schema) result = table.to_pandas() tm.assert_frame_equal(result, ex_frame) def test_float_object_nulls(self): arr = np.array([None, 1.5, np.float64(3.5)] * 5, dtype=object) df = pd.DataFrame({'floats': arr}) expected = pd.DataFrame({'floats': pd.to_numeric(arr)}) field = pa.field('floats', pa.float64()) schema = pa.schema([field]) _check_pandas_roundtrip(df, expected=expected, expected_schema=schema) def test_int_object_nulls(self): arr = np.array([None, 1, np.int64(3)] * 5, dtype=object) df = pd.DataFrame({'ints': arr}) expected = pd.DataFrame({'ints': pd.to_numeric(arr)}) field = pa.field('ints', pa.int64()) schema = pa.schema([field]) _check_pandas_roundtrip(df, expected=expected, expected_schema=schema) def test_boolean_object_nulls(self): arr = np.array([False, None, True] * 100, dtype=object) df = pd.DataFrame({'bools': arr}) field = pa.field('bools', pa.bool_()) schema = pa.schema([field]) _check_pandas_roundtrip(df, expected_schema=schema) def 
test_all_nulls_cast_numeric(self): arr = np.array([None], dtype=object) def _check_type(t): a2 = pa.array(arr, type=t) assert a2.type == t assert a2[0].as_py() is None _check_type(pa.int32()) _check_type(pa.float64()) def test_half_floats_from_numpy(self): arr = np.array([1.5, np.nan], dtype=np.float16) a = pa.array(arr, type=pa.float16()) x, y = a.to_pylist() assert isinstance(x, np.float16) assert x == 1.5 assert isinstance(y, np.float16) assert np.isnan(y) a = pa.array(arr, type=pa.float16(), from_pandas=True) x, y = a.to_pylist() assert isinstance(x, np.float16) assert x == 1.5 assert y is None @pytest.mark.parametrize('dtype', ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']) def test_array_integer_object_nulls_option(dtype): num_values = 100 null_mask = np.random.randint(0, 10, size=num_values) < 3 values = np.random.randint(0, 100, size=num_values, dtype=dtype) array = pa.array(values, mask=null_mask) if null_mask.any(): expected = values.astype('O') expected[null_mask] = None else: expected = values result = array.to_pandas(integer_object_nulls=True) np.testing.assert_equal(result, expected) @pytest.mark.parametrize('dtype', ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']) def test_table_integer_object_nulls_option(dtype): num_values = 100 null_mask = np.random.randint(0, 10, size=num_values) < 3 values = np.random.randint(0, 100, size=num_values, dtype=dtype) array = pa.array(values, mask=null_mask) if null_mask.any(): expected = values.astype('O') expected[null_mask] = None else: expected = values expected = pd.DataFrame({dtype: expected}) table = pa.Table.from_arrays([array], [dtype]) result = table.to_pandas(integer_object_nulls=True) tm.assert_frame_equal(result, expected) class TestConvertDateTimeLikeTypes(object): def test_timestamps_notimezone_no_nulls(self): df = pd.DataFrame({ 'datetime64': np.array([ '2007-07-13T01:23:34.123456789', '2006-01-13T12:34:56.432539784', '2010-08-13T05:46:57.437699912'], dtype='datetime64[ns]') }) field = 
pa.field('datetime64', pa.timestamp('ns')) schema = pa.schema([field]) _check_pandas_roundtrip( df, expected_schema=schema, ) def test_timestamps_notimezone_nulls(self): df = pd.DataFrame({ 'datetime64': np.array([ '2007-07-13T01:23:34.123456789', None, '2010-08-13T05:46:57.437699912'], dtype='datetime64[ns]') }) field = pa.field('datetime64', pa.timestamp('ns')) schema = pa.schema([field]) _check_pandas_roundtrip( df, expected_schema=schema, ) def test_timestamps_with_timezone(self): df = pd.DataFrame({ 'datetime64': np.array([ '2007-07-13T01:23:34.123', '2006-01-13T12:34:56.432', '2010-08-13T05:46:57.437'], dtype='datetime64[ms]') }) df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern') .to_frame()) _check_pandas_roundtrip(df) _check_series_roundtrip(df['datetime64']) # drop-in a null and ns instead of ms df = pd.DataFrame({ 'datetime64': np.array([ '2007-07-13T01:23:34.123456789', None, '2006-01-13T12:34:56.432539784', '2010-08-13T05:46:57.437699912'], dtype='datetime64[ns]') }) df['datetime64'] = (df['datetime64'].dt.tz_localize('US/Eastern') .to_frame()) _check_pandas_roundtrip(df) def test_python_datetime(self): # ARROW-2106 date_array = [datetime.today() + timedelta(days=x) for x in range(10)] df = pd.DataFrame({ 'datetime': pd.Series(date_array, dtype=object) }) table = pa.Table.from_pandas(df) assert isinstance(table[0].data.chunk(0), pa.TimestampArray) result = table.to_pandas() expected_df = pd.DataFrame({ 'datetime': date_array }) tm.assert_frame_equal(expected_df, result) def test_python_datetime_subclass(self): class MyDatetime(datetime): # see https://github.com/pandas-dev/pandas/issues/21142 nanosecond = 0.0 date_array = [MyDatetime(2000, 1, 1, 1, 1, 1)] df = pd.DataFrame({"datetime": pd.Series(date_array, dtype=object)}) table = pa.Table.from_pandas(df) assert isinstance(table[0].data.chunk(0), pa.TimestampArray) result = table.to_pandas() expected_df = pd.DataFrame({"datetime": date_array}) # 
https://github.com/pandas-dev/pandas/issues/21142 expected_df["datetime"] = pd.to_datetime(expected_df["datetime"]) tm.assert_frame_equal(expected_df, result) def test_python_date_subclass(self): class MyDate(date): pass date_array = [MyDate(2000, 1, 1)] df = pd.DataFrame({"date": pd.Series(date_array, dtype=object)}) table = pa.Table.from_pandas(df) assert isinstance(table[0].data.chunk(0), pa.Date32Array) result = table.to_pandas() expected_df = pd.DataFrame( {"date": np.array(["2000-01-01"], dtype="datetime64[ns]")} ) tm.assert_frame_equal(expected_df, result) def test_datetime64_to_date32(self): # ARROW-1718 arr = pa.array([date(2017, 10, 23), None]) c = pa.Column.from_array("d", arr) s = c.to_pandas() arr2 = pa.Array.from_pandas(s, type=pa.date32()) assert arr2.equals(arr.cast('date32')) @pytest.mark.parametrize('mask', [ None, np.ones(3), np.array([True, False, False]), ]) def test_pandas_datetime_to_date64(self, mask): s = pd.to_datetime([ '2018-05-10T00:00:00', '2018-05-11T00:00:00', '2018-05-12T00:00:00', ]) arr = pa.Array.from_pandas(s, type=pa.date64(), mask=mask) data = np.array([ date(2018, 5, 10), date(2018, 5, 11), date(2018, 5, 12) ]) expected = pa.array(data, mask=mask, type=pa.date64()) assert arr.equals(expected) @pytest.mark.parametrize('mask', [ None, np.ones(3), np.array([True, False, False]) ]) def test_pandas_datetime_to_date64_failures(self, mask): s = pd.to_datetime([ '2018-05-10T10:24:01', '2018-05-11T10:24:01', '2018-05-12T10:24:01', ]) expected_msg = 'Timestamp value had non-zero intraday milliseconds' with pytest.raises(pa.ArrowInvalid, match=expected_msg): pa.Array.from_pandas(s, type=pa.date64(), mask=mask) def test_date_infer(self): df = pd.DataFrame({ 'date': [date(2000, 1, 1), None, date(1970, 1, 1), date(2040, 2, 26)]}) table = pa.Table.from_pandas(df, preserve_index=False) field = pa.field('date', pa.date32()) # schema's metadata is generated by from_pandas conversion expected_schema = pa.schema([field], 
metadata=table.schema.metadata) assert table.schema.equals(expected_schema) result = table.to_pandas() expected = df.copy() expected['date'] = pd.to_datetime(df['date']) tm.assert_frame_equal(result, expected) def test_date_mask(self): arr = np.array([date(2017, 4, 3), date(2017, 4, 4)], dtype='datetime64[D]') mask = [True, False] result = pa.array(arr, mask=np.array(mask)) expected = np.array([None, date(2017, 4, 4)], dtype='datetime64[D]') expected = pa.array(expected, from_pandas=True) assert expected.equals(result) def test_date_objects_typed(self): arr = np.array([ date(2017, 4, 3), None, date(2017, 4, 4), date(2017, 4, 5)], dtype=object) arr_i4 = np.array([17259, -1, 17260, 17261], dtype='int32') arr_i8 = arr_i4.astype('int64') * 86400000 mask = np.array([False, True, False, False]) t32 = pa.date32() t64 = pa.date64() a32 = pa.array(arr, type=t32) a64 = pa.array(arr, type=t64) a32_expected = pa.array(arr_i4, mask=mask, type=t32) a64_expected = pa.array(arr_i8, mask=mask, type=t64) assert a32.equals(a32_expected) assert a64.equals(a64_expected) colnames = ['date32', 'date64'] table = pa.Table.from_arrays([a32, a64], colnames) table_pandas = table.to_pandas() ex_values = (np.array(['2017-04-03', '2017-04-04', '2017-04-04', '2017-04-05'], dtype='datetime64[D]') .astype('datetime64[ns]')) ex_values[1] = pd.NaT.value expected_pandas = pd.DataFrame({'date32': ex_values, 'date64': ex_values}, columns=colnames) tm.assert_frame_equal(table_pandas, expected_pandas) def test_dates_from_integers(self): t1 = pa.date32() t2 = pa.date64() arr = np.array([17259, 17260, 17261], dtype='int32') arr2 = arr.astype('int64') * 86400000 a1 = pa.array(arr, type=t1) a2 = pa.array(arr2, type=t2) expected = date(2017, 4, 3) assert a1[0].as_py() == expected assert a2[0].as_py() == expected @pytest.mark.xfail(reason="not supported ATM", raises=NotImplementedError) def test_timedelta(self): df = pd.DataFrame({ 'timedelta': np.arange(start=0, stop=3 * 86400000, step=86400000, 
dtype='timedelta64[ms]') }) pa.Table.from_pandas(df) def test_pytime_from_pandas(self): pytimes = [time(1, 2, 3, 1356), time(4, 5, 6, 1356)] t1 = pa.time64('us') aobjs = np.array(pytimes + [None], dtype=object) parr = pa.array(aobjs) assert parr.type == t1 assert parr[0].as_py() == pytimes[0] assert parr[1].as_py() == pytimes[1] assert parr[2] is pa.NA df = pd.DataFrame({'times': aobjs}) batch = pa.RecordBatch.from_pandas(df) assert batch[0].equals(parr) arr = np.array([_pytime_to_micros(v) for v in pytimes], dtype='int64') a1 = pa.array(arr, type=pa.time64('us')) assert a1[0].as_py() == pytimes[0] a2 = pa.array(arr * 1000, type=pa.time64('ns')) assert a2[0].as_py() == pytimes[0] a3 = pa.array((arr / 1000).astype('i4'), type=pa.time32('ms')) assert a3[0].as_py() == pytimes[0].replace(microsecond=1000) a4 = pa.array((arr / 1000000).astype('i4'), type=pa.time32('s')) assert a4[0].as_py() == pytimes[0].replace(microsecond=0) def test_arrow_time_to_pandas(self): pytimes = [time(1, 2, 3, 1356), time(4, 5, 6, 1356), time(0, 0, 0)] expected = np.array(pytimes[:2] + [None]) expected_ms = np.array([x.replace(microsecond=1000) for x in pytimes[:2]] + [None]) expected_s = np.array([x.replace(microsecond=0) for x in pytimes[:2]] + [None]) arr = np.array([_pytime_to_micros(v) for v in pytimes], dtype='int64') arr = np.array([_pytime_to_micros(v) for v in pytimes], dtype='int64') null_mask = np.array([False, False, True], dtype=bool) a1 = pa.array(arr, mask=null_mask, type=pa.time64('us')) a2 = pa.array(arr * 1000, mask=null_mask, type=pa.time64('ns')) a3 = pa.array((arr / 1000).astype('i4'), mask=null_mask, type=pa.time32('ms')) a4 = pa.array((arr / 1000000).astype('i4'), mask=null_mask, type=pa.time32('s')) names = ['time64[us]', 'time64[ns]', 'time32[ms]', 'time32[s]'] batch = pa.RecordBatch.from_arrays([a1, a2, a3, a4], names) arr = a1.to_pandas() assert (arr == expected).all() arr = a2.to_pandas() assert (arr == expected).all() arr = a3.to_pandas() assert (arr == 
expected_ms).all() arr = a4.to_pandas() assert (arr == expected_s).all() df = batch.to_pandas() expected_df = pd.DataFrame({'time64[us]': expected, 'time64[ns]': expected, 'time32[ms]': expected_ms, 'time32[s]': expected_s}, columns=names) tm.assert_frame_equal(df, expected_df) def test_numpy_datetime64_columns(self): datetime64_ns = np.array([ '2007-07-13T01:23:34.123456789', None, '2006-01-13T12:34:56.432539784', '2010-08-13T05:46:57.437699912'], dtype='datetime64[ns]') _check_array_from_pandas_roundtrip(datetime64_ns) datetime64_us = np.array([ '2007-07-13T01:23:34.123456', None, '2006-01-13T12:34:56.432539', '2010-08-13T05:46:57.437699'], dtype='datetime64[us]') _check_array_from_pandas_roundtrip(datetime64_us) datetime64_ms = np.array([ '2007-07-13T01:23:34.123', None, '2006-01-13T12:34:56.432', '2010-08-13T05:46:57.437'], dtype='datetime64[ms]') _check_array_from_pandas_roundtrip(datetime64_ms) datetime64_s = np.array([ '2007-07-13T01:23:34', None, '2006-01-13T12:34:56', '2010-08-13T05:46:57'], dtype='datetime64[s]') _check_array_from_pandas_roundtrip(datetime64_s) def test_numpy_datetime64_day_unit(self): datetime64_d = np.array([ '2007-07-13', None, '2006-01-15', '2010-08-19'], dtype='datetime64[D]') _check_array_from_pandas_roundtrip(datetime64_d) def test_array_from_pandas_date_with_mask(self): m = np.array([True, False, True]) data = pd.Series([ date(1990, 1, 1), date(1991, 1, 1), date(1992, 1, 1) ]) result = pa.Array.from_pandas(data, mask=m) expected = pd.Series([None, date(1991, 1, 1), None]) assert pa.Array.from_pandas(expected).equals(result) def test_fixed_offset_timezone(self): df = pd.DataFrame({ 'a': [ pd.Timestamp('2012-11-11 00:00:00+01:00'), pd.NaT ] }) _check_pandas_roundtrip(df) _check_serialize_components_roundtrip(df) class TestConvertStringLikeTypes(object): def test_unicode(self): repeats = 1000 values = [u'foo', None, u'bar', u'mañana', np.nan] df = pd.DataFrame({'strings': values * repeats}) field = pa.field('strings', pa.string()) 
schema = pa.schema([field]) _check_pandas_roundtrip(df, expected_schema=schema) def test_bytes_to_binary(self): values = [u'qux', b'foo', None, bytearray(b'barz'), 'qux', np.nan] df = pd.DataFrame({'strings': values}) table = pa.Table.from_pandas(df) assert table[0].type == pa.binary() values2 = [b'qux', b'foo', None, b'barz', b'qux', np.nan] expected = pd.DataFrame({'strings': values2}) _check_pandas_roundtrip(df, expected) @pytest.mark.large_memory def test_bytes_exceed_2gb(self): v1 = b'x' * 100000000 v2 = b'x' * 147483646 df = pd.DataFrame({ 'strings': [v1] * 20 + [v2] + ['x'] * 20 }) arr = pa.array(df['strings']) assert isinstance(arr, pa.ChunkedArray) assert arr.num_chunks == 2 arr = None table = pa.Table.from_pandas(df) assert table[0].data.num_chunks == 2 def test_fixed_size_bytes(self): values = [b'foo', None, bytearray(b'bar'), None, None, b'hey'] df = pd.DataFrame({'strings': values}) schema = pa.schema([pa.field('strings', pa.binary(3))]) table = pa.Table.from_pandas(df, schema=schema) assert table.schema[0].type == schema[0].type assert table.schema[0].name == schema[0].name result = table.to_pandas() tm.assert_frame_equal(result, df) def test_fixed_size_bytes_does_not_accept_varying_lengths(self): values = [b'foo', None, b'ba', None, None, b'hey'] df = pd.DataFrame({'strings': values}) schema = pa.schema([pa.field('strings', pa.binary(3))]) with pytest.raises(pa.ArrowInvalid): pa.Table.from_pandas(df, schema=schema) def test_variable_size_bytes(self): s = pd.Series([b'123', b'', b'a', None]) _check_series_roundtrip(s, type_=pa.binary()) def test_binary_from_bytearray(self): s = pd.Series([bytearray(b'123'), bytearray(b''), bytearray(b'a'), None]) _check_series_roundtrip(s, type_=pa.binary()) _check_series_roundtrip(s, expected_pa_type=pa.binary()) def test_table_empty_str(self): values = ['', '', '', '', ''] df = pd.DataFrame({'strings': values}) field = pa.field('strings', pa.string()) schema = pa.schema([field]) table = pa.Table.from_pandas(df, 
schema=schema) result1 = table.to_pandas(strings_to_categorical=False) expected1 = pd.DataFrame({'strings': values}) tm.assert_frame_equal(result1, expected1, check_dtype=True) result2 = table.to_pandas(strings_to_categorical=True) expected2 = pd.DataFrame({'strings': pd.Categorical(values)}) tm.assert_frame_equal(result2, expected2, check_dtype=True) def test_selective_categoricals(self): values = ['', '', '', '', ''] df = pd.DataFrame({'strings': values}) field = pa.field('strings', pa.string()) schema = pa.schema([field]) table = pa.Table.from_pandas(df, schema=schema) expected_str = pd.DataFrame({'strings': values}) expected_cat = pd.DataFrame({'strings': pd.Categorical(values)}) result1 = table.to_pandas(categories=['strings']) tm.assert_frame_equal(result1, expected_cat, check_dtype=True) result2 = table.to_pandas(categories=[]) tm.assert_frame_equal(result2, expected_str, check_dtype=True) result3 = table.to_pandas(categories=('strings',)) tm.assert_frame_equal(result3, expected_cat, check_dtype=True) result4 = table.to_pandas(categories=tuple()) tm.assert_frame_equal(result4, expected_str, check_dtype=True) def test_table_str_to_categorical_without_na(self): values = ['a', 'a', 'b', 'b', 'c'] df = pd.DataFrame({'strings': values}) field = pa.field('strings', pa.string()) schema = pa.schema([field]) table = pa.Table.from_pandas(df, schema=schema) result = table.to_pandas(strings_to_categorical=True) expected = pd.DataFrame({'strings': pd.Categorical(values)}) tm.assert_frame_equal(result, expected, check_dtype=True) with pytest.raises(pa.ArrowInvalid): table.to_pandas(strings_to_categorical=True, zero_copy_only=True) def test_table_str_to_categorical_with_na(self): values = [None, 'a', 'b', np.nan] df = pd.DataFrame({'strings': values}) field = pa.field('strings', pa.string()) schema = pa.schema([field]) table = pa.Table.from_pandas(df, schema=schema) result = table.to_pandas(strings_to_categorical=True) expected = pd.DataFrame({'strings': 
pd.Categorical(values)}) tm.assert_frame_equal(result, expected, check_dtype=True) with pytest.raises(pa.ArrowInvalid): table.to_pandas(strings_to_categorical=True, zero_copy_only=True) def test_array_of_bytes_to_strings(self): converted = pa.array(np.array([b'x'], dtype=object), pa.string()) assert converted.type == pa.string() def test_array_of_bytes_to_strings_bad_data(self): with pytest.raises( pa.lib.ArrowInvalid, match=("'(utf8|utf-8)' codec can't decode byte 0x80 " "in position 0: invalid start byte")): pa.array(np.array([b'\x80\x81'], dtype=object), pa.string()) def test_numpy_string_array_to_fixed_size_binary(self): arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3') converted = pa.array(arr, type=pa.binary(3)) expected = pa.array(list(arr), type=pa.binary(3)) assert converted.equals(expected) mask = np.array([True, False, True]) converted = pa.array(arr, type=pa.binary(3), mask=mask) expected = pa.array([b'foo', None, b'baz'], type=pa.binary(3)) assert converted.equals(expected) with pytest.raises(pa.lib.ArrowInvalid, match='Got bytestring of length 3 \(expected 4\)'): arr = np.array([b'foo', b'bar', b'baz'], dtype='|S3') pa.array(arr, type=pa.binary(4)) with pytest.raises(pa.lib.ArrowInvalid, match='Got bytestring of length 12 \(expected 3\)'): arr = np.array([b'foo', b'bar', b'baz'], dtype='|U3') pa.array(arr, type=pa.binary(3)) class TestConvertDecimalTypes(object): decimal32 = [ decimal.Decimal('-1234.123'), decimal.Decimal('1234.439') ] decimal64 = [ decimal.Decimal('-129934.123331'), decimal.Decimal('129534.123731') ] decimal128 = [ decimal.Decimal('394092382910493.12341234678'), decimal.Decimal('-314292388910493.12343437128') ] @pytest.mark.parametrize(('values', 'expected_type'), [ pytest.param(decimal32, pa.decimal128(7, 3), id='decimal32'), pytest.param(decimal64, pa.decimal128(12, 6), id='decimal64'), pytest.param(decimal128, pa.decimal128(26, 11), id='decimal128') ]) def test_decimal_from_pandas(self, values, expected_type): expected = 
pd.DataFrame({'decimals': values}) table = pa.Table.from_pandas(expected, preserve_index=False) field = pa.field('decimals', expected_type) # schema's metadata is generated by from_pandas conversion expected_schema = pa.schema([field], metadata=table.schema.metadata) assert table.schema.equals(expected_schema) @pytest.mark.parametrize('values', [ pytest.param(decimal32, id='decimal32'), pytest.param(decimal64, id='decimal64'), pytest.param(decimal128, id='decimal128') ]) def test_decimal_to_pandas(self, values): expected = pd.DataFrame({'decimals': values}) converted = pa.Table.from_pandas(expected) df = converted.to_pandas() tm.assert_frame_equal(df, expected) def test_decimal_fails_with_truncation(self): data1 = [decimal.Decimal('1.234')] type1 = pa.decimal128(10, 2) with pytest.raises(pa.ArrowInvalid): pa.array(data1, type=type1) data2 = [decimal.Decimal('1.2345')] type2 = pa.decimal128(10, 3) with pytest.raises(pa.ArrowInvalid): pa.array(data2, type=type2) def test_decimal_with_different_precisions(self): data = [ decimal.Decimal('0.01'), decimal.Decimal('0.001'), ] series = pd.Series(data) array = pa.array(series) assert array.to_pylist() == data assert array.type == pa.decimal128(3, 3) array = pa.array(data, type=pa.decimal128(12, 5)) expected = [decimal.Decimal('0.01000'), decimal.Decimal('0.00100')] assert array.to_pylist() == expected def test_decimal_with_None_explicit_type(self): series = pd.Series([decimal.Decimal('3.14'), None]) _check_series_roundtrip(series, type_=pa.decimal128(12, 5)) series = pd.Series([None] * 2) _check_series_roundtrip(series, type_=pa.decimal128(12, 5)) def test_decimal_with_None_infer_type(self): series = pd.Series([decimal.Decimal('3.14'), None]) _check_series_roundtrip(series, expected_pa_type=pa.decimal128(3, 2)) class TestListTypes(object): def test_column_of_arrays(self): df, schema = dataframe_with_arrays() _check_pandas_roundtrip(df, schema=schema, expected_schema=schema) table = pa.Table.from_pandas(df, schema=schema, 
preserve_index=False) expected_schema = schema.add_metadata(table.schema.metadata) assert table.schema.equals(expected_schema) for column in df.columns: field = schema.field_by_name(column) _check_array_roundtrip(df[column], type=field.type) def test_column_of_arrays_to_py(self): # Test regression in ARROW-1199 not caught in above test dtype = 'i1' arr = np.array([ np.arange(10, dtype=dtype), np.arange(5, dtype=dtype), None, np.arange(1, dtype=dtype) ]) type_ = pa.list_(pa.int8()) parr = pa.array(arr, type=type_) assert parr[0].as_py() == list(range(10)) assert parr[1].as_py() == list(range(5)) assert parr[2].as_py() is None assert parr[3].as_py() == [0] def test_column_of_lists(self): df, schema = dataframe_with_lists() _check_pandas_roundtrip(df, schema=schema, expected_schema=schema) table = pa.Table.from_pandas(df, schema=schema, preserve_index=False) # schema's metadata is generated by from_pandas conversion expected_schema = schema.add_metadata(table.schema.metadata) assert table.schema.equals(expected_schema) for column in df.columns: field = schema.field_by_name(column) _check_array_roundtrip(df[column], type=field.type) def test_column_of_lists_first_empty(self): num_lists = [[], [2, 3, 4], [3, 6, 7, 8], [], [2]] series = pd.Series([np.array(s, dtype=float) for s in num_lists]) arr = pa.array(series) result = pd.Series(arr.to_pandas()) tm.assert_series_equal(result, series) def test_column_of_lists_chunked(self): df = pd.DataFrame({ 'lists': np.array([ [1, 2], None, [2, 3], [4, 5], [6, 7], [8, 9] ], dtype=object) }) schema = pa.schema([ pa.field('lists', pa.list_(pa.int64())) ]) t1 = pa.Table.from_pandas(df[:2], schema=schema) t2 = pa.Table.from_pandas(df[2:], schema=schema) table = pa.concat_tables([t1, t2]) result = table.to_pandas() tm.assert_frame_equal(result, df) def test_column_of_lists_chunked2(self): data1 = [[0, 1], [2, 3], [4, 5], [6, 7], [10, 11], [12, 13], [14, 15], [16, 17]] data2 = [[8, 9], [18, 19]] a1 = pa.array(data1) a2 = pa.array(data2) 
t1 = pa.Table.from_arrays([a1], names=['a']) t2 = pa.Table.from_arrays([a2], names=['a']) concatenated = pa.concat_tables([t1, t2]) result = concatenated.to_pandas() expected = pd.DataFrame({'a': data1 + data2}) tm.assert_frame_equal(result, expected) def test_column_of_lists_strided(self): df, schema = dataframe_with_lists() df = pd.concat([df] * 6, ignore_index=True) arr = df['int64'].values[::3] assert arr.strides[0] != 8 _check_array_roundtrip(arr) def test_nested_lists_all_none(self): data = np.array([[None, None], None], dtype=object) arr = pa.array(data) expected = pa.array(list(data)) assert arr.equals(expected) assert arr.type == pa.list_(pa.null()) data2 = np.array([None, None, [None, None], np.array([None, None], dtype=object)], dtype=object) arr = pa.array(data2) expected = pa.array([None, None, [None, None], [None, None]]) assert arr.equals(expected) def test_nested_lists_all_empty(self): data = pd.Series([[], [], []]) arr = pa.array(data) expected = pa.array(list(data)) assert arr.equals(expected) assert arr.type == pa.list_(pa.null()) def test_nested_smaller_ints(self): data = pd.Series([np.array([1, 2, 3], dtype='i1'), None]) result = pa.array(data) result2 = pa.array(data.values) expected = pa.array([[1, 2, 3], None], type=pa.list_(pa.int8())) assert result.equals(expected) assert result2.equals(expected) data3 = pd.Series([np.array([1, 2, 3], dtype='f4'), None]) result3 = pa.array(data3) expected3 = pa.array([[1, 2, 3], None], type=pa.list_(pa.float32())) assert result3.equals(expected3) def test_infer_lists(self): data = OrderedDict([ ('nan_ints', [[None, 1], [2, 3]]), ('ints', [[0, 1], [2, 3]]), ('strs', [[None, u'b'], [u'c', u'd']]), ('nested_strs', [[[None, u'b'], [u'c', u'd']], None]) ]) df = pd.DataFrame(data) expected_schema = pa.schema([ pa.field('nan_ints', pa.list_(pa.int64())), pa.field('ints', pa.list_(pa.int64())), pa.field('strs', pa.list_(pa.string())), pa.field('nested_strs', pa.list_(pa.list_(pa.string()))) ]) 
_check_pandas_roundtrip(df, expected_schema=expected_schema) def test_infer_numpy_array(self): data = OrderedDict([ ('ints', [ np.array([0, 1], dtype=np.int64), np.array([2, 3], dtype=np.int64) ]) ]) df = pd.DataFrame(data) expected_schema = pa.schema([ pa.field('ints', pa.list_(pa.int64())) ]) _check_pandas_roundtrip(df, expected_schema=expected_schema) @pytest.mark.parametrize('t,data,expected', [ ( pa.int64, [[1, 2], [3], None], [None, [3], None] ), ( pa.string, [[u'aaa', u'bb'], [u'c'], None], [None, [u'c'], None] ), ( pa.null, [[None, None], [None], None], [None, [None], None] ) ]) def test_array_from_pandas_typed_array_with_mask(self, t, data, expected): m = np.array([True, False, True]) s = pd.Series(data) result = pa.Array.from_pandas(s, mask=m, type=pa.list_(t())) assert pa.Array.from_pandas(expected, type=pa.list_(t())).equals(result) def test_empty_list_roundtrip(self): empty_list_array = np.empty((3,), dtype=object) empty_list_array.fill([]) df = pd.DataFrame({'a': np.array(['1', '2', '3']), 'b': empty_list_array}) tbl = pa.Table.from_pandas(df) result = tbl.to_pandas() tm.assert_frame_equal(result, df) def test_array_from_nested_arrays(self): df, schema = dataframe_with_arrays() for field in schema: arr = df[field.name].values expected = pa.array(list(arr), type=field.type) result = pa.array(arr) assert result.type == field.type assert result.equals(expected) class TestConvertStructTypes(object): def test_to_pandas(self): ints = pa.array([None, 2, 3], type=pa.int64()) strs = pa.array([u'a', None, u'c'], type=pa.string()) bools = pa.array([True, False, None], type=pa.bool_()) arr = pa.StructArray.from_arrays( [ints, strs, bools], ['ints', 'strs', 'bools']) expected = pd.Series([ {'ints': None, 'strs': u'a', 'bools': True}, {'ints': 2, 'strs': None, 'bools': False}, {'ints': 3, 'strs': u'c', 'bools': None}, ]) series = pd.Series(arr.to_pandas()) tm.assert_series_equal(series, expected) def test_from_numpy(self): dt = np.dtype([('x', np.int32), 
(('y_title', 'y'), np.bool_)]) ty = pa.struct([pa.field('x', pa.int32()), pa.field('y', pa.bool_())]) data = np.array([], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [] data = np.array([(42, True), (43, False)], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [{'x': 42, 'y': True}, {'x': 43, 'y': False}] arr = pa.array(data, mask=np.bool_([False, True]), type=ty) assert arr.to_pylist() == [{'x': 42, 'y': True}, None] dt = np.dtype([]) ty = pa.struct([]) data = np.array([], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [] data = np.array([(), ()], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [{}, {}] def test_from_numpy_nested(self): dt = np.dtype([('x', np.dtype([('xx', np.int8), ('yy', np.bool_)])), ('y', np.int16)]) ty = pa.struct([pa.field('x', pa.struct([pa.field('xx', pa.int8()), pa.field('yy', pa.bool_())])), pa.field('y', pa.int16())]) data = np.array([], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [] data = np.array([((1, True), 2), ((3, False), 4)], dtype=dt) arr = pa.array(data, type=ty) assert arr.to_pylist() == [{'x': {'xx': 1, 'yy': True}, 'y': 2}, {'x': {'xx': 3, 'yy': False}, 'y': 4}] @pytest.mark.large_memory def test_from_numpy_large(self): target_size = 3 * 1024**3 dt = np.dtype([('x', np.float64), ('y', 'object')]) bs = 65536 - dt.itemsize block = b'.' 
* bs n = target_size // (bs + dt.itemsize) data = np.zeros(n, dtype=dt) data['x'] = np.random.random_sample(n) data['y'] = block data['x'][data['x'] < 0.2] = np.nan ty = pa.struct([pa.field('x', pa.float64()), pa.field('y', pa.binary(bs))]) arr = pa.array(data, type=ty, from_pandas=True) assert arr.num_chunks == 2 def iter_chunked_array(arr): for chunk in arr.iterchunks(): for item in chunk: yield item def check(arr, data, mask=None): assert len(arr) == len(data) xs = data['x'] ys = data['y'] for i, obj in enumerate(iter_chunked_array(arr)): try: d = obj.as_py() if mask is not None and mask[i]: assert d is None else: x = xs[i] if np.isnan(x): assert d['x'] is None else: assert d['x'] == x assert d['y'] == ys[i] except Exception: print("Failed at index", i) raise check(arr, data) del arr mask = np.random.random_sample(n) < 0.2 arr = pa.array(data, type=ty, mask=mask, from_pandas=True) assert arr.num_chunks == 2 check(arr, data, mask) del arr def test_from_numpy_bad_input(self): ty = pa.struct([pa.field('x', pa.int32()), pa.field('y', pa.bool_())]) dt = np.dtype([('x', np.int32), ('z', np.bool_)]) data = np.array([], dtype=dt) with pytest.raises(TypeError, match="Missing field 'y'"): pa.array(data, type=ty) data = np.int32([]) with pytest.raises(TypeError, match="Expected struct array"): pa.array(data, type=ty) class TestZeroCopyConversion(object): def test_zero_copy_success(self): result = pa.array([0, 1, 2]).to_pandas(zero_copy_only=True) npt.assert_array_equal(result, [0, 1, 2]) def test_zero_copy_dictionaries(self): arr = pa.DictionaryArray.from_arrays( np.array([0, 0]), np.array([5])) result = arr.to_pandas(zero_copy_only=True) values = pd.Categorical([5, 5]) tm.assert_series_equal(pd.Series(result), pd.Series(values), check_names=False) def check_zero_copy_failure(self, arr): with pytest.raises(pa.ArrowInvalid): arr.to_pandas(zero_copy_only=True) def test_zero_copy_failure_on_object_types(self): self.check_zero_copy_failure(pa.array(['A', 'B', 'C'])) def 
test_zero_copy_failure_with_int_when_nulls(self): self.check_zero_copy_failure(pa.array([0, 1, None])) def test_zero_copy_failure_with_float_when_nulls(self): self.check_zero_copy_failure(pa.array([0.0, 1.0, None])) def test_zero_copy_failure_on_bool_types(self): self.check_zero_copy_failure(pa.array([True, False])) def test_zero_copy_failure_on_list_types(self): arr = pa.array([[1, 2], [8, 9]], type=pa.list_(pa.int64())) self.check_zero_copy_failure(arr) def test_zero_copy_failure_on_timestamp_types(self): arr = np.array(['2007-07-13'], dtype='datetime64[ns]') self.check_zero_copy_failure(pa.array(arr)) class TestConvertMisc(object): type_pairs = [ (np.int8, pa.int8()), (np.int16, pa.int16()), (np.int32, pa.int32()), (np.int64, pa.int64()), (np.uint8, pa.uint8()), (np.uint16, pa.uint16()), (np.uint32, pa.uint32()), (np.uint64, pa.uint64()), (np.float16, pa.float16()), (np.float32, pa.float32()), (np.float64, pa.float64()), (np.object, pa.string()), (np.object, pa.binary()), (np.object, pa.binary(10)), (np.object, pa.list_(pa.int64())), ] def test_all_none_objects(self): df = pd.DataFrame({'a': [None, None, None]}) _check_pandas_roundtrip(df) def test_all_none_category(self): df = pd.DataFrame({'a': [None, None, None]}) df['a'] = df['a'].astype('category') _check_pandas_roundtrip(df) def test_empty_arrays(self): for dtype, pa_type in self.type_pairs: arr = np.array([], dtype=dtype) _check_array_roundtrip(arr, type=pa_type) def test_threaded_conversion(self): df = _alltypes_example() _check_pandas_roundtrip(df, use_threads=True) _check_pandas_roundtrip(df, use_threads=True, as_batch=True) def test_category(self): repeats = 5 v1 = ['foo', None, 'bar', 'qux', np.nan] v2 = [4, 5, 6, 7, 8] v3 = [b'foo', None, b'bar', b'qux', np.nan] df = pd.DataFrame({'cat_strings': pd.Categorical(v1 * repeats), 'cat_ints': pd.Categorical(v2 * repeats), 'cat_binary': pd.Categorical(v3 * repeats), 'cat_strings_ordered': pd.Categorical( v1 * repeats, categories=['bar', 'qux', 'foo'], 
ordered=True), 'ints': v2 * repeats, 'ints2': v2 * repeats, 'strings': v1 * repeats, 'strings2': v1 * repeats, 'strings3': v3 * repeats}) _check_pandas_roundtrip(df) arrays = [ pd.Categorical(v1 * repeats), pd.Categorical(v2 * repeats), pd.Categorical(v3 * repeats) ] for values in arrays: _check_array_roundtrip(values) def test_empty_category(self): df = pd.DataFrame({'cat': pd.Categorical([])}) _check_pandas_roundtrip(df) def test_mixed_types_fails(self): data = pd.DataFrame({'a': ['a', 1, 2.0]}) with pytest.raises(pa.ArrowTypeError): pa.Table.from_pandas(data) data = pd.DataFrame({'a': [1, True]}) with pytest.raises(pa.ArrowTypeError): pa.Table.from_pandas(data) def test_strided_data_import(self): cases = [] columns = ['a', 'b', 'c'] N, K = 100, 3 random_numbers = np.random.randn(N, K).copy() * 100 numeric_dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8', 'f4', 'f8'] for type_name in numeric_dtypes: cases.append(random_numbers.astype(type_name)) cases.append(np.array([tm.rands(10) for i in range(N * K)], dtype=object) .reshape(N, K).copy()) boolean_objects = (np.array([True, False, True] * N, dtype=object) .reshape(N, K).copy()) boolean_objects[5] = None cases.append(boolean_objects) cases.append(np.arange("2016-01-01T00:00:00.001", N * K, dtype='datetime64[ms]') .reshape(N, K).copy()) strided_mask = (random_numbers > 0).astype(bool)[:, 0] for case in cases: df = pd.DataFrame(case, columns=columns) col = df['a'] _check_pandas_roundtrip(df) _check_array_roundtrip(col) _check_array_roundtrip(col, mask=strided_mask) def test_all_nones(self): def _check_series(s): converted = pa.array(s) assert isinstance(converted, pa.NullArray) assert len(converted) == 3 assert converted.null_count == 3 assert converted[0] is pa.NA _check_series(pd.Series([None] * 3, dtype=object)) _check_series(pd.Series([np.nan] * 3, dtype=object)) _check_series(pd.Series([np.sqrt(-1)] * 3, dtype=object)) def test_partial_schema(self): data = OrderedDict([ ('a', [0, 1, 2, 3, 4]), ('b', 
np.array([-10, -5, 0, 5, 10], dtype=np.int32)), ('c', [-10, -5, 0, 5, 10]) ]) df = pd.DataFrame(data) partial_schema = pa.schema([ pa.field('a', pa.int64()), pa.field('b', pa.int32()) ]) expected_schema = pa.schema([ pa.field('a', pa.int64()), pa.field('b', pa.int32()), pa.field('c', pa.int64()) ]) _check_pandas_roundtrip(df, schema=partial_schema, expected_schema=expected_schema) def test_table_batch_empty_dataframe(self): df = pd.DataFrame({}) _check_pandas_roundtrip(df) _check_pandas_roundtrip(df, as_batch=True) df2 = pd.DataFrame({}, index=[0, 1, 2]) _check_pandas_roundtrip(df2, preserve_index=True) _check_pandas_roundtrip(df2, as_batch=True, preserve_index=True) def test_convert_empty_table(self): arr = pa.array([], type=pa.int64()) tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=np.int64)) arr = pa.array([], type=pa.string()) tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object)) arr = pa.array([], type=pa.list_(pa.int64())) tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object)) arr = pa.array([], type=pa.struct([pa.field('a', pa.int64())])) tm.assert_almost_equal(arr.to_pandas(), np.array([], dtype=object)) def test_non_natural_stride(self): dtype = np.dtype([('x', np.int32), ('y', np.int16)]) data = np.array([(42, -1), (-43, 2)], dtype=dtype) assert data.strides == (6,) arr = pa.array(data['x'], type=pa.int32()) assert arr.to_pylist() == [42, -43] arr = pa.array(data['y'], type=pa.int16()) assert arr.to_pylist() == [-1, 2] def test_mixed_integer_columns(self): row = [[], []] df = pd.DataFrame(data=[row], columns=['foo', 123]) expected_df = pd.DataFrame(data=[row], columns=['foo', '123']) _check_pandas_roundtrip(df, expected=expected_df, preserve_index=True) def _fully_loaded_dataframe_example(): from distutils.version import LooseVersion index = pd.MultiIndex.from_arrays([ pd.date_range('2000-01-01', periods=5).repeat(2), np.tile(np.array(['foo', 'bar'], dtype=object), 5) ]) c1 = pd.date_range('2000-01-01', periods=10) 
data = { 0: c1, 1: c1.tz_localize('utc'), 2: c1.tz_localize('US/Eastern'), 3: c1[::2].tz_localize('utc').repeat(2).astype('category'), 4: ['foo', 'bar'] * 5, 5: pd.Series(['foo', 'bar'] * 5).astype('category').values, 6: [True, False] * 5, 7: np.random.randn(10), 8: np.random.randint(0, 100, size=10), 9: pd.period_range('2013', periods=10, freq='M') } if LooseVersion(pd.__version__) >= '0.21': data[10] = pd.interval_range(start=1, freq=1, periods=10) return pd.DataFrame(data, index=index) @pytest.mark.parametrize('columns', ([b'foo'], ['foo'])) def test_roundtrip_with_bytes_unicode(columns): df = pd.DataFrame(columns=columns) table1 = pa.Table.from_pandas(df) table2 = pa.Table.from_pandas(table1.to_pandas()) assert table1.equals(table2) assert table1.schema.equals(table2.schema) assert table1.schema.metadata == table2.schema.metadata def _check_serialize_components_roundtrip(df): ctx = pa.default_serialization_context() components = ctx.serialize(df).to_components() deserialized = ctx.deserialize_components(components) tm.assert_frame_equal(df, deserialized) def test_serialize_deserialize_pandas(): df = _fully_loaded_dataframe_example() _check_serialize_components_roundtrip(df) def _pytime_from_micros(val): microseconds = val % 1000000 val //= 1000000 seconds = val % 60 val //= 60 minutes = val % 60 hours = val // 60 return time(hours, minutes, seconds, microseconds) def _pytime_to_micros(pytime): return (pytime.hour * 3600000000 + pytime.minute * 60000000 + pytime.second * 1000000 + pytime.microsecond)
true
true
f7136a2f149441e3ee72a2d51d0cc65681f3bad7
119
py
Python
tests/integration/ui_write/__init__.py
VBaratham/pynwb
a9429c93f29763b9ebe9022b099afcffbc6be493
[ "BSD-3-Clause-LBNL" ]
1
2021-04-13T20:47:36.000Z
2021-04-13T20:47:36.000Z
tests/integration/ui_write/__init__.py
VBaratham/pynwb
a9429c93f29763b9ebe9022b099afcffbc6be493
[ "BSD-3-Clause-LBNL" ]
1
2021-06-01T22:25:36.000Z
2021-06-01T22:25:36.000Z
tests/integration/ui_write/__init__.py
VBaratham/pynwb
a9429c93f29763b9ebe9022b099afcffbc6be493
[ "BSD-3-Clause-LBNL" ]
null
null
null
# flake8: noqa: F401 from . import base from . import test_base from . import test_ecephys from . import test_nwbfile
17
26
0.764706
from . import base from . import test_base from . import test_ecephys from . import test_nwbfile
true
true
f7136a2f16131f008e9144f7ec2886f64d0e53f6
5,882
py
Python
Model/EngHindiDataPreprocess/eng_hin_vocab_creator.py
porcelainruler/Language-Translation
31b5a0489aa243cf4608d7bcf795f136ab97021b
[ "Apache-2.0" ]
null
null
null
Model/EngHindiDataPreprocess/eng_hin_vocab_creator.py
porcelainruler/Language-Translation
31b5a0489aa243cf4608d7bcf795f136ab97021b
[ "Apache-2.0" ]
null
null
null
Model/EngHindiDataPreprocess/eng_hin_vocab_creator.py
porcelainruler/Language-Translation
31b5a0489aa243cf4608d7bcf795f136ab97021b
[ "Apache-2.0" ]
1
2021-02-09T08:04:51.000Z
2021-02-09T08:04:51.000Z
import codecs import math from Model.EngHindiDataPreprocess import config, EnglishTokenizer as ENG_Tok, HindiTokenizer as HIN_TOK, \ IndicTokenizer as IND_TOK from nltk import word_tokenize from Model.EngHindiDataPreprocess import config def load_data_sp(path): with codecs.open(path, encoding='utf-8') as f: data = f.read().split('\n') print('Num of Lines in Data:', len(data)) return data def tokenizer(data: list, flag: bool = True, max_length: int = math.inf): assert type(data) == list, 'Raw Data should be in list data type' print(max_length) token_list = list() for line in data: if flag: tokens = word_tokenize(line) else: tokens = ['<sos>'] + line.split(' ') + ['<eos>'] token_list.append(tokens) if len(token_list) > max_length: break return token_list # Vocab Code for Hindi hin_vocab_intToText = dict() hin_vocab_textToInt = dict() # Vocab Code for English eng_vocab_intToText = dict() eng_vocab_textToInt = dict() # Setting up some Special Token value for Hindi Vocab hin_vocab_textToInt[config.SOS_TOKEN] = config.SOS_TOKEN_IDX hin_vocab_textToInt[config.EOS_TOKEN] = config.EOS_TOKEN_IDX hin_vocab_textToInt[config.PAD_TOKEN] = config.PAD_TOKEN_IDX hin_vocab_textToInt[config.UNK_TOKEN] = config.UNK_TOKEN_IDX hin_vocab_intToText[config.SOS_TOKEN_IDX] = config.SOS_TOKEN hin_vocab_intToText[config.EOS_TOKEN_IDX] = config.EOS_TOKEN hin_vocab_intToText[config.PAD_TOKEN_IDX] = config.PAD_TOKEN hin_vocab_intToText[config.UNK_TOKEN_IDX] = config.UNK_TOKEN # Setting up some Special Token value for Hindi Vocab eng_vocab_textToInt[config.SOS_TOKEN] = config.SOS_TOKEN_IDX eng_vocab_textToInt[config.EOS_TOKEN] = config.EOS_TOKEN_IDX eng_vocab_textToInt[config.PAD_TOKEN] = config.PAD_TOKEN_IDX eng_vocab_textToInt[config.UNK_TOKEN] = config.UNK_TOKEN_IDX eng_vocab_intToText[config.SOS_TOKEN_IDX] = config.SOS_TOKEN eng_vocab_intToText[config.EOS_TOKEN_IDX] = config.EOS_TOKEN eng_vocab_intToText[config.PAD_TOKEN_IDX] = config.PAD_TOKEN eng_vocab_intToText[config.UNK_TOKEN_IDX] = 
config.UNK_TOKEN count_eng = 4 count_hin = 4 def create_hindi_vocab(data: list): global count_hin for arr in data: for token in arr: if token in hin_vocab_textToInt.keys(): continue else: hin_vocab_textToInt[token] = count_hin hin_vocab_intToText[count_hin] = token count_hin += 1 return hin_vocab_textToInt, hin_vocab_intToText def create_eng_vocab(data: list): global count_eng for arr in data: for token in arr: if token in eng_vocab_textToInt.keys(): continue else: eng_vocab_textToInt[token] = count_eng eng_vocab_intToText[count_eng] = token count_eng += 1 return eng_vocab_textToInt, eng_vocab_intToText def convert_seq_to_int(data: list, flag: bool): arr = list() for line in data: tok_line = list() for token in line: if flag: if token in eng_vocab_textToInt.keys(): tok_line.append(eng_vocab_textToInt[token]) else: tok_line.append(eng_vocab_textToInt['<unk>']) else: if token in hin_vocab_textToInt.keys(): tok_line.append(hin_vocab_textToInt[token]) else: tok_line.append(hin_vocab_textToInt['<unk>']) arr.append(tok_line) return arr print('Vocab Creation in Progress...') # English Dataset Tokenization and Vocab Creation ENG_TOKENIZER = ENG_Tok.EnglishTokenizer() eng_read = ENG_TOKENIZER.read_from_file(path='mlc_train.hi-en.en') ENG_TOKENS = ENG_TOKENIZER.tokenize() create_eng_vocab(ENG_TOKENS) # Hindi Dataset Tokenization and Vocab Creation hin_read = IND_TOK.get_sentences(filepath='mlc_train.hi-en.hi') HIN_TOKENS = IND_TOK.get_token(filepath='mlc_train.hi-en.hi') create_hindi_vocab(HIN_TOKENS) # Printing Vocab Size # print('English Vocab Size:', count_eng, 'Hindi Vocab Size:', count_hin) print('----------------------Vocab Creation Done for Both----------------------') def vocab_creator(vocab_dict: dict, flag: bool): if flag: out = codecs.open('hindi_vocab.txt', encoding='utf-8', mode='w') else: out = codecs.open('english_vocab.txt', encoding='utf-8', mode='w') for key in vocab_dict.keys(): out.write(f'{key}_:_{vocab_dict[key]}') out.write('\n') out.close() # Vocab 
txt File Creation for both English and Hindi <For Vocab Creation in txt> # vocab_creator(eng_vocab_textToInt, flag=False) # vocab_creator(hin_vocab_textToInt, flag=True) # Vocab Checker: # print('English Vocab:', eng_vocab_textToInt) # print('Hindi Vocab:', hin_vocab_textToInt) print('Data Conversion to Integer in Progress...') max_length = -math.inf def max_length_updator(seq: list): global max_length for sent in seq: if len(sent) > max_length: max_length = len(sent) def padding_seq(seq: list): global max_length new_seq = list() for idx in range(len(seq)): padding = [config.PAD_TOKEN_IDX]*int(max_length - len(seq[idx])) new_seq.append(seq[idx] + padding) return new_seq # Sequence Tokens Convert to Integer Form ENG_DATA = convert_seq_to_int(ENG_TOKENS, flag=True) HIN_DATA = convert_seq_to_int(HIN_TOKENS, flag=False) # Updating Max-Length for Dataset Padding max_length_updator(ENG_DATA) max_length_updator(HIN_DATA) # Adding Padding to Dataset ENG_DATA_PADDED = padding_seq(ENG_DATA) HIN_DATA_PADDED = padding_seq(HIN_DATA) print('Data Conversion to Integer Done...') # Check for Correct Tokenization # print(ENG_DATA[:20]) # print(HIN_DATA[:20])
29.118812
111
0.690581
import codecs import math from Model.EngHindiDataPreprocess import config, EnglishTokenizer as ENG_Tok, HindiTokenizer as HIN_TOK, \ IndicTokenizer as IND_TOK from nltk import word_tokenize from Model.EngHindiDataPreprocess import config def load_data_sp(path): with codecs.open(path, encoding='utf-8') as f: data = f.read().split('\n') print('Num of Lines in Data:', len(data)) return data def tokenizer(data: list, flag: bool = True, max_length: int = math.inf): assert type(data) == list, 'Raw Data should be in list data type' print(max_length) token_list = list() for line in data: if flag: tokens = word_tokenize(line) else: tokens = ['<sos>'] + line.split(' ') + ['<eos>'] token_list.append(tokens) if len(token_list) > max_length: break return token_list hin_vocab_intToText = dict() hin_vocab_textToInt = dict() eng_vocab_intToText = dict() eng_vocab_textToInt = dict() hin_vocab_textToInt[config.SOS_TOKEN] = config.SOS_TOKEN_IDX hin_vocab_textToInt[config.EOS_TOKEN] = config.EOS_TOKEN_IDX hin_vocab_textToInt[config.PAD_TOKEN] = config.PAD_TOKEN_IDX hin_vocab_textToInt[config.UNK_TOKEN] = config.UNK_TOKEN_IDX hin_vocab_intToText[config.SOS_TOKEN_IDX] = config.SOS_TOKEN hin_vocab_intToText[config.EOS_TOKEN_IDX] = config.EOS_TOKEN hin_vocab_intToText[config.PAD_TOKEN_IDX] = config.PAD_TOKEN hin_vocab_intToText[config.UNK_TOKEN_IDX] = config.UNK_TOKEN eng_vocab_textToInt[config.SOS_TOKEN] = config.SOS_TOKEN_IDX eng_vocab_textToInt[config.EOS_TOKEN] = config.EOS_TOKEN_IDX eng_vocab_textToInt[config.PAD_TOKEN] = config.PAD_TOKEN_IDX eng_vocab_textToInt[config.UNK_TOKEN] = config.UNK_TOKEN_IDX eng_vocab_intToText[config.SOS_TOKEN_IDX] = config.SOS_TOKEN eng_vocab_intToText[config.EOS_TOKEN_IDX] = config.EOS_TOKEN eng_vocab_intToText[config.PAD_TOKEN_IDX] = config.PAD_TOKEN eng_vocab_intToText[config.UNK_TOKEN_IDX] = config.UNK_TOKEN count_eng = 4 count_hin = 4 def create_hindi_vocab(data: list): global count_hin for arr in data: for token in arr: if token in 
hin_vocab_textToInt.keys(): continue else: hin_vocab_textToInt[token] = count_hin hin_vocab_intToText[count_hin] = token count_hin += 1 return hin_vocab_textToInt, hin_vocab_intToText def create_eng_vocab(data: list): global count_eng for arr in data: for token in arr: if token in eng_vocab_textToInt.keys(): continue else: eng_vocab_textToInt[token] = count_eng eng_vocab_intToText[count_eng] = token count_eng += 1 return eng_vocab_textToInt, eng_vocab_intToText def convert_seq_to_int(data: list, flag: bool): arr = list() for line in data: tok_line = list() for token in line: if flag: if token in eng_vocab_textToInt.keys(): tok_line.append(eng_vocab_textToInt[token]) else: tok_line.append(eng_vocab_textToInt['<unk>']) else: if token in hin_vocab_textToInt.keys(): tok_line.append(hin_vocab_textToInt[token]) else: tok_line.append(hin_vocab_textToInt['<unk>']) arr.append(tok_line) return arr print('Vocab Creation in Progress...') ENG_TOKENIZER = ENG_Tok.EnglishTokenizer() eng_read = ENG_TOKENIZER.read_from_file(path='mlc_train.hi-en.en') ENG_TOKENS = ENG_TOKENIZER.tokenize() create_eng_vocab(ENG_TOKENS) hin_read = IND_TOK.get_sentences(filepath='mlc_train.hi-en.hi') HIN_TOKENS = IND_TOK.get_token(filepath='mlc_train.hi-en.hi') create_hindi_vocab(HIN_TOKENS) print('----------------------Vocab Creation Done for Both----------------------') def vocab_creator(vocab_dict: dict, flag: bool): if flag: out = codecs.open('hindi_vocab.txt', encoding='utf-8', mode='w') else: out = codecs.open('english_vocab.txt', encoding='utf-8', mode='w') for key in vocab_dict.keys(): out.write(f'{key}_:_{vocab_dict[key]}') out.write('\n') out.close() print('Data Conversion to Integer in Progress...') max_length = -math.inf def max_length_updator(seq: list): global max_length for sent in seq: if len(sent) > max_length: max_length = len(sent) def padding_seq(seq: list): global max_length new_seq = list() for idx in range(len(seq)): padding = [config.PAD_TOKEN_IDX]*int(max_length - len(seq[idx])) 
new_seq.append(seq[idx] + padding) return new_seq ENG_DATA = convert_seq_to_int(ENG_TOKENS, flag=True) HIN_DATA = convert_seq_to_int(HIN_TOKENS, flag=False) max_length_updator(ENG_DATA) max_length_updator(HIN_DATA) ENG_DATA_PADDED = padding_seq(ENG_DATA) HIN_DATA_PADDED = padding_seq(HIN_DATA) print('Data Conversion to Integer Done...')
true
true
f7136b3e36d166ad59bd5c55e8375b6b714e178f
3,050
py
Python
src/transformers/tokenization_distilbert.py
suliuzh/transformers
f34372a9ff99f6bc8619ac83dc07f7afe6b92141
[ "Apache-2.0" ]
12
2021-06-05T03:51:23.000Z
2022-03-05T05:09:41.000Z
src/transformers/tokenization_distilbert.py
suliuzh/transformers
f34372a9ff99f6bc8619ac83dc07f7afe6b92141
[ "Apache-2.0" ]
1
2021-10-20T02:25:36.000Z
2021-10-20T02:25:36.000Z
src/transformers/tokenization_distilbert.py
suliuzh/transformers
f34372a9ff99f6bc8619ac83dc07f7afe6b92141
[ "Apache-2.0" ]
2
2021-05-25T19:59:13.000Z
2022-02-28T18:11:12.000Z
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for DistilBERT.""" from .tokenization_bert import BertTokenizer from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", "distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", "distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", "distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", "distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-vocab.txt", "distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "distilbert-base-uncased": 512, "distilbert-base-uncased-distilled-squad": 512, "distilbert-base-cased": 512, "distilbert-base-cased-distilled-squad": 512, "distilbert-base-german-cased": 512, "distilbert-base-multilingual-cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": 
{"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class DistilBertTokenizer(BertTokenizer): r""" Construct a DistilBERT tokenizer. :class:`~transformers.DistilBertTokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION model_input_names = ["attention_mask"]
42.361111
139
0.74918
from .tokenization_bert import BertTokenizer from .utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", "distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", "distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", "distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", "distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-vocab.txt", "distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "distilbert-base-uncased": 512, "distilbert-base-uncased-distilled-squad": 512, "distilbert-base-cased": 512, "distilbert-base-cased-distilled-squad": 512, "distilbert-base-german-cased": 512, "distilbert-base-multilingual-cased": 512, } PRETRAINED_INIT_CONFIGURATION = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class DistilBertTokenizer(BertTokenizer): vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION model_input_names = ["attention_mask"]
true
true
f7136c1fb25ded35764b40b0c6d8e28b5747f6c4
436
py
Python
source/RPCConstants.py
oleguldberg/nvda
05f55ff146ef8ba481a2de4f1bcf187200474cea
[ "bzip2-1.0.6" ]
2
2022-03-26T13:52:41.000Z
2022-03-26T14:28:46.000Z
source/RPCConstants.py
oleguldberg/nvda
05f55ff146ef8ba481a2de4f1bcf187200474cea
[ "bzip2-1.0.6" ]
1
2022-02-17T20:51:08.000Z
2022-02-17T20:51:08.000Z
source/RPCConstants.py
oleguldberg/nvda
05f55ff146ef8ba481a2de4f1bcf187200474cea
[ "bzip2-1.0.6" ]
1
2017-08-04T09:00:01.000Z
2017-08-04T09:00:01.000Z
# A part of NonVisual Desktop Access (NVDA) # Copyright (C) 2009-2021 NV Access Limited # This file may be used under the terms of the GNU General Public License, version 2 or later. # For more details see: https://www.gnu.org/licenses/gpl-2.0.html import enum class RPC(enum.IntEnum): E_CALL_CANCELED = -2147418110 S_SERVER_UNAVAILABLE = 1722 S_CALL_FAILED_DNE = 1727 E_CALL_REJECTED = -2147418111 E_DISCONNECTED = -2147417848
29.066667
94
0.768349
import enum class RPC(enum.IntEnum): E_CALL_CANCELED = -2147418110 S_SERVER_UNAVAILABLE = 1722 S_CALL_FAILED_DNE = 1727 E_CALL_REJECTED = -2147418111 E_DISCONNECTED = -2147417848
true
true
f7136d35230742783dd27c357ea221db59c64d84
6,386
py
Python
src/lambda_timestream_backup.py
oimoralest/timestream_plugin
0e3a6dca50d7157faef4f28cce1c087fca30a08f
[ "MIT" ]
null
null
null
src/lambda_timestream_backup.py
oimoralest/timestream_plugin
0e3a6dca50d7157faef4f28cce1c087fca30a08f
[ "MIT" ]
null
null
null
src/lambda_timestream_backup.py
oimoralest/timestream_plugin
0e3a6dca50d7157faef4f28cce1c087fca30a08f
[ "MIT" ]
null
null
null
import boto3 from zipfile import ZipFile import json import csv from retention_times import memory_retention, magnetic_retention, table_name from math import ceil def read_s3(Session, event): """This method gets an object from an AWS S3 bucket (Backup previously made by the plugin AWS S3) and prepares the data stored in to be written in AWS Timestream Args: Session: boto3 Session that allows to create service clients and resources event: Information about the object set in the trigger Returns: the records to be stored and the name of the table to be create in timestream or None on error """ print('Reading s3') # Creates a s3 client s3 = Session.client('s3') # Get info from a new bucket object s3_bucket_object = event.get('Records')[0].get('s3').get('object') s3_bucket_name = event.get( 'Records')[0].get('s3').get('bucket').get('name') # Get the name of the zip File print('Bucket: ', s3_bucket_name) print('Object: ', s3_bucket_object.get('key')) for item in s3_bucket_object.get('key').split('/'): if '.zip' in item: zip_file = item # Download the file from the client's S3 bucket s3.download_file( s3_bucket_name, s3_bucket_object.get('key'), '/tmp/{}'.format(zip_file) ) # Data formatting for TimeStream # open file zip with ZipFile('/tmp/{}'.format(zip_file), 'r') as zip: records = [] # Go over each csv file for file_name in zip.namelist(): if '.csv' not in file_name: continue with zip.open(file_name, 'r', pwd=None) as csv_file: print('csv_file: ', csv_file) device_name = file_name.split('/')[1] variable_name = file_name.split('/')[2] # Each line needs to be decode into utf-8 lines = [line.decode('utf-8') for line in csv_file.readlines()] reader = csv.reader(lines) parsed_csv = list(reader) for row in parsed_csv[1:]: # Clean the context variable inside csv file context = json.loads(row[3][2:-1]) dimensions = [{ 'Name': 'device', 'Value': device_name }] # If context is not empty, it is added to the dimension if len(context) != 0: for key, value in context.items(): 
dimensions.append({ 'Name': key, 'Value': str(value) }) # Each line is stored as new timestream record records.append({ 'Dimensions': dimensions, 'MeasureName': variable_name, 'MeasureValue': str(row[2]), 'Time': row[0], }) # If the zip file is empty or no csv files were found if records is []: return None, None return records def write_timestream(Session, records, t_name): """This method write records on AWS timestream Args: Session: boto3 Session that allows to create service clients and resources records: data to be stored in AWS timestream t_name: table name to be created in AWS timestream inside ubidots_s3_backup Returns: Nothing """ print('Writing to timestream') print('Number of records:', len(records)) timestream = Session.client('timestream-write') # Creates the database try: print('Creating Database') timestream.create_database( DatabaseName='ubidots_s3_backup' ) # Checks if the database already exists except timestream.exceptions.ConflictException: print('Database already exists') pass # Creates the table try: print('Creating table') timestream.create_table( DatabaseName='ubidots_s3_backup', TableName=t_name, RetentionProperties={ 'MemoryStoreRetentionPeriodInHours': memory_retention, 'MagneticStoreRetentionPeriodInDays': magnetic_retention } ) # Checks if the table already exists except timestream.exceptions.ConflictException: print('Table already exists. 
Updating table properties') timestream.update_table( DatabaseName='ubidots_s3_backup', TableName=t_name, RetentionProperties={ 'MemoryStoreRetentionPeriodInHours': memory_retention, 'MagneticStoreRetentionPeriodInDays': magnetic_retention } ) # Write the records try: calls = ceil(len(records) / 100) print('Calls:', calls) for i in range(calls): timestream.write_records( DatabaseName='ubidots_s3_backup', TableName=t_name, Records=records[100 * i:100 * (i + 1)] ) # If an error occurs the error is printed as warning except IndexError: timestream.write_records( DatabaseName='ubidots_s3_backup', TableName=t_name, Records=records[100 * i:] ) except timestream.exceptions.RejectedRecordsException as err: print('Warning: Some records were rejected. See RejectedRecords for \ more information') print('RejectedRecords: ', err.response['RejectedRecords']) def lambda_handler(event, context): """This method is the handler for the AWS Lambda function Args: event: Information about the object set in the trigger context: LambdaContext Object Returns: Status code and the corresponding message """ Session = boto3.Session() records= read_s3(Session, event) if records is None: return { 'statusCode': 400, 'body': json.dumps('No records found!') } else: write_timestream(Session, records, table_name) return { 'statusCode': 200, 'body': json.dumps('Records written successfully!') }
35.876404
82
0.572346
import boto3 from zipfile import ZipFile import json import csv from retention_times import memory_retention, magnetic_retention, table_name from math import ceil def read_s3(Session, event): print('Reading s3') s3 = Session.client('s3') s3_bucket_object = event.get('Records')[0].get('s3').get('object') s3_bucket_name = event.get( 'Records')[0].get('s3').get('bucket').get('name') print('Bucket: ', s3_bucket_name) print('Object: ', s3_bucket_object.get('key')) for item in s3_bucket_object.get('key').split('/'): if '.zip' in item: zip_file = item s3.download_file( s3_bucket_name, s3_bucket_object.get('key'), '/tmp/{}'.format(zip_file) ) # Data formatting for TimeStream # open file zip with ZipFile('/tmp/{}'.format(zip_file), 'r') as zip: records = [] # Go over each csv file for file_name in zip.namelist(): if '.csv' not in file_name: continue with zip.open(file_name, 'r', pwd=None) as csv_file: print('csv_file: ', csv_file) device_name = file_name.split('/')[1] variable_name = file_name.split('/')[2] # Each line needs to be decode into utf-8 lines = [line.decode('utf-8') for line in csv_file.readlines()] reader = csv.reader(lines) parsed_csv = list(reader) for row in parsed_csv[1:]: # Clean the context variable inside csv file context = json.loads(row[3][2:-1]) dimensions = [{ 'Name': 'device', 'Value': device_name }] # If context is not empty, it is added to the dimension if len(context) != 0: for key, value in context.items(): dimensions.append({ 'Name': key, 'Value': str(value) }) # Each line is stored as new timestream record records.append({ 'Dimensions': dimensions, 'MeasureName': variable_name, 'MeasureValue': str(row[2]), 'Time': row[0], }) # If the zip file is empty or no csv files were found if records is []: return None, None return records def write_timestream(Session, records, t_name): print('Writing to timestream') print('Number of records:', len(records)) timestream = Session.client('timestream-write') # Creates the database try: print('Creating 
Database') timestream.create_database( DatabaseName='ubidots_s3_backup' ) # Checks if the database already exists except timestream.exceptions.ConflictException: print('Database already exists') pass # Creates the table try: print('Creating table') timestream.create_table( DatabaseName='ubidots_s3_backup', TableName=t_name, RetentionProperties={ 'MemoryStoreRetentionPeriodInHours': memory_retention, 'MagneticStoreRetentionPeriodInDays': magnetic_retention } ) # Checks if the table already exists except timestream.exceptions.ConflictException: print('Table already exists. Updating table properties') timestream.update_table( DatabaseName='ubidots_s3_backup', TableName=t_name, RetentionProperties={ 'MemoryStoreRetentionPeriodInHours': memory_retention, 'MagneticStoreRetentionPeriodInDays': magnetic_retention } ) # Write the records try: calls = ceil(len(records) / 100) print('Calls:', calls) for i in range(calls): timestream.write_records( DatabaseName='ubidots_s3_backup', TableName=t_name, Records=records[100 * i:100 * (i + 1)] ) # If an error occurs the error is printed as warning except IndexError: timestream.write_records( DatabaseName='ubidots_s3_backup', TableName=t_name, Records=records[100 * i:] ) except timestream.exceptions.RejectedRecordsException as err: print('Warning: Some records were rejected. See RejectedRecords for \ more information') print('RejectedRecords: ', err.response['RejectedRecords']) def lambda_handler(event, context): Session = boto3.Session() records= read_s3(Session, event) if records is None: return { 'statusCode': 400, 'body': json.dumps('No records found!') } else: write_timestream(Session, records, table_name) return { 'statusCode': 200, 'body': json.dumps('Records written successfully!') }
true
true
f7136d71f63998085416b4234c539ab036fae904
92
py
Python
poets/__main__.py
EgorZhuk/poets
62d5dd789ede4116990495b4a326eb5ea2123ea6
[ "MIT" ]
null
null
null
poets/__main__.py
EgorZhuk/poets
62d5dd789ede4116990495b4a326eb5ea2123ea6
[ "MIT" ]
null
null
null
poets/__main__.py
EgorZhuk/poets
62d5dd789ede4116990495b4a326eb5ea2123ea6
[ "MIT" ]
null
null
null
import poets def main(): print(poets.poets()) if __name__ == '__main__': main()
9.2
26
0.597826
import poets def main(): print(poets.poets()) if __name__ == '__main__': main()
true
true
f7136d775fc20c3ff728133ec84f1a91b737ce08
2,382
py
Python
Core/Logic/routers/issues_routes.py
dikshita-mehta/Sahaay
368826cb0b9f7085f901895a29b1df0895f90f5b
[ "MIT" ]
4
2021-09-29T13:53:10.000Z
2021-11-08T09:35:22.000Z
Core/Logic/routers/issues_routes.py
dikshita-mehta/Sahaay
368826cb0b9f7085f901895a29b1df0895f90f5b
[ "MIT" ]
23
2021-08-23T04:39:20.000Z
2022-01-13T06:57:14.000Z
Core/Logic/routers/issues_routes.py
dikshita-mehta/Sahaay
368826cb0b9f7085f901895a29b1df0895f90f5b
[ "MIT" ]
6
2021-08-18T08:24:34.000Z
2021-11-23T05:40:41.000Z
from typing import List from starlette.status import HTTP_200_OK, HTTP_202_ACCEPTED, HTTP_404_NOT_FOUND from fastapi import Request, APIRouter, File from fastapi.datastructures import UploadFile from pydantic import BaseModel from .abstraction import create_linked_issue, create_new_issue class IssueTable(BaseModel): Issue_Name: str Issue_Tags: List[str] Issue_description: str isActive: bool uniqueID: str LinkedIssue_id: str User_id: str class IssueTableNew(BaseModel): Issue_Name: str Issue_Tags: List[str] Issue_description: str isActive: bool uniqueID: str User_Id: str issues = APIRouter() @issues.get("/get-all-issues") def show_all_issues(): from Logic.models import Issues return list(Issues.objects.all()) @issues.get("/get-issue/{issue_id}") def get_specific_issue(issue_id: str): from Logic.models import Issues specific_issue = Issues.objects.get(Issue_Name=issue_id) return specific_issue @issues.post("/post-new-issue") def post_new_issue(request: Request, table: IssueTableNew): create_new_issue(request, table) return {HTTP_200_OK:"New issue was added"} @issues.delete("/delete-issues") def delete_an_issue(id: str): from Logic.models import Issues try: instance = Issues.objects.get(uniqueID=id) instance.delete() return {HTTP_202_ACCEPTED : f"{id} was deleted"} except Exception: return {HTTP_404_NOT_FOUND:"Image not added"} @issues.post("/add-image") def create_file(unique_id: str, file: UploadFile = File(...)): from Logic.models import Issues try: instance = Issues.objects.get(uniqueID = unique_id) instance.Issues_image = file.file.read() instance.save() #Images aren't being loaded on to POSTGRES return {HTTP_202_ACCEPTED:"New image was added."} except Exception: return {HTTP_404_NOT_FOUND:"Image not added"} @issues.post("/post-linked-issue") def post_a_linked_issue(issuesTable: IssueTable): from Logic.models import Issues, UserModel if len(Issues.objects.filter(uniqueID=issuesTable.LinkedIssue_id)): create_linked_issue(IssueTable) return {HTTP_200_OK:"New issue 
was saved."} else: return {HTTP_404_NOT_FOUND:"Instance not found"}
28.357143
80
0.696474
from typing import List from starlette.status import HTTP_200_OK, HTTP_202_ACCEPTED, HTTP_404_NOT_FOUND from fastapi import Request, APIRouter, File from fastapi.datastructures import UploadFile from pydantic import BaseModel from .abstraction import create_linked_issue, create_new_issue class IssueTable(BaseModel): Issue_Name: str Issue_Tags: List[str] Issue_description: str isActive: bool uniqueID: str LinkedIssue_id: str User_id: str class IssueTableNew(BaseModel): Issue_Name: str Issue_Tags: List[str] Issue_description: str isActive: bool uniqueID: str User_Id: str issues = APIRouter() @issues.get("/get-all-issues") def show_all_issues(): from Logic.models import Issues return list(Issues.objects.all()) @issues.get("/get-issue/{issue_id}") def get_specific_issue(issue_id: str): from Logic.models import Issues specific_issue = Issues.objects.get(Issue_Name=issue_id) return specific_issue @issues.post("/post-new-issue") def post_new_issue(request: Request, table: IssueTableNew): create_new_issue(request, table) return {HTTP_200_OK:"New issue was added"} @issues.delete("/delete-issues") def delete_an_issue(id: str): from Logic.models import Issues try: instance = Issues.objects.get(uniqueID=id) instance.delete() return {HTTP_202_ACCEPTED : f"{id} was deleted"} except Exception: return {HTTP_404_NOT_FOUND:"Image not added"} @issues.post("/add-image") def create_file(unique_id: str, file: UploadFile = File(...)): from Logic.models import Issues try: instance = Issues.objects.get(uniqueID = unique_id) instance.Issues_image = file.file.read() instance.save() return {HTTP_202_ACCEPTED:"New image was added."} except Exception: return {HTTP_404_NOT_FOUND:"Image not added"} @issues.post("/post-linked-issue") def post_a_linked_issue(issuesTable: IssueTable): from Logic.models import Issues, UserModel if len(Issues.objects.filter(uniqueID=issuesTable.LinkedIssue_id)): create_linked_issue(IssueTable) return {HTTP_200_OK:"New issue was saved."} else: return 
{HTTP_404_NOT_FOUND:"Instance not found"}
true
true
f7136e27bba530f985381d41a682d8ddeab860f2
1,851
py
Python
package/spack-py-quantities/package.py
ctuning/ck-spack
307934efce1be2d4f104251275c82fbc70127105
[ "BSD-3-Clause" ]
1
2018-07-17T07:45:09.000Z
2018-07-17T07:45:09.000Z
package/spack-py-quantities/package.py
ctuning/ck-spack
307934efce1be2d4f104251275c82fbc70127105
[ "BSD-3-Clause" ]
null
null
null
package/spack-py-quantities/package.py
ctuning/ck-spack
307934efce1be2d4f104251275c82fbc70127105
[ "BSD-3-Clause" ]
null
null
null
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class PyQuantities(PythonPackage): """Support for physical quantities with units, based on numpy""" homepage = "http://python-quantities.readthedocs.org" url = "https://pypi.io/packages/source/q/quantities/quantities-0.12.1.tar.gz" version('0.12.1', '9c9ecda15e905cccfc420e5341199512') version('0.11.1', 'f4c6287bfd2e93322b25a7c1311a0243', url="https://pypi.io/packages/source/q/quantities/quantities-0.11.1.zip") conflicts('py-numpy@1.13:', when='@:0.11.99') depends_on('python@2.6.0:') depends_on('py-numpy@1.4.0:', type=('build', 'run'))
44.071429
86
0.672609
true
true
f7136efbc9729168e3afeff8a58d80d8289cc150
1,421
py
Python
python_demo_v2/zhihu_data.py
renhongl/python_demo
039d0e046885dd3890526ae91efa2d601f6f9b73
[ "MIT" ]
1
2019-11-29T02:47:19.000Z
2019-11-29T02:47:19.000Z
python_demo_v2/zhihu_data.py
renhongl/python_demo
039d0e046885dd3890526ae91efa2d601f6f9b73
[ "MIT" ]
null
null
null
python_demo_v2/zhihu_data.py
renhongl/python_demo
039d0e046885dd3890526ae91efa2d601f6f9b73
[ "MIT" ]
null
null
null
import requests from pyquery import PyQuery as pq import json def get_one_page(url): headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36' } res = requests.get(url, headers=headers) text = res.text doc = pq(text) results = [] for item in doc.find('.explore-feed.feed-item').items(): temp = { 'title': item.find('h2').text(), 'anwser': item.find('.zh-summary.summary').text(), 'author': item.find('.author-link-line').text() } results.append(temp) return results def write_to_txt(results): for item in results: with open('./output/zhihu_data.txt', 'a', encoding='utf-8') as f: print(item) question = '' question = question + item['title'] + '\n' question = question + item['anwser'] + '\n' question = question + item['author'] + '\n' question = question + '========\n' f.write(question) def write_to_json(results): with open('./output/zhihu_data.json', 'w', encoding='utf-8') as f: f.write(json.dumps(results, indent=4, ensure_ascii=False)) def main(): url = 'https://www.zhihu.com/explore' results = get_one_page(url) # write_to_txt(results) write_to_json(results) print(results) main()
28.42
145
0.582688
import requests from pyquery import PyQuery as pq import json def get_one_page(url): headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36' } res = requests.get(url, headers=headers) text = res.text doc = pq(text) results = [] for item in doc.find('.explore-feed.feed-item').items(): temp = { 'title': item.find('h2').text(), 'anwser': item.find('.zh-summary.summary').text(), 'author': item.find('.author-link-line').text() } results.append(temp) return results def write_to_txt(results): for item in results: with open('./output/zhihu_data.txt', 'a', encoding='utf-8') as f: print(item) question = '' question = question + item['title'] + '\n' question = question + item['anwser'] + '\n' question = question + item['author'] + '\n' question = question + '========\n' f.write(question) def write_to_json(results): with open('./output/zhihu_data.json', 'w', encoding='utf-8') as f: f.write(json.dumps(results, indent=4, ensure_ascii=False)) def main(): url = 'https://www.zhihu.com/explore' results = get_one_page(url) write_to_json(results) print(results) main()
true
true
f7136f491edca08617ce711f647f8a42bbbe8aed
100
py
Python
automaton/__init__.py
PaulRaUnite/dsl_lab1
49368922902eadea53771278eb3a0c559cb7fb35
[ "MIT" ]
null
null
null
automaton/__init__.py
PaulRaUnite/dsl_lab1
49368922902eadea53771278eb3a0c559cb7fb35
[ "MIT" ]
null
null
null
automaton/__init__.py
PaulRaUnite/dsl_lab1
49368922902eadea53771278eb3a0c559cb7fb35
[ "MIT" ]
null
null
null
from .ndfa import * from .dfa import * __all__ = [] __all__ += ndfa.__all__ __all__ += dfa.__all__
14.285714
23
0.69
from .ndfa import * from .dfa import * __all__ = [] __all__ += ndfa.__all__ __all__ += dfa.__all__
true
true
f7136f6c0be19743771c16fbece82ec79c8ede9d
2,355
py
Python
api/tacticalrmm/agents/utils.py
v2cloud/tacticalrmm
12f599f9749985f66ff9b559c5e5abd36064b182
[ "MIT" ]
null
null
null
api/tacticalrmm/agents/utils.py
v2cloud/tacticalrmm
12f599f9749985f66ff9b559c5e5abd36064b182
[ "MIT" ]
null
null
null
api/tacticalrmm/agents/utils.py
v2cloud/tacticalrmm
12f599f9749985f66ff9b559c5e5abd36064b182
[ "MIT" ]
null
null
null
import asyncio import tempfile import urllib.parse from django.conf import settings from django.http import FileResponse from core.models import CodeSignToken from core.utils import get_core_settings, get_mesh_device_id, get_mesh_ws_url from tacticalrmm.constants import MeshAgentIdent def get_agent_url(arch: str, plat: str) -> str: if plat == "windows": endpoint = "winagents" dl_url = settings.DL_32 if arch == "32" else settings.DL_64 else: endpoint = "linuxagents" dl_url = "" token = CodeSignToken.objects.first() if not token: return dl_url if token.is_valid: base_url = settings.EXE_GEN_URL + f"/api/v1/{endpoint}/?" params = { "version": settings.LATEST_AGENT_VER, "arch": arch, "token": token.token, } dl_url = base_url + urllib.parse.urlencode(params) return dl_url def generate_linux_install( client: str, site: str, agent_type: str, arch: str, token: str, api: str, download_url: str, ) -> FileResponse: match arch: case "amd64": arch_id = MeshAgentIdent.LINUX64 case "386": arch_id = MeshAgentIdent.LINUX32 case "arm64": arch_id = MeshAgentIdent.LINUX_ARM_64 case "arm": arch_id = MeshAgentIdent.LINUX_ARM_HF case _: arch_id = "not_found" core = get_core_settings() uri = get_mesh_ws_url() mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group)) mesh_dl = ( f"{core.mesh_site}/meshagents?id={mesh_id}&installflags=0&meshinstall={arch_id}" ) sh = settings.LINUX_AGENT_SCRIPT with open(sh, "r") as f: text = f.read() replace = { "agentDLChange": download_url, "meshDLChange": mesh_dl, "clientIDChange": client, "siteIDChange": site, "agentTypeChange": agent_type, "tokenChange": token, "apiURLChange": api, } for i, j in replace.items(): text = text.replace(i, j) with tempfile.NamedTemporaryFile() as fp: with open(fp.name, "w") as f: f.write(text) f.write("\n") return FileResponse( open(fp.name, "rb"), as_attachment=True, filename="linux_agent_install.sh" )
25.322581
88
0.609766
import asyncio import tempfile import urllib.parse from django.conf import settings from django.http import FileResponse from core.models import CodeSignToken from core.utils import get_core_settings, get_mesh_device_id, get_mesh_ws_url from tacticalrmm.constants import MeshAgentIdent def get_agent_url(arch: str, plat: str) -> str: if plat == "windows": endpoint = "winagents" dl_url = settings.DL_32 if arch == "32" else settings.DL_64 else: endpoint = "linuxagents" dl_url = "" token = CodeSignToken.objects.first() if not token: return dl_url if token.is_valid: base_url = settings.EXE_GEN_URL + f"/api/v1/{endpoint}/?" params = { "version": settings.LATEST_AGENT_VER, "arch": arch, "token": token.token, } dl_url = base_url + urllib.parse.urlencode(params) return dl_url def generate_linux_install( client: str, site: str, agent_type: str, arch: str, token: str, api: str, download_url: str, ) -> FileResponse: match arch: case "amd64": arch_id = MeshAgentIdent.LINUX64 case "386": arch_id = MeshAgentIdent.LINUX32 case "arm64": arch_id = MeshAgentIdent.LINUX_ARM_64 case "arm": arch_id = MeshAgentIdent.LINUX_ARM_HF case _: arch_id = "not_found" core = get_core_settings() uri = get_mesh_ws_url() mesh_id = asyncio.run(get_mesh_device_id(uri, core.mesh_device_group)) mesh_dl = ( f"{core.mesh_site}/meshagents?id={mesh_id}&installflags=0&meshinstall={arch_id}" ) sh = settings.LINUX_AGENT_SCRIPT with open(sh, "r") as f: text = f.read() replace = { "agentDLChange": download_url, "meshDLChange": mesh_dl, "clientIDChange": client, "siteIDChange": site, "agentTypeChange": agent_type, "tokenChange": token, "apiURLChange": api, } for i, j in replace.items(): text = text.replace(i, j) with tempfile.NamedTemporaryFile() as fp: with open(fp.name, "w") as f: f.write(text) f.write("\n") return FileResponse( open(fp.name, "rb"), as_attachment=True, filename="linux_agent_install.sh" )
true
true
f7136fa5e6598f9ae88b101d1322debe9c2f4d03
6,468
py
Python
old build/trackbot.py
matieo33/RF-Activity-Count
9fa3e122ff01ffc082b7d21b527c5b139967ac95
[ "MIT" ]
1
2022-02-11T02:59:44.000Z
2022-02-11T02:59:44.000Z
old build/trackbot.py
matieo33/robloxforum-activity-count
9fa3e122ff01ffc082b7d21b527c5b139967ac95
[ "MIT" ]
null
null
null
old build/trackbot.py
matieo33/robloxforum-activity-count
9fa3e122ff01ffc082b7d21b527c5b139967ac95
[ "MIT" ]
null
null
null
try: import logo as logo_print except ModuleNotFoundError: missingfile = str(input("The program is missing a file. Continue anyways? ")) if missingfile.lower() == "yes" or "y" or "yea": pass else: os.exit(0) try: from bs4 import BeautifulSoup import requests, time, re, os, random from termcolor import colored from colorama import init except ModuleNotFoundError and ImportError: print("The program is missing essential libraries. Read the Github's tutorial how to install all the libraries.") os.exit(0) os.system("mode con cols=150 lines=75") decision = '' init() colors = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan'] # DEFS BELOW def print_status(): obj = time.localtime() currentime = time.asctime(obj) if decision.lower() == 'sample': pass else: print(decision) time.sleep(a) source = requests.get('https://www.robloxforum.com').text soup = BeautifulSoup(source, 'lxml') stringy = soup.find('span', class_='block-footer-counter').text usernames = soup.find_all('span', class_=['username--style1', 'username--style2', 'username--style3', 'username--style4', 'username--style5', 'username--style6', 'username--style7', 'username--style8', 'username--style9', 'username--style10' 'username--style11']) whitespace_remove = stringy.replace(' Robots', "Robots") print(currentime) print(whitespace_remove) for span in usernames: attr = span.attrs['class'] numbas = re.findall(r'\d+', str(attr)) if numbas[0] == "2": print(span.text) elif numbas[0] == "3": print(colored(span.text, 'red', attrs=['bold'])) elif numbas[0] == "4": print(colored(span.text, 'blue', attrs=['bold'])) elif numbas[0] == "6": print(colored(span.text, 'green', attrs=['bold'])) elif numbas[0] == "7": print(colored(span.text, 'green')) elif numbas[0] == "8": print(colored(span.text, 'blue')) elif numbas[0] == "9": print(colored(span.text, 'yellow')) elif numbas[0] == "10": def strike(text): return ''.join([u'\u0336{}'.format(c) for c in text]) black = (colored(span.text, 'yellow')) print(strike(black)) elif 
numbas[0] == "11": print(colored(span.text, 'blue', attrs=['bold'])) print('\n') if decision == 'SAMPLE' or 'sample': print() else: if b.lower() == "y" or "yes" or "yea": with open("log.txt", "a") as o: encoded_string = stringy.encode("ascii", "ignore") decode_string = encoded_string.decode() whitespace_remove = decode_string.replace(' Robots', "Robots") o.write(whitespace_remove) if c.lower() == "y" or "yes" or "yea": o.write(currentime + '\n') for span in usernames: attr = span.attrs['class'] numbas = re.findall(r'\d+', str(attr)) sp = span.text obj = time.localtime() currentime = time.asctime(obj) if c.lower() == "y" or "yes" or "yea": if numbas[0] == "2": o.write(sp + " | normal user") o.write('\n') elif numbas[0] == "3": o.write(sp + " | administrator") o.write('\n') elif numbas[0] == "4": o.write(sp + " | moderator") o.write('\n') elif numbas[0] == "6": o.write(sp + " | verified") o.write('\n') elif numbas[0] == "7": o.write(sp + " | vip") o.write('\n') elif numbas[0] == "8": o.write(sp + " | pro") o.write('\n') elif numbas[0] == "9": o.write(sp + " | ultra") o.write('\n') elif numbas[0] == "10": o.write(sp + " | banned") o.write('\n') o.write('\n') else: pass else: pass def run(): process = 1 while process == 1: print_status() # DEFS ABOVE try: print(colored(logo_print.final_str, random.choice(colors))) except ModuleNotFoundError: pass print(colored("RF trackbot - credits to MATIEO33", 'blue')) print(colored("RF: https://robloxforum.com/members/matieo33.8832/", 'red')) print(colored("Github: https://github.com/matieo33", 'green')) print("Available options: TRACK SAMPLE HELP \n") if __name__ == '__main__': in_menu = 1 while in_menu == 1: decision = str(input()) if decision.lower() == 'help': print("I made this bot purely for the purpose of entertainment, and if ever happens - maybe also will come in handy for somebody.") print("Wanna help this bot grow? DM me.") print('Important: CTRL + C will stop the program entirely! 
Make sure to answer with "Y" if you wish to save the data to a TXT file.') print( "TRACK: Prints the activity of the site per amount of seconds you select.") print( "SAMPLE: Prints the activity of the site one time as an example of the program's work.") elif decision.lower() == 'sample': print('') print_status() elif decision.lower() == 'track': print('') in_menu = 0 else: print("ERROR: unknown command " + "'" + decision + "'") a = int(input( "Every how much seconds do you wish to recieve updates on the site activity? ")) b = str(input("Do you wish the data to be saved to a TXT file? ")) if b.lower() == "y" or "yes" or "yea": c = str(input('Do you wish to include the list of all online users? ')) while 1: print_status() else: while 1: print_status()
39.439024
165
0.495671
try: import logo as logo_print except ModuleNotFoundError: missingfile = str(input("The program is missing a file. Continue anyways? ")) if missingfile.lower() == "yes" or "y" or "yea": pass else: os.exit(0) try: from bs4 import BeautifulSoup import requests, time, re, os, random from termcolor import colored from colorama import init except ModuleNotFoundError and ImportError: print("The program is missing essential libraries. Read the Github's tutorial how to install all the libraries.") os.exit(0) os.system("mode con cols=150 lines=75") decision = '' init() colors = ['red', 'green', 'yellow', 'blue', 'magenta', 'cyan'] # DEFS BELOW def print_status(): obj = time.localtime() currentime = time.asctime(obj) if decision.lower() == 'sample': pass else: print(decision) time.sleep(a) source = requests.get('https://www.robloxforum.com').text soup = BeautifulSoup(source, 'lxml') stringy = soup.find('span', class_='block-footer-counter').text usernames = soup.find_all('span', class_=['username--style1', 'username--style2', 'username--style3', 'username--style4', 'username--style5', 'username--style6', 'username--style7', 'username--style8', 'username--style9', 'username--style10' 'username--style11']) whitespace_remove = stringy.replace(' Robots', "Robots") print(currentime) print(whitespace_remove) for span in usernames: attr = span.attrs['class'] numbas = re.findall(r'\d+', str(attr)) if numbas[0] == "2": print(span.text) elif numbas[0] == "3": print(colored(span.text, 'red', attrs=['bold'])) elif numbas[0] == "4": print(colored(span.text, 'blue', attrs=['bold'])) elif numbas[0] == "6": print(colored(span.text, 'green', attrs=['bold'])) elif numbas[0] == "7": print(colored(span.text, 'green')) elif numbas[0] == "8": print(colored(span.text, 'blue')) elif numbas[0] == "9": print(colored(span.text, 'yellow')) elif numbas[0] == "10": def strike(text): return ''.join([u'\u0336{}'.format(c) for c in text]) black = (colored(span.text, 'yellow')) print(strike(black)) elif 
numbas[0] == "11": print(colored(span.text, 'blue', attrs=['bold'])) print('\n') if decision == 'SAMPLE' or 'sample': print() else: if b.lower() == "y" or "yes" or "yea": with open("log.txt", "a") as o: encoded_string = stringy.encode("ascii", "ignore") decode_string = encoded_string.decode() whitespace_remove = decode_string.replace(' Robots', "Robots") o.write(whitespace_remove) if c.lower() == "y" or "yes" or "yea": o.write(currentime + '\n') for span in usernames: attr = span.attrs['class'] numbas = re.findall(r'\d+', str(attr)) sp = span.text obj = time.localtime() currentime = time.asctime(obj) if c.lower() == "y" or "yes" or "yea": if numbas[0] == "2": o.write(sp + " | normal user") o.write('\n') elif numbas[0] == "3": o.write(sp + " | administrator") o.write('\n') elif numbas[0] == "4": o.write(sp + " | moderator") o.write('\n') elif numbas[0] == "6": o.write(sp + " | verified") o.write('\n') elif numbas[0] == "7": o.write(sp + " | vip") o.write('\n') elif numbas[0] == "8": o.write(sp + " | pro") o.write('\n') elif numbas[0] == "9": o.write(sp + " | ultra") o.write('\n') elif numbas[0] == "10": o.write(sp + " | banned") o.write('\n') o.write('\n') else: pass else: pass def run(): process = 1 while process == 1: print_status() # DEFS ABOVE try: print(colored(logo_print.final_str, random.choice(colors))) except ModuleNotFoundError: pass print(colored("RF trackbot - credits to MATIEO33", 'blue')) print(colored("RF: https://robloxforum.com/members/matieo33.8832/", 'red')) print(colored("Github: https://github.com/matieo33", 'green')) print("Available options: TRACK SAMPLE HELP \n") if __name__ == '__main__': in_menu = 1 while in_menu == 1: decision = str(input()) if decision.lower() == 'help': print("I made this bot purely for the purpose of entertainment, and if ever happens - maybe also will come in handy for somebody.") print("Wanna help this bot grow? DM me.") print('Important: CTRL + C will stop the program entirely! 
Make sure to answer with "Y" if you wish to save the data to a TXT file.') print( "TRACK: Prints the activity of the site per amount of seconds you select.") print( "SAMPLE: Prints the activity of the site one time as an example of the program's work.") elif decision.lower() == 'sample': print('') print_status() elif decision.lower() == 'track': print('') in_menu = 0 else: print("ERROR: unknown command " + "'" + decision + "'") a = int(input( "Every how much seconds do you wish to recieve updates on the site activity? ")) b = str(input("Do you wish the data to be saved to a TXT file? ")) if b.lower() == "y" or "yes" or "yea": c = str(input('Do you wish to include the list of all online users? ')) while 1: print_status() else: while 1: print_status()
true
true
f7137068828350e4b2a3991c7da868c21d1ebab6
1,915
py
Python
lib/spack/spack/build_systems/r.py
m-shunji/spack
ee1b0b9fb980d16c80a5f43d9f93f54424995268
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
1
2020-10-08T06:16:50.000Z
2020-10-08T06:16:50.000Z
lib/spack/spack/build_systems/r.py
m-shunji/spack
ee1b0b9fb980d16c80a5f43d9f93f54424995268
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
12
2021-05-12T05:54:41.000Z
2022-03-30T11:09:24.000Z
lib/spack/spack/build_systems/r.py
ellio167/spack
45040589c8458ccd435f10be7123875181be76ff
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
2
2021-04-07T18:27:09.000Z
2022-03-31T22:52:38.000Z
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) import inspect from spack.directives import extends from spack.package import PackageBase, run_after class RPackage(PackageBase): """Specialized class for packages that are built using R. For more information on the R build system, see: https://stat.ethz.ch/R-manual/R-devel/library/utils/html/INSTALL.html This class provides a single phase that can be overridden: 1. :py:meth:`~.RPackage.install` It has sensible defaults, and for many packages the only thing necessary will be to add dependencies """ phases = ['install'] maintainers = ['glennpj'] #: This attribute is used in UI queries that need to know the build #: system base class build_system_class = 'RPackage' extends('r') def configure_args(self): """Arguments to pass to install via ``--configure-args``.""" return [] def configure_vars(self): """Arguments to pass to install via ``--configure-vars``.""" return [] def install(self, spec, prefix): """Installs an R package.""" config_args = self.configure_args() config_vars = self.configure_vars() args = [ 'CMD', 'INSTALL' ] if config_args: args.append('--configure-args={0}'.format(' '.join(config_args))) if config_vars: args.append('--configure-vars={0}'.format(' '.join(config_vars))) args.extend([ '--library={0}'.format(self.module.r_lib_dir), self.stage.source_path ]) inspect.getmodule(self).R(*args) # Check that self.prefix is there after installation run_after('install')(PackageBase.sanity_check_prefix)
27.357143
77
0.641775
import inspect from spack.directives import extends from spack.package import PackageBase, run_after class RPackage(PackageBase): phases = ['install'] maintainers = ['glennpj'] build_system_class = 'RPackage' extends('r') def configure_args(self): return [] def configure_vars(self): return [] def install(self, spec, prefix): config_args = self.configure_args() config_vars = self.configure_vars() args = [ 'CMD', 'INSTALL' ] if config_args: args.append('--configure-args={0}'.format(' '.join(config_args))) if config_vars: args.append('--configure-vars={0}'.format(' '.join(config_vars))) args.extend([ '--library={0}'.format(self.module.r_lib_dir), self.stage.source_path ]) inspect.getmodule(self).R(*args) run_after('install')(PackageBase.sanity_check_prefix)
true
true
f71370c5ef66e0710af22b26b806b032387c0cc4
4,722
py
Python
tests/configs/memtest-ruby.py
LingxiaoJIA/gem5
708d23fc73f30be2726530bbc82702d0d84e0f4a
[ "BSD-3-Clause" ]
31
2015-12-15T19:14:10.000Z
2021-12-31T17:40:21.000Z
tests/configs/memtest-ruby.py
LingxiaoJIA/gem5
708d23fc73f30be2726530bbc82702d0d84e0f4a
[ "BSD-3-Clause" ]
5
2015-12-04T08:06:47.000Z
2020-08-09T21:49:46.000Z
tests/configs/memtest-ruby.py
LingxiaoJIA/gem5
708d23fc73f30be2726530bbc82702d0d84e0f4a
[ "BSD-3-Clause" ]
21
2015-11-05T08:25:45.000Z
2021-06-19T02:24:50.000Z
# Copyright (c) 2006-2007 The Regents of The University of Michigan # Copyright (c) 2010 Advanced Micro Devices, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# # Authors: Ron Dreslinski import m5 from m5.objects import * from m5.defines import buildEnv from m5.util import addToPath import os, optparse, sys # Get paths we might need config_path = os.path.dirname(os.path.abspath(__file__)) config_root = os.path.dirname(config_path) m5_root = os.path.dirname(config_root) addToPath(config_root+'/configs/common') addToPath(config_root+'/configs/ruby') addToPath(config_root+'/configs/topologies') import Ruby import Options parser = optparse.OptionParser() Options.addCommonOptions(parser) # Add the ruby specific and protocol specific options Ruby.define_options(parser) (options, args) = parser.parse_args() # # Set the default cache size and associativity to be very small to encourage # races between requests and writebacks. # options.l1d_size="256B" options.l1i_size="256B" options.l2_size="512B" options.l3_size="1kB" options.l1d_assoc=2 options.l1i_assoc=2 options.l2_assoc=2 options.l3_assoc=2 options.ports=32 #MAX CORES IS 8 with the fals sharing method nb_cores = 8 # ruby does not support atomic, functional, or uncacheable accesses cpus = [ MemTest(atomic=False, percent_functional=50, percent_uncacheable=0, suppress_func_warnings=True) \ for i in xrange(nb_cores) ] # overwrite options.num_cpus with the nb_cores value options.num_cpus = nb_cores # system simulated system = System(cpu = cpus, funcmem = SimpleMemory(in_addr_map = False), funcbus = NoncoherentXBar()) # Dummy voltage domain for all our clock domains system.voltage_domain = VoltageDomain() system.clk_domain = SrcClockDomain(clock = '1GHz', voltage_domain = system.voltage_domain) # Create a seperate clock domain for components that should run at # CPUs frequency system.cpu_clk_domain = SrcClockDomain(clock = '2GHz', voltage_domain = system.voltage_domain) # All cpus are associated with cpu_clk_domain for cpu in cpus: cpu.clk_domain = system.cpu_clk_domain system.mem_ranges = AddrRange('256MB') Ruby.create_system(options, False, system) # Create a separate clock 
domain for Ruby system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock, voltage_domain = system.voltage_domain) assert(len(cpus) == len(system.ruby._cpu_ports)) for (i, ruby_port) in enumerate(system.ruby._cpu_ports): # # Tie the cpu test and functional ports to the ruby cpu ports and # physmem, respectively # cpus[i].test = ruby_port.slave cpus[i].functional = system.funcbus.slave # # Since the memtester is incredibly bursty, increase the deadlock # threshold to 1 million cycles # ruby_port.deadlock_threshold = 1000000 # connect reference memory to funcbus system.funcmem.port = system.funcbus.master # ----------------------- # run simulation # ----------------------- root = Root(full_system = False, system = system) root.system.mem_mode = 'timing' # Not much point in this being higher than the L1 latency m5.ticks.setGlobalFrequency('1ns')
35.238806
79
0.741423
import m5 from m5.objects import * from m5.defines import buildEnv from m5.util import addToPath import os, optparse, sys config_path = os.path.dirname(os.path.abspath(__file__)) config_root = os.path.dirname(config_path) m5_root = os.path.dirname(config_root) addToPath(config_root+'/configs/common') addToPath(config_root+'/configs/ruby') addToPath(config_root+'/configs/topologies') import Ruby import Options parser = optparse.OptionParser() Options.addCommonOptions(parser) Ruby.define_options(parser) (options, args) = parser.parse_args() options.l1d_size="256B" options.l1i_size="256B" options.l2_size="512B" options.l3_size="1kB" options.l1d_assoc=2 options.l1i_assoc=2 options.l2_assoc=2 options.l3_assoc=2 options.ports=32 nb_cores = 8 cpus = [ MemTest(atomic=False, percent_functional=50, percent_uncacheable=0, suppress_func_warnings=True) \ for i in xrange(nb_cores) ] options.num_cpus = nb_cores system = System(cpu = cpus, funcmem = SimpleMemory(in_addr_map = False), funcbus = NoncoherentXBar()) system.voltage_domain = VoltageDomain() system.clk_domain = SrcClockDomain(clock = '1GHz', voltage_domain = system.voltage_domain) system.cpu_clk_domain = SrcClockDomain(clock = '2GHz', voltage_domain = system.voltage_domain) for cpu in cpus: cpu.clk_domain = system.cpu_clk_domain system.mem_ranges = AddrRange('256MB') Ruby.create_system(options, False, system) system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock, voltage_domain = system.voltage_domain) assert(len(cpus) == len(system.ruby._cpu_ports)) for (i, ruby_port) in enumerate(system.ruby._cpu_ports): cpus[i].test = ruby_port.slave cpus[i].functional = system.funcbus.slave ruby_port.deadlock_threshold = 1000000 system.funcmem.port = system.funcbus.master root = Root(full_system = False, system = system) root.system.mem_mode = 'timing' m5.ticks.setGlobalFrequency('1ns')
true
true
f713710e3441b1a7648d0165d70d8f7e5e989657
3,130
py
Python
iceisland/config.py
WeitBelou/IceIsland
17bd202759d6af350fa315f891ec726e7fdadd7f
[ "MIT" ]
null
null
null
iceisland/config.py
WeitBelou/IceIsland
17bd202759d6af350fa315f891ec726e7fdadd7f
[ "MIT" ]
null
null
null
iceisland/config.py
WeitBelou/IceIsland
17bd202759d6af350fa315f891ec726e7fdadd7f
[ "MIT" ]
2
2017-11-29T14:51:28.000Z
2018-08-31T18:03:06.000Z
import operator as op import os from typing import Dict, Callable from dolfin import Expression, Mesh from dolfin.cpp.mesh import MeshFunctionSizet class Material: def __init__(self, rho: float, young_modulus: float, shear_modulus: float): self._rho = rho self._young_modulus = young_modulus self._shear_modulus = shear_modulus @property def rho(self) -> float: return self._rho @property def young_modulus(self) -> float: return self._young_modulus @property def shear_modulus(self) -> float: return self._shear_modulus @property def lambda_(self) -> float: return self.shear_modulus * (self.young_modulus - 2 * self.shear_modulus) / ( 3 * self.shear_modulus - self.young_modulus) @property def mu(self) -> float: return self.shear_modulus def __repr__(self): return '<Material rho={rho} young_modulus={young_modulus} shear_modulus={shear_modulus}>'.format( rho=self.rho, young_modulus=self.young_modulus, shear_modulus=self.shear_modulus, ) class Base: def __init__(self, g: float, mesh_dir: str, materials: Dict[int, Material]): self._g = g self._mesh_dir = mesh_dir self._materials = materials self._mesh = Mesh(os.path.join(mesh_dir, 'mesh.xml')) self._subdomains = MeshFunctionSizet(self.mesh, os.path.join(mesh_dir, 'mesh_physical_region.xml')) self._boundaries = MeshFunctionSizet(self.mesh, os.path.join(mesh_dir, 'mesh_facet_region.xml')) @property def g(self): return self._g @property def mesh_dir(self) -> str: return self._mesh_dir @property def mesh(self) -> Mesh: return self._mesh @property def materials(self) -> Dict[int, Material]: return self._materials @property def lambda_(self) -> Expression: return MaterialGetter.create(materials=self.materials, subdomains=self._subdomains, f=op.attrgetter('lambda_')) @property def mu(self) -> Expression: return MaterialGetter.create(materials=self.materials, subdomains=self._subdomains, f=op.attrgetter('mu')) @property def rho(self): return MaterialGetter.create(materials=self.materials, subdomains=self._subdomains, 
f=op.attrgetter('rho')) def __repr__(self) -> str: return '<Base g={g} mesh_dir={mesh_dir} materials={materials}>'.format( g=self.g, mesh_dir=self.mesh_dir, materials=self.materials ) class UnknownDomainException(Exception): pass class MaterialGetter(Expression): @staticmethod def create(materials: Dict[int, Material], subdomains: MeshFunctionSizet, f: Callable[[Material], float]): a = MaterialGetter(degree=0) a._f = f a._subdomains = subdomains a._materials = materials return a def eval_cell(self, values, x, cell): material = self._materials.get(self._subdomains[cell.index]) if material: values[0] = self._f(material) else: raise UnknownDomainException()
29.252336
119
0.661981
import operator as op import os from typing import Dict, Callable from dolfin import Expression, Mesh from dolfin.cpp.mesh import MeshFunctionSizet class Material: def __init__(self, rho: float, young_modulus: float, shear_modulus: float): self._rho = rho self._young_modulus = young_modulus self._shear_modulus = shear_modulus @property def rho(self) -> float: return self._rho @property def young_modulus(self) -> float: return self._young_modulus @property def shear_modulus(self) -> float: return self._shear_modulus @property def lambda_(self) -> float: return self.shear_modulus * (self.young_modulus - 2 * self.shear_modulus) / ( 3 * self.shear_modulus - self.young_modulus) @property def mu(self) -> float: return self.shear_modulus def __repr__(self): return '<Material rho={rho} young_modulus={young_modulus} shear_modulus={shear_modulus}>'.format( rho=self.rho, young_modulus=self.young_modulus, shear_modulus=self.shear_modulus, ) class Base: def __init__(self, g: float, mesh_dir: str, materials: Dict[int, Material]): self._g = g self._mesh_dir = mesh_dir self._materials = materials self._mesh = Mesh(os.path.join(mesh_dir, 'mesh.xml')) self._subdomains = MeshFunctionSizet(self.mesh, os.path.join(mesh_dir, 'mesh_physical_region.xml')) self._boundaries = MeshFunctionSizet(self.mesh, os.path.join(mesh_dir, 'mesh_facet_region.xml')) @property def g(self): return self._g @property def mesh_dir(self) -> str: return self._mesh_dir @property def mesh(self) -> Mesh: return self._mesh @property def materials(self) -> Dict[int, Material]: return self._materials @property def lambda_(self) -> Expression: return MaterialGetter.create(materials=self.materials, subdomains=self._subdomains, f=op.attrgetter('lambda_')) @property def mu(self) -> Expression: return MaterialGetter.create(materials=self.materials, subdomains=self._subdomains, f=op.attrgetter('mu')) @property def rho(self): return MaterialGetter.create(materials=self.materials, subdomains=self._subdomains, 
f=op.attrgetter('rho')) def __repr__(self) -> str: return '<Base g={g} mesh_dir={mesh_dir} materials={materials}>'.format( g=self.g, mesh_dir=self.mesh_dir, materials=self.materials ) class UnknownDomainException(Exception): pass class MaterialGetter(Expression): @staticmethod def create(materials: Dict[int, Material], subdomains: MeshFunctionSizet, f: Callable[[Material], float]): a = MaterialGetter(degree=0) a._f = f a._subdomains = subdomains a._materials = materials return a def eval_cell(self, values, x, cell): material = self._materials.get(self._subdomains[cell.index]) if material: values[0] = self._f(material) else: raise UnknownDomainException()
true
true
f71371685e17f803653a289a21c29aad24b12eac
473
py
Python
JB/7.py
boostjanbjorge/adventofcode
5cdd540a553550b1000496dfa39cbf7cf431a85f
[ "MIT" ]
null
null
null
JB/7.py
boostjanbjorge/adventofcode
5cdd540a553550b1000496dfa39cbf7cf431a85f
[ "MIT" ]
null
null
null
JB/7.py
boostjanbjorge/adventofcode
5cdd540a553550b1000496dfa39cbf7cf431a85f
[ "MIT" ]
null
null
null
def load(): with open("inputs/7.txt") as f: yield from map(int, f.readline().split(",")) def solve(cost): positions = tuple(load()) min_position, max_position = min(positions), max(positions) return min( sum(cost(suggestion, crab) for crab in positions) for suggestion in range(min_position, max_position + 1) ) print("a:", solve(lambda a, b: abs(a - b))) print("b:", solve(lambda a, b: (abs(a - b) + abs(a - b) ** 2) // 2))
27.823529
68
0.598309
def load(): with open("inputs/7.txt") as f: yield from map(int, f.readline().split(",")) def solve(cost): positions = tuple(load()) min_position, max_position = min(positions), max(positions) return min( sum(cost(suggestion, crab) for crab in positions) for suggestion in range(min_position, max_position + 1) ) print("a:", solve(lambda a, b: abs(a - b))) print("b:", solve(lambda a, b: (abs(a - b) + abs(a - b) ** 2) // 2))
true
true
f71371bc08f3756dcf85d1c11b5f3d2763745556
18,363
py
Python
biggan/BigGAN-paddle/layers.py
zzz2010/Contrib
d351d83da718145cef9f6c98598f7fedc027efe5
[ "Apache-2.0" ]
20
2020-03-13T13:40:32.000Z
2022-03-10T07:31:48.000Z
biggan/BigGAN-paddle/layers.py
zzz2010/Contrib
d351d83da718145cef9f6c98598f7fedc027efe5
[ "Apache-2.0" ]
34
2020-02-20T11:04:58.000Z
2022-03-12T00:54:26.000Z
biggan/BigGAN-paddle/layers.py
zzz2010/Contrib
d351d83da718145cef9f6c98598f7fedc027efe5
[ "Apache-2.0" ]
41
2020-02-14T09:34:39.000Z
2022-03-10T07:31:42.000Z
''' Layers This file contains various layers for the BigGAN models. ''' import numpy as np import paddorch as torch import paddorch.nn as nn from paddorch.nn import init import paddorch.optim as optim import paddorch.nn.functional as F from paddorch.nn import Parameter as P # Projection of x onto y def proj(x, y): return torch.mm(y, x.t()) * y / torch.mm(y, y.t()) # Orthogonalize x wrt list of vectors ys def gram_schmidt(x, ys): for y in ys: x = x - proj(x, y) return x # Apply num_itrs steps of the power method to estimate top N singular values. def power_iteration(W, u_, update=True, eps=1e-12): # Lists holding singular vectors and values Wt=torch.Tensor(W).t() us, vs, svs = [], [], [] for i, u in enumerate(u_): # Run one step of the power iteration with torch.no_grad(): if W.shape[1] == 27: a = 1 v = torch.matmul(u, W) # if (W.shape[0]==u.shape[1]) : # v = torch.matmul(u, W) # else: # v = torch.matmul(u, Wt) # Run Gram-Schmidt to subtract components of all other singular vectors v = F.normalize(gram_schmidt(v, vs), eps=eps) # Add to the list vs += [v] # Update the other singular vector u = torch.matmul(v, Wt) # if (W.shape[0]!=v.shape[1]): # u = torch.matmul(v, Wt ) # else: # u = torch.matmul(v, W) # Run Gram-Schmidt to subtract components of all other singular vectors u = F.normalize(gram_schmidt(u, us), eps=eps) # Add to the list us += [u] if update: torch.copy(u,u_[i]) # u_[i][:] = u # Compute this singular value and add it to the list svs += [torch.squeeze(torch.matmul(torch.matmul(v, Wt), u.t()))] # if (W.shape[0]!=v.shape[1]): # svs += [torch.squeeze(torch.matmul(torch.matmul(v, Wt ), u.t() ))] # else: # svs += [torch.squeeze(torch.matmul(torch.matmul(v, W), u.t()))] #svs += [torch.sum(F.linear(u, W.transpose(0, 1)) * v)] return svs, us, vs # Convenience passthrough function class identity(nn.Module): def forward(self, input): return input # Spectral normalization base class class SN(object): def __init__(self, num_svs, num_itrs, num_outputs, 
transpose=False, eps=1e-12): # Number of power iterations per step self.num_itrs = num_itrs # Number of singular values self.num_svs = num_svs # Transposed? self.transpose = transpose # Epsilon value for avoiding divide-by-0 self.eps = eps self.register_buffer=dict() # Register a singular vector for each sv self.name="%d_%d_%d"%(num_svs, num_itrs, num_outputs) for i in range(self.num_svs): self.__setattr__('u%d' % i,torch.nn.Parameter(torch.randn(1, num_outputs))) self.__setattr__('sv%d' % i, torch.nn.Parameter(torch.ones(1))) # self.register_buffer['u%d' % i]= # self.register_buffer['sv%d' % i]= torch.ones(1) # Singular vectors (u side) @property def u(self): DD=[self.state_dict()['u%d' % i] for i in range(self.num_svs)] return DD # return [self.register_buffer['u%d' % i] for i in range(self.num_svs)] # Singular values; # note that these buffers are just for logging and are not used in training. @property def sv(self): return [self.state_dict()['sv%d' % i] for i in range(self.num_svs)] # return [self.register_buffer['sv%d' % i] for i in range(self.num_svs)] # Compute the spectrally-normalized weight def W_(self): self.training=True if isinstance(self,SNLinear): W_mat = torch.Tensor(self.weight).t() ##linear layer weight is different from pytorch weight, need to transpose else: W_mat = torch.Tensor(self.weight).view(self.weight.shape[0], -1) if self.transpose: W_mat = W_mat.t() # Apply num_itrs power iterations for _ in range(self.num_itrs): svs, us, vs = power_iteration(W_mat, self.u, update=self.training, eps=self.eps) # Update the svs if self.training: with torch.no_grad(): # Make sure to do this in a no_grad() context or you'll get memory leaks! 
for i, sv in enumerate(svs): torch.copy(sv,self.sv[i]) # self.sv[i][:] = sv return self.weight / svs[0] # 2D Conv layer with spectral norm class SNConv2d(nn.Conv2d, SN): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, num_svs=1, num_itrs=1, eps=1e-12): nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) SN.__init__(self, num_svs, num_itrs, out_channels, eps=eps) self.stride=stride self.dilation=dilation self.groups=groups self.padding=padding def forward(self, x): return F.conv2d(x, self.W_(), self.bias, self.stride, self.padding, self.dilation, self.groups) # Linear layer with spectral norm class SNLinear(nn.Linear, SN): def __init__(self, in_features, out_features, bias=True, num_svs=1, num_itrs=1, eps=1e-12): nn.Linear.__init__(self, in_features, out_features, bias) SN.__init__(self, num_svs, num_itrs, out_features, eps=eps) def forward(self, x): return F.linear(x, self.W_(), self.bias) # Embedding layer with spectral norm # We use num_embeddings as the dim instead of embedding_dim here # for convenience sake class SNEmbedding(nn.Embedding, SN): def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2, scale_grad_by_freq=False, sparse=False, _weight=None, num_svs=1, num_itrs=1, eps=1e-12): nn.Embedding.__init__(self, num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, _weight) SN.__init__(self, num_svs, num_itrs, num_embeddings, eps=eps) def forward(self, x): return F.embedding(x ,self.W_()) # A non-local block as used in SA-GAN # Note that the implementation as described in the paper is largely incorrect; # refer to the released code for the actual implementation. 
class Attention(nn.Module): def __init__(self, ch, which_conv=SNConv2d, name='attention'): super(Attention, self).__init__() # Channel multiplier self.ch = ch self.which_conv = which_conv self.theta = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False) self.phi = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False) self.g = self.which_conv(self.ch, self.ch // 2, kernel_size=1, padding=0, bias=False) self.o = self.which_conv(self.ch // 2, self.ch, kernel_size=1, padding=0, bias=False) # Learnable gain parameter self.gamma = P(torch.tensor(0.), requires_grad=True) def forward(self, x, y=None): # Apply convs theta = self.theta(x) phi = F.max_pool2d(self.phi(x), [2,2]) g = F.max_pool2d(self.g(x), [2,2]) # Perform reshapes theta = theta.view(-1, self. ch // 8, x.shape[2] * x.shape[3]) phi = phi.view(-1, self. ch // 8, x.shape[2] * x.shape[3] // 4) g = g.view(-1, self. ch // 2, x.shape[2] * x.shape[3] // 4) # Matmul and softmax to get attention maps beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1) # Attention map times g path o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.ch // 2, x.shape[2], x.shape[3])) return self.gamma * o + x # Fused batchnorm op def fused_bn(x, mean, var, gain=None, bias=None, eps=1e-5): # Apply scale and shift--if gain and bias are provided, fuse them here # Prepare scale scale = torch.rsqrt(var + eps) # If a gain is provided, use it if gain is not None: scale = scale * gain # Prepare shift shift = mean * scale # If bias is provided, use it if bias is not None: shift = shift - bias return x * scale - shift #return ((x - mean) / ((var + eps) ** 0.5)) * gain + bias # The unfused way. 
# Manual BN # Calculate means and variances using mean-of-squares minus mean-squared def manual_bn(x, gain=None, bias=None, return_mean_var=False, eps=1e-5): # Cast x to float32 if necessary float_x = x.float() # Calculate expected value of x (m) and expected value of x**2 (m2) # Mean of x m = torch.mean(float_x, [0, 2, 3], keepdim=True) # Mean of x squared m2 = torch.mean(float_x ** 2, [0, 2, 3], keepdim=True) # Calculate variance as mean of squared minus mean squared. var = (m2 - m **2) # Cast back to float 16 if necessary var = var.type(x.type()) m = m.type(x.type()) # Return mean and variance for updating stored mean/var if requested if return_mean_var: return fused_bn(x, m, var, gain, bias, eps), m.squeeze(), var.squeeze() else: return fused_bn(x, m, var, gain, bias, eps) # My batchnorm, supports standing stats class myBN(nn.Module): def __init__(self, num_channels, eps=1e-5, momentum=0.1): super(myBN, self).__init__() # momentum for updating running stats self.momentum = momentum # epsilon to avoid dividing by 0 self.eps = eps # Momentum self.momentum = momentum # Register buffers self.stored_mean= torch.nn.Parameter( torch.zeros(num_channels)) self.stored_var= torch.nn.Parameter( torch.ones(num_channels)) self.accumulation_counter= torch.nn.Parameter( torch.zeros(1)) # Accumulate running means and vars self.accumulate_standing = False # reset standing stats def reset_stats(self): self.stored_mean[:] = 0 self.stored_var[:] = 0 self.accumulation_counter[:] = 0 def forward(self, x, gain, bias): if self.training: out, mean, var = manual_bn(x, gain, bias, return_mean_var=True, eps=self.eps) # If accumulating standing stats, increment them if self.accumulate_standing: self.stored_mean[:] = self.stored_mean + mean.data self.stored_var[:] = self.stored_var + var.data self.accumulation_counter += 1.0 # If not accumulating standing stats, take running averages else: self.stored_mean[:] = self.stored_mean * (1 - self.momentum) + mean * self.momentum self.stored_var[:] 
= self.stored_var * (1 - self.momentum) + var * self.momentum return out # If not in training mode, use the stored statistics else: mean = self.stored_mean.view(1, -1, 1, 1) var = self.stored_var.view(1, -1, 1, 1) # If using standing stats, divide them by the accumulation counter if self.accumulate_standing: mean = mean / self.accumulation_counter var = var / self.accumulation_counter return fused_bn(x, mean, var, gain, bias, self.eps) # Simple function to handle groupnorm norm stylization def groupnorm(x, norm_style): # If number of channels specified in norm_style: if 'ch' in norm_style: ch = int(norm_style.split('_')[-1]) groups = max(int(x.shape[1]) // ch, 1) # If number of groups specified in norm style elif 'grp' in norm_style: groups = int(norm_style.split('_')[-1]) # If neither, default to groups = 16 else: groups = 16 return F.group_norm(x, groups) # Class-conditional bn # output size is the number of channels, input size is for the linear layers # Andy's Note: this class feels messy but I'm not really sure how to clean it up # Suggestions welcome! (By which I mean, refactor this and make a pull request # if you want to make this more readable/usable). class ccbn(nn.Module): def __init__(self, output_size, input_size, which_linear, eps=1e-5, momentum=0.1, cross_replica=False, mybn=False, norm_style='bn',): super(ccbn, self).__init__() self.output_size, self.input_size = output_size, input_size # Prepare gain and bias layers self.gain = which_linear(input_size, output_size) self.bias = which_linear(input_size, output_size) # epsilon to avoid dividing by 0 self.eps = eps # Momentum self.momentum = momentum # Use cross-replica batchnorm? self.cross_replica = cross_replica # Use my batchnorm? self.mybn = mybn # Norm style? 
self.norm_style = norm_style if self.cross_replica: self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False) elif self.mybn: self.bn = myBN(output_size, self.eps, self.momentum) elif self.norm_style in ['bn', 'in']: self.stored_mean=torch.nn.Parameter(torch.zeros(output_size)) self.stored_var=torch.nn.Parameter(torch.ones(output_size)) def forward(self, x, y): # Calculate class-conditional gains and biases gain = torch.Tensor(1 + self.gain(y)).view(y.size(0), -1, 1, 1) bias = torch.Tensor(self.bias(y)).view(y.size(0), -1, 1, 1) # If using my batchnorm if self.mybn or self.cross_replica: return self.bn(x, gain=gain, bias=bias) # else: else: if self.norm_style == 'bn': out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None, self.training, 0.1, self.eps) elif self.norm_style == 'in': out = F.instance_norm(x, self.stored_mean, self.stored_var, None, None, self.training, 0.1, self.eps) elif self.norm_style == 'gn': out = groupnorm(x, self.normstyle) elif self.norm_style == 'nonorm': out = x return out * gain + bias def extra_repr(self): s = 'out: {output_size}, in: {input_size},' s +=' cross_replica={cross_replica}' return s.format(**self.__dict__) # Normal, non-class-conditional BN class bn(nn.Module): def __init__(self, output_size, eps=1e-5, momentum=0.1, cross_replica=False, mybn=False): super(bn, self).__init__() self.output_size= output_size # Prepare gain and bias layers self.gain = torch.nn.Parameter(output_size,1.0) self.bias = torch.nn.Parameter(output_size,0.0) # epsilon to avoid dividing by 0 self.eps = eps # Momentum self.momentum = momentum # Use cross-replica batchnorm? self.cross_replica = cross_replica # Use my batchnorm? 
self.mybn = mybn if self.cross_replica: self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False) elif mybn: self.bn = myBN(output_size, self.eps, self.momentum) # Register buffers if neither of the above else: self.stored_mean = torch.nn.Parameter(torch.zeros(output_size) ) self.stored_var = torch.nn.Parameter(torch.ones(output_size)) def forward(self, x, y=None): if self.cross_replica or self.mybn: gain = self.gain.view(1,-1,1,1) bias = self.bias.view(1,-1,1,1) return self.bn(x, gain=gain, bias=bias) else: return F.batch_norm(x, self.stored_mean, self.stored_var, self.gain, self.bias, self.training, self.momentum, self.eps) # Generator blocks # Note that this class assumes the kernel size and padding (and any other # settings) have been selected in the main generator module and passed in # through the which_conv arg. Similar rules apply with which_bn (the input # size [which is actually the number of channels of the conditional info] must # be preselected) class GBlock(nn.Module): def __init__(self, in_channels, out_channels, which_conv=nn.Conv2d, which_bn=bn, activation=None, upsample=None): super(GBlock, self).__init__() self.in_channels, self.out_channels = in_channels, out_channels self.which_conv, self.which_bn = which_conv, which_bn self.activation = activation self.upsample = upsample # Conv layers self.conv1 = self.which_conv(self.in_channels, self.out_channels) self.conv2 = self.which_conv(self.out_channels, self.out_channels) self.learnable_sc = in_channels != out_channels or upsample if self.learnable_sc: self.conv_sc = self.which_conv(in_channels, out_channels, kernel_size=1, padding=0) # Batchnorm layers self.bn1 = self.which_bn(in_channels) self.bn2 = self.which_bn(out_channels) # upsample layers self.upsample = upsample def forward(self, x, y): h = self.activation(self.bn1(x, y)) if self.upsample: h = self.upsample(h) x = self.upsample(x) h = self.conv1(h) h = self.activation(self.bn2(h, y)) h = self.conv2(h) if 
self.learnable_sc: x = self.conv_sc(x) return h + x # Residual block for the discriminator class DBlock(nn.Module): def __init__(self, in_channels, out_channels, which_conv=SNConv2d, wide=True, preactivation=False, activation=None, downsample=None,): super(DBlock, self).__init__() self.in_channels, self.out_channels = in_channels, out_channels # If using wide D (as in SA-GAN and BigGAN), change the channel pattern self.hidden_channels = self.out_channels if wide else self.in_channels self.which_conv = which_conv self.preactivation = preactivation self.activation = activation self.downsample = downsample # Conv layers self.conv1 = self.which_conv(self.in_channels, self.hidden_channels) self.conv2 = self.which_conv(self.hidden_channels, self.out_channels) self.learnable_sc = True if (in_channels != out_channels) or downsample else False if self.learnable_sc: self.conv_sc = self.which_conv(in_channels, out_channels, kernel_size=1, padding=0) def shortcut(self, x): if self.preactivation: if self.learnable_sc: x = self.conv_sc(x) if self.downsample: x = self.downsample(x) else: if self.downsample: x = self.downsample(x) if self.learnable_sc: x = self.conv_sc(x) return x def forward(self, x): if self.preactivation: # h = self.activation(x) # NOT TODAY SATAN # Andy's note: This line *must* be an out-of-place ReLU or it # will negatively affect the shortcut connection. h = F.relu(x) else: h = x h = self.conv1(h) h = self.conv2(self.activation(h)) if self.downsample: h = self.downsample(h) return h + self.shortcut(x) # dogball
36.652695
117
0.650711
import numpy as np import paddorch as torch import paddorch.nn as nn from paddorch.nn import init import paddorch.optim as optim import paddorch.nn.functional as F from paddorch.nn import Parameter as P def proj(x, y): return torch.mm(y, x.t()) * y / torch.mm(y, y.t()) def gram_schmidt(x, ys): for y in ys: x = x - proj(x, y) return x def power_iteration(W, u_, update=True, eps=1e-12): Wt=torch.Tensor(W).t() us, vs, svs = [], [], [] for i, u in enumerate(u_): with torch.no_grad(): if W.shape[1] == 27: a = 1 v = torch.matmul(u, W) v = F.normalize(gram_schmidt(v, vs), eps=eps) vs += [v] u = torch.matmul(v, Wt) u = F.normalize(gram_schmidt(u, us), eps=eps) us += [u] if update: torch.copy(u,u_[i]) svs += [torch.squeeze(torch.matmul(torch.matmul(v, Wt), u.t()))] return svs, us, vs class identity(nn.Module): def forward(self, input): return input class SN(object): def __init__(self, num_svs, num_itrs, num_outputs, transpose=False, eps=1e-12): self.num_itrs = num_itrs self.num_svs = num_svs self.transpose = transpose self.eps = eps self.register_buffer=dict() self.name="%d_%d_%d"%(num_svs, num_itrs, num_outputs) for i in range(self.num_svs): self.__setattr__('u%d' % i,torch.nn.Parameter(torch.randn(1, num_outputs))) self.__setattr__('sv%d' % i, torch.nn.Parameter(torch.ones(1))) @property def u(self): DD=[self.state_dict()['u%d' % i] for i in range(self.num_svs)] return DD @property def sv(self): return [self.state_dict()['sv%d' % i] for i in range(self.num_svs)] def W_(self): self.training=True if isinstance(self,SNLinear): W_mat = torch.Tensor(self.weight).t() e[0], -1) if self.transpose: W_mat = W_mat.t() for _ in range(self.num_itrs): svs, us, vs = power_iteration(W_mat, self.u, update=self.training, eps=self.eps) if self.training: with torch.no_grad(): for i, sv in enumerate(svs): torch.copy(sv,self.sv[i]) # self.sv[i][:] = sv return self.weight / svs[0] # 2D Conv layer with spectral norm class SNConv2d(nn.Conv2d, SN): def __init__(self, in_channels, out_channels, 
kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, num_svs=1, num_itrs=1, eps=1e-12): nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias) SN.__init__(self, num_svs, num_itrs, out_channels, eps=eps) self.stride=stride self.dilation=dilation self.groups=groups self.padding=padding def forward(self, x): return F.conv2d(x, self.W_(), self.bias, self.stride, self.padding, self.dilation, self.groups) # Linear layer with spectral norm class SNLinear(nn.Linear, SN): def __init__(self, in_features, out_features, bias=True, num_svs=1, num_itrs=1, eps=1e-12): nn.Linear.__init__(self, in_features, out_features, bias) SN.__init__(self, num_svs, num_itrs, out_features, eps=eps) def forward(self, x): return F.linear(x, self.W_(), self.bias) # Embedding layer with spectral norm # We use num_embeddings as the dim instead of embedding_dim here # for convenience sake class SNEmbedding(nn.Embedding, SN): def __init__(self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2, scale_grad_by_freq=False, sparse=False, _weight=None, num_svs=1, num_itrs=1, eps=1e-12): nn.Embedding.__init__(self, num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, _weight) SN.__init__(self, num_svs, num_itrs, num_embeddings, eps=eps) def forward(self, x): return F.embedding(x ,self.W_()) # A non-local block as used in SA-GAN # Note that the implementation as described in the paper is largely incorrect; # refer to the released code for the actual implementation. 
class Attention(nn.Module): def __init__(self, ch, which_conv=SNConv2d, name='attention'): super(Attention, self).__init__() # Channel multiplier self.ch = ch self.which_conv = which_conv self.theta = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False) self.phi = self.which_conv(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False) self.g = self.which_conv(self.ch, self.ch // 2, kernel_size=1, padding=0, bias=False) self.o = self.which_conv(self.ch // 2, self.ch, kernel_size=1, padding=0, bias=False) # Learnable gain parameter self.gamma = P(torch.tensor(0.), requires_grad=True) def forward(self, x, y=None): # Apply convs theta = self.theta(x) phi = F.max_pool2d(self.phi(x), [2,2]) g = F.max_pool2d(self.g(x), [2,2]) # Perform reshapes theta = theta.view(-1, self. ch // 8, x.shape[2] * x.shape[3]) phi = phi.view(-1, self. ch // 8, x.shape[2] * x.shape[3] // 4) g = g.view(-1, self. ch // 2, x.shape[2] * x.shape[3] // 4) # Matmul and softmax to get attention maps beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1) # Attention map times g path o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.ch // 2, x.shape[2], x.shape[3])) return self.gamma * o + x # Fused batchnorm op def fused_bn(x, mean, var, gain=None, bias=None, eps=1e-5): # Apply scale and shift--if gain and bias are provided, fuse them here # Prepare scale scale = torch.rsqrt(var + eps) # If a gain is provided, use it if gain is not None: scale = scale * gain # Prepare shift shift = mean * scale # If bias is provided, use it if bias is not None: shift = shift - bias return x * scale - shift #return ((x - mean) / ((var + eps) ** 0.5)) * gain + bias # The unfused way. 
# Manual BN # Calculate means and variances using mean-of-squares minus mean-squared def manual_bn(x, gain=None, bias=None, return_mean_var=False, eps=1e-5): # Cast x to float32 if necessary float_x = x.float() # Calculate expected value of x (m) and expected value of x**2 (m2) # Mean of x m = torch.mean(float_x, [0, 2, 3], keepdim=True) # Mean of x squared m2 = torch.mean(float_x ** 2, [0, 2, 3], keepdim=True) # Calculate variance as mean of squared minus mean squared. var = (m2 - m **2) # Cast back to float 16 if necessary var = var.type(x.type()) m = m.type(x.type()) # Return mean and variance for updating stored mean/var if requested if return_mean_var: return fused_bn(x, m, var, gain, bias, eps), m.squeeze(), var.squeeze() else: return fused_bn(x, m, var, gain, bias, eps) # My batchnorm, supports standing stats class myBN(nn.Module): def __init__(self, num_channels, eps=1e-5, momentum=0.1): super(myBN, self).__init__() # momentum for updating running stats self.momentum = momentum # epsilon to avoid dividing by 0 self.eps = eps # Momentum self.momentum = momentum # Register buffers self.stored_mean= torch.nn.Parameter( torch.zeros(num_channels)) self.stored_var= torch.nn.Parameter( torch.ones(num_channels)) self.accumulation_counter= torch.nn.Parameter( torch.zeros(1)) # Accumulate running means and vars self.accumulate_standing = False # reset standing stats def reset_stats(self): self.stored_mean[:] = 0 self.stored_var[:] = 0 self.accumulation_counter[:] = 0 def forward(self, x, gain, bias): if self.training: out, mean, var = manual_bn(x, gain, bias, return_mean_var=True, eps=self.eps) # If accumulating standing stats, increment them if self.accumulate_standing: self.stored_mean[:] = self.stored_mean + mean.data self.stored_var[:] = self.stored_var + var.data self.accumulation_counter += 1.0 # If not accumulating standing stats, take running averages else: self.stored_mean[:] = self.stored_mean * (1 - self.momentum) + mean * self.momentum self.stored_var[:] 
= self.stored_var * (1 - self.momentum) + var * self.momentum return out # If not in training mode, use the stored statistics else: mean = self.stored_mean.view(1, -1, 1, 1) var = self.stored_var.view(1, -1, 1, 1) # If using standing stats, divide them by the accumulation counter if self.accumulate_standing: mean = mean / self.accumulation_counter var = var / self.accumulation_counter return fused_bn(x, mean, var, gain, bias, self.eps) # Simple function to handle groupnorm norm stylization def groupnorm(x, norm_style): # If number of channels specified in norm_style: if 'ch' in norm_style: ch = int(norm_style.split('_')[-1]) groups = max(int(x.shape[1]) // ch, 1) # If number of groups specified in norm style elif 'grp' in norm_style: groups = int(norm_style.split('_')[-1]) # If neither, default to groups = 16 else: groups = 16 return F.group_norm(x, groups) # Class-conditional bn # output size is the number of channels, input size is for the linear layers # Andy's Note: this class feels messy but I'm not really sure how to clean it up # Suggestions welcome! (By which I mean, refactor this and make a pull request # if you want to make this more readable/usable). class ccbn(nn.Module): def __init__(self, output_size, input_size, which_linear, eps=1e-5, momentum=0.1, cross_replica=False, mybn=False, norm_style='bn',): super(ccbn, self).__init__() self.output_size, self.input_size = output_size, input_size # Prepare gain and bias layers self.gain = which_linear(input_size, output_size) self.bias = which_linear(input_size, output_size) # epsilon to avoid dividing by 0 self.eps = eps # Momentum self.momentum = momentum # Use cross-replica batchnorm? self.cross_replica = cross_replica # Use my batchnorm? self.mybn = mybn # Norm style? 
self.norm_style = norm_style if self.cross_replica: self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False) elif self.mybn: self.bn = myBN(output_size, self.eps, self.momentum) elif self.norm_style in ['bn', 'in']: self.stored_mean=torch.nn.Parameter(torch.zeros(output_size)) self.stored_var=torch.nn.Parameter(torch.ones(output_size)) def forward(self, x, y): # Calculate class-conditional gains and biases gain = torch.Tensor(1 + self.gain(y)).view(y.size(0), -1, 1, 1) bias = torch.Tensor(self.bias(y)).view(y.size(0), -1, 1, 1) # If using my batchnorm if self.mybn or self.cross_replica: return self.bn(x, gain=gain, bias=bias) # else: else: if self.norm_style == 'bn': out = F.batch_norm(x, self.stored_mean, self.stored_var, None, None, self.training, 0.1, self.eps) elif self.norm_style == 'in': out = F.instance_norm(x, self.stored_mean, self.stored_var, None, None, self.training, 0.1, self.eps) elif self.norm_style == 'gn': out = groupnorm(x, self.normstyle) elif self.norm_style == 'nonorm': out = x return out * gain + bias def extra_repr(self): s = 'out: {output_size}, in: {input_size},' s +=' cross_replica={cross_replica}' return s.format(**self.__dict__) # Normal, non-class-conditional BN class bn(nn.Module): def __init__(self, output_size, eps=1e-5, momentum=0.1, cross_replica=False, mybn=False): super(bn, self).__init__() self.output_size= output_size # Prepare gain and bias layers self.gain = torch.nn.Parameter(output_size,1.0) self.bias = torch.nn.Parameter(output_size,0.0) # epsilon to avoid dividing by 0 self.eps = eps # Momentum self.momentum = momentum # Use cross-replica batchnorm? self.cross_replica = cross_replica # Use my batchnorm? 
self.mybn = mybn if self.cross_replica: self.bn = SyncBN2d(output_size, eps=self.eps, momentum=self.momentum, affine=False) elif mybn: self.bn = myBN(output_size, self.eps, self.momentum) # Register buffers if neither of the above else: self.stored_mean = torch.nn.Parameter(torch.zeros(output_size) ) self.stored_var = torch.nn.Parameter(torch.ones(output_size)) def forward(self, x, y=None): if self.cross_replica or self.mybn: gain = self.gain.view(1,-1,1,1) bias = self.bias.view(1,-1,1,1) return self.bn(x, gain=gain, bias=bias) else: return F.batch_norm(x, self.stored_mean, self.stored_var, self.gain, self.bias, self.training, self.momentum, self.eps) # Generator blocks # Note that this class assumes the kernel size and padding (and any other # settings) have been selected in the main generator module and passed in # through the which_conv arg. Similar rules apply with which_bn (the input # size [which is actually the number of channels of the conditional info] must # be preselected) class GBlock(nn.Module): def __init__(self, in_channels, out_channels, which_conv=nn.Conv2d, which_bn=bn, activation=None, upsample=None): super(GBlock, self).__init__() self.in_channels, self.out_channels = in_channels, out_channels self.which_conv, self.which_bn = which_conv, which_bn self.activation = activation self.upsample = upsample # Conv layers self.conv1 = self.which_conv(self.in_channels, self.out_channels) self.conv2 = self.which_conv(self.out_channels, self.out_channels) self.learnable_sc = in_channels != out_channels or upsample if self.learnable_sc: self.conv_sc = self.which_conv(in_channels, out_channels, kernel_size=1, padding=0) # Batchnorm layers self.bn1 = self.which_bn(in_channels) self.bn2 = self.which_bn(out_channels) # upsample layers self.upsample = upsample def forward(self, x, y): h = self.activation(self.bn1(x, y)) if self.upsample: h = self.upsample(h) x = self.upsample(x) h = self.conv1(h) h = self.activation(self.bn2(h, y)) h = self.conv2(h) if 
self.learnable_sc: x = self.conv_sc(x) return h + x # Residual block for the discriminator class DBlock(nn.Module): def __init__(self, in_channels, out_channels, which_conv=SNConv2d, wide=True, preactivation=False, activation=None, downsample=None,): super(DBlock, self).__init__() self.in_channels, self.out_channels = in_channels, out_channels # If using wide D (as in SA-GAN and BigGAN), change the channel pattern self.hidden_channels = self.out_channels if wide else self.in_channels self.which_conv = which_conv self.preactivation = preactivation self.activation = activation self.downsample = downsample # Conv layers self.conv1 = self.which_conv(self.in_channels, self.hidden_channels) self.conv2 = self.which_conv(self.hidden_channels, self.out_channels) self.learnable_sc = True if (in_channels != out_channels) or downsample else False if self.learnable_sc: self.conv_sc = self.which_conv(in_channels, out_channels, kernel_size=1, padding=0) def shortcut(self, x): if self.preactivation: if self.learnable_sc: x = self.conv_sc(x) if self.downsample: x = self.downsample(x) else: if self.downsample: x = self.downsample(x) if self.learnable_sc: x = self.conv_sc(x) return x def forward(self, x): if self.preactivation: # h = self.activation(x) # NOT TODAY SATAN # Andy's note: This line *must* be an out-of-place ReLU or it h = F.relu(x) else: h = x h = self.conv1(h) h = self.conv2(self.activation(h)) if self.downsample: h = self.downsample(h) return h + self.shortcut(x)
true
true
f71371d098e9f5376f73de0b5044accfe1bd489e
1,664
py
Python
barbican-8.0.0/barbican/objects/secret_stores.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
null
null
null
barbican-8.0.0/barbican/objects/secret_stores.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
5
2019-08-14T06:46:03.000Z
2021-12-13T20:01:25.000Z
barbican-8.0.0/barbican/objects/secret_stores.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
2
2020-03-15T01:24:15.000Z
2020-07-22T20:34:26.000Z
# Copyright 2018 Fujitsu. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_versionedobjects import base as object_base from barbican.model import models from barbican.model import repositories as repos from barbican.objects import base from barbican.objects import fields @object_base.VersionedObjectRegistry.register class SecretStores(base.BarbicanObject, base.BarbicanPersistentObject, object_base.VersionedObjectDictCompat): fields = { 'store_plugin': fields.StringField(), 'crypto_plugin': fields.StringField(nullable=True), 'global_default': fields.BooleanField(default=False), 'name': fields.StringField(), 'status': fields.StringField(nullable=True, default=base.States.ACTIVE) } db_model = models.SecretStores db_repo = repos.get_secret_stores_repository() @classmethod def get_all(cls, session=None): secret_stores_db = cls.db_repo.get_all(session) secret_stores_obj = [cls()._from_db_object(secret_store_db) for secret_store_db in secret_stores_db] return secret_stores_obj
39.619048
79
0.72476
from oslo_versionedobjects import base as object_base from barbican.model import models from barbican.model import repositories as repos from barbican.objects import base from barbican.objects import fields @object_base.VersionedObjectRegistry.register class SecretStores(base.BarbicanObject, base.BarbicanPersistentObject, object_base.VersionedObjectDictCompat): fields = { 'store_plugin': fields.StringField(), 'crypto_plugin': fields.StringField(nullable=True), 'global_default': fields.BooleanField(default=False), 'name': fields.StringField(), 'status': fields.StringField(nullable=True, default=base.States.ACTIVE) } db_model = models.SecretStores db_repo = repos.get_secret_stores_repository() @classmethod def get_all(cls, session=None): secret_stores_db = cls.db_repo.get_all(session) secret_stores_obj = [cls()._from_db_object(secret_store_db) for secret_store_db in secret_stores_db] return secret_stores_obj
true
true
f713726fcee2e8beb0a47237e029791db921f487
8,823
py
Python
graphene/types/tests/test_typemap.py
EquityZen/graphene
1a29006312ad5cd68343511860d6041d16e700c6
[ "MIT" ]
2
2021-06-14T20:01:22.000Z
2022-01-07T12:56:53.000Z
graphene/types/tests/test_typemap.py
EquityZen/graphene
1a29006312ad5cd68343511860d6041d16e700c6
[ "MIT" ]
13
2020-03-24T17:53:51.000Z
2022-02-10T20:01:14.000Z
graphene/types/tests/test_typemap.py
EquityZen/graphene
1a29006312ad5cd68343511860d6041d16e700c6
[ "MIT" ]
2
2021-04-12T18:16:00.000Z
2021-06-26T05:01:18.000Z
import pytest from graphql.type import ( GraphQLArgument, GraphQLEnumType, GraphQLEnumValue, GraphQLField, GraphQLInputObjectField, GraphQLInputObjectType, GraphQLInterfaceType, GraphQLObjectType, GraphQLString, ) from ..dynamic import Dynamic from ..enum import Enum from ..field import Field from ..inputfield import InputField from ..inputobjecttype import InputObjectType from ..interface import Interface from ..objecttype import ObjectType from ..scalars import Int, String from ..structures import List, NonNull from ..typemap import TypeMap, resolve_type def test_enum(): class MyEnum(Enum): """Description""" foo = 1 bar = 2 @property def description(self): return "Description {}={}".format(self.name, self.value) @property def deprecation_reason(self): if self == MyEnum.foo: return "Is deprecated" typemap = TypeMap([MyEnum]) assert "MyEnum" in typemap graphql_enum = typemap["MyEnum"] assert isinstance(graphql_enum, GraphQLEnumType) assert graphql_enum.name == "MyEnum" assert graphql_enum.description == "Description" values = graphql_enum.values assert values == [ GraphQLEnumValue( name="foo", value=1, description="Description foo=1", deprecation_reason="Is deprecated", ), GraphQLEnumValue(name="bar", value=2, description="Description bar=2"), ] def test_objecttype(): class MyObjectType(ObjectType): """Description""" foo = String( bar=String(description="Argument description", default_value="x"), description="Field description", ) bar = String(name="gizmo") def resolve_foo(self, bar): return bar typemap = TypeMap([MyObjectType]) assert "MyObjectType" in typemap graphql_type = typemap["MyObjectType"] assert isinstance(graphql_type, GraphQLObjectType) assert graphql_type.name == "MyObjectType" assert graphql_type.description == "Description" fields = graphql_type.fields assert list(fields.keys()) == ["foo", "gizmo"] foo_field = fields["foo"] assert isinstance(foo_field, GraphQLField) assert foo_field.description == "Field description" assert foo_field.args == { 
"bar": GraphQLArgument( GraphQLString, description="Argument description", default_value="x", out_name="bar", ) } def test_dynamic_objecttype(): class MyObjectType(ObjectType): """Description""" bar = Dynamic(lambda: Field(String)) own = Field(lambda: MyObjectType) typemap = TypeMap([MyObjectType]) assert "MyObjectType" in typemap assert list(MyObjectType._meta.fields.keys()) == ["bar", "own"] graphql_type = typemap["MyObjectType"] fields = graphql_type.fields assert list(fields.keys()) == ["bar", "own"] assert fields["bar"].type == GraphQLString assert fields["own"].type == graphql_type def test_interface(): class MyInterface(Interface): """Description""" foo = String( bar=String(description="Argument description", default_value="x"), description="Field description", ) bar = String(name="gizmo", first_arg=String(), other_arg=String(name="oth_arg")) own = Field(lambda: MyInterface) def resolve_foo(self, args, info): return args.get("bar") typemap = TypeMap([MyInterface]) assert "MyInterface" in typemap graphql_type = typemap["MyInterface"] assert isinstance(graphql_type, GraphQLInterfaceType) assert graphql_type.name == "MyInterface" assert graphql_type.description == "Description" fields = graphql_type.fields assert list(fields.keys()) == ["foo", "gizmo", "own"] assert fields["own"].type == graphql_type assert list(fields["gizmo"].args.keys()) == ["firstArg", "oth_arg"] foo_field = fields["foo"] assert isinstance(foo_field, GraphQLField) assert foo_field.description == "Field description" assert not foo_field.resolver # Resolver not attached in interfaces assert foo_field.args == { "bar": GraphQLArgument( GraphQLString, description="Argument description", default_value="x", out_name="bar", ) } def test_inputobject(): class OtherObjectType(InputObjectType): thingy = NonNull(Int) class MyInnerObjectType(InputObjectType): some_field = String() some_other_field = List(OtherObjectType) class MyInputObjectType(InputObjectType): """Description""" foo_bar = 
String(description="Field description") bar = String(name="gizmo") baz = NonNull(MyInnerObjectType) own = InputField(lambda: MyInputObjectType) def resolve_foo_bar(self, args, info): return args.get("bar") typemap = TypeMap([MyInputObjectType]) assert "MyInputObjectType" in typemap graphql_type = typemap["MyInputObjectType"] assert isinstance(graphql_type, GraphQLInputObjectType) assert graphql_type.name == "MyInputObjectType" assert graphql_type.description == "Description" other_graphql_type = typemap["OtherObjectType"] inner_graphql_type = typemap["MyInnerObjectType"] container = graphql_type.create_container( { "bar": "oh!", "baz": inner_graphql_type.create_container( { "some_other_field": [ other_graphql_type.create_container({"thingy": 1}), other_graphql_type.create_container({"thingy": 2}), ] } ), } ) assert isinstance(container, MyInputObjectType) assert "bar" in container assert container.bar == "oh!" assert "foo_bar" not in container assert container.foo_bar is None assert container.baz.some_field is None assert container.baz.some_other_field[0].thingy == 1 assert container.baz.some_other_field[1].thingy == 2 fields = graphql_type.fields assert list(fields.keys()) == ["fooBar", "gizmo", "baz", "own"] own_field = fields["own"] assert own_field.type == graphql_type foo_field = fields["fooBar"] assert isinstance(foo_field, GraphQLInputObjectField) assert foo_field.description == "Field description" def test_objecttype_camelcase(): class MyObjectType(ObjectType): """Description""" foo_bar = String(bar_foo=String()) typemap = TypeMap([MyObjectType]) assert "MyObjectType" in typemap graphql_type = typemap["MyObjectType"] assert isinstance(graphql_type, GraphQLObjectType) assert graphql_type.name == "MyObjectType" assert graphql_type.description == "Description" fields = graphql_type.fields assert list(fields.keys()) == ["fooBar"] foo_field = fields["fooBar"] assert isinstance(foo_field, GraphQLField) assert foo_field.args == { "barFoo": 
GraphQLArgument(GraphQLString, out_name="bar_foo") } def test_objecttype_camelcase_disabled(): class MyObjectType(ObjectType): """Description""" foo_bar = String(bar_foo=String()) typemap = TypeMap([MyObjectType], auto_camelcase=False) assert "MyObjectType" in typemap graphql_type = typemap["MyObjectType"] assert isinstance(graphql_type, GraphQLObjectType) assert graphql_type.name == "MyObjectType" assert graphql_type.description == "Description" fields = graphql_type.fields assert list(fields.keys()) == ["foo_bar"] foo_field = fields["foo_bar"] assert isinstance(foo_field, GraphQLField) assert foo_field.args == { "bar_foo": GraphQLArgument(GraphQLString, out_name="bar_foo") } def test_objecttype_with_possible_types(): class MyObjectType(ObjectType): """Description""" class Meta: possible_types = (dict,) foo_bar = String() typemap = TypeMap([MyObjectType]) graphql_type = typemap["MyObjectType"] assert graphql_type.is_type_of assert graphql_type.is_type_of({}, None) is True assert graphql_type.is_type_of(MyObjectType(), None) is False def test_resolve_type_with_missing_type(): class MyObjectType(ObjectType): foo_bar = String() class MyOtherObjectType(ObjectType): fizz_buzz = String() def resolve_type_func(root, info): return MyOtherObjectType typemap = TypeMap([MyObjectType]) with pytest.raises(AssertionError) as excinfo: resolve_type(resolve_type_func, typemap, "MyOtherObjectType", {}, {}) assert "MyOtherObjectTyp" in str(excinfo.value)
30.74216
88
0.657146
import pytest from graphql.type import ( GraphQLArgument, GraphQLEnumType, GraphQLEnumValue, GraphQLField, GraphQLInputObjectField, GraphQLInputObjectType, GraphQLInterfaceType, GraphQLObjectType, GraphQLString, ) from ..dynamic import Dynamic from ..enum import Enum from ..field import Field from ..inputfield import InputField from ..inputobjecttype import InputObjectType from ..interface import Interface from ..objecttype import ObjectType from ..scalars import Int, String from ..structures import List, NonNull from ..typemap import TypeMap, resolve_type def test_enum(): class MyEnum(Enum): foo = 1 bar = 2 @property def description(self): return "Description {}={}".format(self.name, self.value) @property def deprecation_reason(self): if self == MyEnum.foo: return "Is deprecated" typemap = TypeMap([MyEnum]) assert "MyEnum" in typemap graphql_enum = typemap["MyEnum"] assert isinstance(graphql_enum, GraphQLEnumType) assert graphql_enum.name == "MyEnum" assert graphql_enum.description == "Description" values = graphql_enum.values assert values == [ GraphQLEnumValue( name="foo", value=1, description="Description foo=1", deprecation_reason="Is deprecated", ), GraphQLEnumValue(name="bar", value=2, description="Description bar=2"), ] def test_objecttype(): class MyObjectType(ObjectType): foo = String( bar=String(description="Argument description", default_value="x"), description="Field description", ) bar = String(name="gizmo") def resolve_foo(self, bar): return bar typemap = TypeMap([MyObjectType]) assert "MyObjectType" in typemap graphql_type = typemap["MyObjectType"] assert isinstance(graphql_type, GraphQLObjectType) assert graphql_type.name == "MyObjectType" assert graphql_type.description == "Description" fields = graphql_type.fields assert list(fields.keys()) == ["foo", "gizmo"] foo_field = fields["foo"] assert isinstance(foo_field, GraphQLField) assert foo_field.description == "Field description" assert foo_field.args == { "bar": GraphQLArgument( GraphQLString, 
description="Argument description", default_value="x", out_name="bar", ) } def test_dynamic_objecttype(): class MyObjectType(ObjectType): bar = Dynamic(lambda: Field(String)) own = Field(lambda: MyObjectType) typemap = TypeMap([MyObjectType]) assert "MyObjectType" in typemap assert list(MyObjectType._meta.fields.keys()) == ["bar", "own"] graphql_type = typemap["MyObjectType"] fields = graphql_type.fields assert list(fields.keys()) == ["bar", "own"] assert fields["bar"].type == GraphQLString assert fields["own"].type == graphql_type def test_interface(): class MyInterface(Interface): foo = String( bar=String(description="Argument description", default_value="x"), description="Field description", ) bar = String(name="gizmo", first_arg=String(), other_arg=String(name="oth_arg")) own = Field(lambda: MyInterface) def resolve_foo(self, args, info): return args.get("bar") typemap = TypeMap([MyInterface]) assert "MyInterface" in typemap graphql_type = typemap["MyInterface"] assert isinstance(graphql_type, GraphQLInterfaceType) assert graphql_type.name == "MyInterface" assert graphql_type.description == "Description" fields = graphql_type.fields assert list(fields.keys()) == ["foo", "gizmo", "own"] assert fields["own"].type == graphql_type assert list(fields["gizmo"].args.keys()) == ["firstArg", "oth_arg"] foo_field = fields["foo"] assert isinstance(foo_field, GraphQLField) assert foo_field.description == "Field description" assert not foo_field.resolver assert foo_field.args == { "bar": GraphQLArgument( GraphQLString, description="Argument description", default_value="x", out_name="bar", ) } def test_inputobject(): class OtherObjectType(InputObjectType): thingy = NonNull(Int) class MyInnerObjectType(InputObjectType): some_field = String() some_other_field = List(OtherObjectType) class MyInputObjectType(InputObjectType): foo_bar = String(description="Field description") bar = String(name="gizmo") baz = NonNull(MyInnerObjectType) own = InputField(lambda: MyInputObjectType) 
def resolve_foo_bar(self, args, info): return args.get("bar") typemap = TypeMap([MyInputObjectType]) assert "MyInputObjectType" in typemap graphql_type = typemap["MyInputObjectType"] assert isinstance(graphql_type, GraphQLInputObjectType) assert graphql_type.name == "MyInputObjectType" assert graphql_type.description == "Description" other_graphql_type = typemap["OtherObjectType"] inner_graphql_type = typemap["MyInnerObjectType"] container = graphql_type.create_container( { "bar": "oh!", "baz": inner_graphql_type.create_container( { "some_other_field": [ other_graphql_type.create_container({"thingy": 1}), other_graphql_type.create_container({"thingy": 2}), ] } ), } ) assert isinstance(container, MyInputObjectType) assert "bar" in container assert container.bar == "oh!" assert "foo_bar" not in container assert container.foo_bar is None assert container.baz.some_field is None assert container.baz.some_other_field[0].thingy == 1 assert container.baz.some_other_field[1].thingy == 2 fields = graphql_type.fields assert list(fields.keys()) == ["fooBar", "gizmo", "baz", "own"] own_field = fields["own"] assert own_field.type == graphql_type foo_field = fields["fooBar"] assert isinstance(foo_field, GraphQLInputObjectField) assert foo_field.description == "Field description" def test_objecttype_camelcase(): class MyObjectType(ObjectType): foo_bar = String(bar_foo=String()) typemap = TypeMap([MyObjectType]) assert "MyObjectType" in typemap graphql_type = typemap["MyObjectType"] assert isinstance(graphql_type, GraphQLObjectType) assert graphql_type.name == "MyObjectType" assert graphql_type.description == "Description" fields = graphql_type.fields assert list(fields.keys()) == ["fooBar"] foo_field = fields["fooBar"] assert isinstance(foo_field, GraphQLField) assert foo_field.args == { "barFoo": GraphQLArgument(GraphQLString, out_name="bar_foo") } def test_objecttype_camelcase_disabled(): class MyObjectType(ObjectType): foo_bar = String(bar_foo=String()) typemap = 
TypeMap([MyObjectType], auto_camelcase=False) assert "MyObjectType" in typemap graphql_type = typemap["MyObjectType"] assert isinstance(graphql_type, GraphQLObjectType) assert graphql_type.name == "MyObjectType" assert graphql_type.description == "Description" fields = graphql_type.fields assert list(fields.keys()) == ["foo_bar"] foo_field = fields["foo_bar"] assert isinstance(foo_field, GraphQLField) assert foo_field.args == { "bar_foo": GraphQLArgument(GraphQLString, out_name="bar_foo") } def test_objecttype_with_possible_types(): class MyObjectType(ObjectType): class Meta: possible_types = (dict,) foo_bar = String() typemap = TypeMap([MyObjectType]) graphql_type = typemap["MyObjectType"] assert graphql_type.is_type_of assert graphql_type.is_type_of({}, None) is True assert graphql_type.is_type_of(MyObjectType(), None) is False def test_resolve_type_with_missing_type(): class MyObjectType(ObjectType): foo_bar = String() class MyOtherObjectType(ObjectType): fizz_buzz = String() def resolve_type_func(root, info): return MyOtherObjectType typemap = TypeMap([MyObjectType]) with pytest.raises(AssertionError) as excinfo: resolve_type(resolve_type_func, typemap, "MyOtherObjectType", {}, {}) assert "MyOtherObjectTyp" in str(excinfo.value)
true
true
f713734d192bdf00dea92d3bc6a7b7a86f5a0fbf
316
py
Python
tests/pyre/ipc/selector.py
BryanRiel/pyre
179359634a7091979cced427b6133dd0ec4726ea
[ "BSD-3-Clause" ]
null
null
null
tests/pyre/ipc/selector.py
BryanRiel/pyre
179359634a7091979cced427b6133dd0ec4726ea
[ "BSD-3-Clause" ]
null
null
null
tests/pyre/ipc/selector.py
BryanRiel/pyre
179359634a7091979cced427b6133dd0ec4726ea
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # michael a.g. aïvázis # orthologue # (c) 1998-2018 all rights reserved # """ Sanity check: verify that the selector factory is accessible """ def test(): from pyre.ipc import selector return # main if __name__ == "__main__": test() # end of file
12.153846
60
0.639241
def test(): from pyre.ipc import selector return if __name__ == "__main__": test()
true
true
f71373e8f7e5bc825e55b12e30ae3f7b39684ab4
2,586
py
Python
subscription_manager/events/topic_handlers.py
eurocontrol-swim/subscription-manager
95700334cb5d58957043c6c487b56b1dd6641ec0
[ "BSD-3-Clause" ]
null
null
null
subscription_manager/events/topic_handlers.py
eurocontrol-swim/subscription-manager
95700334cb5d58957043c6c487b56b1dd6641ec0
[ "BSD-3-Clause" ]
null
null
null
subscription_manager/events/topic_handlers.py
eurocontrol-swim/subscription-manager
95700334cb5d58957043c6c487b56b1dd6641ec0
[ "BSD-3-Clause" ]
null
null
null
""" Copyright 2019 EUROCONTROL ========================================== Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
========================================== Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative: http://opensource.org/licenses/BSD-3-Clause Details on EUROCONTROL: http://www.eurocontrol.int """ from subscription_manager.db import topics as db from subscription_manager.events.subscription_handlers import delete_subscription_handler __author__ = "EUROCONTROL (SWIM)" def create_topic_handler(topic): db.create_topic(topic) # def update_topic_handler(current_topic, updated_topic): # db.update_topic(updated_topic) # # for subscription in updated_topic.subscriptions: # broker.delete_queue_binding(queue=subscription.queue, topic=current_topic.name) # broker.bind_queue_to_topic(queue=subscription.queue, topic=updated_topic.name, durable=subscription.durable) def delete_topic_handler(topic): db.delete_topic(topic) def delete_topic_subscriptions_handler(topic): for subscription in topic.subscriptions: delete_subscription_handler(subscription)
47.018182
121
0.776875
from subscription_manager.db import topics as db from subscription_manager.events.subscription_handlers import delete_subscription_handler __author__ = "EUROCONTROL (SWIM)" def create_topic_handler(topic): db.create_topic(topic) def delete_topic_handler(topic): db.delete_topic(topic) def delete_topic_subscriptions_handler(topic): for subscription in topic.subscriptions: delete_subscription_handler(subscription)
true
true
f71374fb414fca995be9a3b2c9b33160564881bd
606
py
Python
order/soq.py
ipudu/order
156525d25847a7940c5e64b7dd98bf193fb4cc9e
[ "MIT" ]
6
2017-07-13T00:32:12.000Z
2019-08-19T16:01:12.000Z
order/soq.py
ipudu/order
156525d25847a7940c5e64b7dd98bf193fb4cc9e
[ "MIT" ]
5
2017-07-19T19:00:14.000Z
2021-03-09T13:52:29.000Z
order/soq.py
ipudu/order
156525d25847a7940c5e64b7dd98bf193fb4cc9e
[ "MIT" ]
6
2017-07-13T19:46:28.000Z
2022-03-23T05:01:10.000Z
############################################################################### # -*- coding: utf-8 -*- # Order: A tool to characterize the local structure of liquid water # by geometric order parameters # # Authors: Pu Du # # Released under the MIT License ############################################################################### from __future__ import print_function, division import os import six from six.moves import range import numpy as np from progress.bar import ChargingBar from .util import pbc from . import oto class StructureFactor(): """Structure Factor""" pass
24.24
79
0.536304
true
true
f71376995ab6d6fafa06699fc9b6e0627798befe
1,375
py
Python
setup.py
lebedov/idisplay
26651a144ee4f648d7e9251ac06acd8b48e837ad
[ "BSD-3-Clause" ]
3
2016-02-28T06:20:51.000Z
2018-06-12T08:04:32.000Z
setup.py
lebedov/idisplay
26651a144ee4f648d7e9251ac06acd8b48e837ad
[ "BSD-3-Clause" ]
null
null
null
setup.py
lebedov/idisplay
26651a144ee4f648d7e9251ac06acd8b48e837ad
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python import os import re from setuptools import find_packages from setuptools import setup NAME = 'idisplay' VERSION = '0.1.2' AUTHOR = 'Lev Givon' AUTHOR_EMAIL = 'lev@columbia.edu' URL = 'https://github.com/lebedov/idisplay/' DESCRIPTION = 'IPython rich display magic functions' with open('README.rst', 'r') as f: LONG_DESCRIPTION = f.read() LONG_DESCRIPTION = re.search('.*(^Package Description.*)', LONG_DESCRIPTION, re.MULTILINE|re.DOTALL).group(1) DOWNLOAD_URL = URL LICENSE = 'BSD' CLASSIFIERS = [ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development'] PACKAGES = find_packages() if __name__ == "__main__": if os.path.exists('MANIFEST'): os.remove('MANIFEST') setup( name = NAME, version = VERSION, author = AUTHOR, author_email = AUTHOR_EMAIL, license = LICENSE, classifiers = CLASSIFIERS, description = DESCRIPTION, long_description = LONG_DESCRIPTION, url = URL, packages = find_packages(), install_requires = ['ipython>=1.0'])
29.891304
109
0.611636
import os import re from setuptools import find_packages from setuptools import setup NAME = 'idisplay' VERSION = '0.1.2' AUTHOR = 'Lev Givon' AUTHOR_EMAIL = 'lev@columbia.edu' URL = 'https://github.com/lebedov/idisplay/' DESCRIPTION = 'IPython rich display magic functions' with open('README.rst', 'r') as f: LONG_DESCRIPTION = f.read() LONG_DESCRIPTION = re.search('.*(^Package Description.*)', LONG_DESCRIPTION, re.MULTILINE|re.DOTALL).group(1) DOWNLOAD_URL = URL LICENSE = 'BSD' CLASSIFIERS = [ 'Development Status :: 3 - Alpha', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Software Development'] PACKAGES = find_packages() if __name__ == "__main__": if os.path.exists('MANIFEST'): os.remove('MANIFEST') setup( name = NAME, version = VERSION, author = AUTHOR, author_email = AUTHOR_EMAIL, license = LICENSE, classifiers = CLASSIFIERS, description = DESCRIPTION, long_description = LONG_DESCRIPTION, url = URL, packages = find_packages(), install_requires = ['ipython>=1.0'])
true
true
f71376ea631ab445975f601545e423a8d49ab6e7
264
py
Python
tests/document/__init__.py
orsinium-forks/mongoengine
abf530a506c815d77ac1a294e10c20e5e9a01733
[ "MIT" ]
1
2018-03-18T05:38:14.000Z
2018-03-18T05:38:14.000Z
tests/document/__init__.py
orsinium-forks/mongoengine
abf530a506c815d77ac1a294e10c20e5e9a01733
[ "MIT" ]
null
null
null
tests/document/__init__.py
orsinium-forks/mongoengine
abf530a506c815d77ac1a294e10c20e5e9a01733
[ "MIT" ]
1
2021-12-29T15:12:39.000Z
2021-12-29T15:12:39.000Z
import unittest from class_methods import * from delta import * from dynamic import * from indexes import * from inheritance import * from instance import * from json_serialisation import * from validation import * if __name__ == '__main__': unittest.main()
18.857143
32
0.768939
import unittest from class_methods import * from delta import * from dynamic import * from indexes import * from inheritance import * from instance import * from json_serialisation import * from validation import * if __name__ == '__main__': unittest.main()
true
true
f71377f611c9ce644e4ac20c79d79c7493d43929
3,333
py
Python
transitions/extensions/states.py
EnjoyLifeFund/py36pkgs
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
[ "MIT", "BSD-2-Clause", "BSD-3-Clause" ]
null
null
null
transitions/extensions/states.py
EnjoyLifeFund/py36pkgs
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
[ "MIT", "BSD-2-Clause", "BSD-3-Clause" ]
null
null
null
transitions/extensions/states.py
EnjoyLifeFund/py36pkgs
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
[ "MIT", "BSD-2-Clause", "BSD-3-Clause" ]
null
null
null
from threading import Timer from ..core import MachineError, listify import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class Tags(object): def __init__(self, *args, **kwargs): self.tags = kwargs.pop('tags', []) super(Tags, self).__init__(*args, **kwargs) def __getattr__(self, item): if item.startswith('is_'): return item[3:] in self.tags else: return super(Tags, self).__getattribute__(item) class Error(Tags): def __init__(self, *args, **kwargs): tags = kwargs.get('tags', []) accepted = kwargs.pop('accepted', False) if accepted: tags.append('accepted') kwargs['tags'] = tags super(Error, self).__init__(*args, **kwargs) def enter(self, event_data): if len(event_data.machine.get_triggers(self.name)) == 0 and not self.is_accepted: raise MachineError("Error state '{0}' reached!".format(self.name)) class Timeout(object): def __init__(self, *args, **kwargs): self.timeout = kwargs.pop('timeout', 0) self._on_timeout = None if self.timeout > 0: try: self.on_timeout = kwargs.pop('on_timeout') except KeyError: raise AttributeError("Timeout state requires 'on_timeout' when timeout is set.") self.runner = {} super(Timeout, self).__init__(*args, **kwargs) def enter(self, event_data): if self.timeout > 0: t = Timer(self.timeout, self._process_timeout, args=(event_data,)) t.start() self.runner[id(event_data.model)] = t super(Timeout, self).enter(event_data) def exit(self, event_data): t = self.runner.get(id(event_data.model), None) if t is not None and t.is_alive: t.cancel() super(Timeout, self).exit(event_data) def _process_timeout(self, event_data): logger.debug("%sTimeout state %s. 
Processing callbacks...", event_data.machine.id, self.name) for oe in self.on_timeout: event_data.machine._callback(oe, event_data) logger.info("%sTimeout state %s processed.", event_data.machine.id, self.name) @property def on_timeout(self): return self._on_timeout @on_timeout.setter def on_timeout(self, value): self._on_timeout = listify(value) class Volatile(object): def __init__(self, *args, **kwargs): self.volatile_cls = kwargs.pop('volatile', VolatileObject) self.volatile_hook = kwargs.pop('hook', 'scope') super(Volatile, self).__init__(*args, **kwargs) self.initialized = True def enter(self, event_data): setattr(event_data.model, self.volatile_hook, self.volatile_cls()) super(Volatile, self).enter(event_data) def exit(self, event_data): super(Volatile, self).exit(event_data) try: delattr(event_data.model, self.volatile_hook) except AttributeError: pass def add_state_features(*args): def class_decorator(cls): class CustomState(type('CustomMixins', args, {}), cls.state_cls): pass cls.state_cls = CustomState return cls return class_decorator class VolatileObject(object): pass
30.3
101
0.623762
from threading import Timer from ..core import MachineError, listify import logging logger = logging.getLogger(__name__) logger.addHandler(logging.NullHandler()) class Tags(object): def __init__(self, *args, **kwargs): self.tags = kwargs.pop('tags', []) super(Tags, self).__init__(*args, **kwargs) def __getattr__(self, item): if item.startswith('is_'): return item[3:] in self.tags else: return super(Tags, self).__getattribute__(item) class Error(Tags): def __init__(self, *args, **kwargs): tags = kwargs.get('tags', []) accepted = kwargs.pop('accepted', False) if accepted: tags.append('accepted') kwargs['tags'] = tags super(Error, self).__init__(*args, **kwargs) def enter(self, event_data): if len(event_data.machine.get_triggers(self.name)) == 0 and not self.is_accepted: raise MachineError("Error state '{0}' reached!".format(self.name)) class Timeout(object): def __init__(self, *args, **kwargs): self.timeout = kwargs.pop('timeout', 0) self._on_timeout = None if self.timeout > 0: try: self.on_timeout = kwargs.pop('on_timeout') except KeyError: raise AttributeError("Timeout state requires 'on_timeout' when timeout is set.") self.runner = {} super(Timeout, self).__init__(*args, **kwargs) def enter(self, event_data): if self.timeout > 0: t = Timer(self.timeout, self._process_timeout, args=(event_data,)) t.start() self.runner[id(event_data.model)] = t super(Timeout, self).enter(event_data) def exit(self, event_data): t = self.runner.get(id(event_data.model), None) if t is not None and t.is_alive: t.cancel() super(Timeout, self).exit(event_data) def _process_timeout(self, event_data): logger.debug("%sTimeout state %s. 
Processing callbacks...", event_data.machine.id, self.name) for oe in self.on_timeout: event_data.machine._callback(oe, event_data) logger.info("%sTimeout state %s processed.", event_data.machine.id, self.name) @property def on_timeout(self): return self._on_timeout @on_timeout.setter def on_timeout(self, value): self._on_timeout = listify(value) class Volatile(object): def __init__(self, *args, **kwargs): self.volatile_cls = kwargs.pop('volatile', VolatileObject) self.volatile_hook = kwargs.pop('hook', 'scope') super(Volatile, self).__init__(*args, **kwargs) self.initialized = True def enter(self, event_data): setattr(event_data.model, self.volatile_hook, self.volatile_cls()) super(Volatile, self).enter(event_data) def exit(self, event_data): super(Volatile, self).exit(event_data) try: delattr(event_data.model, self.volatile_hook) except AttributeError: pass def add_state_features(*args): def class_decorator(cls): class CustomState(type('CustomMixins', args, {}), cls.state_cls): pass cls.state_cls = CustomState return cls return class_decorator class VolatileObject(object): pass
true
true
f71378ceaaa8d777931ed7a7ad29b3a4672e1b0b
1,381
py
Python
h/views/errors.py
y3g0r/h
a057144956fe25e669aeba5d0f0eb38f9dc09566
[ "BSD-2-Clause" ]
null
null
null
h/views/errors.py
y3g0r/h
a057144956fe25e669aeba5d0f0eb38f9dc09566
[ "BSD-2-Clause" ]
null
null
null
h/views/errors.py
y3g0r/h
a057144956fe25e669aeba5d0f0eb38f9dc09566
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """ Application exception views. Views rendered by the web application in response to exceptions thrown within views. """ from pyramid.view import forbidden_view_config from pyramid.view import notfound_view_config from pyramid.view import view_config from h.i18n import TranslationString as _ # noqa: N813 from h.util.view import handle_exception, json_view @forbidden_view_config(renderer="h:templates/notfound.html.jinja2") @notfound_view_config(renderer="h:templates/notfound.html.jinja2", append_slash=True) def notfound(request): """Handle a request for an unknown/forbidden resource.""" request.response.status_int = 404 return {} @view_config( context=Exception, accept="text/html", renderer="h:templates/5xx.html.jinja2" ) def error(context, request): """Handle a request for which the handler threw an exception.""" handle_exception(request, exception=context) return {} @json_view(context=Exception) def json_error(context, request): """Handle an unexpected exception where the request asked for JSON.""" handle_exception(request, exception=context) message = _( "Hypothesis had a problem while handling this request. " "Our team has been notified. Please contact support@hypothes.is" " if the problem persists." ) return {"status": "failure", "reason": message}
30.688889
85
0.739319
from pyramid.view import forbidden_view_config from pyramid.view import notfound_view_config from pyramid.view import view_config from h.i18n import TranslationString as _ from h.util.view import handle_exception, json_view @forbidden_view_config(renderer="h:templates/notfound.html.jinja2") @notfound_view_config(renderer="h:templates/notfound.html.jinja2", append_slash=True) def notfound(request): request.response.status_int = 404 return {} @view_config( context=Exception, accept="text/html", renderer="h:templates/5xx.html.jinja2" ) def error(context, request): handle_exception(request, exception=context) return {} @json_view(context=Exception) def json_error(context, request): handle_exception(request, exception=context) message = _( "Hypothesis had a problem while handling this request. " "Our team has been notified. Please contact support@hypothes.is" " if the problem persists." ) return {"status": "failure", "reason": message}
true
true
f71378d9b4bbd2425f740fea28b4a8c207203bad
5,345
py
Python
docs/source/conf.py
jameshilliard/adbus
3b16f02d6cc5ff27b50f1f60b429710ecac7233b
[ "MIT" ]
null
null
null
docs/source/conf.py
jameshilliard/adbus
3b16f02d6cc5ff27b50f1f60b429710ecac7233b
[ "MIT" ]
null
null
null
docs/source/conf.py
jameshilliard/adbus
3b16f02d6cc5ff27b50f1f60b429710ecac7233b
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # python-adbus documentation build configuration file, created by # sphinx-quickstart on Mon Jul 24 13:33:19 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import sys sys.path.insert(0, os.path.abspath('../../')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx.ext.githubpages'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'python-adbus' copyright = '2017, CCX Technolgies' author = 'Charles Eidsness' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from adbus import __version__ version = '.'.join(__version__.split()[:-1]) # The full version, including alpha/beta/rc tags. 
release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'about.html', 'navigation.html', 'relations.html', # needs 'show_related': True theme option to display 'searchbox.html', 'donate.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. 
htmlhelp_basename = 'python-adbusdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'python-adbus.tex', 'python-adbus Documentation', 'Charles Eidsness', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'python-adbus', 'python-adbus Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'python-adbus', 'python-adbus Documentation', author, 'python-adbus', 'One line description of project.', 'Miscellaneous'), ]
30.542857
79
0.682133
import os import sys sys.path.insert(0, os.path.abspath('../../')) extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx.ext.githubpages'] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' project = 'python-adbus' copyright = '2017, CCX Technolgies' author = 'Charles Eidsness' # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. from adbus import __version__ version = '.'.join(__version__.split()[:-1]) # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
#html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars html_sidebars = { '**': [ 'about.html', 'navigation.html', 'relations.html', # needs 'show_related': True theme option to display 'searchbox.html', 'donate.html', ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'python-adbusdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'python-adbus.tex', 'python-adbus Documentation', 'Charles Eidsness', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'python-adbus', 'python-adbus Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'python-adbus', 'python-adbus Documentation', author, 'python-adbus', 'One line description of project.', 'Miscellaneous'), ]
true
true
f7137a2801a5e44a8c74054d507cfe0d0fe72bf9
964
py
Python
project/urls.py
tinnguyentg/dictionaries-django-web-application
a28649fe8ce069cd0c78932e77bb7125ca3fa7de
[ "MIT" ]
null
null
null
project/urls.py
tinnguyentg/dictionaries-django-web-application
a28649fe8ce069cd0c78932e77bb7125ca3fa7de
[ "MIT" ]
null
null
null
project/urls.py
tinnguyentg/dictionaries-django-web-application
a28649fe8ce069cd0c78932e77bb7125ca3fa7de
[ "MIT" ]
null
null
null
"""practice URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/4.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.conf import settings # from django.contrib import admin from django.urls import include, path urlpatterns = [ # path("admin/", admin.site.urls), path("", include("dictionary.urls")), ] if "debug_toolbar" in settings.INSTALLED_APPS: urlpatterns.insert(0, path("__debug__/", include("debug_toolbar.urls")))
34.428571
77
0.711618
from django.conf import settings from django.urls import include, path urlpatterns = [ path("", include("dictionary.urls")), ] if "debug_toolbar" in settings.INSTALLED_APPS: urlpatterns.insert(0, path("__debug__/", include("debug_toolbar.urls")))
true
true
f7137b0991a4259f401f051d901b7ce90d3aaed4
2,363
py
Python
setup.py
tsutterley/Spire-GNSS
26c9fb612ebfe872c18b2c2d884bfdf367c414e3
[ "MIT" ]
1
2021-10-02T00:51:09.000Z
2021-10-02T00:51:09.000Z
setup.py
tsutterley/Spire-GNSS
26c9fb612ebfe872c18b2c2d884bfdf367c414e3
[ "MIT" ]
null
null
null
setup.py
tsutterley/Spire-GNSS
26c9fb612ebfe872c18b2c2d884bfdf367c414e3
[ "MIT" ]
2
2021-12-09T03:08:07.000Z
2022-03-22T03:15:16.000Z
import os import sys import logging from setuptools import setup, find_packages logging.basicConfig(stream=sys.stderr, level=logging.INFO) log = logging.getLogger() # package description and keywords description = ('Python tools for obtaining and working with elevation data ' 'from Spire GNSS grazing angle altimetry') keywords = 'Spire GNSS, altimetry, grazing angle, surface elevation and change' # get long_description from README.rst with open("README.rst", "r") as fh: long_description = fh.read() long_description_content_type = "text/x-rst" # install requirements and dependencies on_rtd = os.environ.get('READTHEDOCS') == 'True' if on_rtd: install_requires = [] dependency_links = [] else: # get install requirements with open('requirements.txt') as fh: install_requires = [line.split().pop(0) for line in fh.read().splitlines()] dependency_links = [] # get version with open('version.txt') as fh: fallback_version = fh.read() # list of all scripts to be included with package scripts=[os.path.join('scripts',f) for f in os.listdir('scripts') if f.endswith('.py')] # semantic version configuration for setuptools-scm setup_requires = ["setuptools_scm"] use_scm_version = { "relative_to": __file__, "local_scheme": "node-and-date", "version_scheme": "python-simplified-semver", "fallback_version":fallback_version, } setup( name='spire-toolkit', description=description, long_description=long_description, long_description_content_type=long_description_content_type, url='https://github.com/tsutterley/Spire-GNSS', author='Tyler Sutterley', author_email='tsutterl@uw.edu', license='MIT', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Science/Research', 'Topic :: Scientific/Engineering :: Physics', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], keywords=keywords, packages=find_packages(), 
install_requires=install_requires, setup_requires=setup_requires, dependency_links=dependency_links, use_scm_version=use_scm_version, scripts=scripts, include_package_data=True, )
32.369863
87
0.706729
import os import sys import logging from setuptools import setup, find_packages logging.basicConfig(stream=sys.stderr, level=logging.INFO) log = logging.getLogger() description = ('Python tools for obtaining and working with elevation data ' 'from Spire GNSS grazing angle altimetry') keywords = 'Spire GNSS, altimetry, grazing angle, surface elevation and change' with open("README.rst", "r") as fh: long_description = fh.read() long_description_content_type = "text/x-rst" on_rtd = os.environ.get('READTHEDOCS') == 'True' if on_rtd: install_requires = [] dependency_links = [] else: with open('requirements.txt') as fh: install_requires = [line.split().pop(0) for line in fh.read().splitlines()] dependency_links = [] with open('version.txt') as fh: fallback_version = fh.read() scripts=[os.path.join('scripts',f) for f in os.listdir('scripts') if f.endswith('.py')] setup_requires = ["setuptools_scm"] use_scm_version = { "relative_to": __file__, "local_scheme": "node-and-date", "version_scheme": "python-simplified-semver", "fallback_version":fallback_version, } setup( name='spire-toolkit', description=description, long_description=long_description, long_description_content_type=long_description_content_type, url='https://github.com/tsutterley/Spire-GNSS', author='Tyler Sutterley', author_email='tsutterl@uw.edu', license='MIT', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Science/Research', 'Topic :: Scientific/Engineering :: Physics', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], keywords=keywords, packages=find_packages(), install_requires=install_requires, setup_requires=setup_requires, dependency_links=dependency_links, use_scm_version=use_scm_version, scripts=scripts, include_package_data=True, )
true
true
f7137bf483b0b3a2a5ebf008a1189bbb0a344bf2
332
py
Python
Arrays/Easy/MoveNegative.py
dhruvilgandhi/DSA-Together-HacktoberFest
fda752e0622544c83e11f1caf1cc99f36792069e
[ "MIT" ]
16
2021-10-02T20:10:51.000Z
2022-03-06T10:31:11.000Z
Arrays/Easy/MoveNegative.py
dhruvilgandhi/DSA-Together-HacktoberFest
fda752e0622544c83e11f1caf1cc99f36792069e
[ "MIT" ]
55
2021-10-02T07:31:41.000Z
2021-10-30T06:19:26.000Z
Arrays/Easy/MoveNegative.py
dhruvilgandhi/DSA-Together-HacktoberFest
fda752e0622544c83e11f1caf1cc99f36792069e
[ "MIT" ]
36
2021-10-02T18:00:08.000Z
2022-01-03T18:50:35.000Z
def rearrange (arr , n ): j = 0 for i in range(0 , n) : if(arr[i] < 0): temp = arr[i] arr[i] = arr[j] arr[j] = temp j = j + 1 print(arr) #Driver code sequence = [1 , 3, - 6 , 9 , -3 , -1] length = len(sequence) print(sequence.sort) rearrange(sequence , length)
23.714286
37
0.463855
def rearrange (arr , n ): j = 0 for i in range(0 , n) : if(arr[i] < 0): temp = arr[i] arr[i] = arr[j] arr[j] = temp j = j + 1 print(arr) sequence = [1 , 3, - 6 , 9 , -3 , -1] length = len(sequence) print(sequence.sort) rearrange(sequence , length)
true
true
f7137c5d8b25241584c064217536cf2234217efc
717
py
Python
score/jaccard_index.py
GuangmingZhu/ContinuousGR
2b35fe673405744a694b90f1e89943846b3e1de1
[ "MIT" ]
18
2018-08-23T13:38:29.000Z
2022-02-24T09:19:14.000Z
score/jaccard_index.py
NIDONGDEA/ContinuousGR
2b35fe673405744a694b90f1e89943846b3e1de1
[ "MIT" ]
1
2021-09-16T10:20:18.000Z
2021-09-16T10:20:18.000Z
score/jaccard_index.py
NIDONGDEA/ContinuousGR
2b35fe673405744a694b90f1e89943846b3e1de1
[ "MIT" ]
2
2019-10-21T23:12:28.000Z
2022-02-28T01:53:48.000Z
def Jsi(gts, ps, label): g_set = set() p_set = set() for seg in gts: seg_points, g_l = seg s, e = seg_points if g_l == label: g_set.update(range(s, e + 1, 1)) for seg in ps: seg_points, p_l = seg s, e = seg_points if p_l == label: p_set.update(range(s, e + 1, 1)) inter_set = g_set & p_set union_set = g_set | p_set inter_v = len(inter_set) union_v = len(union_set) if union_v == 0: jsi = 0 else: jsi = float(inter_v) / float(union_v) # if jsi > 0.6: # return 1. # elif jsi < 0.2: # return 0. # else: # return jsi return jsi
24.724138
45
0.476987
def Jsi(gts, ps, label): g_set = set() p_set = set() for seg in gts: seg_points, g_l = seg s, e = seg_points if g_l == label: g_set.update(range(s, e + 1, 1)) for seg in ps: seg_points, p_l = seg s, e = seg_points if p_l == label: p_set.update(range(s, e + 1, 1)) inter_set = g_set & p_set union_set = g_set | p_set inter_v = len(inter_set) union_v = len(union_set) if union_v == 0: jsi = 0 else: jsi = float(inter_v) / float(union_v) return jsi
true
true
f7137caf42bf2e7509a5bf266ba34911e95b5fbc
4,637
py
Python
isi_sdk_8_2_1/isi_sdk_8_2_1/models/mapping_users_rules_rule_options_default_user.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
24
2018-06-22T14:13:23.000Z
2022-03-23T01:21:26.000Z
isi_sdk_8_2_1/isi_sdk_8_2_1/models/mapping_users_rules_rule_options_default_user.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
46
2018-04-30T13:28:22.000Z
2022-03-21T21:11:07.000Z
isi_sdk_8_2_1/isi_sdk_8_2_1/models/mapping_users_rules_rule_options_default_user.py
mohitjain97/isilon_sdk_python
a371f438f542568edb8cda35e929e6b300b1177c
[ "Unlicense" ]
29
2018-06-19T00:14:04.000Z
2022-02-08T17:51:19.000Z
# coding: utf-8 """ Isilon SDK Isilon SDK - Language bindings for the OneFS API # noqa: E501 OpenAPI spec version: 8 Contact: sdk@isilon.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from isi_sdk_8_2_1.models.mapping_users_rules_rule_user2 import MappingUsersRulesRuleUser2 # noqa: F401,E501 class MappingUsersRulesRuleOptionsDefaultUser(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'domain': 'str', 'user': 'str' } attribute_map = { 'domain': 'domain', 'user': 'user' } def __init__(self, domain=None, user=None): # noqa: E501 """MappingUsersRulesRuleOptionsDefaultUser - a model defined in Swagger""" # noqa: E501 self._domain = None self._user = None self.discriminator = None if domain is not None: self.domain = domain self.user = user @property def domain(self): """Gets the domain of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501 :return: The domain of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501 :rtype: str """ return self._domain @domain.setter def domain(self, domain): """Sets the domain of this MappingUsersRulesRuleOptionsDefaultUser. :param domain: The domain of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501 :type: str """ if domain is not None and len(domain) > 255: raise ValueError("Invalid value for `domain`, length must be less than or equal to `255`") # noqa: E501 if domain is not None and len(domain) < 0: raise ValueError("Invalid value for `domain`, length must be greater than or equal to `0`") # noqa: E501 self._domain = domain @property def user(self): """Gets the user of this MappingUsersRulesRuleOptionsDefaultUser. 
# noqa: E501 :return: The user of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501 :rtype: str """ return self._user @user.setter def user(self, user): """Sets the user of this MappingUsersRulesRuleOptionsDefaultUser. :param user: The user of this MappingUsersRulesRuleOptionsDefaultUser. # noqa: E501 :type: str """ if user is None: raise ValueError("Invalid value for `user`, must not be `None`") # noqa: E501 if user is not None and len(user) > 255: raise ValueError("Invalid value for `user`, length must be less than or equal to `255`") # noqa: E501 if user is not None and len(user) < 0: raise ValueError("Invalid value for `user`, length must be greater than or equal to `0`") # noqa: E501 self._user = user def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, MappingUsersRulesRuleOptionsDefaultUser): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
30.913333
117
0.593918
import pprint import re import six from isi_sdk_8_2_1.models.mapping_users_rules_rule_user2 import MappingUsersRulesRuleUser2 class MappingUsersRulesRuleOptionsDefaultUser(object): swagger_types = { 'domain': 'str', 'user': 'str' } attribute_map = { 'domain': 'domain', 'user': 'user' } def __init__(self, domain=None, user=None): self._domain = None self._user = None self.discriminator = None if domain is not None: self.domain = domain self.user = user @property def domain(self): return self._domain @domain.setter def domain(self, domain): if domain is not None and len(domain) > 255: raise ValueError("Invalid value for `domain`, length must be less than or equal to `255`") if domain is not None and len(domain) < 0: raise ValueError("Invalid value for `domain`, length must be greater than or equal to `0`") self._domain = domain @property def user(self): return self._user @user.setter def user(self, user): if user is None: raise ValueError("Invalid value for `user`, must not be `None`") if user is not None and len(user) > 255: raise ValueError("Invalid value for `user`, length must be less than or equal to `255`") if user is not None and len(user) < 0: raise ValueError("Invalid value for `user`, length must be greater than or equal to `0`") self._user = user def to_dict(self): result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pprint.pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, MappingUsersRulesRuleOptionsDefaultUser): return False return self.__dict__ == 
other.__dict__ def __ne__(self, other): return not self == other
true
true
f7137d431c55a98733348c0c5c24522a29f62806
2,726
py
Python
basis/cli/commands/upload.py
kvh/dags
4d64e480b68e23cb586516d7ea0ad03bf93fb12d
[ "BSD-3-Clause" ]
null
null
null
basis/cli/commands/upload.py
kvh/dags
4d64e480b68e23cb586516d7ea0ad03bf93fb12d
[ "BSD-3-Clause" ]
null
null
null
basis/cli/commands/upload.py
kvh/dags
4d64e480b68e23cb586516d7ea0ad03bf93fb12d
[ "BSD-3-Clause" ]
null
null
null
from pathlib import Path from typer import Option, Argument from basis.cli.services.deploy import deploy_graph_version from basis.cli.services.graph_components import create_graph_component from basis.cli.services.lookup import IdLookup from basis.cli.services.output import sprint, abort_on_error from basis.cli.services.upload import upload_graph_version _graph_help = "The location of the graph.yml file for the graph to upload" _deploy_help = "Whether or not to automatically deploy the graph after upload" _organization_help = "The name of the Basis organization to upload to" _environment_help = "The name of the Basis environment to use if deploying the graph" _component_help = "After uploading, publish the graph version as a public component" def upload( deploy: bool = Option(True, "--deploy/--no-deploy", help=_deploy_help), organization: str = Option("", "-o", "--organization", help=_organization_help), environment: str = Option("", "-e", "--environment", help=_environment_help), graph: Path = Argument(None, exists=True, help=_graph_help), publish_component: bool = Option(False, help=_component_help), ): """Upload a new version of a graph to Basis""" ids = IdLookup( environment_name=environment, organization_name=organization, explicit_graph_path=graph, ) with abort_on_error("Upload failed"): resp = upload_graph_version( ids.graph_file_path, ids.organization_id, add_missing_node_ids=not publish_component, ) graph_version_id = resp["uid"] ui_url = resp["ui_url"] manifest = resp["manifest"] sprint(f"\n[success]Uploaded new graph version with id [b]{graph_version_id}") if manifest.get("errors"): sprint(f"[error]Graph contains the following errors:") for error in manifest["errors"]: sprint(f"\t[error]{error}") if publish_component: with abort_on_error("Error creating component"): resp = create_graph_component(graph_version_id) resp_org = resp["organization"]["slug"] resp_versions = resp["version_names"] resp_component = resp["component"]["slug"] resp_id = resp["uid"] 
sprint( f"[success]Published graph component " f"[b]{resp_org}/{resp_component}[/b] " f"with versions [b]{resp_versions}[/b] " f"at id [b]{resp_id}" ) elif deploy: with abort_on_error("Deploy failed"): deploy_graph_version(graph_version_id, ids.environment_id) sprint(f"[success]Graph deployed") sprint(f"\n[info]Visit [code]{ui_url}[/code] to view your graph")
40.686567
85
0.675715
from pathlib import Path from typer import Option, Argument from basis.cli.services.deploy import deploy_graph_version from basis.cli.services.graph_components import create_graph_component from basis.cli.services.lookup import IdLookup from basis.cli.services.output import sprint, abort_on_error from basis.cli.services.upload import upload_graph_version _graph_help = "The location of the graph.yml file for the graph to upload" _deploy_help = "Whether or not to automatically deploy the graph after upload" _organization_help = "The name of the Basis organization to upload to" _environment_help = "The name of the Basis environment to use if deploying the graph" _component_help = "After uploading, publish the graph version as a public component" def upload( deploy: bool = Option(True, "--deploy/--no-deploy", help=_deploy_help), organization: str = Option("", "-o", "--organization", help=_organization_help), environment: str = Option("", "-e", "--environment", help=_environment_help), graph: Path = Argument(None, exists=True, help=_graph_help), publish_component: bool = Option(False, help=_component_help), ): ids = IdLookup( environment_name=environment, organization_name=organization, explicit_graph_path=graph, ) with abort_on_error("Upload failed"): resp = upload_graph_version( ids.graph_file_path, ids.organization_id, add_missing_node_ids=not publish_component, ) graph_version_id = resp["uid"] ui_url = resp["ui_url"] manifest = resp["manifest"] sprint(f"\n[success]Uploaded new graph version with id [b]{graph_version_id}") if manifest.get("errors"): sprint(f"[error]Graph contains the following errors:") for error in manifest["errors"]: sprint(f"\t[error]{error}") if publish_component: with abort_on_error("Error creating component"): resp = create_graph_component(graph_version_id) resp_org = resp["organization"]["slug"] resp_versions = resp["version_names"] resp_component = resp["component"]["slug"] resp_id = resp["uid"] sprint( f"[success]Published graph component " 
f"[b]{resp_org}/{resp_component}[/b] " f"with versions [b]{resp_versions}[/b] " f"at id [b]{resp_id}" ) elif deploy: with abort_on_error("Deploy failed"): deploy_graph_version(graph_version_id, ids.environment_id) sprint(f"[success]Graph deployed") sprint(f"\n[info]Visit [code]{ui_url}[/code] to view your graph")
true
true
f7137df1d05ef90e02779ddb044f006b3d28e341
93,666
py
Python
cinder/tests/unit/backup/test_backup.py
inspur-storage/cinder
c41a2f05c3e01133746ca2cb4eaa480cc581cbdf
[ "Apache-2.0" ]
1
2019-01-22T05:37:38.000Z
2019-01-22T05:37:38.000Z
cinder/tests/unit/backup/test_backup.py
inspur-storage/cinder
c41a2f05c3e01133746ca2cb4eaa480cc581cbdf
[ "Apache-2.0" ]
null
null
null
cinder/tests/unit/backup/test_backup.py
inspur-storage/cinder
c41a2f05c3e01133746ca2cb4eaa480cc581cbdf
[ "Apache-2.0" ]
null
null
null
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Backup code.""" import copy import ddt import os import uuid import mock from os_brick.initiator.connectors import fake as fake_connectors from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import importutils from oslo_utils import timeutils import cinder from cinder.backup import api from cinder.backup import manager from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder import test from cinder.tests import fake_driver from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import utils from cinder.volume import rpcapi as volume_rpcapi CONF = cfg.CONF class FakeBackupException(Exception): pass class BaseBackupTest(test.TestCase): def setUp(self): super(BaseBackupTest, self).setUp() self.backup_mgr = importutils.import_object(CONF.backup_manager) self.backup_mgr.host = 'testhost' self.backup_mgr.is_initialized = True self.ctxt = context.get_admin_context() paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot', 'cinder.volume.rpcapi.VolumeAPI.delete_volume', 'cinder.volume.rpcapi.VolumeAPI.detach_volume', 'cinder.volume.rpcapi.VolumeAPI.' 
'secure_file_operations_enabled'] self.volume_patches = {} self.volume_mocks = {} for path in paths: name = path.split('.')[-1] self.volume_patches[name] = mock.patch(path) self.volume_mocks[name] = self.volume_patches[name].start() self.addCleanup(self.volume_patches[name].stop) def _create_backup_db_entry(self, volume_id=str(uuid.uuid4()), restore_volume_id=None, display_name='test_backup', display_description='this is a test backup', container='volumebackups', status=fields.BackupStatus.CREATING, size=1, object_count=0, project_id=str(uuid.uuid4()), service=None, temp_volume_id=None, temp_snapshot_id=None, snapshot_id=None, metadata=None, parent_id=None, encryption_key_id=None): """Create a backup entry in the DB. Return the entry ID """ kwargs = {} kwargs['volume_id'] = volume_id kwargs['restore_volume_id'] = restore_volume_id kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = project_id kwargs['host'] = 'testhost' kwargs['availability_zone'] = '1' kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['container'] = container kwargs['status'] = status kwargs['fail_reason'] = '' kwargs['service'] = service or CONF.backup_driver kwargs['snapshot_id'] = snapshot_id kwargs['parent_id'] = parent_id kwargs['size'] = size kwargs['object_count'] = object_count kwargs['temp_volume_id'] = temp_volume_id kwargs['temp_snapshot_id'] = temp_snapshot_id kwargs['metadata'] = metadata or {} kwargs['encryption_key_id'] = encryption_key_id backup = objects.Backup(context=self.ctxt, **kwargs) backup.create() return backup def _create_volume_db_entry(self, display_name='test_volume', display_description='this is a test volume', status='backing-up', previous_status='available', size=1, host='testhost', encryption_key_id=None): """Create a volume entry in the DB. 
Return the entry ID """ vol = {} vol['size'] = size vol['host'] = host vol['user_id'] = str(uuid.uuid4()) vol['project_id'] = str(uuid.uuid4()) vol['status'] = status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = fields.VolumeAttachStatus.DETACHED vol['availability_zone'] = '1' vol['previous_status'] = previous_status vol['encryption_key_id'] = encryption_key_id volume = objects.Volume(context=self.ctxt, **vol) volume.create() return volume.id def _create_snapshot_db_entry(self, display_name='test_snapshot', display_description='test snapshot', status=fields.SnapshotStatus.AVAILABLE, size=1, volume_id=str(uuid.uuid4()), provider_location=None): """Create a snapshot entry in the DB. Return the entry ID. """ kwargs = {} kwargs['size'] = size kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = str(uuid.uuid4()) kwargs['status'] = status kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['volume_id'] = volume_id kwargs['cgsnapshot_id'] = None kwargs['volume_size'] = size kwargs['metadata'] = {} kwargs['provider_location'] = provider_location snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs) snapshot_obj.create() return snapshot_obj def _create_volume_attach(self, volume_id): values = {'volume_id': volume_id, 'attach_status': fields.VolumeAttachStatus.ATTACHED, } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment['id'], None, 'testhost', '/dev/vd0') def _create_exported_record_entry(self, vol_size=1, exported_id=None): """Create backup metadata export entry.""" vol_id = self._create_volume_db_entry(status='available', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) if exported_id is not None: backup.id = exported_id export = self.backup_mgr.export_record(self.ctxt, backup) return export def _create_export_record_db_entry(self, 
volume_id=str(uuid.uuid4()), status=fields.BackupStatus.CREATING, project_id=str(uuid.uuid4()), backup_id=None): """Create a backup entry in the DB. Return the entry ID """ kwargs = {} kwargs['volume_id'] = volume_id kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = project_id kwargs['status'] = status if backup_id: kwargs['id'] = backup_id backup = objects.BackupImport(context=self.ctxt, **kwargs) backup.create() return backup @ddt.ddt class BackupTestCase(BaseBackupTest): """Test Case for backups.""" @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'set_initialized') @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'do_setup') @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver, 'check_for_setup_error') @mock.patch('cinder.context.get_admin_context') def test_init_host(self, mock_get_admin_context, mock_check, mock_setup, mock_set_initialized): """Test stuck volumes and backups. Make sure stuck volumes and backups are reset to correct states when backup_manager.init_host() is called """ def get_admin_context(): return self.ctxt self.override_config('backup_service_inithost_offload', False) self.override_config('periodic_interval', 0) vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) vol2_id = self._create_volume_db_entry() self._create_volume_attach(vol2_id) db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'}) vol3_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol3_id, {'status': 'available'}) vol4_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'}) temp_vol_id = self._create_volume_db_entry() db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'}) vol5_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'}) temp_snap = self._create_snapshot_db_entry() temp_snap.status = 
fields.SnapshotStatus.AVAILABLE temp_snap.save() backup1 = self._create_backup_db_entry( status=fields.BackupStatus.CREATING, volume_id=vol1_id) backup2 = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, restore_volume_id=vol2_id) backup3 = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol3_id) self._create_backup_db_entry(status=fields.BackupStatus.CREATING, volume_id=vol4_id, temp_volume_id=temp_vol_id) self._create_backup_db_entry(status=fields.BackupStatus.CREATING, volume_id=vol5_id, temp_snapshot_id=temp_snap.id) mock_get_admin_context.side_effect = get_admin_context self.volume = importutils.import_object(CONF.volume_manager) self.backup_mgr.init_host() vol1 = db.volume_get(self.ctxt, vol1_id) self.assertEqual('available', vol1['status']) vol2 = db.volume_get(self.ctxt, vol2_id) self.assertEqual('error_restoring', vol2['status']) vol3 = db.volume_get(self.ctxt, vol3_id) self.assertEqual('available', vol3['status']) vol4 = db.volume_get(self.ctxt, vol4_id) self.assertEqual('available', vol4['status']) vol5 = db.volume_get(self.ctxt, vol5_id) self.assertEqual('available', vol5['status']) backup1 = db.backup_get(self.ctxt, backup1.id) self.assertEqual(fields.BackupStatus.ERROR, backup1['status']) backup2 = db.backup_get(self.ctxt, backup2.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup2['status']) self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, backup3.id) temp_vol = objects.Volume.get_by_id(self.ctxt, temp_vol_id) self.volume_mocks['delete_volume'].assert_called_once_with( self.ctxt, temp_vol) self.assertTrue(self.volume_mocks['detach_volume'].called) @mock.patch('cinder.objects.backup.BackupList.get_all_by_host') @mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool') def test_init_host_with_service_inithost_offload(self, mock_add_threadpool, mock_get_all_by_host): vol1_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol1_id, {'status': 'available'}) 
backup1 = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol1_id) vol2_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol2_id, {'status': 'available'}) backup2 = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol2_id) mock_get_all_by_host.return_value = [backup1, backup2] self.backup_mgr.init_host() calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1), mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)] mock_add_threadpool.assert_has_calls(calls, any_order=True) self.assertEqual(2, mock_add_threadpool.call_count) @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version') @mock.patch('cinder.objects.service.Service.get_minimum_obj_version') @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '1.3', 'cinder-volume': '1.7'}) @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-backup': '1.2', 'cinder-volume': '1.4'}) def test_reset(self, get_min_obj, get_min_rpc): get_min_obj.return_value = 'liberty' backup_mgr = manager.BackupManager() backup_rpcapi = backup_mgr.backup_rpcapi volume_rpcapi = backup_mgr.volume_rpcapi self.assertEqual('1.3', backup_rpcapi.client.version_cap) self.assertEqual('1.2', backup_rpcapi.client.serializer._base.version_cap) self.assertEqual('1.7', volume_rpcapi.client.version_cap) self.assertEqual('1.4', volume_rpcapi.client.serializer._base.version_cap) get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current() backup_mgr.reset() backup_rpcapi = backup_mgr.backup_rpcapi volume_rpcapi = backup_mgr.volume_rpcapi self.assertEqual(get_min_rpc.return_value, backup_rpcapi.client.version_cap) self.assertEqual(get_min_obj.return_value, backup_rpcapi.client.serializer._base.version_cap) self.assertIsNone(backup_rpcapi.client.serializer._base.manifest) self.assertEqual(get_min_rpc.return_value, volume_rpcapi.client.version_cap) self.assertEqual(get_min_obj.return_value, volume_rpcapi.client.serializer._base.version_cap) 
self.assertIsNone(volume_rpcapi.client.serializer._base.manifest) @ddt.data(True, False) def test_is_working(self, initialized): self.backup_mgr.is_initialized = initialized self.assertEqual(initialized, self.backup_mgr.is_working()) def test_cleanup_incomplete_backup_operations_with_exceptions(self): """Test cleanup resilience in the face of exceptions.""" fake_backup_list = [{'id': str(uuid.uuid4())}, {'id': str(uuid.uuid4())}, {'id': str(uuid.uuid4())}] mock_backup_get_by_host = self.mock_object( objects.BackupList, 'get_all_by_host') mock_backup_get_by_host.return_value = fake_backup_list mock_backup_cleanup = self.mock_object( self.backup_mgr, '_cleanup_one_backup') mock_backup_cleanup.side_effect = [Exception] mock_temp_cleanup = self.mock_object( self.backup_mgr, '_cleanup_temp_volumes_snapshots_for_one_backup') mock_temp_cleanup.side_effect = [Exception] self.assertIsNone( self.backup_mgr._cleanup_incomplete_backup_operations( self.ctxt)) self.assertEqual(len(fake_backup_list), mock_backup_cleanup.call_count) self.assertEqual(len(fake_backup_list), mock_temp_cleanup.call_count) def test_cleanup_one_backing_up_volume(self): """Test cleanup_one_volume for volume status 'backing-up'.""" volume_id = self._create_volume_db_entry(status='backing-up', previous_status='available') volume = db.volume_get(self.ctxt, volume_id) self.backup_mgr._cleanup_one_volume(self.ctxt, volume) volume = db.volume_get(self.ctxt, volume_id) self.assertEqual('available', volume['status']) def test_cleanup_one_restoring_backup_volume(self): """Test cleanup_one_volume for volume status 'restoring-backup'.""" volume_id = self._create_volume_db_entry(status='restoring-backup') volume = db.volume_get(self.ctxt, volume_id) self.backup_mgr._cleanup_one_volume(self.ctxt, volume) volume = db.volume_get(self.ctxt, volume_id) self.assertEqual('error_restoring', volume['status']) def test_cleanup_one_creating_backup(self): """Test cleanup_one_backup for volume status 'creating'.""" vol1_id = 
self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up', }) backup = self._create_backup_db_entry( status=fields.BackupStatus.CREATING, volume_id=vol1_id) self.backup_mgr._cleanup_one_backup(self.ctxt, backup) self.assertEqual(fields.BackupStatus.ERROR, backup.status) volume = objects.Volume.get_by_id(self.ctxt, vol1_id) self.assertEqual('available', volume.status) def test_cleanup_one_restoring_backup(self): """Test cleanup_one_backup for volume status 'restoring'.""" vol1_id = self._create_volume_db_entry() db.volume_update(self.ctxt, vol1_id, {'status': 'restoring-backup', }) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, restore_volume_id=vol1_id) self.backup_mgr._cleanup_one_backup(self.ctxt, backup) self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) volume = objects.Volume.get_by_id(self.ctxt, vol1_id) self.assertEqual('error_restoring', volume.status) def test_cleanup_one_deleting_backup(self): """Test cleanup_one_backup for backup status 'deleting'.""" self.override_config('backup_service_inithost_offload', False) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING) self.backup_mgr._cleanup_one_backup(self.ctxt, backup) self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, backup.id) def test_cleanup_one_deleting_encrypted_backup(self): """Test cleanup of backup status 'deleting' (encrypted).""" self.override_config('backup_service_inithost_offload', False) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, encryption_key_id=fake.ENCRYPTION_KEY_ID) self.backup_mgr._cleanup_one_backup(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertIsNotNone(backup) self.assertEqual(fields.BackupStatus.ERROR_DELETING, backup.status) def test_detach_all_attachments_handles_exceptions(self): """Test detach_all_attachments with exceptions.""" mock_log = 
self.mock_object(manager, 'LOG') self.volume_mocks['detach_volume'].side_effect = [Exception] fake_attachments = [ { 'id': str(uuid.uuid4()), 'attached_host': 'testhost', 'instance_uuid': None, }, { 'id': str(uuid.uuid4()), 'attached_host': 'testhost', 'instance_uuid': None, } ] fake_volume = { 'id': str(uuid.uuid4()), 'volume_attachment': fake_attachments } self.backup_mgr._detach_all_attachments(self.ctxt, fake_volume) self.assertEqual(len(fake_attachments), mock_log.exception.call_count) @ddt.data(KeyError, exception.VolumeNotFound) def test_cleanup_temp_volumes_snapshots_for_one_backup_volume_not_found( self, err): """Ensure we handle missing volume for a backup.""" mock_volume_get = self.mock_object(db, 'volume_get') mock_volume_get.side_effect = [err] backup = self._create_backup_db_entry( status=fields.BackupStatus.CREATING) self.assertIsNone( self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( self.ctxt, backup)) def test_cleanup_temp_snapshot_for_one_backup_not_found(self): """Ensure we handle missing temp snapshot for a backup.""" vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) backup = self._create_backup_db_entry( status=fields.BackupStatus.ERROR, volume_id=vol1_id, temp_snapshot_id=str(uuid.uuid4())) self.assertIsNone( self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( self.ctxt, backup)) self.assertFalse(self.volume_mocks['delete_snapshot'].called) self.assertIsNone(backup.temp_snapshot_id) backup.destroy() db.volume_destroy(self.ctxt, vol1_id) def test_cleanup_temp_volume_for_one_backup_not_found(self): """Ensure we handle missing temp volume for a backup.""" vol1_id = self._create_volume_db_entry() self._create_volume_attach(vol1_id) db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'}) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol1_id, temp_volume_id=str(uuid.uuid4())) 
self.assertIsNone( self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup( self.ctxt, backup)) self.assertFalse(self.volume_mocks['delete_volume'].called) self.assertIsNone(backup.temp_volume_id) backup.destroy() db.volume_destroy(self.ctxt, vol1_id) def test_create_backup_with_bad_volume_status(self): """Test creating a backup from a volume with a bad status.""" vol_id = self._create_volume_db_entry(status='restoring', size=1) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertRaises(exception.InvalidVolume, self.backup_mgr.create_backup, self.ctxt, backup) def test_create_backup_with_bad_backup_status(self): """Test creating a backup with a backup with a bad status.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.create_backup, self.ctxt, backup) def test_create_backup_with_error(self): """Test error handling when error occurs during backup creation.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry(volume_id=vol_id) mock_run_backup = self.mock_object(self.backup_mgr, '_run_backup') mock_run_backup.side_effect = FakeBackupException(str(uuid.uuid4())) self.assertRaises(FakeBackupException, self.backup_mgr.create_backup, self.ctxt, backup) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('available', vol['status']) self.assertEqual('error_backing-up', vol['previous_status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) self.assertTrue(mock_run_backup.called) @mock.patch('cinder.backup.manager.BackupManager._run_backup', side_effect=FakeBackupException(str(uuid.uuid4()))) def test_create_backup_with_snapshot_error(self, mock_run_backup): """Test error handling when error occurs during backup creation.""" vol_id = self._create_volume_db_entry(size=1) snapshot = 
self._create_snapshot_db_entry(status='backing-up', volume_id=vol_id) backup = self._create_backup_db_entry(volume_id=vol_id, snapshot_id=snapshot.id) self.assertRaises(FakeBackupException, self.backup_mgr.create_backup, self.ctxt, backup) snapshot.refresh() self.assertEqual('available', snapshot.status) backup.refresh() self.assertEqual(fields.BackupStatus.ERROR, backup.status) self.assertTrue(mock_run_backup.called) @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os.path, 'isdir', return_value=False) def test_create_backup(self, mock_isdir, mock_open, mock_temporary_chown, mock_get_backup_device, mock_get_conn): """Test normal backup creation.""" vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) vol = objects.Volume.get_by_id(self.ctxt, vol_id) backup_device_dict = {'backup_device': vol, 'secure_enabled': False, 'is_snapshot': False, } mock_get_backup_device.return_value = ( objects.BackupDeviceInfo.from_primitive(backup_device_dict, self.ctxt, ['admin_metadata', 'metadata'])) attach_info = {'device': {'path': '/dev/null'}} mock_detach_device = self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info properties = {} mock_get_conn.return_value = properties mock_open.return_value = open('/dev/null', 'rb') self.backup_mgr.create_backup(self.ctxt, backup) mock_temporary_chown.assert_called_once_with('/dev/null') mock_attach_device.assert_called_once_with(self.ctxt, vol, properties, False) mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol) mock_get_conn.assert_called_once_with() mock_detach_device.assert_called_once_with(self.ctxt, attach_info, vol, properties, False, force=True, 
ignore_errors=True) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual('available', vol['status']) self.assertEqual('backing-up', vol['previous_status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) self.assertIsNone(backup.encryption_key_id) @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os.path, 'isdir', return_value=True) def test_create_backup_set_parent_id_to_none(self, mock_isdir, mock_open, mock_chown, mock_backup_device, mock_brick): vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id, parent_id='mock') with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \ mock_get_backup_driver: mock_get_backup_driver.return_value.backup.return_value = ( {'parent_id': None}) with mock.patch.object(self.backup_mgr, '_detach_device'): device_path = '/fake/disk/path/' attach_info = {'device': {'path': device_path}} mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info properties = {} mock_brick.return_value = properties mock_open.return_value = open('/dev/null', 'rb') mock_brick.return_value = properties self.backup_mgr.create_backup(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) self.assertEqual(vol_size, backup.size) self.assertIsNone(backup.parent_id) @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os.path, 'isdir', return_value=True) def test_create_backup_set_parent_id(self, 
mock_isdir, mock_open, mock_chown, mock_backup_device, mock_brick): vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) parent_backup = self._create_backup_db_entry(size=vol_size) with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \ mock_get_backup_driver: mock_get_backup_driver.return_value.backup.return_value = ( {'parent_id': parent_backup.id}) with mock.patch.object(self.backup_mgr, '_detach_device'): device_path = '/fake/disk/path/' attach_info = {'device': {'path': device_path}} mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info properties = {} mock_brick.return_value = properties mock_open.return_value = open('/dev/null', 'rb') mock_brick.return_value = properties self.backup_mgr.create_backup(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) self.assertEqual(vol_size, backup.size) self.assertEqual(parent_backup.id, backup.parent_id) @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os.path, 'isdir', return_value=True) def test_create_backup_fail_with_excep(self, mock_isdir, mock_open, mock_chown, mock_backup_device, mock_brick): vol_id = self._create_volume_db_entry() backup = self._create_backup_db_entry(volume_id=vol_id) with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \ mock_get_backup_driver: mock_get_backup_driver.return_value.backup.side_effect = ( FakeBackupException('fake')) with mock.patch.object(self.backup_mgr, '_detach_device'): device_path = '/fake/disk/path/' attach_info = {'device': {'path': device_path}} mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info properties 
= {} mock_brick.return_value = properties mock_open.return_value = open('/dev/null', 'rb') mock_brick.return_value = properties self.assertRaises(FakeBackupException, self.backup_mgr.create_backup, self.ctxt, backup) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('available', vol.status) self.assertEqual('error_backing-up', vol.previous_status) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup.status) @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os.path, 'isdir', return_value=True) def test_run_backup_with_dir_device_path(self, mock_isdir, mock_open, mock_chown, mock_backup_device, mock_brick): backup_service = lambda: None backup_service.backup = mock.Mock( return_value=mock.sentinel.backup_update) self.backup_mgr.get_backup_driver = lambda x: backup_service vol_id = self._create_volume_db_entry() backup = self._create_backup_db_entry(volume_id=vol_id) volume = objects.Volume.get_by_id(self.ctxt, vol_id) # device_path is represented by a directory device_path = '/fake/disk/path/' attach_info = {'device': {'path': device_path}} self.backup_mgr._attach_device = mock.Mock( return_value=attach_info) self.backup_mgr._detach_device = mock.Mock() output = self.backup_mgr._run_backup(self.ctxt, backup, volume) mock_chown.assert_not_called() mock_open.assert_not_called() backup_service.backup.assert_called_once_with( backup, device_path) self.assertEqual(mock.sentinel.backup_update, output) @mock.patch('cinder.backup.manager.BackupManager._run_backup') @ddt.data((fields.SnapshotStatus.BACKING_UP, 'available'), (fields.SnapshotStatus.BACKING_UP, 'in-use'), (fields.SnapshotStatus.AVAILABLE, 'available'), (fields.SnapshotStatus.AVAILABLE, 'in-use')) @ddt.unpack def test_create_backup_with_snapshot(self, snapshot_status, volume_status, 
mock_run_backup): vol_id = self._create_volume_db_entry(status=volume_status) snapshot = self._create_snapshot_db_entry(volume_id=vol_id, status=snapshot_status) backup = self._create_backup_db_entry(volume_id=vol_id, snapshot_id=snapshot.id) if snapshot_status == fields.SnapshotStatus.BACKING_UP: self.backup_mgr.create_backup(self.ctxt, backup) vol = objects.Volume.get_by_id(self.ctxt, vol_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual(volume_status, vol.status) self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status) else: self.assertRaises(exception.InvalidSnapshot, self.backup_mgr.create_backup, self.ctxt, backup) @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os.path, 'isdir', return_value=False) def test_create_backup_with_temp_snapshot(self, mock_isdir, mock_open, mock_temporary_chown, mock_get_backup_device, mock_get_conn): """Test backup in-use volume using temp snapshot.""" self.override_config('backup_use_same_host', True) vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') backup = self._create_backup_db_entry(volume_id=vol_id) snap = self._create_snapshot_db_entry(volume_id=vol_id) vol = objects.Volume.get_by_id(self.ctxt, vol_id) mock_get_backup_device.return_value = ( objects.BackupDeviceInfo.from_primitive({ 'backup_device': snap, 'secure_enabled': False, 'is_snapshot': True, }, self.ctxt, expected_attrs=['metadata'])) attach_info = { 'device': {'path': '/dev/null'}, 'conn': {'data': {}}, 'connector': fake_connectors.FakeConnector(None)} mock_terminate_connection_snapshot = self.mock_object( volume_rpcapi.VolumeAPI, 'terminate_connection_snapshot') mock_initialize_connection_snapshot = self.mock_object( volume_rpcapi.VolumeAPI, 'initialize_connection_snapshot') mock_connect_device = 
self.mock_object( manager.BackupManager, '_connect_device') mock_connect_device.return_value = attach_info properties = {} mock_get_conn.return_value = properties mock_open.return_value = open('/dev/null', 'rb') self.backup_mgr.create_backup(self.ctxt, backup) mock_temporary_chown.assert_called_once_with('/dev/null') mock_initialize_connection_snapshot.assert_called_once_with( self.ctxt, snap, properties) mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol) mock_get_conn.assert_called_once_with() mock_terminate_connection_snapshot.assert_called_once_with( self.ctxt, snap, properties, force=True) vol = objects.Volume.get_by_id(self.ctxt, vol_id) self.assertEqual('in-use', vol['status']) self.assertEqual('backing-up', vol['previous_status']) backup = objects.Backup.get_by_id(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status) self.assertEqual(vol_size, backup.size) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_snapshot') def test_create_temp_snapshot(self, mock_create_snapshot): volume_manager = importutils.import_object(CONF.volume_manager) volume_manager.driver.set_initialized() vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') vol = objects.Volume.get_by_id(self.ctxt, vol_id) mock_create_snapshot.return_value = {'provider_id': 'fake_provider_id'} temp_snap = volume_manager.driver._create_temp_snapshot( self.ctxt, vol) self.assertEqual('available', temp_snap['status']) self.assertEqual('fake_provider_id', temp_snap['provider_id']) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_cloned_volume') def test_create_temp_cloned_volume(self, mock_create_cloned_volume): volume_manager = importutils.import_object(CONF.volume_manager) volume_manager.driver.set_initialized() vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') vol = objects.Volume.get_by_id(self.ctxt, vol_id) mock_create_cloned_volume.return_value 
= {'provider_id': 'fake_provider_id'} temp_vol = volume_manager.driver._create_temp_cloned_volume( self.ctxt, vol) self.assertEqual('available', temp_vol['status']) self.assertEqual('fake_provider_id', temp_vol['provider_id']) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_volume_from_snapshot') def test_create_temp_volume_from_snapshot(self, mock_create_vol_from_snap): volume_manager = importutils.import_object(CONF.volume_manager) volume_manager.driver.set_initialized() vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size, previous_status='in-use') vol = objects.Volume.get_by_id(self.ctxt, vol_id) snap = self._create_snapshot_db_entry(volume_id=vol_id) mock_create_vol_from_snap.return_value = {'provider_id': 'fake_provider_id'} temp_vol = volume_manager.driver._create_temp_volume_from_snapshot( self.ctxt, vol, snap) self.assertEqual('available', temp_vol['status']) self.assertEqual('fake_provider_id', temp_vol['provider_id']) @mock.patch('cinder.volume.utils.notify_about_backup_usage') def test_create_backup_with_notify(self, notify): """Test normal backup creation with notifications.""" vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) self.mock_object(self.backup_mgr, '_run_backup') self.backup_mgr.create_backup(self.ctxt, backup) self.assertEqual(2, notify.call_count) @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.volume.utils.clone_encryption_key') @mock.patch('cinder.utils.brick_get_connector_properties') def test_create_backup_encrypted_volume(self, mock_connector_properties, mock_clone_encryption_key, mock_get_backup_device): """Test backup of encrypted volume. Test whether the volume's encryption key ID is cloned and saved in the backup. 
""" vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry(volume_id=vol_id) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} mock_clone_encryption_key.return_value = fake.UUID2 self.backup_mgr.create_backup(self.ctxt, backup) mock_clone_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID1) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fake.UUID2, backup.encryption_key_id) @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device') @mock.patch('cinder.volume.utils.clone_encryption_key') @mock.patch('cinder.utils.brick_get_connector_properties') def test_create_backup_encrypted_volume_again(self, mock_connector_properties, mock_clone_encryption_key, mock_get_backup_device): """Test backup of encrypted volume. Test when the backup already has a clone of the volume's encryption key ID. """ vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry(volume_id=vol_id, encryption_key_id=fake.UUID2) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} self.backup_mgr.create_backup(self.ctxt, backup) mock_clone_encryption_key.assert_not_called() def test_restore_backup_with_bad_volume_status(self): """Test error handling. Test error handling when restoring a backup to a volume with a bad status. 
""" vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertRaises(exception.InvalidVolume, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) backup = db.backup_get(self.ctxt, backup.id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error_restoring', vol['status']) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) def test_restore_backup_with_bad_backup_status(self): """Test error handling. Test error handling when restoring a backup with a backup with a bad status. """ vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_restore_backup_with_driver_error(self): """Test error handling when an error occurs during backup restore.""" vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) mock_run_restore = self.mock_object( self.backup_mgr, '_run_restore') mock_run_restore.side_effect = FakeBackupException('fake') self.assertRaises(FakeBackupException, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error_restoring', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertTrue(mock_run_restore.called) def test_restore_backup_with_bad_service(self): """Test error handling. Test error handling when attempting a restore of a backup with a different service to that used to create the backup. 
""" vol_id = self._create_volume_db_entry(status='restoring-backup', size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.restore_backup, self.ctxt, backup, vol_id) vol = db.volume_get(self.ctxt, vol_id) self.assertEqual('error', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.utils.temporary_chown') @mock.patch('six.moves.builtins.open') @mock.patch.object(os.path, 'isdir', return_value=False) def test_restore_backup(self, mock_isdir, mock_open, mock_temporary_chown, mock_get_conn): """Test normal backup restoration.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='restoring-backup', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) properties = {} mock_get_conn.return_value = properties mock_open.return_value = open('/dev/null', 'wb') mock_secure_enabled = ( self.volume_mocks['secure_file_operations_enabled']) mock_secure_enabled.return_value = False vol = objects.Volume.get_by_id(self.ctxt, vol_id) attach_info = {'device': {'path': '/dev/null'}} mock_detach_device = self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = attach_info self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) mock_temporary_chown.assert_called_once_with('/dev/null') mock_get_conn.assert_called_once_with() mock_secure_enabled.assert_called_once_with(self.ctxt, vol) mock_attach_device.assert_called_once_with(self.ctxt, vol, properties) mock_detach_device.assert_called_once_with(self.ctxt, attach_info, vol, properties, force=True) vol = objects.Volume.get_by_id(self.ctxt, vol_id) 
self.assertEqual('available', vol['status']) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch('cinder.volume.utils.notify_about_backup_usage') def test_restore_backup_with_notify(self, notify): """Test normal backup restoration with notifications.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='restoring-backup', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=vol_id) self.backup_mgr._run_restore = mock.Mock() self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) self.assertEqual(2, notify.call_count) @mock.patch('cinder.volume.utils.clone_encryption_key') @mock.patch('cinder.volume.utils.delete_encryption_key') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.restore') @mock.patch('cinder.utils.brick_get_connector_properties') def test_restore_backup_encrypted_volume(self, mock_connector_properties, mock_backup_driver_restore, mock_delete_encryption_key, mock_clone_encryption_key): """Test restore of encrypted volume. Test restoring a volume from its own backup. In this situation, the volume's encryption key ID shouldn't change. 
""" vol_id = self._create_volume_db_entry(status='restoring-backup', encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.RESTORING, encryption_key_id=fake.UUID2) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) volume = db.volume_get(self.ctxt, vol_id) self.assertEqual(fake.UUID1, volume.encryption_key_id) mock_clone_encryption_key.assert_not_called() mock_delete_encryption_key.assert_not_called() @mock.patch('cinder.volume.utils.clone_encryption_key') @mock.patch('cinder.volume.utils.delete_encryption_key') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.restore') @mock.patch('cinder.utils.brick_get_connector_properties') def test_restore_backup_new_encrypted_volume(self, mock_connector_properties, mock_backup_driver_restore, mock_delete_encryption_key, mock_clone_encryption_key): """Test restore of encrypted volume. Test handling of encryption key IDs when retoring to another encrypted volume, i.e. a volume whose key ID is different from the volume originally backed up. - The volume's prior encryption key ID is deleted. - The volume is assigned a fresh clone of the backup's encryption key ID. """ vol_id = self._create_volume_db_entry(status='restoring-backup', encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.RESTORING, encryption_key_id=fake.UUID2) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} mock_clone_encryption_key.return_value = fake.UUID3 # Mimic the driver's side effect where it updates the volume's # metadata. 
For backups of encrypted volumes, this will essentially # overwrite the volume's encryption key ID prior to the restore. def restore_side_effect(backup, volume_id, volume_file): db.volume_update(self.ctxt, volume_id, {'encryption_key_id': fake.UUID4}) mock_backup_driver_restore.side_effect = restore_side_effect self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) # Volume's original encryption key ID should be deleted mock_delete_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID1) # Backup's encryption key ID should have been cloned mock_clone_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID2) # Volume should have the cloned backup key ID volume = db.volume_get(self.ctxt, vol_id) self.assertEqual(fake.UUID3, volume.encryption_key_id) # Backup's key ID should not have changed backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fake.UUID2, backup.encryption_key_id) @mock.patch('cinder.volume.utils.clone_encryption_key') @mock.patch('cinder.volume.utils.delete_encryption_key') @mock.patch( 'cinder.tests.unit.backup.fake_service.FakeBackupService.restore') @mock.patch('cinder.utils.brick_get_connector_properties') def test_restore_backup_glean_key_id(self, mock_connector_properties, mock_backup_driver_restore, mock_delete_encryption_key, mock_clone_encryption_key): """Test restore of encrypted volume. Test restoring a backup that was created prior to when the encryption key ID is saved in the backup DB. The backup encryption key ID is gleaned from the restored volume. 
""" vol_id = self._create_volume_db_entry(status='restoring-backup', encryption_key_id=fake.UUID1) backup = self._create_backup_db_entry( volume_id=vol_id, status=fields.BackupStatus.RESTORING) self.mock_object(self.backup_mgr, '_detach_device') mock_attach_device = self.mock_object(self.backup_mgr, '_attach_device') mock_attach_device.return_value = {'device': {'path': '/dev/null'}} mock_clone_encryption_key.return_value = fake.UUID3 # Mimic the driver's side effect where it updates the volume's # metadata. For backups of encrypted volumes, this will essentially # overwrite the volume's encryption key ID prior to the restore. def restore_side_effect(backup, volume_id, volume_file): db.volume_update(self.ctxt, volume_id, {'encryption_key_id': fake.UUID4}) mock_backup_driver_restore.side_effect = restore_side_effect self.backup_mgr.restore_backup(self.ctxt, backup, vol_id) # Volume's original encryption key ID should be deleted mock_delete_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID1) # Backup's encryption key ID should have been cloned from # the value restored from the metadata. mock_clone_encryption_key.assert_called_once_with(self.ctxt, mock.ANY, fake.UUID4) # Volume should have the cloned backup key ID volume = db.volume_get(self.ctxt, vol_id) self.assertEqual(fake.UUID3, volume.encryption_key_id) # Backup's key ID should have been gleaned from value restored # from the backup's metadata backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fake.UUID4, backup.encryption_key_id) def test_delete_backup_with_bad_backup_status(self): """Test error handling. Test error handling when deleting a backup with a backup with a bad status. 
""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_error(self): """Test error handling when an error occurs during backup deletion.""" vol_id = self._create_volume_db_entry(size=1) backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, display_name='fail_on_delete', volume_id=vol_id) self.assertRaises(IOError, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_bad_service(self): """Test error handling. Test error handling when attempting a delete of a backup with a different service to that used to create the backup. """ vol_id = self._create_volume_db_entry(size=1) service = 'cinder.tests.backup.bad_service' backup = self._create_backup_db_entry( status=fields.BackupStatus.DELETING, volume_id=vol_id, service=service) self.assertRaises(exception.InvalidBackup, self.backup_mgr.delete_backup, self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_delete_backup_with_no_service(self): """Test error handling. 
        Test error handling when attempting a delete of a backup
        with no service defined for that backup, relates to bug #1162908
        """
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING,
            volume_id=vol_id)
        backup.service = None
        backup.save()
        # Must succeed (not raise) even with no service recorded.
        self.backup_mgr.delete_backup(self.ctxt, backup)

    @ddt.data('cinder.tests.unit.backup.fake_service.FakeBackupService',
              'cinder.tests.unit.backup.fake_service')
    def test_delete_backup(self, service):
        """Test normal backup deletion."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING,
            volume_id=vol_id,
            service=service)
        self.backup_mgr.delete_backup(self.ctxt, backup)
        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup.id)

        # Deleted rows remain visible with read_deleted='yes'.
        ctxt_read_deleted = context.get_admin_context('yes')
        backup = db.backup_get(ctxt_read_deleted, backup.id)
        self.assertTrue(backup.deleted)
        self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
        self.assertEqual(fields.BackupStatus.DELETED, backup.status)

    @mock.patch('cinder.volume.utils.delete_encryption_key')
    def test_delete_backup_of_encrypted_volume(self,
                                               mock_delete_encryption_key):
        """Test deletion of backup of encrypted volume"""
        vol_id = self._create_volume_db_entry(
            encryption_key_id=fake.UUID1)
        backup = self._create_backup_db_entry(
            volume_id=vol_id,
            status=fields.BackupStatus.DELETING,
            encryption_key_id=fake.UUID2)
        self.backup_mgr.delete_backup(self.ctxt, backup)
        # The backup's cloned key (not the volume's) must be deleted.
        mock_delete_encryption_key.assert_called_once_with(self.ctxt,
                                                           mock.ANY,
                                                           fake.UUID2)
        ctxt_read_deleted = context.get_admin_context('yes')
        backup = db.backup_get(ctxt_read_deleted, backup.id)
        self.assertTrue(backup.deleted)
        self.assertIsNone(backup.encryption_key_id)

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    def test_delete_backup_with_notify(self, notify):
        """Test normal backup deletion with notifications."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING,
            volume_id=vol_id)
        self.backup_mgr.delete_backup(self.ctxt, backup)
        # Two usage notifications are expected (presumably delete.start /
        # delete.end) — confirm against notify_about_backup_usage callers.
        self.assertEqual(2, notify.call_count)

    def test_list_backup(self):
        """Only the requested project's backups are listed."""
        project_id = str(uuid.uuid4())
        backups = db.backup_get_all_by_project(self.ctxt, project_id)
        self.assertEqual(0, len(backups))

        self._create_backup_db_entry()
        b2 = self._create_backup_db_entry(project_id=project_id)
        backups = db.backup_get_all_by_project(self.ctxt, project_id)
        self.assertEqual(1, len(backups))
        self.assertEqual(b2.id, backups[0].id)

    def test_backup_get_all_by_project_with_deleted(self):
        """Test deleted backups.

        Test deleted backups don't show up in backup_get_all_by_project.
        Unless context.read_deleted is 'yes'.
        """
        project_id = str(uuid.uuid4())
        backups = db.backup_get_all_by_project(self.ctxt, project_id)
        self.assertEqual(0, len(backups))

        backup_keep = self._create_backup_db_entry(project_id=project_id)
        backup = self._create_backup_db_entry(project_id=project_id)
        db.backup_destroy(self.ctxt, backup.id)
        backups = db.backup_get_all_by_project(self.ctxt, project_id)
        self.assertEqual(1, len(backups))
        self.assertEqual(backup_keep.id, backups[0].id)

        ctxt_read_deleted = context.get_admin_context('yes')
        backups = db.backup_get_all_by_project(ctxt_read_deleted, project_id)
        self.assertEqual(2, len(backups))

    def test_backup_get_all_by_host_with_deleted(self):
        """Test deleted backups.

        Test deleted backups don't show up in backup_get_all_by_project.
        Unless context.read_deleted is 'yes'
        """
        backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
        self.assertEqual(0, len(backups))

        backup_keep = self._create_backup_db_entry()
        backup = self._create_backup_db_entry()
        db.backup_destroy(self.ctxt, backup.id)
        backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
        self.assertEqual(1, len(backups))
        self.assertEqual(backup_keep.id, backups[0].id)

        ctxt_read_deleted = context.get_admin_context('yes')
        backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
        self.assertEqual(2, len(backups))

    def test_backup_manager_driver_name(self):
        """Test mapping between backup services and backup drivers."""
        # The legacy 'services' path is remapped to the 'drivers' namespace.
        self.override_config('backup_driver', "cinder.backup.services.swift")
        backup_mgr = \
            importutils.import_object(CONF.backup_manager)
        self.assertEqual('cinder.backup.drivers.swift',
                         backup_mgr.driver_name)

    def test_export_record_with_bad_service(self):
        """Test error handling.

        Test error handling when attempting an export of a backup
        record with a different service to that used to create the backup.
        """
        vol_id = self._create_volume_db_entry(size=1)
        service = 'cinder.tests.backup.bad_service'
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.AVAILABLE,
            volume_id=vol_id,
            service=service)

        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.export_record,
                          self.ctxt,
                          backup)

    def test_export_record_with_bad_backup_status(self):
        """Test error handling.

        Test error handling when exporting a backup record with
        a backup with a bad status.
""" vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.export_record, self.ctxt, backup) @ddt.data('cinder.tests.unit.backup.fake_service.FakeBackupService', 'cinder.tests.unit.backup.fake_service') def test_export_record(self, service): """Test normal backup record export.""" vol_size = 1 vol_id = self._create_volume_db_entry(status='available', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id, service=service) export = self.backup_mgr.export_record(self.ctxt, backup) self.assertEqual(service, export['backup_service']) self.assertIn('backup_url', export) def test_import_record_with_verify_not_implemented(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver does not support verify. """ vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry(vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) def test_import_record_with_wrong_id(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver does not support verify. 
""" vol_size = 1 export = self._create_exported_record_entry(vol_size=vol_size) imported_record = self._create_export_record_db_entry() backup_hosts = [] self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) def test_import_record_with_bad_service(self): """Test error handling. Test error handling when attempting an import of a backup record with a different service to that used to create the backup. """ export = self._create_exported_record_entry() export['backup_service'] = 'cinder.tests.unit.backup.bad_service' imported_record = self._create_export_record_db_entry() # Test the case where the additional hosts list is empty backup_hosts = [] self.assertRaises(exception.ServiceNotFound, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) # Test that the import backup keeps calling other hosts to find a # suitable host for the backup service backup_hosts = ['fake1', 'fake2'] backup_hosts_expect = list(backup_hosts) BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record' with mock.patch(BackupAPI_import) as _mock_backup_import: self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) next_host = backup_hosts_expect.pop() _mock_backup_import.assert_called_once_with( self.ctxt, next_host, imported_record, export['backup_service'], export['backup_url'], backup_hosts_expect) def test_import_record_with_invalid_backup(self): """Test error handling. Test error handling when attempting an import of a backup record where the backup driver returns an exception. 
""" export = self._create_exported_record_entry() backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_record_import_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'import_record')) imported_record = self._create_export_record_db_entry() backup_hosts = [] with mock.patch(_mock_record_import_class) as _mock_record_import: _mock_record_import.side_effect = FakeBackupException('fake') self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) self.assertTrue(_mock_record_import.called) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_not_supported_driver_to_force_delete(self): """Test force delete check method for not supported drivers.""" self.override_config('backup_driver', 'cinder.backup.drivers.ceph') self.backup_mgr = importutils.import_object(CONF.backup_manager) result = self.backup_mgr.check_support_to_force_delete(self.ctxt) self.assertFalse(result) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_init_backup_repo_path', return_value=None) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' 'check_for_setup_error', return_value=None) def test_check_support_to_force_delete(self, mock_check_configuration, mock_init_backup_repo_path): """Test force delete check method for supported drivers.""" self.override_config('backup_driver', 'cinder.backup.drivers.nfs') self.backup_mgr = importutils.import_object(CONF.backup_manager) result = self.backup_mgr.check_support_to_force_delete(self.ctxt) self.assertTrue(result) def test_backup_has_dependent_backups(self): """Test backup has dependent backups. Test the query of has_dependent_backups in backup object is correct. 
""" vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertFalse(backup.has_dependent_backups) class BackupTestCaseWithVerify(BaseBackupTest): """Test Case for backups.""" def setUp(self): self.override_config( "backup_driver", "cinder.tests.unit.backup.fake_service_with_verify") super(BackupTestCaseWithVerify, self).setUp() def test_import_record_with_verify(self): """Test normal backup record import. Test the case when import succeeds for the case that the driver implements verify. """ vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry( vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) def mock_verify(backup_id): backup = db.backup_get(self.ctxt, backup_id) self.assertEqual(fields.BackupStatus.CREATING, backup['status']) with mock.patch(_mock_backup_verify_class) as mock_backup_verify: mock_backup_verify.side_effect = mock_verify self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) def test_import_record_with_verify_invalid_backup(self): """Test error handling. Test error handling when attempting an import of a backup record where the backup driver returns an exception. 
""" vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry( vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) with mock.patch(_mock_backup_verify_class) as _mock_record_verify: _mock_record_verify.side_effect = \ exception.InvalidBackup(reason='fake') self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) self.assertTrue(_mock_record_verify.called) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_from_nonrestoring_to_available( self, mock_clean_temp): vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol_id) with mock.patch.object(manager.BackupManager, '_map_service_to_driver') as \ mock_map_service_to_driver: # It should works when the service name is a string backup_driver = 'cinder.tests.unit.backup.fake_service_with_verify' mock_map_service_to_driver.return_value = backup_driver self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) mock_clean_temp.assert_called_once_with(self.ctxt, backup) new_backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, new_backup['status']) mock_map_service_to_driver.return_value = backup_driver self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.ERROR) mock_clean_temp.reset_mock() self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) 
mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) def test_backup_reset_status_to_available_invalid_backup(self): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=volume['id']) backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) with mock.patch(_mock_backup_verify_class) as \ _mock_record_verify: _mock_record_verify.side_effect = \ exception.BackupVerifyUnsupportedDriver(reason='fake') self.assertRaises(exception.BackupVerifyUnsupportedDriver, self.backup_mgr.reset_status, self.ctxt, backup, fields.BackupStatus.AVAILABLE) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_from_restoring_to_available( self, mock_clean_temp): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=volume['id']) self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_to_error(self, mock_clean_temp): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry( 
status=fields.BackupStatus.CREATING, volume_id=volume['id']) self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.ERROR) mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup['id']) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @ddt.ddt class BackupAPITestCase(BaseBackupTest): def setUp(self): super(BackupAPITestCase, self).setUp() self.api = api.API() def test_get_all_wrong_all_tenants_value(self): self.assertRaises(exception.InvalidParameterValue, self.api.get_all, self.ctxt, {'all_tenants': 'bad'}) @mock.patch.object(objects, 'BackupList') def test_get_all_no_all_tenants_value(self, mock_backuplist): result = self.api.get_all(self.ctxt, {'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') @ddt.data(False, 'false', '0', 0, 'no') def test_get_all_false_value_all_tenants( self, false_value, mock_backuplist): result = self.api.get_all(self.ctxt, {'all_tenants': false_value, 'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') @ddt.data(True, 'true', '1', 1, 'yes') def test_get_all_true_value_all_tenants( self, true_value, mock_backuplist): result = self.api.get_all(self.ctxt, {'all_tenants': true_value, 'key': 'value'}) self.assertFalse(mock_backuplist.get_all_by_project.called) self.assertEqual(mock_backuplist.get_all.return_value, result) mock_backuplist.get_all.assert_called_once_with( self.ctxt, {'key': 'value'}, None, None, None, None, None) 
@mock.patch.object(objects, 'BackupList') def test_get_all_true_value_all_tenants_non_admin(self, mock_backuplist): ctxt = context.RequestContext(uuid.uuid4(), uuid.uuid4()) result = self.api.get_all(ctxt, {'all_tenants': '1', 'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(db, 'backup_create', side_effect=db_exc.DBError()) def test_create_when_failed_to_create_backup_object( self, mock_create, mock_get_service): # Create volume in admin context volume_id = utils.create_volume(self.ctxt)['id'] # Will try to backup from a different context new_context = copy.copy(self.ctxt) new_context.user_id = uuid.uuid4() new_context.project_id = uuid.uuid4() # The opposite side of this test case is a "NotImplementedError: # Cannot load 'id' in the base class" being raised. # More detailed, in the try clause, if backup.create() failed # with DB exception, backup.id won't be assigned. However, # in the except clause, backup.destroy() is invoked to do cleanup, # which internally tries to access backup.id. self.assertRaises(db_exc.DBError, self.api.create, context=new_context, name="test_backup", description="test backup description", volume_id=volume_id, container='volumebackups') @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(objects.Backup, '__init__', side_effect=exception.InvalidInput( reason='Failed to new')) def test_create_when_failed_to_new_backup_object(self, mock_new, mock_get_service): volume_id = utils.create_volume(self.ctxt)['id'] # The opposite side of this test case is that a "UnboundLocalError: # local variable 'backup' referenced before assignment" is raised. 
# More detailed, in the try clause, backup = objects.Backup(...) # raises exception, so 'backup' is not assigned. But in the except # clause, 'backup' is referenced to invoke cleanup methods. self.assertRaises(exception.InvalidInput, self.api.create, context=self.ctxt, name="test_backup", description="test backup description", volume_id=volume_id, container='volumebackups') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @mock.patch('cinder.backup.api.API._is_backup_service_enabled') def test_create_backup_in_same_host(self, mock_is_enable, mock_create): self.override_config('backup_use_same_host', True) mock_is_enable.return_value = True self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='available', host='testhost#lvm', size=1) backup = self.api.create(self.ctxt, None, None, volume_id, None) self.assertEqual('testhost', backup.host) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') def test_create_backup_from_snapshot_with_volume_in_use( self, mock_create, mock_get_service): self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='in-use') snapshot = self._create_snapshot_db_entry(volume_id=volume_id) backup = self.api.create(self.ctxt, None, None, volume_id, None, snapshot_id=snapshot.id) self.assertEqual(fields.BackupStatus.CREATING, backup.status) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual(fields.SnapshotStatus.BACKING_UP, snapshot.status) self.assertEqual('in-use', volume.status) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @ddt.data(True, False) def test_create_backup_resource_status(self, is_snapshot, mock_create, 
mock_get_service): self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='available') snapshot = self._create_snapshot_db_entry(volume_id=volume_id) if is_snapshot: self.api.create(self.ctxt, None, None, volume_id, None, snapshot_id=snapshot.id) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual('backing-up', snapshot.status) self.assertEqual('available', volume.status) else: self.api.create(self.ctxt, None, None, volume_id, None) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual('available', snapshot.status) self.assertEqual('backing-up', volume.status) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') def test_restore_volume(self, mock_rpcapi_restore, mock_get_backup_host): volume_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(size=1, status='available') mock_get_backup_host.return_value = 'testhost' self.api.restore(self.ctxt, backup.id, volume_id) backup = objects.Backup.get_by_id(self.ctxt, backup.id) self.assertEqual(volume_id, backup.restore_volume_id)
46.833
79
0.609154
import copy import ddt import os import uuid import mock from os_brick.initiator.connectors import fake as fake_connectors from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import importutils from oslo_utils import timeutils import cinder from cinder.backup import api from cinder.backup import manager from cinder import context from cinder import db from cinder import exception from cinder import objects from cinder.objects import fields from cinder import test from cinder.tests import fake_driver from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import utils from cinder.volume import rpcapi as volume_rpcapi CONF = cfg.CONF class FakeBackupException(Exception): pass class BaseBackupTest(test.TestCase): def setUp(self): super(BaseBackupTest, self).setUp() self.backup_mgr = importutils.import_object(CONF.backup_manager) self.backup_mgr.host = 'testhost' self.backup_mgr.is_initialized = True self.ctxt = context.get_admin_context() paths = ['cinder.volume.rpcapi.VolumeAPI.delete_snapshot', 'cinder.volume.rpcapi.VolumeAPI.delete_volume', 'cinder.volume.rpcapi.VolumeAPI.detach_volume', 'cinder.volume.rpcapi.VolumeAPI.' 
'secure_file_operations_enabled'] self.volume_patches = {} self.volume_mocks = {} for path in paths: name = path.split('.')[-1] self.volume_patches[name] = mock.patch(path) self.volume_mocks[name] = self.volume_patches[name].start() self.addCleanup(self.volume_patches[name].stop) def _create_backup_db_entry(self, volume_id=str(uuid.uuid4()), restore_volume_id=None, display_name='test_backup', display_description='this is a test backup', container='volumebackups', status=fields.BackupStatus.CREATING, size=1, object_count=0, project_id=str(uuid.uuid4()), service=None, temp_volume_id=None, temp_snapshot_id=None, snapshot_id=None, metadata=None, parent_id=None, encryption_key_id=None): kwargs = {} kwargs['volume_id'] = volume_id kwargs['restore_volume_id'] = restore_volume_id kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = project_id kwargs['host'] = 'testhost' kwargs['availability_zone'] = '1' kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['container'] = container kwargs['status'] = status kwargs['fail_reason'] = '' kwargs['service'] = service or CONF.backup_driver kwargs['snapshot_id'] = snapshot_id kwargs['parent_id'] = parent_id kwargs['size'] = size kwargs['object_count'] = object_count kwargs['temp_volume_id'] = temp_volume_id kwargs['temp_snapshot_id'] = temp_snapshot_id kwargs['metadata'] = metadata or {} kwargs['encryption_key_id'] = encryption_key_id backup = objects.Backup(context=self.ctxt, **kwargs) backup.create() return backup def _create_volume_db_entry(self, display_name='test_volume', display_description='this is a test volume', status='backing-up', previous_status='available', size=1, host='testhost', encryption_key_id=None): vol = {} vol['size'] = size vol['host'] = host vol['user_id'] = str(uuid.uuid4()) vol['project_id'] = str(uuid.uuid4()) vol['status'] = status vol['display_name'] = display_name vol['display_description'] = display_description vol['attach_status'] = 
fields.VolumeAttachStatus.DETACHED vol['availability_zone'] = '1' vol['previous_status'] = previous_status vol['encryption_key_id'] = encryption_key_id volume = objects.Volume(context=self.ctxt, **vol) volume.create() return volume.id def _create_snapshot_db_entry(self, display_name='test_snapshot', display_description='test snapshot', status=fields.SnapshotStatus.AVAILABLE, size=1, volume_id=str(uuid.uuid4()), provider_location=None): kwargs = {} kwargs['size'] = size kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = str(uuid.uuid4()) kwargs['status'] = status kwargs['display_name'] = display_name kwargs['display_description'] = display_description kwargs['volume_id'] = volume_id kwargs['cgsnapshot_id'] = None kwargs['volume_size'] = size kwargs['metadata'] = {} kwargs['provider_location'] = provider_location snapshot_obj = objects.Snapshot(context=self.ctxt, **kwargs) snapshot_obj.create() return snapshot_obj def _create_volume_attach(self, volume_id): values = {'volume_id': volume_id, 'attach_status': fields.VolumeAttachStatus.ATTACHED, } attachment = db.volume_attach(self.ctxt, values) db.volume_attached(self.ctxt, attachment['id'], None, 'testhost', '/dev/vd0') def _create_exported_record_entry(self, vol_size=1, exported_id=None): vol_id = self._create_volume_db_entry(status='available', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id) if exported_id is not None: backup.id = exported_id export = self.backup_mgr.export_record(self.ctxt, backup) return export def _create_export_record_db_entry(self, volume_id=str(uuid.uuid4()), status=fields.BackupStatus.CREATING, project_id=str(uuid.uuid4()), backup_id=None): kwargs = {} kwargs['volume_id'] = volume_id kwargs['user_id'] = str(uuid.uuid4()) kwargs['project_id'] = project_id kwargs['status'] = status if backup_id: kwargs['id'] = backup_id backup = objects.BackupImport(context=self.ctxt, **kwargs) backup.create() return backup @ddt.ddt class 
BackupTestCase(BaseBackupTest):
    # NOTE(review): the `class` keyword for this definition precedes this
    # chunk; the header fragment above is reproduced verbatim.

    @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
                       'set_initialized')
    @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
                       'do_setup')
    @mock.patch.object(cinder.tests.fake_driver.FakeLoggingVolumeDriver,
                       'check_for_setup_error')
    @mock.patch('cinder.context.get_admin_context')
    def test_init_host(self, mock_get_admin_context, mock_check, mock_setup,
                       mock_set_initialized):
        """init_host resets states left over from an unclean shutdown.

        Seeds volumes/backups stuck in transient states and checks that
        init_host rolls each one to its expected terminal state and deletes
        leftover temp volumes.
        """
        def get_admin_context():
            return self.ctxt

        self.override_config('backup_service_inithost_offload', False)
        self.override_config('periodic_interval', 0)

        # vol1: attached, mid-backup -> should become 'available' again.
        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
        # vol2: attached, mid-restore -> should become 'error_restoring'.
        vol2_id = self._create_volume_db_entry()
        self._create_volume_attach(vol2_id)
        db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
        vol3_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol3_id, {'status': 'available'})
        vol4_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol4_id, {'status': 'backing-up'})
        # Temp volume left behind by an interrupted backup of vol4.
        temp_vol_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, temp_vol_id, {'status': 'available'})
        vol5_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol5_id, {'status': 'backing-up'})
        # Temp snapshot left behind by an interrupted backup of vol5.
        temp_snap = self._create_snapshot_db_entry()
        temp_snap.status = fields.SnapshotStatus.AVAILABLE
        temp_snap.save()

        backup1 = self._create_backup_db_entry(
            status=fields.BackupStatus.CREATING, volume_id=vol1_id)
        backup2 = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING,
            restore_volume_id=vol2_id)
        backup3 = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING, volume_id=vol3_id)
        self._create_backup_db_entry(status=fields.BackupStatus.CREATING,
                                     volume_id=vol4_id,
                                     temp_volume_id=temp_vol_id)
        self._create_backup_db_entry(status=fields.BackupStatus.CREATING,
                                     volume_id=vol5_id,
                                     temp_snapshot_id=temp_snap.id)

        mock_get_admin_context.side_effect = get_admin_context
        self.volume = importutils.import_object(CONF.volume_manager)
        self.backup_mgr.init_host()

        vol1 = db.volume_get(self.ctxt, vol1_id)
        self.assertEqual('available', vol1['status'])
        vol2 = db.volume_get(self.ctxt, vol2_id)
        self.assertEqual('error_restoring', vol2['status'])
        vol3 = db.volume_get(self.ctxt, vol3_id)
        self.assertEqual('available', vol3['status'])
        vol4 = db.volume_get(self.ctxt, vol4_id)
        self.assertEqual('available', vol4['status'])
        vol5 = db.volume_get(self.ctxt, vol5_id)
        self.assertEqual('available', vol5['status'])

        # In-flight create -> ERROR; in-flight restore -> AVAILABLE;
        # in-flight delete is finished off (record gone).
        backup1 = db.backup_get(self.ctxt, backup1.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup1['status'])
        backup2 = db.backup_get(self.ctxt, backup2.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup2['status'])
        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup3.id)

        # Leftover temp volume is deleted and attachments detached.
        temp_vol = objects.Volume.get_by_id(self.ctxt, temp_vol_id)
        self.volume_mocks['delete_volume'].assert_called_once_with(
            self.ctxt, temp_vol)
        self.assertTrue(self.volume_mocks['detach_volume'].called)

    @mock.patch('cinder.objects.backup.BackupList.get_all_by_host')
    @mock.patch('cinder.manager.ThreadPoolManager._add_to_threadpool')
    def test_init_host_with_service_inithost_offload(self,
                                                     mock_add_threadpool,
                                                     mock_get_all_by_host):
        """With offload enabled, init_host queues deletions on the pool."""
        vol1_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol1_id, {'status': 'available'})
        backup1 = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING, volume_id=vol1_id)

        vol2_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol2_id, {'status': 'available'})
        backup2 = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING, volume_id=vol2_id)
        mock_get_all_by_host.return_value = [backup1, backup2]
        self.backup_mgr.init_host()
        calls = [mock.call(self.backup_mgr.delete_backup, mock.ANY, backup1),
                 mock.call(self.backup_mgr.delete_backup, mock.ANY, backup2)]
        mock_add_threadpool.assert_has_calls(calls, any_order=True)
        self.assertEqual(2, mock_add_threadpool.call_count)

    @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version')
    @mock.patch('cinder.objects.service.Service.get_minimum_obj_version')
    @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-backup': '1.3',
                                                 'cinder-volume': '1.7'})
    @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-backup': '1.2',
                                                 'cinder-volume': '1.4'})
    def test_reset(self, get_min_obj, get_min_rpc):
        """reset() refreshes the RPC/object version caps of both rpcapis."""
        get_min_obj.return_value = 'liberty'
        backup_mgr = manager.BackupManager()

        # Before reset: caps pinned by the patched LAST_*_VERSIONS dicts.
        backup_rpcapi = backup_mgr.backup_rpcapi
        volume_rpcapi = backup_mgr.volume_rpcapi
        self.assertEqual('1.3', backup_rpcapi.client.version_cap)
        self.assertEqual('1.2',
                         backup_rpcapi.client.serializer._base.version_cap)
        self.assertEqual('1.7', volume_rpcapi.client.version_cap)
        self.assertEqual('1.4',
                         volume_rpcapi.client.serializer._base.version_cap)
        get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current()
        backup_mgr.reset()

        # After reset: caps come from the (mocked) service minimums.
        backup_rpcapi = backup_mgr.backup_rpcapi
        volume_rpcapi = backup_mgr.volume_rpcapi
        self.assertEqual(get_min_rpc.return_value,
                         backup_rpcapi.client.version_cap)
        self.assertEqual(get_min_obj.return_value,
                         backup_rpcapi.client.serializer._base.version_cap)
        self.assertIsNone(backup_rpcapi.client.serializer._base.manifest)
        self.assertEqual(get_min_rpc.return_value,
                         volume_rpcapi.client.version_cap)
        self.assertEqual(get_min_obj.return_value,
                         volume_rpcapi.client.serializer._base.version_cap)
        self.assertIsNone(volume_rpcapi.client.serializer._base.manifest)

    @ddt.data(True, False)
    def test_is_working(self, initialized):
        """is_working() simply mirrors the is_initialized flag."""
        self.backup_mgr.is_initialized = initialized
        self.assertEqual(initialized, self.backup_mgr.is_working())

    def test_cleanup_incomplete_backup_operations_with_exceptions(self):
        """Cleanup continues over all backups even when each step raises."""
        fake_backup_list = [{'id': str(uuid.uuid4())},
                            {'id': str(uuid.uuid4())},
                            {'id': str(uuid.uuid4())}]
        mock_backup_get_by_host = self.mock_object(
            objects.BackupList, 'get_all_by_host')
        mock_backup_get_by_host.return_value = fake_backup_list

        # NOTE(review): single-element side_effect lists — after the first
        # call the mock raises StopIteration, which is still an exception
        # the cleanup loop must swallow, so call_count keeps advancing.
        mock_backup_cleanup = self.mock_object(
            self.backup_mgr, '_cleanup_one_backup')
        mock_backup_cleanup.side_effect = [Exception]

        mock_temp_cleanup = self.mock_object(
            self.backup_mgr, '_cleanup_temp_volumes_snapshots_for_one_backup')
        mock_temp_cleanup.side_effect = [Exception]

        self.assertIsNone(
            self.backup_mgr._cleanup_incomplete_backup_operations(
                self.ctxt))

        self.assertEqual(len(fake_backup_list),
                         mock_backup_cleanup.call_count)
        self.assertEqual(len(fake_backup_list),
                         mock_temp_cleanup.call_count)

    def test_cleanup_one_backing_up_volume(self):
        """A 'backing-up' volume is returned to its previous status."""
        volume_id = self._create_volume_db_entry(status='backing-up',
                                                 previous_status='available')
        volume = db.volume_get(self.ctxt, volume_id)
        self.backup_mgr._cleanup_one_volume(self.ctxt, volume)

        volume = db.volume_get(self.ctxt, volume_id)
        self.assertEqual('available', volume['status'])

    def test_cleanup_one_restoring_backup_volume(self):
        """A 'restoring-backup' volume is moved to 'error_restoring'."""
        volume_id = self._create_volume_db_entry(status='restoring-backup')
        volume = db.volume_get(self.ctxt, volume_id)
        self.backup_mgr._cleanup_one_volume(self.ctxt, volume)

        volume = db.volume_get(self.ctxt, volume_id)
        self.assertEqual('error_restoring', volume['status'])

    def test_cleanup_one_creating_backup(self):
        """An in-flight CREATING backup is errored and its volume freed."""
        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up', })

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.CREATING,
            volume_id=vol1_id)

        self.backup_mgr._cleanup_one_backup(self.ctxt, backup)

        self.assertEqual(fields.BackupStatus.ERROR, backup.status)
        volume = objects.Volume.get_by_id(self.ctxt, vol1_id)
        self.assertEqual('available', volume.status)

    def test_cleanup_one_restoring_backup(self):
        """An in-flight RESTORING backup goes back to AVAILABLE."""
        vol1_id = self._create_volume_db_entry()
        db.volume_update(self.ctxt, vol1_id, {'status': 'restoring-backup', })

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING,
            restore_volume_id=vol1_id)

        self.backup_mgr._cleanup_one_backup(self.ctxt, backup)

        self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
        volume = objects.Volume.get_by_id(self.ctxt, vol1_id)
        self.assertEqual('error_restoring', volume.status)

    def test_cleanup_one_deleting_backup(self):
        """An in-flight DELETING backup is deleted to completion."""
        self.override_config('backup_service_inithost_offload', False)

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING)

        self.backup_mgr._cleanup_one_backup(self.ctxt, backup)

        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup.id)

    def test_cleanup_one_deleting_encrypted_backup(self):
        """A DELETING encrypted backup that can't be removed -> ERROR_DELETING."""
        self.override_config('backup_service_inithost_offload', False)

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING,
            encryption_key_id=fake.ENCRYPTION_KEY_ID)

        self.backup_mgr._cleanup_one_backup(self.ctxt, backup)

        backup = db.backup_get(self.ctxt, backup.id)
        self.assertIsNotNone(backup)
        self.assertEqual(fields.BackupStatus.ERROR_DELETING,
                         backup.status)

    def test_detach_all_attachments_handles_exceptions(self):
        """Detach failures are logged per attachment, not propagated."""
        mock_log = self.mock_object(manager, 'LOG')
        self.volume_mocks['detach_volume'].side_effect = [Exception]

        fake_attachments = [
            {
                'id': str(uuid.uuid4()),
                'attached_host': 'testhost',
                'instance_uuid': None,
            },
            {
                'id': str(uuid.uuid4()),
                'attached_host': 'testhost',
                'instance_uuid': None,
            }
        ]
        fake_volume = {
            'id': str(uuid.uuid4()),
            'volume_attachment': fake_attachments
        }

        self.backup_mgr._detach_all_attachments(self.ctxt,
                                                fake_volume)

        self.assertEqual(len(fake_attachments), mock_log.exception.call_count)

    @ddt.data(KeyError, exception.VolumeNotFound)
    def test_cleanup_temp_volumes_snapshots_for_one_backup_volume_not_found(
            self, err):
        """Missing source volume is tolerated during temp-resource cleanup."""
        mock_volume_get = self.mock_object(db, 'volume_get')
        mock_volume_get.side_effect = [err]

        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.CREATING)

        self.assertIsNone(
            self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
                self.ctxt,
                backup))

    def test_cleanup_temp_snapshot_for_one_backup_not_found(self):
        """A dangling temp_snapshot_id is cleared without a delete call."""
        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.ERROR,
            volume_id=vol1_id,
            temp_snapshot_id=str(uuid.uuid4()))

        self.assertIsNone(
            self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
                self.ctxt,
                backup))

        self.assertFalse(self.volume_mocks['delete_snapshot'].called)
        self.assertIsNone(backup.temp_snapshot_id)

        backup.destroy()
        db.volume_destroy(self.ctxt, vol1_id)

    def test_cleanup_temp_volume_for_one_backup_not_found(self):
        """A dangling temp_volume_id is cleared without a delete call."""
        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
        backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR,
                                              volume_id=vol1_id,
                                              temp_volume_id=str(uuid.uuid4()))

        self.assertIsNone(
            self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
                self.ctxt,
                backup))

        self.assertFalse(self.volume_mocks['delete_volume'].called)
        self.assertIsNone(backup.temp_volume_id)

        backup.destroy()
        db.volume_destroy(self.ctxt, vol1_id)

    def test_create_backup_with_bad_volume_status(self):
        """create_backup rejects volumes not in a backup-able state."""
        vol_id = self._create_volume_db_entry(status='restoring', size=1)
        backup = self._create_backup_db_entry(volume_id=vol_id)
        self.assertRaises(exception.InvalidVolume,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup)

    def test_create_backup_with_bad_backup_status(self):
        """create_backup rejects backups not in CREATING status."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.AVAILABLE,
            volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup)

    def test_create_backup_with_error(self):
        """A driver failure marks the backup ERROR and frees the volume."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        mock_run_backup = self.mock_object(self.backup_mgr, '_run_backup')
        mock_run_backup.side_effect = FakeBackupException(str(uuid.uuid4()))
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual('available', vol['status'])
        self.assertEqual('error_backing-up', vol['previous_status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup['status'])
        self.assertTrue(mock_run_backup.called)

    @mock.patch('cinder.backup.manager.BackupManager._run_backup',
                side_effect=FakeBackupException(str(uuid.uuid4())))
    def test_create_backup_with_snapshot_error(self, mock_run_backup):
        """Failure of a snapshot-based backup restores the snapshot state."""
        vol_id = self._create_volume_db_entry(size=1)
        snapshot = self._create_snapshot_db_entry(status='backing-up',
                                                  volume_id=vol_id)
        backup = self._create_backup_db_entry(volume_id=vol_id,
                                              snapshot_id=snapshot.id)
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.create_backup,
                          self.ctxt,
                          backup)

        snapshot.refresh()
        self.assertEqual('available', snapshot.status)

        backup.refresh()
        self.assertEqual(fields.BackupStatus.ERROR, backup.status)
        self.assertTrue(mock_run_backup.called)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=False)
    def test_create_backup(self, mock_isdir, mock_open, mock_temporary_chown,
                           mock_get_backup_device, mock_get_conn):
        """Happy-path create_backup: attach, back up, detach, AVAILABLE."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        backup_device_dict = {'backup_device': vol, 'secure_enabled': False,
                              'is_snapshot': False, }
        mock_get_backup_device.return_value = (
            objects.BackupDeviceInfo.from_primitive(backup_device_dict,
                                                    self.ctxt,
                                                    ['admin_metadata',
                                                     'metadata']))
        attach_info = {'device': {'path': '/dev/null'}}
        mock_detach_device = self.mock_object(self.backup_mgr,
                                              '_detach_device')
        mock_attach_device = self.mock_object(self.backup_mgr,
                                              '_attach_device')
        mock_attach_device.return_value = attach_info
        properties = {}
        mock_get_conn.return_value = properties
        mock_open.return_value = open('/dev/null', 'rb')

        self.backup_mgr.create_backup(self.ctxt, backup)

        mock_temporary_chown.assert_called_once_with('/dev/null')
        mock_attach_device.assert_called_once_with(self.ctxt, vol,
                                                   properties, False)
        mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol)
        mock_get_conn.assert_called_once_with()
        mock_detach_device.assert_called_once_with(self.ctxt, attach_info,
                                                   vol, properties, False,
                                                   force=True,
                                                   ignore_errors=True)

        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        self.assertEqual('available', vol['status'])
        self.assertEqual('backing-up', vol['previous_status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
        self.assertEqual(vol_size, backup['size'])
        self.assertIsNone(backup.encryption_key_id)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=True)
    def test_create_backup_set_parent_id_to_none(self, mock_isdir, mock_open,
                                                 mock_chown,
                                                 mock_backup_device,
                                                 mock_brick):
        """Driver returning parent_id=None clears the backup's parent_id."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup = self._create_backup_db_entry(volume_id=vol_id,
                                              parent_id='mock')

        with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \
                mock_get_backup_driver:
            mock_get_backup_driver.return_value.backup.return_value = (
                {'parent_id': None})
            with mock.patch.object(self.backup_mgr, '_detach_device'):
                device_path = '/fake/disk/path/'
                attach_info = {'device': {'path': device_path}}
                mock_attach_device = self.mock_object(self.backup_mgr,
                                                      '_attach_device')
                mock_attach_device.return_value = attach_info
                properties = {}
                mock_brick.return_value = properties
                mock_open.return_value = open('/dev/null', 'rb')
                # NOTE(review): duplicate assignment below is redundant but
                # harmless; kept for byte-fidelity with the original.
                mock_brick.return_value = properties

                self.backup_mgr.create_backup(self.ctxt, backup)

                backup = db.backup_get(self.ctxt, backup.id)
                self.assertEqual(fields.BackupStatus.AVAILABLE,
                                 backup.status)
                self.assertEqual(vol_size, backup.size)
                self.assertIsNone(backup.parent_id)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=True)
    def test_create_backup_set_parent_id(self, mock_isdir, mock_open,
                                         mock_chown, mock_backup_device,
                                         mock_brick):
        """Driver-reported parent_id is persisted on the backup record."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup = self._create_backup_db_entry(volume_id=vol_id)
        parent_backup = self._create_backup_db_entry(size=vol_size)

        with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \
                mock_get_backup_driver:
            mock_get_backup_driver.return_value.backup.return_value = (
                {'parent_id': parent_backup.id})
            with mock.patch.object(self.backup_mgr, '_detach_device'):
                device_path = '/fake/disk/path/'
                attach_info = {'device': {'path': device_path}}
                mock_attach_device = self.mock_object(self.backup_mgr,
                                                      '_attach_device')
                mock_attach_device.return_value = attach_info
                properties = {}
                mock_brick.return_value = properties
                mock_open.return_value = open('/dev/null', 'rb')
                # NOTE(review): duplicate assignment kept from original.
                mock_brick.return_value = properties

                self.backup_mgr.create_backup(self.ctxt, backup)

                backup = db.backup_get(self.ctxt, backup.id)
                self.assertEqual(fields.BackupStatus.AVAILABLE,
                                 backup.status)
                self.assertEqual(vol_size, backup.size)
                self.assertEqual(parent_backup.id, backup.parent_id)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=True)
    def test_create_backup_fail_with_excep(self, mock_isdir, mock_open,
                                           mock_chown, mock_backup_device,
                                           mock_brick):
        """Driver backup() raising leaves ERROR backup and freed volume."""
        vol_id = self._create_volume_db_entry()
        backup = self._create_backup_db_entry(volume_id=vol_id)
        with mock.patch.object(self.backup_mgr, 'get_backup_driver') as \
                mock_get_backup_driver:
            mock_get_backup_driver.return_value.backup.side_effect = (
                FakeBackupException('fake'))
            with mock.patch.object(self.backup_mgr, '_detach_device'):
                device_path = '/fake/disk/path/'
                attach_info = {'device': {'path': device_path}}
                mock_attach_device = self.mock_object(self.backup_mgr,
                                                      '_attach_device')
                mock_attach_device.return_value = attach_info
                properties = {}
                mock_brick.return_value = properties
                mock_open.return_value = open('/dev/null', 'rb')
                # NOTE(review): duplicate assignment kept from original.
                mock_brick.return_value = properties

                self.assertRaises(FakeBackupException,
                                  self.backup_mgr.create_backup,
                                  self.ctxt, backup)

                vol = db.volume_get(self.ctxt, vol_id)
                self.assertEqual('available', vol.status)
                self.assertEqual('error_backing-up', vol.previous_status)
                backup = db.backup_get(self.ctxt, backup.id)
                self.assertEqual(fields.BackupStatus.ERROR, backup.status)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=True)
    def test_run_backup_with_dir_device_path(self, mock_isdir,
                                             mock_open,
                                             mock_chown,
                                             mock_backup_device,
                                             mock_brick):
        """Directory-style device paths are passed straight to the driver.

        When the attached device path is a directory, no chown/open is done
        and the driver's backup() receives the path itself.
        """
        # NOTE(review): a lambda is used as a bare attribute carrier here.
        backup_service = lambda: None
        backup_service.backup = mock.Mock(
            return_value=mock.sentinel.backup_update)
        self.backup_mgr.get_backup_driver = lambda x: backup_service

        vol_id = self._create_volume_db_entry()
        backup = self._create_backup_db_entry(volume_id=vol_id)
        volume = objects.Volume.get_by_id(self.ctxt, vol_id)

        # device_path is represented by a directory
        device_path = '/fake/disk/path/'
        attach_info = {'device': {'path': device_path}}
        self.backup_mgr._attach_device = mock.Mock(
            return_value=attach_info)
        self.backup_mgr._detach_device = mock.Mock()
        output = self.backup_mgr._run_backup(self.ctxt, backup, volume)

        mock_chown.assert_not_called()
        mock_open.assert_not_called()
        backup_service.backup.assert_called_once_with(
            backup, device_path)
        self.assertEqual(mock.sentinel.backup_update, output)

    @mock.patch('cinder.backup.manager.BackupManager._run_backup')
    @ddt.data((fields.SnapshotStatus.BACKING_UP, 'available'),
              (fields.SnapshotStatus.BACKING_UP, 'in-use'),
              (fields.SnapshotStatus.AVAILABLE, 'available'),
              (fields.SnapshotStatus.AVAILABLE, 'in-use'))
    @ddt.unpack
    def test_create_backup_with_snapshot(self, snapshot_status, volume_status,
                                         mock_run_backup):
        """Snapshot-based backup only proceeds from BACKING_UP snapshots."""
        vol_id = self._create_volume_db_entry(status=volume_status)
        snapshot = self._create_snapshot_db_entry(volume_id=vol_id,
                                                  status=snapshot_status)
        backup = self._create_backup_db_entry(volume_id=vol_id,
                                              snapshot_id=snapshot.id)
        if snapshot_status == fields.SnapshotStatus.BACKING_UP:
            self.backup_mgr.create_backup(self.ctxt, backup)

            vol = objects.Volume.get_by_id(self.ctxt, vol_id)
            snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id)

            # The volume's status is untouched; the snapshot is released.
            self.assertEqual(volume_status, vol.status)
            self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot.status)
        else:
            self.assertRaises(exception.InvalidSnapshot,
                              self.backup_mgr.create_backup, self.ctxt,
                              backup)

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=False)
    def test_create_backup_with_temp_snapshot(self, mock_isdir, mock_open,
                                              mock_temporary_chown,
                                              mock_get_backup_device,
                                              mock_get_conn):
        """Backing up an in-use volume goes through a temp snapshot."""
        self.override_config('backup_use_same_host', True)
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size,
                                              previous_status='in-use')
        backup = self._create_backup_db_entry(volume_id=vol_id)
        snap = self._create_snapshot_db_entry(volume_id=vol_id)

        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        mock_get_backup_device.return_value = (
            objects.BackupDeviceInfo.from_primitive({
                'backup_device': snap, 'secure_enabled': False,
                'is_snapshot': True, },
                self.ctxt, expected_attrs=['metadata']))

        attach_info = {
            'device': {'path': '/dev/null'},
            'conn': {'data': {}},
            'connector': fake_connectors.FakeConnector(None)}
        mock_terminate_connection_snapshot = self.mock_object(
            volume_rpcapi.VolumeAPI,
            'terminate_connection_snapshot')
        mock_initialize_connection_snapshot = self.mock_object(
            volume_rpcapi.VolumeAPI,
            'initialize_connection_snapshot')
        mock_connect_device = self.mock_object(
            manager.BackupManager,
            '_connect_device')
        mock_connect_device.return_value = attach_info
        properties = {}
        mock_get_conn.return_value = properties
        mock_open.return_value = open('/dev/null', 'rb')

        self.backup_mgr.create_backup(self.ctxt, backup)
        mock_temporary_chown.assert_called_once_with('/dev/null')
        mock_initialize_connection_snapshot.assert_called_once_with(
            self.ctxt, snap, properties)
        mock_get_backup_device.assert_called_once_with(self.ctxt, backup, vol)
        mock_get_conn.assert_called_once_with()
        mock_terminate_connection_snapshot.assert_called_once_with(
            self.ctxt, snap, properties, force=True)
        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        self.assertEqual('in-use', vol['status'])
        self.assertEqual('backing-up', vol['previous_status'])
        backup = objects.Backup.get_by_id(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup.status)
        self.assertEqual(vol_size, backup.size)

    @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_snapshot')
    def test_create_temp_snapshot(self, mock_create_snapshot):
        """_create_temp_snapshot yields an available snapshot w/ provider_id."""
        volume_manager = importutils.import_object(CONF.volume_manager)
        volume_manager.driver.set_initialized()
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size,
                                              previous_status='in-use')
        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        mock_create_snapshot.return_value = {'provider_id':
                                             'fake_provider_id'}

        temp_snap = volume_manager.driver._create_temp_snapshot(
            self.ctxt, vol)

        self.assertEqual('available', temp_snap['status'])
        self.assertEqual('fake_provider_id', temp_snap['provider_id'])

    @mock.patch.object(fake_driver.FakeLoggingVolumeDriver,
                       'create_cloned_volume')
    def test_create_temp_cloned_volume(self, mock_create_cloned_volume):
        """_create_temp_cloned_volume yields an available temp volume."""
        volume_manager = importutils.import_object(CONF.volume_manager)
        volume_manager.driver.set_initialized()
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size,
                                              previous_status='in-use')
        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        mock_create_cloned_volume.return_value = {'provider_id':
                                                  'fake_provider_id'}

        temp_vol = volume_manager.driver._create_temp_cloned_volume(
            self.ctxt, vol)

        self.assertEqual('available', temp_vol['status'])
        self.assertEqual('fake_provider_id', temp_vol['provider_id'])

    @mock.patch.object(fake_driver.FakeLoggingVolumeDriver,
                       'create_volume_from_snapshot')
    def test_create_temp_volume_from_snapshot(self, mock_create_vol_from_snap):
        """_create_temp_volume_from_snapshot yields an available volume."""
        volume_manager = importutils.import_object(CONF.volume_manager)
        volume_manager.driver.set_initialized()
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size,
                                              previous_status='in-use')
        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        snap = self._create_snapshot_db_entry(volume_id=vol_id)
        mock_create_vol_from_snap.return_value = {'provider_id':
                                                  'fake_provider_id'}

        temp_vol = volume_manager.driver._create_temp_volume_from_snapshot(
            self.ctxt, vol, snap)

        self.assertEqual('available', temp_vol['status'])
        self.assertEqual('fake_provider_id', temp_vol['provider_id'])

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    def test_create_backup_with_notify(self, notify):
        """create_backup emits start and end usage notifications."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(size=vol_size)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        self.mock_object(self.backup_mgr, '_run_backup')
        self.backup_mgr.create_backup(self.ctxt, backup)
        self.assertEqual(2, notify.call_count)

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.volume.utils.clone_encryption_key')
    @mock.patch('cinder.utils.brick_get_connector_properties')
    def test_create_backup_encrypted_volume(self,
                                            mock_connector_properties,
                                            mock_clone_encryption_key,
                                            mock_get_backup_device):
        """Backing up an encrypted volume clones its encryption key."""
        vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1)
        backup = self._create_backup_db_entry(volume_id=vol_id)

        self.mock_object(self.backup_mgr, '_detach_device')
        mock_attach_device = self.mock_object(self.backup_mgr,
                                              '_attach_device')
        mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
        mock_clone_encryption_key.return_value = fake.UUID2

        self.backup_mgr.create_backup(self.ctxt, backup)
        mock_clone_encryption_key.assert_called_once_with(self.ctxt,
                                                          mock.ANY,
                                                          fake.UUID1)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fake.UUID2, backup.encryption_key_id)

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.get_backup_device')
    @mock.patch('cinder.volume.utils.clone_encryption_key')
    @mock.patch('cinder.utils.brick_get_connector_properties')
    def test_create_backup_encrypted_volume_again(self,
                                                  mock_connector_properties,
                                                  mock_clone_encryption_key,
                                                  mock_get_backup_device):
        """A backup that already has a key ID does not clone another one."""
        vol_id = self._create_volume_db_entry(encryption_key_id=fake.UUID1)
        backup = self._create_backup_db_entry(volume_id=vol_id,
                                              encryption_key_id=fake.UUID2)

        self.mock_object(self.backup_mgr, '_detach_device')
        mock_attach_device = self.mock_object(self.backup_mgr,
                                              '_attach_device')
        mock_attach_device.return_value = {'device': {'path': '/dev/null'}}

        self.backup_mgr.create_backup(self.ctxt, backup)
        mock_clone_encryption_key.assert_not_called()

    def test_restore_backup_with_bad_volume_status(self):
        """restore_backup rejects volumes not in 'restoring-backup'."""
        vol_id = self._create_volume_db_entry(status='available', size=1)
        backup = self._create_backup_db_entry(volume_id=vol_id)
        self.assertRaises(exception.InvalidVolume,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup,
                          vol_id)
        backup = db.backup_get(self.ctxt, backup.id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual('error_restoring', vol['status'])
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])

    def test_restore_backup_with_bad_backup_status(self):
        """restore_backup rejects backups not in RESTORING status."""
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.AVAILABLE,
            volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup,
                          vol_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual('error', vol['status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup['status'])

    def test_restore_backup_with_driver_error(self):
        """A driver restore error leaves the backup reusable (AVAILABLE)."""
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING,
            volume_id=vol_id)

        mock_run_restore = self.mock_object(
            self.backup_mgr,
            '_run_restore')
        mock_run_restore.side_effect = FakeBackupException('fake')
        self.assertRaises(FakeBackupException,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup,
                          vol_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual('error_restoring', vol['status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])
        self.assertTrue(mock_run_restore.called)

    def test_restore_backup_with_bad_service(self):
        """A backup recorded under an unknown service cannot be restored."""
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=1)
        service = 'cinder.tests.backup.bad_service'
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING,
            volume_id=vol_id,
            service=service)

        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.restore_backup,
                          self.ctxt,
                          backup,
                          vol_id)
        vol = db.volume_get(self.ctxt, vol_id)
        self.assertEqual('error', vol['status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])

    @mock.patch('cinder.utils.brick_get_connector_properties')
    @mock.patch('cinder.utils.temporary_chown')
    @mock.patch('six.moves.builtins.open')
    @mock.patch.object(os.path, 'isdir', return_value=False)
    def test_restore_backup(self, mock_isdir, mock_open,
                            mock_temporary_chown, mock_get_conn):
        """Happy-path restore: attach, restore, detach, both AVAILABLE."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=vol_size)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING,
            volume_id=vol_id)

        properties = {}
        mock_get_conn.return_value = properties
        mock_open.return_value = open('/dev/null', 'wb')
        mock_secure_enabled = (
            self.volume_mocks['secure_file_operations_enabled'])
        mock_secure_enabled.return_value = False
        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        attach_info = {'device': {'path': '/dev/null'}}
        mock_detach_device = self.mock_object(self.backup_mgr,
                                              '_detach_device')
        mock_attach_device = self.mock_object(self.backup_mgr,
                                              '_attach_device')
        mock_attach_device.return_value = attach_info

        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)

        mock_temporary_chown.assert_called_once_with('/dev/null')
        mock_get_conn.assert_called_once_with()
        mock_secure_enabled.assert_called_once_with(self.ctxt, vol)
        mock_attach_device.assert_called_once_with(self.ctxt, vol,
                                                   properties)
        mock_detach_device.assert_called_once_with(self.ctxt, attach_info,
                                                   vol, properties,
                                                   force=True)

        vol = objects.Volume.get_by_id(self.ctxt, vol_id)
        self.assertEqual('available', vol['status'])
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status'])

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    def test_restore_backup_with_notify(self, notify):
        """restore_backup emits start and end usage notifications."""
        vol_size = 1
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              size=vol_size)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.RESTORING,
            volume_id=vol_id)
        self.backup_mgr._run_restore = mock.Mock()

        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)
        self.assertEqual(2, notify.call_count)

    @mock.patch('cinder.volume.utils.clone_encryption_key')
    @mock.patch('cinder.volume.utils.delete_encryption_key')
    @mock.patch(
        'cinder.tests.unit.backup.fake_service.FakeBackupService.restore')
    @mock.patch('cinder.utils.brick_get_connector_properties')
    def test_restore_backup_encrypted_volume(self,
                                             mock_connector_properties,
                                             mock_backup_driver_restore,
                                             mock_delete_encryption_key,
                                             mock_clone_encryption_key):
        """Restore onto a volume keeping its key ID touches no keys."""
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              encryption_key_id=fake.UUID1)
        backup = self._create_backup_db_entry(
            volume_id=vol_id,
            status=fields.BackupStatus.RESTORING,
            encryption_key_id=fake.UUID2)

        self.mock_object(self.backup_mgr, '_detach_device')
        mock_attach_device = self.mock_object(self.backup_mgr,
                                              '_attach_device')
        mock_attach_device.return_value = {'device': {'path': '/dev/null'}}

        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)

        volume = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(fake.UUID1, volume.encryption_key_id)
        mock_clone_encryption_key.assert_not_called()
        mock_delete_encryption_key.assert_not_called()

    @mock.patch('cinder.volume.utils.clone_encryption_key')
    @mock.patch('cinder.volume.utils.delete_encryption_key')
    @mock.patch(
        'cinder.tests.unit.backup.fake_service.FakeBackupService.restore')
    @mock.patch('cinder.utils.brick_get_connector_properties')
    def test_restore_backup_new_encrypted_volume(self,
                                                 mock_connector_properties,
                                                 mock_backup_driver_restore,
                                                 mock_delete_encryption_key,
                                                 mock_clone_encryption_key):
        """Restore onto a fresh encrypted volume swaps in a cloned key."""
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              encryption_key_id=fake.UUID1)
        backup = self._create_backup_db_entry(
            volume_id=vol_id,
            status=fields.BackupStatus.RESTORING,
            encryption_key_id=fake.UUID2)

        self.mock_object(self.backup_mgr, '_detach_device')
        mock_attach_device = self.mock_object(self.backup_mgr,
                                              '_attach_device')
        mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
        mock_clone_encryption_key.return_value = fake.UUID3

        # Mimic the driver's restore writing the backup's stored key ID
        # into the volume record.
        def restore_side_effect(backup, volume_id, volume_file):
            db.volume_update(self.ctxt,
                             volume_id,
                             {'encryption_key_id': fake.UUID4})
        mock_backup_driver_restore.side_effect = restore_side_effect

        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)

        # Volume's original encryption key ID should be deleted
        mock_delete_encryption_key.assert_called_once_with(self.ctxt,
                                                           mock.ANY,
                                                           fake.UUID1)
        mock_clone_encryption_key.assert_called_once_with(self.ctxt,
                                                          mock.ANY,
                                                          fake.UUID2)

        # Volume should have the cloned backup key ID
        volume = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(fake.UUID3, volume.encryption_key_id)

        # Backup's key ID should not have changed
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fake.UUID2, backup.encryption_key_id)

    @mock.patch('cinder.volume.utils.clone_encryption_key')
    @mock.patch('cinder.volume.utils.delete_encryption_key')
    @mock.patch(
        'cinder.tests.unit.backup.fake_service.FakeBackupService.restore')
    @mock.patch('cinder.utils.brick_get_connector_properties')
    def test_restore_backup_glean_key_id(self,
                                         mock_connector_properties,
                                         mock_backup_driver_restore,
                                         mock_delete_encryption_key,
                                         mock_clone_encryption_key):
        """When the backup lacks a key ID, it is gleaned from the restore."""
        vol_id = self._create_volume_db_entry(status='restoring-backup',
                                              encryption_key_id=fake.UUID1)
        # NOTE: deliberately no encryption_key_id on this backup record.
        backup = self._create_backup_db_entry(
            volume_id=vol_id,
            status=fields.BackupStatus.RESTORING)

        self.mock_object(self.backup_mgr, '_detach_device')
        mock_attach_device = self.mock_object(self.backup_mgr,
                                              '_attach_device')
        mock_attach_device.return_value = {'device': {'path': '/dev/null'}}
        mock_clone_encryption_key.return_value = fake.UUID3

        # Mimic the driver's restore writing the backed-up key ID from
        # its metadata into the volume record.
        def restore_side_effect(backup, volume_id, volume_file):
            db.volume_update(self.ctxt,
                             volume_id,
                             {'encryption_key_id': fake.UUID4})
        mock_backup_driver_restore.side_effect = restore_side_effect

        self.backup_mgr.restore_backup(self.ctxt, backup, vol_id)

        # Volume's original encryption key ID should be deleted
        mock_delete_encryption_key.assert_called_once_with(self.ctxt,
                                                           mock.ANY,
                                                           fake.UUID1)
        # The key cloned is the one gleaned from
        # the value restored from the metadata.
        mock_clone_encryption_key.assert_called_once_with(self.ctxt,
                                                          mock.ANY,
                                                          fake.UUID4)

        # Volume should have the cloned backup key ID
        volume = db.volume_get(self.ctxt, vol_id)
        self.assertEqual(fake.UUID3, volume.encryption_key_id)

        # Backup's key ID should have been gleaned from value restored
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fake.UUID4, backup.encryption_key_id)

    def test_delete_backup_with_bad_backup_status(self):
        """delete_backup rejects backups not in DELETING status."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.AVAILABLE,
            volume_id=vol_id)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.delete_backup,
                          self.ctxt,
                          backup)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup['status'])

    def test_delete_backup_with_error(self):
        """A driver delete failure marks the backup ERROR."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING,
            display_name='fail_on_delete',
            volume_id=vol_id)
        self.assertRaises(IOError,
                          self.backup_mgr.delete_backup,
                          self.ctxt,
                          backup)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup['status'])

    def test_delete_backup_with_bad_service(self):
        """A backup recorded under an unknown service cannot be deleted."""
        vol_id = self._create_volume_db_entry(size=1)
        service = 'cinder.tests.backup.bad_service'
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING,
            volume_id=vol_id,
            service=service)
        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.delete_backup,
                          self.ctxt,
                          backup)
        backup = db.backup_get(self.ctxt, backup.id)
        self.assertEqual(fields.BackupStatus.ERROR, backup['status'])

    def test_delete_backup_with_no_service(self):
        """A backup with no recorded service is deleted without a driver."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING,
            volume_id=vol_id)
        backup.service = None
        backup.save()
        self.backup_mgr.delete_backup(self.ctxt, backup)

    @ddt.data('cinder.tests.unit.backup.fake_service.FakeBackupService',
              'cinder.tests.unit.backup.fake_service')
    def test_delete_backup(self, service):
        """delete_backup soft-deletes the record (visible w/ read_deleted)."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING,
            volume_id=vol_id,
            service=service)
        self.backup_mgr.delete_backup(self.ctxt, backup)
        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup.id)

        ctxt_read_deleted = context.get_admin_context('yes')
        backup = db.backup_get(ctxt_read_deleted, backup.id)
        self.assertTrue(backup.deleted)
        self.assertGreaterEqual(timeutils.utcnow(), backup.deleted_at)
        self.assertEqual(fields.BackupStatus.DELETED, backup.status)

    @mock.patch('cinder.volume.utils.delete_encryption_key')
    def test_delete_backup_of_encrypted_volume(self,
                                               mock_delete_encryption_key):
        """Deleting an encrypted backup also deletes its cloned key."""
        vol_id = self._create_volume_db_entry(
            encryption_key_id=fake.UUID1)
        backup = self._create_backup_db_entry(
            volume_id=vol_id,
            status=fields.BackupStatus.DELETING,
            encryption_key_id=fake.UUID2)
        self.backup_mgr.delete_backup(self.ctxt, backup)
        mock_delete_encryption_key.assert_called_once_with(self.ctxt,
                                                           mock.ANY,
                                                           fake.UUID2)
        ctxt_read_deleted = context.get_admin_context('yes')
        backup = db.backup_get(ctxt_read_deleted, backup.id)
        self.assertTrue(backup.deleted)
        self.assertIsNone(backup.encryption_key_id)

    @mock.patch('cinder.volume.utils.notify_about_backup_usage')
    def test_delete_backup_with_notify(self, notify):
        """delete_backup emits start and end usage notifications."""
        vol_id = self._create_volume_db_entry(size=1)
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.DELETING,
            volume_id=vol_id)
        self.backup_mgr.delete_backup(self.ctxt, backup)
        self.assertEqual(2, notify.call_count)

    def test_list_backup(self):
        """backup_get_all_by_project only returns that project's backups."""
        project_id = str(uuid.uuid4())
        backups = db.backup_get_all_by_project(self.ctxt, project_id)
        self.assertEqual(0, len(backups))

        self._create_backup_db_entry()
        b2 = self._create_backup_db_entry(project_id=project_id)
        backups = db.backup_get_all_by_project(self.ctxt, project_id)
        self.assertEqual(1, len(backups))
        self.assertEqual(b2.id, backups[0].id)

    def test_backup_get_all_by_project_with_deleted(self):
        """Deleted backups only appear under a read_deleted context."""
        project_id = str(uuid.uuid4())
        backups = db.backup_get_all_by_project(self.ctxt, project_id)
        self.assertEqual(0, len(backups))

        backup_keep = self._create_backup_db_entry(project_id=project_id)
        backup = self._create_backup_db_entry(project_id=project_id)
        db.backup_destroy(self.ctxt, backup.id)
        backups = db.backup_get_all_by_project(self.ctxt, project_id)
        self.assertEqual(1, len(backups))
        self.assertEqual(backup_keep.id, backups[0].id)

        ctxt_read_deleted = context.get_admin_context('yes')
        backups = db.backup_get_all_by_project(ctxt_read_deleted, project_id)
        self.assertEqual(2, len(backups))

    def test_backup_get_all_by_host_with_deleted(self):
        """Host listing likewise hides deleted rows unless read_deleted."""
        backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
        self.assertEqual(0, len(backups))

        backup_keep = self._create_backup_db_entry()
        backup = self._create_backup_db_entry()
        db.backup_destroy(self.ctxt, backup.id)
        backups = db.backup_get_all_by_host(self.ctxt, 'testhost')
        self.assertEqual(1, len(backups))
        self.assertEqual(backup_keep.id, backups[0].id)

        ctxt_read_deleted = context.get_admin_context('yes')
        backups = db.backup_get_all_by_host(ctxt_read_deleted, 'testhost')
        self.assertEqual(2, len(backups))

    def test_backup_manager_driver_name(self):
        """Legacy 'backup.services.*' config maps to 'backup.drivers.*'."""
        self.override_config('backup_driver',
                             "cinder.backup.services.swift")
        backup_mgr = \
            importutils.import_object(CONF.backup_manager)
        self.assertEqual('cinder.backup.drivers.swift',
                         backup_mgr.driver_name)

    def test_export_record_with_bad_service(self):
        """export_record rejects backups recorded under an unknown service."""
        vol_id = self._create_volume_db_entry(size=1)
        service = 'cinder.tests.backup.bad_service'
        backup = self._create_backup_db_entry(
            status=fields.BackupStatus.AVAILABLE,
            volume_id=vol_id,
            service=service)

        self.assertRaises(exception.InvalidBackup,
                          self.backup_mgr.export_record,
                          self.ctxt,
                          backup)

    def test_export_record_with_bad_backup_status(self):
        """export_record rejects backups not in AVAILABLE status."""
        vol_id = self._create_volume_db_entry(status='available',
                                              size=1)
        backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR,
volume_id=vol_id) self.assertRaises(exception.InvalidBackup, self.backup_mgr.export_record, self.ctxt, backup) @ddt.data('cinder.tests.unit.backup.fake_service.FakeBackupService', 'cinder.tests.unit.backup.fake_service') def test_export_record(self, service): vol_size = 1 vol_id = self._create_volume_db_entry(status='available', size=vol_size) backup = self._create_backup_db_entry( status=fields.BackupStatus.AVAILABLE, volume_id=vol_id, service=service) export = self.backup_mgr.export_record(self.ctxt, backup) self.assertEqual(service, export['backup_service']) self.assertIn('backup_url', export) def test_import_record_with_verify_not_implemented(self): vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry(vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) def test_import_record_with_wrong_id(self): vol_size = 1 export = self._create_exported_record_entry(vol_size=vol_size) imported_record = self._create_export_record_db_entry() backup_hosts = [] self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) def test_import_record_with_bad_service(self): export = self._create_exported_record_entry() export['backup_service'] = 'cinder.tests.unit.backup.bad_service' imported_record = self._create_export_record_db_entry() # Test the case where the additional hosts list is empty backup_hosts = [] self.assertRaises(exception.ServiceNotFound, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) # Test that the import 
backup keeps calling other hosts to find a # suitable host for the backup service backup_hosts = ['fake1', 'fake2'] backup_hosts_expect = list(backup_hosts) BackupAPI_import = 'cinder.backup.rpcapi.BackupAPI.import_record' with mock.patch(BackupAPI_import) as _mock_backup_import: self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) next_host = backup_hosts_expect.pop() _mock_backup_import.assert_called_once_with( self.ctxt, next_host, imported_record, export['backup_service'], export['backup_url'], backup_hosts_expect) def test_import_record_with_invalid_backup(self): export = self._create_exported_record_entry() backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_record_import_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'import_record')) imported_record = self._create_export_record_db_entry() backup_hosts = [] with mock.patch(_mock_record_import_class) as _mock_record_import: _mock_record_import.side_effect = FakeBackupException('fake') self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) self.assertTrue(_mock_record_import.called) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) def test_not_supported_driver_to_force_delete(self): self.override_config('backup_driver', 'cinder.backup.drivers.ceph') self.backup_mgr = importutils.import_object(CONF.backup_manager) result = self.backup_mgr.check_support_to_force_delete(self.ctxt) self.assertFalse(result) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' '_init_backup_repo_path', return_value=None) @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.' 
'check_for_setup_error', return_value=None) def test_check_support_to_force_delete(self, mock_check_configuration, mock_init_backup_repo_path): self.override_config('backup_driver', 'cinder.backup.drivers.nfs') self.backup_mgr = importutils.import_object(CONF.backup_manager) result = self.backup_mgr.check_support_to_force_delete(self.ctxt) self.assertTrue(result) def test_backup_has_dependent_backups(self): vol_size = 1 vol_id = self._create_volume_db_entry(size=vol_size) backup = self._create_backup_db_entry(volume_id=vol_id) self.assertFalse(backup.has_dependent_backups) class BackupTestCaseWithVerify(BaseBackupTest): def setUp(self): self.override_config( "backup_driver", "cinder.tests.unit.backup.fake_service_with_verify") super(BackupTestCaseWithVerify, self).setUp() def test_import_record_with_verify(self): vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry( vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( backup_id=backup_id) backup_hosts = [] backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) def mock_verify(backup_id): backup = db.backup_get(self.ctxt, backup_id) self.assertEqual(fields.BackupStatus.CREATING, backup['status']) with mock.patch(_mock_backup_verify_class) as mock_backup_verify: mock_backup_verify.side_effect = mock_verify self.backup_mgr.import_record(self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) self.assertEqual(vol_size, backup['size']) def test_import_record_with_verify_invalid_backup(self): vol_size = 1 backup_id = uuid.uuid4() export = self._create_exported_record_entry( vol_size=vol_size, exported_id=backup_id) imported_record = self._create_export_record_db_entry( 
backup_id=backup_id) backup_hosts = [] backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) with mock.patch(_mock_backup_verify_class) as _mock_record_verify: _mock_record_verify.side_effect = \ exception.InvalidBackup(reason='fake') self.assertRaises(exception.InvalidBackup, self.backup_mgr.import_record, self.ctxt, imported_record, export['backup_service'], export['backup_url'], backup_hosts) self.assertTrue(_mock_record_verify.called) backup = db.backup_get(self.ctxt, imported_record.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_from_nonrestoring_to_available( self, mock_clean_temp): vol_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=vol_id) with mock.patch.object(manager.BackupManager, '_map_service_to_driver') as \ mock_map_service_to_driver: # It should works when the service name is a string backup_driver = 'cinder.tests.unit.backup.fake_service_with_verify' mock_map_service_to_driver.return_value = backup_driver self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) mock_clean_temp.assert_called_once_with(self.ctxt, backup) new_backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, new_backup['status']) mock_map_service_to_driver.return_value = backup_driver self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.ERROR) mock_clean_temp.reset_mock() self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) def 
test_backup_reset_status_to_available_invalid_backup(self): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry(status=fields.BackupStatus.ERROR, volume_id=volume['id']) backup_driver = self.backup_mgr.get_backup_driver(self.ctxt) _mock_backup_verify_class = ('%s.%s.%s' % (backup_driver.__module__, backup_driver.__class__.__name__, 'verify')) with mock.patch(_mock_backup_verify_class) as \ _mock_record_verify: _mock_record_verify.side_effect = \ exception.BackupVerifyUnsupportedDriver(reason='fake') self.assertRaises(exception.BackupVerifyUnsupportedDriver, self.backup_mgr.reset_status, self.ctxt, backup, fields.BackupStatus.AVAILABLE) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_from_restoring_to_available( self, mock_clean_temp): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry( status=fields.BackupStatus.RESTORING, volume_id=volume['id']) self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.AVAILABLE) mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = db.backup_get(self.ctxt, backup.id) self.assertEqual(fields.BackupStatus.AVAILABLE, backup['status']) @mock.patch.object(manager.BackupManager, '_cleanup_temp_volumes_snapshots_for_one_backup') def test_backup_reset_status_to_error(self, mock_clean_temp): volume = db.volume_create(self.ctxt, {'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1}) backup = self._create_backup_db_entry( status=fields.BackupStatus.CREATING, volume_id=volume['id']) self.backup_mgr.reset_status(self.ctxt, backup, fields.BackupStatus.ERROR) mock_clean_temp.assert_called_once_with(self.ctxt, backup) backup = 
db.backup_get(self.ctxt, backup['id']) self.assertEqual(fields.BackupStatus.ERROR, backup['status']) @ddt.ddt class BackupAPITestCase(BaseBackupTest): def setUp(self): super(BackupAPITestCase, self).setUp() self.api = api.API() def test_get_all_wrong_all_tenants_value(self): self.assertRaises(exception.InvalidParameterValue, self.api.get_all, self.ctxt, {'all_tenants': 'bad'}) @mock.patch.object(objects, 'BackupList') def test_get_all_no_all_tenants_value(self, mock_backuplist): result = self.api.get_all(self.ctxt, {'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') @ddt.data(False, 'false', '0', 0, 'no') def test_get_all_false_value_all_tenants( self, false_value, mock_backuplist): result = self.api.get_all(self.ctxt, {'all_tenants': false_value, 'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( self.ctxt, self.ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') @ddt.data(True, 'true', '1', 1, 'yes') def test_get_all_true_value_all_tenants( self, true_value, mock_backuplist): result = self.api.get_all(self.ctxt, {'all_tenants': true_value, 'key': 'value'}) self.assertFalse(mock_backuplist.get_all_by_project.called) self.assertEqual(mock_backuplist.get_all.return_value, result) mock_backuplist.get_all.assert_called_once_with( self.ctxt, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(objects, 'BackupList') def test_get_all_true_value_all_tenants_non_admin(self, mock_backuplist): ctxt = context.RequestContext(uuid.uuid4(), uuid.uuid4()) result = self.api.get_all(ctxt, 
{'all_tenants': '1', 'key': 'value'}) self.assertFalse(mock_backuplist.get_all.called) self.assertEqual(mock_backuplist.get_all_by_project.return_value, result) mock_backuplist.get_all_by_project.assert_called_once_with( ctxt, ctxt.project_id, {'key': 'value'}, None, None, None, None, None) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(db, 'backup_create', side_effect=db_exc.DBError()) def test_create_when_failed_to_create_backup_object( self, mock_create, mock_get_service): # Create volume in admin context volume_id = utils.create_volume(self.ctxt)['id'] # Will try to backup from a different context new_context = copy.copy(self.ctxt) new_context.user_id = uuid.uuid4() new_context.project_id = uuid.uuid4() # The opposite side of this test case is a "NotImplementedError: # Cannot load 'id' in the base class" being raised. # More detailed, in the try clause, if backup.create() failed # with DB exception, backup.id won't be assigned. However, self.assertRaises(db_exc.DBError, self.api.create, context=new_context, name="test_backup", description="test backup description", volume_id=volume_id, container='volumebackups') @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch.object(objects.Backup, '__init__', side_effect=exception.InvalidInput( reason='Failed to new')) def test_create_when_failed_to_new_backup_object(self, mock_new, mock_get_service): volume_id = utils.create_volume(self.ctxt)['id'] # local variable 'backup' referenced before assignment" is raised. 
self.assertRaises(exception.InvalidInput, self.api.create, context=self.ctxt, name="test_backup", description="test backup description", volume_id=volume_id, container='volumebackups') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @mock.patch('cinder.backup.api.API._is_backup_service_enabled') def test_create_backup_in_same_host(self, mock_is_enable, mock_create): self.override_config('backup_use_same_host', True) mock_is_enable.return_value = True self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='available', host='testhost#lvm', size=1) backup = self.api.create(self.ctxt, None, None, volume_id, None) self.assertEqual('testhost', backup.host) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') def test_create_backup_from_snapshot_with_volume_in_use( self, mock_create, mock_get_service): self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='in-use') snapshot = self._create_snapshot_db_entry(volume_id=volume_id) backup = self.api.create(self.ctxt, None, None, volume_id, None, snapshot_id=snapshot.id) self.assertEqual(fields.BackupStatus.CREATING, backup.status) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual(fields.SnapshotStatus.BACKING_UP, snapshot.status) self.assertEqual('in-use', volume.status) @mock.patch.object(api.API, '_get_available_backup_service_host', return_value='fake_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.create_backup') @ddt.data(True, False) def test_create_backup_resource_status(self, is_snapshot, mock_create, mock_get_service): self.ctxt.user_id = 'fake_user' self.ctxt.project_id = 'fake_project' volume_id = self._create_volume_db_entry(status='available') snapshot = 
self._create_snapshot_db_entry(volume_id=volume_id) if is_snapshot: self.api.create(self.ctxt, None, None, volume_id, None, snapshot_id=snapshot.id) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual('backing-up', snapshot.status) self.assertEqual('available', volume.status) else: self.api.create(self.ctxt, None, None, volume_id, None) volume = objects.Volume.get_by_id(self.ctxt, volume_id) snapshot = objects.Snapshot.get_by_id(self.ctxt, snapshot.id) self.assertEqual('available', snapshot.status) self.assertEqual('backing-up', volume.status) @mock.patch('cinder.backup.api.API._get_available_backup_service_host') @mock.patch('cinder.backup.rpcapi.BackupAPI.restore_backup') def test_restore_volume(self, mock_rpcapi_restore, mock_get_backup_host): volume_id = self._create_volume_db_entry(status='available', size=1) backup = self._create_backup_db_entry(size=1, status='available') mock_get_backup_host.return_value = 'testhost' self.api.restore(self.ctxt, backup.id, volume_id) backup = objects.Backup.get_by_id(self.ctxt, backup.id) self.assertEqual(volume_id, backup.restore_volume_id)
true
true
f7137e715a3e6f9fc07606848dd44c299df4bd4f
11,604
py
Python
CBCRadio.py
pilona/Utils
643db0128c24dd3b546a38f55e9d4f52aa626831
[ "0BSD" ]
2
2019-08-13T09:12:38.000Z
2019-11-20T17:23:31.000Z
CBCRadio.py
pilona/Utils
643db0128c24dd3b546a38f55e9d4f52aa626831
[ "0BSD" ]
null
null
null
CBCRadio.py
pilona/Utils
643db0128c24dd3b546a38f55e9d4f52aa626831
[ "0BSD" ]
1
2019-05-05T04:55:10.000Z
2019-05-05T04:55:10.000Z
#! /usr/bin/env python3 ''' CBC Radio streams player/downloader ''' from datetime import datetime from argparse import ArgumentParser, OPTIONAL from collections import namedtuple import subprocess import readline import requests from lxml import html _STREAM_SNAPSHOT = [ ("Radio One", "BC", "Kamloops", "http://cbc_r1_kam.akacast.akamaistream.net/7/440/451661/v1/rc.akacast.akamaistream.net/cbc_r1_kam"), ("Radio One", "BC", "Kelowna", "http://cbc_r1_kel.akacast.akamaistream.net/7/229/451661/v1/rc.akacast.akamaistream.net/cbc_r1_kel"), ("Radio One", "BC", "Prince George", "http://cbc_r1_prg.akacast.akamaistream.net/7/966/451661/v1/rc.akacast.akamaistream.net/cbc_r1_prg"), ("Radio One", "BC", "Vancouver", "http://cbc_r1_vcr.akacast.akamaistream.net/7/723/451661/v1/rc.akacast.akamaistream.net/cbc_r1_vcr"), ("Radio One", "BC", "Victoria", "http://cbc_r1_vic.akacast.akamaistream.net/7/728/451661/v1/rc.akacast.akamaistream.net/cbc_r1_vic"), ("Radio One", "Yukon", "Whitehorse", "http://cbc_r1_whs.akacast.akamaistream.net/7/319/451661/v1/rc.akacast.akamaistream.net/cbc_r1_whs"), ("Radio One", "Alberta", "Calgary", "http://cbc_r1_cgy.akacast.akamaistream.net/7/298/451661/v1/rc.akacast.akamaistream.net/cbc_r1_cgy"), ("Radio One", "Alberta", "Edmonton", "http://cbc_r1_edm.akacast.akamaistream.net/7/904/451661/v1/rc.akacast.akamaistream.net/cbc_r1_edm"), ("Radio One", "Saskatchewan", "Regina", "http://cbc_r1_reg.akacast.akamaistream.net/7/666/451661/v1/rc.akacast.akamaistream.net/cbc_r1_reg"), ("Radio One", "Saskatchewan", "Saskatoon", "http://cbc_r1_ssk.akacast.akamaistream.net/7/842/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ssk"), ("Radio One", "Manitoba", "Winnipeg", "http://cbc_r1_wpg.akacast.akamaistream.net/7/831/451661/v1/rc.akacast.akamaistream.net/cbc_r1_wpg"), ("Radio One", "Nunavut", "Iqaluit", "http://cbc_r1_iqa.akacast.akamaistream.net/7/325/451661/v1/rc.akacast.akamaistream.net/cbc_r1_iqa"), ("Radio One", "Ontario", "Kitchener-Waterloo", 
"http://cbc_r1_ekw.akacast.akamaistream.net/7/63/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ekw"), ("Radio One", "Ontario", "London", "http://cbc_r1_ldn.akacast.akamaistream.net/7/104/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ldn"), ("Radio One", "Ontario", "Ottawa", "http://cbc_r1_ott.akacast.akamaistream.net/7/613/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ott"), ("Radio One", "Ontario", "Sudbury", "http://cbc_r1_sud.akacast.akamaistream.net/7/380/451661/v1/rc.akacast.akamaistream.net/cbc_r1_sud"), ("Radio One", "Ontario", "Thunder Bay", "http://cbc_r1_tba.akacast.akamaistream.net/7/245/451661/v1/rc.akacast.akamaistream.net/cbc_r1_tba"), ("Radio One", "Ontario", "Toronto", "http://cbc_r1_tor.akacast.akamaistream.net/7/632/451661/v1/rc.akacast.akamaistream.net/cbc_r1_tor"), ("Radio One", "Ontario", "Windsor", "http://cbc_r1_wdr.akacast.akamaistream.net/7/813/451661/v1/rc.akacast.akamaistream.net/cbc_r1_wdr"), ("Radio One", "Quebec", "Montreal", "http://cbc_r1_mtl.akacast.akamaistream.net/7/35/451661/v1/rc.akacast.akamaistream.net/cbc_r1_mtl"), ("Radio One", "Quebec", "Nord Quebec", "http://cbc_r1_n_mtl.akacast.akamaistream.net/7/823/451661/v1/rc.akacast.akamaistream.net/cbc_r1_n_mtl"), ("Radio One", "Quebec", "Quebec City", "http://cbc_r1_qqu.akacast.akamaistream.net/7/29/451661/v1/rc.akacast.akamaistream.net/cbc_r1_qqu"), ("Radio One", "New Brunswick", "Fredericton", "http://cbc_r1_frd.akacast.akamaistream.net/7/553/451661/v1/rc.akacast.akamaistream.net/cbc_r1_frd"), ("Radio One", "New Brunswick", "Moncton", "http://cbc_r1_mct.akacast.akamaistream.net/7/383/451661/v1/rc.akacast.akamaistream.net/cbc_r1_mct"), ("Radio One", "New Brunswick", "Saint John", "http://cbc_r1_snb.akacast.akamaistream.net/7/754/451661/v1/rc.akacast.akamaistream.net/cbc_r1_snb"), ("Radio One", "Prince Edward Island", "Charlottetown", "http://cbc_r1_chr.akacast.akamaistream.net/7/169/451661/v1/rc.akacast.akamaistream.net/cbc_r1_chr"), ("Radio One", "Nova Scotia", "Cape Breton", 
"http://cbc_r1_syd.akacast.akamaistream.net/7/897/451661/v1/rc.akacast.akamaistream.net/cbc_r1_syd"), ("Radio One", "Nova Scotia", "Halifax", "http://cbc_r1_hfx.akacast.akamaistream.net/7/981/451661/v1/rc.akacast.akamaistream.net/cbc_r1_hfx"), ("Radio One", "Newfoundland & Labrador", "Corner Brook", "http://cbc_r2_cor.akacast.akamaistream.net/7/550/451661/v1/rc.akacast.akamaistream.net/cbc_r1_cor"), ("Radio One", "Newfoundland & Labrador", "Grand Falls/Gander", "http://cbc_r1_gfa.akacast.akamaistream.net/7/492/451661/v1/rc.akacast.akamaistream.net/cbc_r1_gfa"), ("Radio One", "Newfoundland & Labrador", "Labrador", "http://cbc_r1_gba.akacast.akamaistream.net/7/274/451661/v1/rc.akacast.akamaistream.net/cbc_r1_gba"), ("Radio One", "Newfoundland & Labrador", "St. John's", "http://cbc_r1_snf.akacast.akamaistream.net/7/750/451661/v1/rc.akacast.akamaistream.net/cbc_r1_snf"), ("Radio One", "Northwest Territories", "Inuvik", "http://cbc_r1_ink.akacast.akamaistream.net/7/967/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ink"), ("Radio One", "Northwest Territories", "Yellowknife", "http://cbc_r1_ykn.akacast.akamaistream.net/7/369/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ykn"), ("Radio Two", "Atlantic", "Halifax", "http://cbc_r2_hfx.akacast.akamaistream.net/7/917/451661/v1/rc.akacast.akamaistream.net/cbc_r2_hfx"), ("Radio Two", "Eastern", "Toronto", "http://cbc_r2_tor.akacast.akamaistream.net/7/364/451661/v1/rc.akacast.akamaistream.net/cbc_r2_tor"), ("Radio Two", "Central", "Winnipeg", "http://cbc_r2_wpg.akacast.akamaistream.net/7/233/451661/v1/rc.akacast.akamaistream.net/cbc_r2_wpg"), ("Radio Two", "Mountain", "Edmonton", "http://cbc_r2_edm.akacast.akamaistream.net/7/40/451661/v1/rc.akacast.akamaistream.net/cbc_r2_edm"), ("Radio Two", "Pacific", "Vancouver", "http://cbc_r2_vcr.akacast.akamaistream.net/7/773/451661/v1/rc.akacast.akamaistream.net/cbc_r2_vcr"), ("Radio Two", "International", "Pacific", 
"http://cbc_r2_ipt.akacast.akamaistream.net/7/669/451661/v1/rc.akacast.akamaistream.net/cbc_r2_ipt"), ("Radio Two", "International", "Eastern", "http://cbc_r2_iet.akacast.akamaistream.net/7/50/451661/v1/rc.akacast.akamaistream.net/cbc_r2_iet"), ] # CBC Music stream list page _STREAMS = 'http://www.cbc.ca/radio/includes/streams.html' # CBC Radio 2 Eastern (Toronto) stream URL CBC_RADIO_2 = 'http://cbc_r2_tor.akacast.akamaistream.net' \ '/7/364/451661/v1/rc.akacast.akamaistream.net/cbc_r2_tor' # CBC Radio 1 Ottawa stream URL CBC_RADIO_1 = 'http://cbc_r1_ott.akacast.akamaistream.net' \ '/7/613/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ott' argument_parser = ArgumentParser(__doc__) argument_parser.add_argument('-l', '--list', action='store_true') argument_parser.add_argument('-t', '--tee', action='store_true') mutex_group = argument_parser.add_mutually_exclusive_group(required=False) # Yuck, wish it was multiple arguments, # but argparse doesn't support anything but OPTIONAL. mutex_group.add_argument('stream', nargs=OPTIONAL, type=str.split, help='Name of stream to play/record') mutex_group.add_argument('-1', '--one', action='store_const', const=CBC_RADIO_1, dest='url', help='CBC Radio One Eastern') mutex_group.add_argument('-2', '--two', action='store_const', const=CBC_RADIO_2, dest='url', help='CBC Radio Two Eastern') PlaylistItem = namedtuple('PlaylistItem', ['radio', 'province', 'city', 'url']) _COMPLETION_INDEX = {' '.join((radio, region, city)): url for radio, region, city, url in _STREAM_SNAPSHOT} def get_streams(): ''' Get CBC Radio music streams as {name: stream_url}. 
''' r = requests.get(_STREAMS) r.raise_for_status() h = html.fromstring(r.content, base_url=r.url) # noqa radio_one, radio_two = h.cssselect('table') for row in radio_one.cssselect('tbody td'): raise NotImplementedError() for row in radio_two.cssselect('tbody td'): raise NotImplementedError() class Completer: def __init__(self, streams): self.streams = streams self.previous_prefix = None def complete(self, text, state): if text != self.previous_prefix: #print('!' * 200) self.completions = [stream for stream in self.streams if readline.get_line_buffer().strip() in stream] self.previous_prefix = text try: return self.completions[state] except IndexError: return None def mpv_cmdline(input_url): ''' Return an mpv command-line to play BUT NOT record input_url. ''' return ['mpv', '--vo=null', input_url] def ffmpeg_cmdline(input_url, tee): ''' Return a ffmpeg command to play and maybe record input_url. :param tee: if True, also save to disk. ''' return ['ffmpeg', '-hide_banner', '-nostdin', '-i', f'async:{input_url}', *([] if not tee else ['-f', 'mpegts', '-c', 'copy', f'''./{datetime.now() .replace(microsecond=0) .isoformat()}.m2ts''']), '-f', 'alsa', 'default'] def play(input_url, tee=False): ''' Play input_url, optionally also saving to disk. ''' subprocess.check_call(ffmpeg_cmdline(input_url, tee=tee)) def print_streams(streams): ''' Pretty print streams. ''' print(*sorted(streams), sep='\n') def autocomplete(streams): ''' List choices, and prompt with autocompletion one item from streams. 
''' print_streams(streams) # readline API doesn't make this undoable readline.parse_and_bind('tab: complete') try: old_delims = readline.get_completer_delims() readline.set_completer_delims('') try: old_completer = readline.get_completer() readline.set_completer(Completer(streams).complete) return streams[input('Playlist: ')] finally: readline.set_completer(old_completer) finally: readline.set_completer_delims(old_delims) if __name__ == '__main__': from sys import exit args = argument_parser.parse_args() #streams = get_streams() streams = _COMPLETION_INDEX if args.list: print_streams(streams) exit() if args.url is not None: stream_url = args.url elif args.stream is None: try: stream_url = autocomplete(streams) except (KeyboardInterrupt, EOFError): exit(1) else: matches = {stream: url for stream, url in streams.items() if all(map(stream.__contains__, args.stream))} if not matches: exit(f'Not a valid stream: {" ".join(args.stream)}') elif len(matches) > 1: try: stream_url = autocomplete(matches) except (KeyboardInterrupt, EOFError): exit(1) else: stream_url = next(iter(matches.values())) play(stream_url, tee=args.tee)
44.290076
110
0.66029
from datetime import datetime from argparse import ArgumentParser, OPTIONAL from collections import namedtuple import subprocess import readline import requests from lxml import html _STREAM_SNAPSHOT = [ ("Radio One", "BC", "Kamloops", "http://cbc_r1_kam.akacast.akamaistream.net/7/440/451661/v1/rc.akacast.akamaistream.net/cbc_r1_kam"), ("Radio One", "BC", "Kelowna", "http://cbc_r1_kel.akacast.akamaistream.net/7/229/451661/v1/rc.akacast.akamaistream.net/cbc_r1_kel"), ("Radio One", "BC", "Prince George", "http://cbc_r1_prg.akacast.akamaistream.net/7/966/451661/v1/rc.akacast.akamaistream.net/cbc_r1_prg"), ("Radio One", "BC", "Vancouver", "http://cbc_r1_vcr.akacast.akamaistream.net/7/723/451661/v1/rc.akacast.akamaistream.net/cbc_r1_vcr"), ("Radio One", "BC", "Victoria", "http://cbc_r1_vic.akacast.akamaistream.net/7/728/451661/v1/rc.akacast.akamaistream.net/cbc_r1_vic"), ("Radio One", "Yukon", "Whitehorse", "http://cbc_r1_whs.akacast.akamaistream.net/7/319/451661/v1/rc.akacast.akamaistream.net/cbc_r1_whs"), ("Radio One", "Alberta", "Calgary", "http://cbc_r1_cgy.akacast.akamaistream.net/7/298/451661/v1/rc.akacast.akamaistream.net/cbc_r1_cgy"), ("Radio One", "Alberta", "Edmonton", "http://cbc_r1_edm.akacast.akamaistream.net/7/904/451661/v1/rc.akacast.akamaistream.net/cbc_r1_edm"), ("Radio One", "Saskatchewan", "Regina", "http://cbc_r1_reg.akacast.akamaistream.net/7/666/451661/v1/rc.akacast.akamaistream.net/cbc_r1_reg"), ("Radio One", "Saskatchewan", "Saskatoon", "http://cbc_r1_ssk.akacast.akamaistream.net/7/842/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ssk"), ("Radio One", "Manitoba", "Winnipeg", "http://cbc_r1_wpg.akacast.akamaistream.net/7/831/451661/v1/rc.akacast.akamaistream.net/cbc_r1_wpg"), ("Radio One", "Nunavut", "Iqaluit", "http://cbc_r1_iqa.akacast.akamaistream.net/7/325/451661/v1/rc.akacast.akamaistream.net/cbc_r1_iqa"), ("Radio One", "Ontario", "Kitchener-Waterloo", 
"http://cbc_r1_ekw.akacast.akamaistream.net/7/63/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ekw"), ("Radio One", "Ontario", "London", "http://cbc_r1_ldn.akacast.akamaistream.net/7/104/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ldn"), ("Radio One", "Ontario", "Ottawa", "http://cbc_r1_ott.akacast.akamaistream.net/7/613/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ott"), ("Radio One", "Ontario", "Sudbury", "http://cbc_r1_sud.akacast.akamaistream.net/7/380/451661/v1/rc.akacast.akamaistream.net/cbc_r1_sud"), ("Radio One", "Ontario", "Thunder Bay", "http://cbc_r1_tba.akacast.akamaistream.net/7/245/451661/v1/rc.akacast.akamaistream.net/cbc_r1_tba"), ("Radio One", "Ontario", "Toronto", "http://cbc_r1_tor.akacast.akamaistream.net/7/632/451661/v1/rc.akacast.akamaistream.net/cbc_r1_tor"), ("Radio One", "Ontario", "Windsor", "http://cbc_r1_wdr.akacast.akamaistream.net/7/813/451661/v1/rc.akacast.akamaistream.net/cbc_r1_wdr"), ("Radio One", "Quebec", "Montreal", "http://cbc_r1_mtl.akacast.akamaistream.net/7/35/451661/v1/rc.akacast.akamaistream.net/cbc_r1_mtl"), ("Radio One", "Quebec", "Nord Quebec", "http://cbc_r1_n_mtl.akacast.akamaistream.net/7/823/451661/v1/rc.akacast.akamaistream.net/cbc_r1_n_mtl"), ("Radio One", "Quebec", "Quebec City", "http://cbc_r1_qqu.akacast.akamaistream.net/7/29/451661/v1/rc.akacast.akamaistream.net/cbc_r1_qqu"), ("Radio One", "New Brunswick", "Fredericton", "http://cbc_r1_frd.akacast.akamaistream.net/7/553/451661/v1/rc.akacast.akamaistream.net/cbc_r1_frd"), ("Radio One", "New Brunswick", "Moncton", "http://cbc_r1_mct.akacast.akamaistream.net/7/383/451661/v1/rc.akacast.akamaistream.net/cbc_r1_mct"), ("Radio One", "New Brunswick", "Saint John", "http://cbc_r1_snb.akacast.akamaistream.net/7/754/451661/v1/rc.akacast.akamaistream.net/cbc_r1_snb"), ("Radio One", "Prince Edward Island", "Charlottetown", "http://cbc_r1_chr.akacast.akamaistream.net/7/169/451661/v1/rc.akacast.akamaistream.net/cbc_r1_chr"), ("Radio One", "Nova Scotia", "Cape Breton", 
"http://cbc_r1_syd.akacast.akamaistream.net/7/897/451661/v1/rc.akacast.akamaistream.net/cbc_r1_syd"), ("Radio One", "Nova Scotia", "Halifax", "http://cbc_r1_hfx.akacast.akamaistream.net/7/981/451661/v1/rc.akacast.akamaistream.net/cbc_r1_hfx"), ("Radio One", "Newfoundland & Labrador", "Corner Brook", "http://cbc_r2_cor.akacast.akamaistream.net/7/550/451661/v1/rc.akacast.akamaistream.net/cbc_r1_cor"), ("Radio One", "Newfoundland & Labrador", "Grand Falls/Gander", "http://cbc_r1_gfa.akacast.akamaistream.net/7/492/451661/v1/rc.akacast.akamaistream.net/cbc_r1_gfa"), ("Radio One", "Newfoundland & Labrador", "Labrador", "http://cbc_r1_gba.akacast.akamaistream.net/7/274/451661/v1/rc.akacast.akamaistream.net/cbc_r1_gba"), ("Radio One", "Newfoundland & Labrador", "St. John's", "http://cbc_r1_snf.akacast.akamaistream.net/7/750/451661/v1/rc.akacast.akamaistream.net/cbc_r1_snf"), ("Radio One", "Northwest Territories", "Inuvik", "http://cbc_r1_ink.akacast.akamaistream.net/7/967/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ink"), ("Radio One", "Northwest Territories", "Yellowknife", "http://cbc_r1_ykn.akacast.akamaistream.net/7/369/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ykn"), ("Radio Two", "Atlantic", "Halifax", "http://cbc_r2_hfx.akacast.akamaistream.net/7/917/451661/v1/rc.akacast.akamaistream.net/cbc_r2_hfx"), ("Radio Two", "Eastern", "Toronto", "http://cbc_r2_tor.akacast.akamaistream.net/7/364/451661/v1/rc.akacast.akamaistream.net/cbc_r2_tor"), ("Radio Two", "Central", "Winnipeg", "http://cbc_r2_wpg.akacast.akamaistream.net/7/233/451661/v1/rc.akacast.akamaistream.net/cbc_r2_wpg"), ("Radio Two", "Mountain", "Edmonton", "http://cbc_r2_edm.akacast.akamaistream.net/7/40/451661/v1/rc.akacast.akamaistream.net/cbc_r2_edm"), ("Radio Two", "Pacific", "Vancouver", "http://cbc_r2_vcr.akacast.akamaistream.net/7/773/451661/v1/rc.akacast.akamaistream.net/cbc_r2_vcr"), ("Radio Two", "International", "Pacific", 
"http://cbc_r2_ipt.akacast.akamaistream.net/7/669/451661/v1/rc.akacast.akamaistream.net/cbc_r2_ipt"), ("Radio Two", "International", "Eastern", "http://cbc_r2_iet.akacast.akamaistream.net/7/50/451661/v1/rc.akacast.akamaistream.net/cbc_r2_iet"), ] # CBC Music stream list page _STREAMS = 'http://www.cbc.ca/radio/includes/streams.html' # CBC Radio 2 Eastern (Toronto) stream URL CBC_RADIO_2 = 'http://cbc_r2_tor.akacast.akamaistream.net' \ '/7/364/451661/v1/rc.akacast.akamaistream.net/cbc_r2_tor' # CBC Radio 1 Ottawa stream URL CBC_RADIO_1 = 'http://cbc_r1_ott.akacast.akamaistream.net' \ '/7/613/451661/v1/rc.akacast.akamaistream.net/cbc_r1_ott' argument_parser = ArgumentParser(__doc__) argument_parser.add_argument('-l', '--list', action='store_true') argument_parser.add_argument('-t', '--tee', action='store_true') mutex_group = argument_parser.add_mutually_exclusive_group(required=False) # Yuck, wish it was multiple arguments, # but argparse doesn't support anything but OPTIONAL. mutex_group.add_argument('stream', nargs=OPTIONAL, type=str.split, help='Name of stream to play/record') mutex_group.add_argument('-1', '--one', action='store_const', const=CBC_RADIO_1, dest='url', help='CBC Radio One Eastern') mutex_group.add_argument('-2', '--two', action='store_const', const=CBC_RADIO_2, dest='url', help='CBC Radio Two Eastern') PlaylistItem = namedtuple('PlaylistItem', ['radio', 'province', 'city', 'url']) _COMPLETION_INDEX = {' '.join((radio, region, city)): url for radio, region, city, url in _STREAM_SNAPSHOT} def get_streams(): r = requests.get(_STREAMS) r.raise_for_status() h = html.fromstring(r.content, base_url=r.url) radio_one, radio_two = h.cssselect('table') for row in radio_one.cssselect('tbody td'): raise NotImplementedError() for row in radio_two.cssselect('tbody td'): raise NotImplementedError() class Completer: def __init__(self, streams): self.streams = streams self.previous_prefix = None def complete(self, text, state): if text != self.previous_prefix: 
self.completions = [stream for stream in self.streams if readline.get_line_buffer().strip() in stream] self.previous_prefix = text try: return self.completions[state] except IndexError: return None def mpv_cmdline(input_url): return ['mpv', '--vo=null', input_url] def ffmpeg_cmdline(input_url, tee): return ['ffmpeg', '-hide_banner', '-nostdin', '-i', f'async:{input_url}', *([] if not tee else ['-f', 'mpegts', '-c', 'copy', f'''./{datetime.now() .replace(microsecond=0) .isoformat()}.m2ts''']), '-f', 'alsa', 'default'] def play(input_url, tee=False): subprocess.check_call(ffmpeg_cmdline(input_url, tee=tee)) def print_streams(streams): print(*sorted(streams), sep='\n') def autocomplete(streams): print_streams(streams) readline.parse_and_bind('tab: complete') try: old_delims = readline.get_completer_delims() readline.set_completer_delims('') try: old_completer = readline.get_completer() readline.set_completer(Completer(streams).complete) return streams[input('Playlist: ')] finally: readline.set_completer(old_completer) finally: readline.set_completer_delims(old_delims) if __name__ == '__main__': from sys import exit args = argument_parser.parse_args() #streams = get_streams() streams = _COMPLETION_INDEX if args.list: print_streams(streams) exit() if args.url is not None: stream_url = args.url elif args.stream is None: try: stream_url = autocomplete(streams) except (KeyboardInterrupt, EOFError): exit(1) else: matches = {stream: url for stream, url in streams.items() if all(map(stream.__contains__, args.stream))} if not matches: exit(f'Not a valid stream: {" ".join(args.stream)}') elif len(matches) > 1: try: stream_url = autocomplete(matches) except (KeyboardInterrupt, EOFError): exit(1) else: stream_url = next(iter(matches.values())) play(stream_url, tee=args.tee)
true
true
f7137edee87a9a20dc66bb2873f91f59098223c8
2,115
py
Python
tests/unit/cloud/__init__.py
springborland/salt
bee85e477d57e9a171884e54fefb9a59d0835ed0
[ "Apache-2.0" ]
1
2020-04-09T03:25:10.000Z
2020-04-09T03:25:10.000Z
tests/unit/cloud/__init__.py
springborland/salt
bee85e477d57e9a171884e54fefb9a59d0835ed0
[ "Apache-2.0" ]
null
null
null
tests/unit/cloud/__init__.py
springborland/salt
bee85e477d57e9a171884e54fefb9a59d0835ed0
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- """ tests.unit.cloud ~~~~~~~~~~~~~~~~ """ from __future__ import absolute_import, print_function, unicode_literals import salt.cloud from tests.support.unit import TestCase class CloudTest(TestCase): def test_vm_config_merger(self): """ Validate the vm's config is generated correctly. https://github.com/saltstack/salt/issues/49226 """ main = { "minion": {"master": "172.31.39.213"}, "log_file": "var/log/salt/cloud.log", "pool_size": 10, } provider = { "private_key": "dwoz.pem", "grains": {"foo1": "bar", "foo2": "bang"}, "availability_zone": "us-west-2b", "driver": "ec2", "ssh_interface": "private_ips", "ssh_username": "admin", "location": "us-west-2", } profile = { "profile": "default", "grains": {"meh2": "bar", "meh1": "foo"}, "provider": "ec2-default:ec2", "ssh_username": "admin", "image": "ami-0a1fbca0e5b419fd1", "size": "t2.micro", } vm = salt.cloud.Cloud.vm_config("test_vm", main, provider, profile, {}) self.assertEqual( { "minion": {"master": "172.31.39.213"}, "log_file": "var/log/salt/cloud.log", "pool_size": 10, "private_key": "dwoz.pem", "grains": { "foo1": "bar", "foo2": "bang", "meh2": "bar", "meh1": "foo", }, "availability_zone": "us-west-2b", "driver": "ec2", "ssh_interface": "private_ips", "ssh_username": "admin", "location": "us-west-2", "profile": "default", "provider": "ec2-default:ec2", "image": "ami-0a1fbca0e5b419fd1", "size": "t2.micro", "name": "test_vm", }, vm, )
31.567164
79
0.441135
from __future__ import absolute_import, print_function, unicode_literals import salt.cloud from tests.support.unit import TestCase class CloudTest(TestCase): def test_vm_config_merger(self): main = { "minion": {"master": "172.31.39.213"}, "log_file": "var/log/salt/cloud.log", "pool_size": 10, } provider = { "private_key": "dwoz.pem", "grains": {"foo1": "bar", "foo2": "bang"}, "availability_zone": "us-west-2b", "driver": "ec2", "ssh_interface": "private_ips", "ssh_username": "admin", "location": "us-west-2", } profile = { "profile": "default", "grains": {"meh2": "bar", "meh1": "foo"}, "provider": "ec2-default:ec2", "ssh_username": "admin", "image": "ami-0a1fbca0e5b419fd1", "size": "t2.micro", } vm = salt.cloud.Cloud.vm_config("test_vm", main, provider, profile, {}) self.assertEqual( { "minion": {"master": "172.31.39.213"}, "log_file": "var/log/salt/cloud.log", "pool_size": 10, "private_key": "dwoz.pem", "grains": { "foo1": "bar", "foo2": "bang", "meh2": "bar", "meh1": "foo", }, "availability_zone": "us-west-2b", "driver": "ec2", "ssh_interface": "private_ips", "ssh_username": "admin", "location": "us-west-2", "profile": "default", "provider": "ec2-default:ec2", "image": "ami-0a1fbca0e5b419fd1", "size": "t2.micro", "name": "test_vm", }, vm, )
true
true
f71380836e3db0f659b23bbc07344408251ee1f8
602
py
Python
src/data_integration/pw_corr.py
msenosain/TMA36_dataanalysis
ba390b40e9ffb2bf8ec39b3bd6e8aa000174c313
[ "MIT" ]
null
null
null
src/data_integration/pw_corr.py
msenosain/TMA36_dataanalysis
ba390b40e9ffb2bf8ec39b3bd6e8aa000174c313
[ "MIT" ]
null
null
null
src/data_integration/pw_corr.py
msenosain/TMA36_dataanalysis
ba390b40e9ffb2bf8ec39b3bd6e8aa000174c313
[ "MIT" ]
null
null
null
import pandas as pd import pingouin as pg def pw_corr(data_path="data/TMA36_project/Radiomics/processed/rad_healthmyne.csv", cde_path="data/TMA36_project/CDE/CDE_TMA36_2020FEB25_SA_MF.csv"): rad_hm = pd.read_csv(data_path, index_col=0) cde = pd.read_csv(cde_path, index_col=1) cde_sila = pd.DataFrame(cde['SILA']) rad_hm_sila = pd.merge(rad_hm, cde_sila, how='left', left_index=True, right_index=True) pairwise = rad_hm_sila.pairwise_corr(method='spearman',padjust='holm', columns=['SILA']) pairwise_sig = pairwise[pairwise['p-corr']<0.05] return pairwise_sig
46.307692
92
0.73588
import pandas as pd import pingouin as pg def pw_corr(data_path="data/TMA36_project/Radiomics/processed/rad_healthmyne.csv", cde_path="data/TMA36_project/CDE/CDE_TMA36_2020FEB25_SA_MF.csv"): rad_hm = pd.read_csv(data_path, index_col=0) cde = pd.read_csv(cde_path, index_col=1) cde_sila = pd.DataFrame(cde['SILA']) rad_hm_sila = pd.merge(rad_hm, cde_sila, how='left', left_index=True, right_index=True) pairwise = rad_hm_sila.pairwise_corr(method='spearman',padjust='holm', columns=['SILA']) pairwise_sig = pairwise[pairwise['p-corr']<0.05] return pairwise_sig
true
true
f713813212c14f8dbe01575043d8136ba327fb4a
10,030
py
Python
utils.py
kyuhyoung/grasping-invisible
2aaaeb9e28995628ec038a79496453be9f26ffff
[ "BSD-2-Clause" ]
29
2020-01-30T00:10:59.000Z
2022-03-26T21:25:25.000Z
utils.py
kyuhyoung/grasping-invisible
2aaaeb9e28995628ec038a79496453be9f26ffff
[ "BSD-2-Clause" ]
13
2020-04-28T09:38:58.000Z
2022-03-12T00:15:46.000Z
utils.py
kyuhyoung/grasping-invisible
2aaaeb9e28995628ec038a79496453be9f26ffff
[ "BSD-2-Clause" ]
9
2020-09-15T10:34:31.000Z
2021-06-16T20:51:04.000Z
import math import numpy as np from skimage.morphology.convex_hull import convex_hull_image from scipy.ndimage.morphology import binary_dilation def check_grasp_margin(target_mask_heightmap, depth_heightmap): margin_mask = binary_dilation(target_mask_heightmap, iterations=10).astype(np.float32)-target_mask_heightmap margin_depth = margin_mask * depth_heightmap margin_depth[np.isnan(margin_depth)] = 0 margin_depth[margin_depth > 0.3] = 0 margin_depth[margin_depth < 0.02] = 0 margin_depth[margin_depth > 0] = 1 margin_value = np.sum(margin_depth) return margin_value/np.sum(margin_mask), margin_value/np.sum(target_mask_heightmap) def check_push_target_oriented(best_pix_ind, push_end_pix_yx, target_mask_heightmap, mask_count_threshold=5): mask_hull = convex_hull_image(target_mask_heightmap) mask_count = 0 x1 = best_pix_ind[2] y1 = best_pix_ind[1] x2 = push_end_pix_yx[1] y2 = push_end_pix_yx[0] x_range = abs(x2-x1) y_range = abs(y2-y1) if x_range > y_range: k = (y2-y1)/(x2-x1) b = y1-k*x1 for x in range(min(int(x1), int(x2)), max(int(x1), int(x2))+1): y = int(k*x+b) try: mask_count += mask_hull[y, x] except IndexError: pass else: k = (x2-x1)/(y2-y1) b = x1-k*y1 for y in range(min(int(y1), int(y2)), max(int(y1), int(y2))+1): x = int(k*y+b) try: mask_count += mask_hull[y, x] except IndexError: pass if mask_count > mask_count_threshold: return True else: return False def check_grasp_target_oriented(best_pix_ind, target_mask_heightmap): mask_hull = convex_hull_image(target_mask_heightmap) if mask_hull[int(best_pix_ind[1]), int(best_pix_ind[2])]: return True else: return False def get_push_pix(push_maps, num_rotations): push_pix_ind = np.unravel_index(np.argmax(push_maps), push_maps.shape) push_end_pix_yx = get_push_end_pix_yx(push_pix_ind, num_rotations) return push_pix_ind, push_end_pix_yx def get_push_end_pix_yx(push_pix_ind, num_rotations): push_orientation = [1.0, 0.0] push_length_pix = 0.1/0.002 rotation_angle = np.deg2rad(push_pix_ind[0]*(360.0/num_rotations)) 
push_direction = np.asarray([push_orientation[0] * np.cos(rotation_angle) - push_orientation[1] * np.sin(rotation_angle), push_orientation[0] * np.sin(rotation_angle) + push_orientation[1] * np.cos(rotation_angle)]) return [push_pix_ind[1] + push_direction[1] * push_length_pix, push_pix_ind[2] + push_direction[0] * push_length_pix] def check_env_depth_change(prev_depth_heightmap, depth_heightmap, change_threshold=300): depth_diff = abs(prev_depth_heightmap-depth_heightmap) depth_diff[np.isnan(depth_diff)] = 0 depth_diff[depth_diff > 0.3] = 0 depth_diff[depth_diff < 0.02] = 0 depth_diff[depth_diff > 0] = 1 change_value = np.sum(depth_diff) change_detected = change_value > change_threshold return change_detected, change_value def check_target_depth_change(prev_depth_heightmap, prev_target_mask_heightmap, depth_heightmap, change_threshold=50): prev_mask_hull = binary_dilation(convex_hull_image(prev_target_mask_heightmap), iterations=5) depth_diff = prev_mask_hull*(prev_depth_heightmap-depth_heightmap) depth_diff[np.isnan(depth_diff)] = 0 depth_diff[depth_diff > 0.3] = 0 depth_diff[depth_diff < 0.02] = 0 depth_diff[depth_diff > 0] = 1 change_value = np.sum(depth_diff) change_detected = change_value > change_threshold return change_detected, change_value def process_mask_heightmaps(segment_results, seg_mask_heightmaps): names = [] heightmaps = [] for i in range(len(segment_results['labels'])): name = segment_results['labels'][i] heightmap = seg_mask_heightmaps[:, :, i] if np.sum(heightmap) > 10: names.append(name) heightmaps.append(heightmap) return {'names': names, 'heightmaps': heightmaps} def get_replay_id(predicted_value_log, label_value_log, reward_value_log, sample_ind, replay_type): # Prioritized experience replay, find sample with highest surprise value sample_ind = np.asarray(sample_ind) predicted_values = np.asarray(predicted_value_log)[sample_ind] label_values = np.asarray(label_value_log)[sample_ind] reward_values = np.asarray(reward_value_log)[sample_ind] 
if replay_type == 'augment': # assume predicted_value for different mask input are close label_values = label_values - reward_values + 1.0 sample_surprise_values = np.abs(predicted_values - label_values) sorted_surprise_ind = np.argsort(sample_surprise_values[:, 0]) sorted_sample_ind = sample_ind[sorted_surprise_ind] pow_law_exp = 2 rand_sample_ind = int(np.round(np.random.power(pow_law_exp, 1) * (sample_ind.size - 1))) sample_iteration = sorted_sample_ind[rand_sample_ind] print(replay_type.capitalize(), 'replay: iteration %d (surprise value: %f)' % (sample_iteration, sample_surprise_values[sorted_surprise_ind[rand_sample_ind]])) return sample_iteration def get_pointcloud(color_img, depth_img, masks_imgs, camera_intrinsics): # Get depth image size im_h = depth_img.shape[0] im_w = depth_img.shape[1] # Project depth into 3D point cloud in camera coordinates pix_x, pix_y = np.meshgrid(np.linspace(0, im_w-1, im_w), np.linspace(0, im_h-1, im_h)) cam_pts_x = np.multiply(pix_x-camera_intrinsics[0][2],depth_img/camera_intrinsics[0][0]) cam_pts_y = np.multiply(pix_y-camera_intrinsics[1][2],depth_img/camera_intrinsics[1][1]) cam_pts_z = depth_img.copy() cam_pts_x.shape = (im_h*im_w, 1) cam_pts_y.shape = (im_h*im_w, 1) cam_pts_z.shape = (im_h*im_w, 1) # Reshape image into colors for 3D point cloud rgb_pts_r = color_img[:, :, 0] rgb_pts_g = color_img[:, :, 1] rgb_pts_b = color_img[:, :, 2] rgb_pts_r.shape = (im_h*im_w, 1) rgb_pts_g.shape = (im_h*im_w, 1) rgb_pts_b.shape = (im_h*im_w, 1) num_masks = masks_imgs.shape[2] masks_pts = masks_imgs.copy() masks_pts = masks_pts.transpose(2, 0, 1).reshape(num_masks, -1) cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1) rgb_pts = np.concatenate((rgb_pts_r, rgb_pts_g, rgb_pts_b), axis=1) return cam_pts, rgb_pts, masks_pts def get_heightmap(color_img, depth_img, masks_imgs, cam_intrinsics, cam_pose, workspace_limits, heightmap_resolution): num_masks = masks_imgs.shape[2] # Compute heightmap size heightmap_size = 
np.round(((workspace_limits[1][1] - workspace_limits[1][0])/heightmap_resolution, (workspace_limits[0][1] - workspace_limits[0][0])/heightmap_resolution)).astype(int) # Get 3D point cloud from RGB-D images surface_pts, color_pts, masks_pts = get_pointcloud(color_img, depth_img, masks_imgs, cam_intrinsics) # Transform 3D point cloud from camera coordinates to robot coordinates surface_pts = np.transpose(np.dot(cam_pose[0:3,0:3],np.transpose(surface_pts)) + np.tile(cam_pose[0:3,3:],(1,surface_pts.shape[0]))) # Sort surface points by z value sort_z_ind = np.argsort(surface_pts[:,2]) surface_pts = surface_pts[sort_z_ind] color_pts = color_pts[sort_z_ind] masks_pts = masks_pts[:, sort_z_ind] # Filter out surface points outside heightmap boundaries heightmap_valid_ind = np.logical_and(np.logical_and(np.logical_and(np.logical_and(surface_pts[:,0] >= workspace_limits[0][0], surface_pts[:,0] < workspace_limits[0][1]), surface_pts[:,1] >= workspace_limits[1][0]), surface_pts[:,1] < workspace_limits[1][1]), surface_pts[:,2] < workspace_limits[2][1]) surface_pts = surface_pts[heightmap_valid_ind] color_pts = color_pts[heightmap_valid_ind] masks_pts = masks_pts[:, heightmap_valid_ind] # Create orthographic top-down-view RGB-D heightmaps color_heightmap_r = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8) color_heightmap_g = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8) color_heightmap_b = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8) masks_heightmaps = np.zeros((heightmap_size[0], heightmap_size[1], num_masks), dtype=np.uint8) depth_heightmap = np.zeros(heightmap_size) heightmap_pix_x = np.floor((surface_pts[:,0] - workspace_limits[0][0])/heightmap_resolution).astype(int) heightmap_pix_y = np.floor((surface_pts[:,1] - workspace_limits[1][0])/heightmap_resolution).astype(int) color_heightmap_r[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [0]] color_heightmap_g[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [1]] 
color_heightmap_b[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [2]] color_heightmap = np.concatenate((color_heightmap_r, color_heightmap_g, color_heightmap_b), axis=2) for c in range(num_masks): masks_heightmaps[heightmap_pix_y, heightmap_pix_x, c] = masks_pts[c, :] depth_heightmap[heightmap_pix_y, heightmap_pix_x] = surface_pts[:, 2] z_bottom = workspace_limits[2][0] depth_heightmap = depth_heightmap - z_bottom depth_heightmap[depth_heightmap < 0] = 0 depth_heightmap[depth_heightmap == -z_bottom] = np.nan return color_heightmap, depth_heightmap, masks_heightmaps # Get rotation matrix from euler angles def euler2rotm(theta): R_x = np.array([[1, 0, 0], [0, math.cos(theta[0]), -math.sin(theta[0])], [0, math.sin(theta[0]), math.cos(theta[0])]]) R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])], [0, 1, 0], [-math.sin(theta[1]), 0, math.cos(theta[1])]]) R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0], [math.sin(theta[2]), math.cos(theta[2]), 0], [0, 0, 1]]) R = np.dot(R_z, np.dot(R_y, R_x)) return R
44.185022
305
0.701496
import math import numpy as np from skimage.morphology.convex_hull import convex_hull_image from scipy.ndimage.morphology import binary_dilation def check_grasp_margin(target_mask_heightmap, depth_heightmap): margin_mask = binary_dilation(target_mask_heightmap, iterations=10).astype(np.float32)-target_mask_heightmap margin_depth = margin_mask * depth_heightmap margin_depth[np.isnan(margin_depth)] = 0 margin_depth[margin_depth > 0.3] = 0 margin_depth[margin_depth < 0.02] = 0 margin_depth[margin_depth > 0] = 1 margin_value = np.sum(margin_depth) return margin_value/np.sum(margin_mask), margin_value/np.sum(target_mask_heightmap) def check_push_target_oriented(best_pix_ind, push_end_pix_yx, target_mask_heightmap, mask_count_threshold=5): mask_hull = convex_hull_image(target_mask_heightmap) mask_count = 0 x1 = best_pix_ind[2] y1 = best_pix_ind[1] x2 = push_end_pix_yx[1] y2 = push_end_pix_yx[0] x_range = abs(x2-x1) y_range = abs(y2-y1) if x_range > y_range: k = (y2-y1)/(x2-x1) b = y1-k*x1 for x in range(min(int(x1), int(x2)), max(int(x1), int(x2))+1): y = int(k*x+b) try: mask_count += mask_hull[y, x] except IndexError: pass else: k = (x2-x1)/(y2-y1) b = x1-k*y1 for y in range(min(int(y1), int(y2)), max(int(y1), int(y2))+1): x = int(k*y+b) try: mask_count += mask_hull[y, x] except IndexError: pass if mask_count > mask_count_threshold: return True else: return False def check_grasp_target_oriented(best_pix_ind, target_mask_heightmap): mask_hull = convex_hull_image(target_mask_heightmap) if mask_hull[int(best_pix_ind[1]), int(best_pix_ind[2])]: return True else: return False def get_push_pix(push_maps, num_rotations): push_pix_ind = np.unravel_index(np.argmax(push_maps), push_maps.shape) push_end_pix_yx = get_push_end_pix_yx(push_pix_ind, num_rotations) return push_pix_ind, push_end_pix_yx def get_push_end_pix_yx(push_pix_ind, num_rotations): push_orientation = [1.0, 0.0] push_length_pix = 0.1/0.002 rotation_angle = np.deg2rad(push_pix_ind[0]*(360.0/num_rotations)) 
push_direction = np.asarray([push_orientation[0] * np.cos(rotation_angle) - push_orientation[1] * np.sin(rotation_angle), push_orientation[0] * np.sin(rotation_angle) + push_orientation[1] * np.cos(rotation_angle)]) return [push_pix_ind[1] + push_direction[1] * push_length_pix, push_pix_ind[2] + push_direction[0] * push_length_pix] def check_env_depth_change(prev_depth_heightmap, depth_heightmap, change_threshold=300): depth_diff = abs(prev_depth_heightmap-depth_heightmap) depth_diff[np.isnan(depth_diff)] = 0 depth_diff[depth_diff > 0.3] = 0 depth_diff[depth_diff < 0.02] = 0 depth_diff[depth_diff > 0] = 1 change_value = np.sum(depth_diff) change_detected = change_value > change_threshold return change_detected, change_value def check_target_depth_change(prev_depth_heightmap, prev_target_mask_heightmap, depth_heightmap, change_threshold=50): prev_mask_hull = binary_dilation(convex_hull_image(prev_target_mask_heightmap), iterations=5) depth_diff = prev_mask_hull*(prev_depth_heightmap-depth_heightmap) depth_diff[np.isnan(depth_diff)] = 0 depth_diff[depth_diff > 0.3] = 0 depth_diff[depth_diff < 0.02] = 0 depth_diff[depth_diff > 0] = 1 change_value = np.sum(depth_diff) change_detected = change_value > change_threshold return change_detected, change_value def process_mask_heightmaps(segment_results, seg_mask_heightmaps): names = [] heightmaps = [] for i in range(len(segment_results['labels'])): name = segment_results['labels'][i] heightmap = seg_mask_heightmaps[:, :, i] if np.sum(heightmap) > 10: names.append(name) heightmaps.append(heightmap) return {'names': names, 'heightmaps': heightmaps} def get_replay_id(predicted_value_log, label_value_log, reward_value_log, sample_ind, replay_type): sample_ind = np.asarray(sample_ind) predicted_values = np.asarray(predicted_value_log)[sample_ind] label_values = np.asarray(label_value_log)[sample_ind] reward_values = np.asarray(reward_value_log)[sample_ind] if replay_type == 'augment': label_values = label_values - reward_values + 
1.0 sample_surprise_values = np.abs(predicted_values - label_values) sorted_surprise_ind = np.argsort(sample_surprise_values[:, 0]) sorted_sample_ind = sample_ind[sorted_surprise_ind] pow_law_exp = 2 rand_sample_ind = int(np.round(np.random.power(pow_law_exp, 1) * (sample_ind.size - 1))) sample_iteration = sorted_sample_ind[rand_sample_ind] print(replay_type.capitalize(), 'replay: iteration %d (surprise value: %f)' % (sample_iteration, sample_surprise_values[sorted_surprise_ind[rand_sample_ind]])) return sample_iteration def get_pointcloud(color_img, depth_img, masks_imgs, camera_intrinsics): im_h = depth_img.shape[0] im_w = depth_img.shape[1] pix_x, pix_y = np.meshgrid(np.linspace(0, im_w-1, im_w), np.linspace(0, im_h-1, im_h)) cam_pts_x = np.multiply(pix_x-camera_intrinsics[0][2],depth_img/camera_intrinsics[0][0]) cam_pts_y = np.multiply(pix_y-camera_intrinsics[1][2],depth_img/camera_intrinsics[1][1]) cam_pts_z = depth_img.copy() cam_pts_x.shape = (im_h*im_w, 1) cam_pts_y.shape = (im_h*im_w, 1) cam_pts_z.shape = (im_h*im_w, 1) rgb_pts_r = color_img[:, :, 0] rgb_pts_g = color_img[:, :, 1] rgb_pts_b = color_img[:, :, 2] rgb_pts_r.shape = (im_h*im_w, 1) rgb_pts_g.shape = (im_h*im_w, 1) rgb_pts_b.shape = (im_h*im_w, 1) num_masks = masks_imgs.shape[2] masks_pts = masks_imgs.copy() masks_pts = masks_pts.transpose(2, 0, 1).reshape(num_masks, -1) cam_pts = np.concatenate((cam_pts_x, cam_pts_y, cam_pts_z), axis=1) rgb_pts = np.concatenate((rgb_pts_r, rgb_pts_g, rgb_pts_b), axis=1) return cam_pts, rgb_pts, masks_pts def get_heightmap(color_img, depth_img, masks_imgs, cam_intrinsics, cam_pose, workspace_limits, heightmap_resolution): num_masks = masks_imgs.shape[2] heightmap_size = np.round(((workspace_limits[1][1] - workspace_limits[1][0])/heightmap_resolution, (workspace_limits[0][1] - workspace_limits[0][0])/heightmap_resolution)).astype(int) surface_pts, color_pts, masks_pts = get_pointcloud(color_img, depth_img, masks_imgs, cam_intrinsics) surface_pts = 
np.transpose(np.dot(cam_pose[0:3,0:3],np.transpose(surface_pts)) + np.tile(cam_pose[0:3,3:],(1,surface_pts.shape[0]))) sort_z_ind = np.argsort(surface_pts[:,2]) surface_pts = surface_pts[sort_z_ind] color_pts = color_pts[sort_z_ind] masks_pts = masks_pts[:, sort_z_ind] heightmap_valid_ind = np.logical_and(np.logical_and(np.logical_and(np.logical_and(surface_pts[:,0] >= workspace_limits[0][0], surface_pts[:,0] < workspace_limits[0][1]), surface_pts[:,1] >= workspace_limits[1][0]), surface_pts[:,1] < workspace_limits[1][1]), surface_pts[:,2] < workspace_limits[2][1]) surface_pts = surface_pts[heightmap_valid_ind] color_pts = color_pts[heightmap_valid_ind] masks_pts = masks_pts[:, heightmap_valid_ind] color_heightmap_r = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8) color_heightmap_g = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8) color_heightmap_b = np.zeros((heightmap_size[0], heightmap_size[1], 1), dtype=np.uint8) masks_heightmaps = np.zeros((heightmap_size[0], heightmap_size[1], num_masks), dtype=np.uint8) depth_heightmap = np.zeros(heightmap_size) heightmap_pix_x = np.floor((surface_pts[:,0] - workspace_limits[0][0])/heightmap_resolution).astype(int) heightmap_pix_y = np.floor((surface_pts[:,1] - workspace_limits[1][0])/heightmap_resolution).astype(int) color_heightmap_r[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [0]] color_heightmap_g[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [1]] color_heightmap_b[heightmap_pix_y,heightmap_pix_x] = color_pts[:, [2]] color_heightmap = np.concatenate((color_heightmap_r, color_heightmap_g, color_heightmap_b), axis=2) for c in range(num_masks): masks_heightmaps[heightmap_pix_y, heightmap_pix_x, c] = masks_pts[c, :] depth_heightmap[heightmap_pix_y, heightmap_pix_x] = surface_pts[:, 2] z_bottom = workspace_limits[2][0] depth_heightmap = depth_heightmap - z_bottom depth_heightmap[depth_heightmap < 0] = 0 depth_heightmap[depth_heightmap == -z_bottom] = np.nan return 
color_heightmap, depth_heightmap, masks_heightmaps def euler2rotm(theta): R_x = np.array([[1, 0, 0], [0, math.cos(theta[0]), -math.sin(theta[0])], [0, math.sin(theta[0]), math.cos(theta[0])]]) R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])], [0, 1, 0], [-math.sin(theta[1]), 0, math.cos(theta[1])]]) R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0], [math.sin(theta[2]), math.cos(theta[2]), 0], [0, 0, 1]]) R = np.dot(R_z, np.dot(R_y, R_x)) return R
true
true
f71381c785c544888dee52871e6f87b6e325d276
66
py
Python
desafio2.py
rafarbop/Python
e304993a6c73eacd8fffd7c67286206516e5faea
[ "MIT" ]
null
null
null
desafio2.py
rafarbop/Python
e304993a6c73eacd8fffd7c67286206516e5faea
[ "MIT" ]
null
null
null
desafio2.py
rafarbop/Python
e304993a6c73eacd8fffd7c67286206516e5faea
[ "MIT" ]
null
null
null
nome = input('Digite seu nome: ') print(f'Seja bem vindo {nome}!')
33
33
0.666667
nome = input('Digite seu nome: ') print(f'Seja bem vindo {nome}!')
true
true
f713820a7f986fb4725f995796e3a290d8e8ef14
4,604
py
Python
objectModel/Python/cdm/utilities/logging/telemetry_config.py
rt112000/CDM
34bd34f9260140a8f8aa02bd87c23033f3daad4c
[ "CC-BY-4.0", "MIT" ]
884
2019-05-10T02:09:10.000Z
2022-03-31T14:02:00.000Z
objectModel/Python/cdm/utilities/logging/telemetry_config.py
rt112000/CDM
34bd34f9260140a8f8aa02bd87c23033f3daad4c
[ "CC-BY-4.0", "MIT" ]
171
2019-06-10T11:34:37.000Z
2022-03-31T22:50:12.000Z
objectModel/Python/cdm/utilities/logging/telemetry_config.py
rt112000/CDM
34bd34f9260140a8f8aa02bd87c23033f3daad4c
[ "CC-BY-4.0", "MIT" ]
340
2019-05-07T18:00:16.000Z
2022-03-31T12:00:15.000Z
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. import msal from typing import Optional, TYPE_CHECKING from cdm.enums import EnvironmentType, AzureCloudEndpoint if TYPE_CHECKING: from cdm.utilities.network.token_provider import TokenProvider class TelemetryConfig: """ Configuration information to establish a connection with the database for telemetry collection. """ # Default Kusto database log table names CDM_INFOLOG_TABLE = 'infoLogs' CDM_WARNINGLOG_TABLE = 'warningLogs' CDM_ERRORLOG_TABLE = 'errorLogs' def __init__(self, ingest_at_level: 'EnvironmentType', region: Optional[str] = None, **kwargs) -> None: self.ingest_at_level = ingest_at_level # type: EnvironmentType self.region = region # type: Optional[str] self.tenant_id = kwargs.get('tenant_id', None) # type: Optional[str] self.client_id = kwargs.get('client_id', None) # type: Optional[str] self.secret = kwargs.get('secret', None) # type: Optional[str] self.remove_user_content = kwargs.get('remove_user_content', True) # type: Optional[bool] self.kusto_cluster_name = kwargs.get('cluster_name', None) # type: Optional[str] self.kusto_database_name = kwargs.get('database_name', None) # type: Optional[str] self.kusto_info_log_table = kwargs.get('info_table', self.CDM_INFOLOG_TABLE) # type: str self.kusto_warning_log_table = kwargs.get('warning_table', self.CDM_WARNINGLOG_TABLE) # type: str self.kusto_error_log_table = kwargs.get('error_table', self.CDM_ERRORLOG_TABLE) # type: str self.cloud_instance = kwargs.get('cloud_instance', AzureCloudEndpoint.AZURE_PUBLIC) # type: AzureCloudEndpoint self.token_provider = kwargs.get('token_provider', None) # type: Optional[TokenProvider] # --- internal --- self._context = None def _get_authentication_token(self) -> Optional[str]: """ Get the authentication token either using AAD App credentials or user-defined token provider. 
""" # User-defined token provider if self.token_provider: return self.token_provider.get_token() # Get token by supplying AAD App credentials elif self.tenant_id and self.client_id and self.secret \ and self.kusto_cluster_name and self.kusto_database_name \ and self.kusto_info_log_table and self.kusto_warning_log_table and self.kusto_error_log_table: result = self._generate_kusto_token() return result['token_type'] + ' ' + result['access_token'] # Throw an exception if neither method is configured else: raise Exception('Failed to get authentication token: No method configured to provide a token.') def _build_context(self): """ Build context when users make the first call. Need to ensure client Id, tenant and secret are not null. """ if self._context is None: self._context = msal.ConfidentialClientApplication( self.client_id, authority=self.cloud_instance.value + self.tenant_id, client_credential=self.secret) def _generate_kusto_token(self) -> Optional[dict]: """ Generate a Bearer token for accessing the Kusto resource using MSAL. """ # Define the resource scope to be the current Kusto cluster scope = ['https://{0}.kusto.windows.net/.default'.format(self.kusto_cluster_name)] # Authenticate with AAD App credentials self._build_context() # Acquire token using MSAL result = self._context.acquire_token_for_client(scopes=scope) if result and 'error' in result: error_description = result['error'] if 'error_description' in result: error_description += ' error_description: ' + result['error_description'] raise Exception('There was an error while acquiring Kusto authorization Token with client ID/secret authentication. ' 'Exception: ' + error_description) if result is None or 'access_token' not in result or 'token_type' not in result: raise Exception('Received invalid Kusto authentication result. ' 'The result may be None, or missing access_toke and/or token_type authorization header from the authentication result.') return result
45.137255
148
0.674848
import msal from typing import Optional, TYPE_CHECKING from cdm.enums import EnvironmentType, AzureCloudEndpoint if TYPE_CHECKING: from cdm.utilities.network.token_provider import TokenProvider class TelemetryConfig: CDM_INFOLOG_TABLE = 'infoLogs' CDM_WARNINGLOG_TABLE = 'warningLogs' CDM_ERRORLOG_TABLE = 'errorLogs' def __init__(self, ingest_at_level: 'EnvironmentType', region: Optional[str] = None, **kwargs) -> None: self.ingest_at_level = ingest_at_level self.region = region self.tenant_id = kwargs.get('tenant_id', None) self.client_id = kwargs.get('client_id', None) self.secret = kwargs.get('secret', None) self.remove_user_content = kwargs.get('remove_user_content', True) self.kusto_cluster_name = kwargs.get('cluster_name', None) self.kusto_database_name = kwargs.get('database_name', None) self.kusto_info_log_table = kwargs.get('info_table', self.CDM_INFOLOG_TABLE) self.kusto_warning_log_table = kwargs.get('warning_table', self.CDM_WARNINGLOG_TABLE) self.kusto_error_log_table = kwargs.get('error_table', self.CDM_ERRORLOG_TABLE) self.cloud_instance = kwargs.get('cloud_instance', AzureCloudEndpoint.AZURE_PUBLIC) self.token_provider = kwargs.get('token_provider', None) self._context = None def _get_authentication_token(self) -> Optional[str]: if self.token_provider: return self.token_provider.get_token() elif self.tenant_id and self.client_id and self.secret \ and self.kusto_cluster_name and self.kusto_database_name \ and self.kusto_info_log_table and self.kusto_warning_log_table and self.kusto_error_log_table: result = self._generate_kusto_token() return result['token_type'] + ' ' + result['access_token'] else: raise Exception('Failed to get authentication token: No method configured to provide a token.') def _build_context(self): if self._context is None: self._context = msal.ConfidentialClientApplication( self.client_id, authority=self.cloud_instance.value + self.tenant_id, client_credential=self.secret) def _generate_kusto_token(self) -> Optional[dict]: scope = 
['https://{0}.kusto.windows.net/.default'.format(self.kusto_cluster_name)] self._build_context() result = self._context.acquire_token_for_client(scopes=scope) if result and 'error' in result: error_description = result['error'] if 'error_description' in result: error_description += ' error_description: ' + result['error_description'] raise Exception('There was an error while acquiring Kusto authorization Token with client ID/secret authentication. ' 'Exception: ' + error_description) if result is None or 'access_token' not in result or 'token_type' not in result: raise Exception('Received invalid Kusto authentication result. ' 'The result may be None, or missing access_toke and/or token_type authorization header from the authentication result.') return result
true
true
f7138493a6f92409c9f9cda230a6d7d9c80791c4
10,096
py
Python
tests/test_general_SysmetricLogger.py
aimakerspace/synergos_logger
f59ab068082fab6e1520d9ae7ddde2919beee4f6
[ "Apache-2.0" ]
null
null
null
tests/test_general_SysmetricLogger.py
aimakerspace/synergos_logger
f59ab068082fab6e1520d9ae7ddde2919beee4f6
[ "Apache-2.0" ]
null
null
null
tests/test_general_SysmetricLogger.py
aimakerspace/synergos_logger
f59ab068082fab6e1520d9ae7ddde2919beee4f6
[ "Apache-2.0" ]
1
2022-01-21T00:57:43.000Z
2022-01-21T00:57:43.000Z
#!/usr/bin/env python #################### # Required Modules # #################### # Generic/Built-in import os import logging import time from typing import Callable # Libs import pytest import structlog # Custom from conftest import ( SYSMETRIC_SUPPORTED_METADATA, SYSMETRIC_TRACKERS, DURATION, POLL_INTERVAL, extract_name, reconfigure_global_structlog_params ) from synlogger.config import SYSMETRICS_PREFIX, SYSMETRICS_PORT from synlogger.utils import StructlogUtils ################## # Configurations # ################## file_path = os.path.abspath(__file__) class_name = "SysmetricLoggerTest" function_name = "test_SysmetricLogger_initialise" ########################### # Tests - SysmetricLogger # ########################### def test_SysmetricLogger_default_attibutes(sysmetric_logger_default_params): """ Tests for the correct initialisation defaults for the TTPLogger class # C1: logger_name defaults to "TTP_XXX" # C2: logging_variant defaults to "basic" # C3: server does not need to be specified by default # C4: port is assigned TTP_PORT by default # C5: logging_level defaults to logging.INFO # C6: debugging_fields defaults to False # C7: filter_functions defaults to an empty list # C8: censor_keys defaults to an empty list (i.e. 
no information censored) """ # C1 assert SYSMETRICS_PREFIX in sysmetric_logger_default_params.logger_name # C2 assert sysmetric_logger_default_params.logging_variant == "basic" # C3 assert sysmetric_logger_default_params.server is None # C4 assert sysmetric_logger_default_params.port == SYSMETRICS_PORT # C5 assert sysmetric_logger_default_params.logging_level == logging.INFO # C6 assert sysmetric_logger_default_params.debugging_fields == False # C7 assert len(sysmetric_logger_default_params.filter_functions) == 0 # C8 assert len(sysmetric_logger_default_params.censor_keys) == 0 def test_SysmetricLogger_configure_processors(sysmetric_logger_default_params): """ Tests if sysmetric processors loaded are valid # C1: All processors returned are functions # C2: logging_renderer must be the last processor of the list # C3: Sysmetric processors must be included alongside default processors """ # C1 processors = sysmetric_logger_default_params._configure_processors() assert all(isinstance(_funct, Callable) for _funct in processors) # C2 last_processor = processors[-1] assert any( extract_name(last_processor) == extract_name(_funct) for _funct in [ StructlogUtils.graypy_structlog_processor, structlog.processors.JSONRenderer(indent=1) ] ) # C3 processors_names = [extract_name(processor) for processor in processors] assert all( extract_name(sys_tracker) in processors_names for sys_tracker in SYSMETRIC_TRACKERS ) def test_SysmetricLogger_is_tracking(sysmetric_logger): """ Tests if tracking state is toggling correctly. Note that while the state tested here is dependent on .track() & .terminate(), we are only testing for the change of state. The correctness of .track() & .terminate() is not enforced and is assumed to work here. 
# C1: is_tracking returns False before tracking is started # C2: is_tracking returns True after tracking is started # C3: is_tracking returns False after tracking has been terminated """ # C1 assert sysmetric_logger.is_tracking() == False # C2 sysmetric_logger.track( file_path=file_path, class_name=class_name, function_name=function_name, resolution=POLL_INTERVAL ) assert sysmetric_logger.is_tracking() == True # C3 sysmetric_logger.terminate() assert sysmetric_logger.is_tracking() == False def test_SysmetricLogger_track(sysmetric_logger): """ Tests if sysmetric process tracking starts & polls correctly # C1: Before tracking is initialised, sysmetric_logger.tracker is None # C2: After tracking is initialised, sysmetric_logger.tracker is not None # C3: After tracking is initialised, tracking process is actively running # C4: No. of trials recorded tallies with expected no. of records given a predetermined polling interval over a specified duration # C5: Each record detected has the appropriate metadata logged # C6: Each sysmetric metadata logged has valid values """ # C1 assert sysmetric_logger.tracker is None # Start tracking process to check for state changes sysmetric_logger.track( file_path=file_path, class_name=class_name, function_name=function_name, resolution=POLL_INTERVAL ) # C2 assert sysmetric_logger.tracker is not None # C3 assert sysmetric_logger.tracker.is_alive() with reconfigure_global_structlog_params(sysmetric_logger) as cap_logs: sysmetric_logger.synlog.setLevel(logging.INFO) # V.IMPT!!! ########################### # Implementation Footnote # ########################### # [Cause] # Structlog's log capture mechanism does not allow for log capturing # from multiprocessed loggers with custom processors (i.e. non-global). # [Problems] # Sysmetric tracking is performed by running a backgrounded process # polling for logs once every specified interval. Being a struclog # logger, it suffers from the aforementioned limitations DURING TESTING. 
# This results in the failure to capture logs for analysis/testing. # [Solution] # Manually simulate probing behaviour in the global context, using # custom processors that are same as the ones running in the # backgrounded logger. trial_count = int(DURATION/POLL_INTERVAL) for _ in range(trial_count): sysmetric_logger._probe( resolution=POLL_INTERVAL, descriptors={ "ID_path": file_path, "ID_class": class_name, "ID_function": function_name } ) # C4 assert len(cap_logs) == trial_count # C5 assert all( set(SYSMETRIC_SUPPORTED_METADATA).issubset(list(record.keys())) for record in cap_logs ) for record in cap_logs: # C6 assert record.get('logger') == sysmetric_logger.logger_name assert record.get('file_path') == sysmetric_logger.file_path level_name = logging.getLevelName(sysmetric_logger.logging_level) assert record.get('level') == level_name.lower() assert record.get('log_level') == level_name.lower() assert record.get('level_number') == sysmetric_logger.logging_level assert isinstance(record.get('timestamp'), str) assert isinstance(record.get('ID_path'), str) assert isinstance(record.get('ID_class'), str) assert isinstance(record.get('ID_function'), str) assert isinstance(record.get('cpu_percent'), float) assert isinstance(record.get('memory_total'), int) assert isinstance(record.get('memory_available'), int) assert isinstance(record.get('memory_used'), int) assert isinstance(record.get('memory_free'), int) assert isinstance(record.get('disk_read_counter'), int) assert isinstance(record.get('disk_write_counter'), int) assert isinstance(record.get('disk_read_bytes'), int) assert isinstance(record.get('disk_write_bytes'), int) assert isinstance(record.get('net_bytes_sent'), int) assert isinstance(record.get('net_bytes_recv'), int) assert isinstance(record.get('net_packets_sent'), int) assert isinstance(record.get('net_packets_recv'), int) # Manually clean up process (no dependency on .terminate()) sysmetric_logger.tracker.terminate() # send SIGTERM signal to the child 
sysmetric_logger.tracker.join() exit_code = sysmetric_logger.tracker.exitcode sysmetric_logger.tracker.close() sysmetric_logger.tracker = None # Reset state of tracker # assert exit_code == 0 # successful termination def test_SysmetricLogger_terminate(sysmetric_logger): """ Tests if sysmetric process tracking terminates correctly # C1: Before tracking is terminated, sysmetric_logger.tracker is not None # C2: Before tracking is terminated, tracking process is actively running # C3: After tracking is terminated, sysmetric_logger.tracker is None # C4: After tracking is terminated, saved tracker is no longer running # C5: Tracking was terminated gracefully """ sysmetric_logger.track( file_path=file_path, class_name=class_name, function_name=function_name, resolution=POLL_INTERVAL ) time.sleep(DURATION) # C1 assert sysmetric_logger.tracker is not None # C2 assert sysmetric_logger.tracker.is_alive() saved_tracker = sysmetric_logger.tracker exit_code = sysmetric_logger.terminate() # C3 assert sysmetric_logger.tracker is None # C4 assert saved_tracker._closed # C5 assert exit_code == 0 @pytest.mark.xfail(raises=RuntimeError) def test_SysmetricLogger_premature_termination(sysmetric_logger): """ Tests if premature termination condition was caught and handled # C1: Check that 'RuntimeError(f"Attempted to terminate logger XXX before initialisation!")' is caught, due to Exception being raised when checking for initialisation state in sysmetric_logger """ sysmetric_logger.terminate()
36.712727
87
0.666204
tils unct in processors) last_processor = processors[-1] assert any( extract_name(last_processor) == extract_name(_funct) for _funct in [ StructlogUtils.graypy_structlog_processor, structlog.processors.JSONRenderer(indent=1) ] ) processors_names = [extract_name(processor) for processor in processors] assert all( extract_name(sys_tracker) in processors_names for sys_tracker in SYSMETRIC_TRACKERS ) def test_SysmetricLogger_is_tracking(sysmetric_logger): assert sysmetric_logger.is_tracking() == False sysmetric_logger.track( file_path=file_path, class_name=class_name, function_name=function_name, resolution=POLL_INTERVAL ) assert sysmetric_logger.is_tracking() == True sysmetric_logger.terminate() assert sysmetric_logger.is_tracking() == False def test_SysmetricLogger_track(sysmetric_logger): assert sysmetric_logger.tracker is None sysmetric_logger.track( file_path=file_path, class_name=class_name, function_name=function_name, resolution=POLL_INTERVAL ) assert sysmetric_logger.tracker is not None assert sysmetric_logger.tracker.is_alive() with reconfigure_global_structlog_params(sysmetric_logger) as cap_logs: sysmetric_logger.synlog.setLevel(logging.INFO) for _ in range(trial_count): sysmetric_logger._probe( resolution=POLL_INTERVAL, descriptors={ "ID_path": file_path, "ID_class": class_name, "ID_function": function_name } ) # C4 assert len(cap_logs) == trial_count # C5 assert all( set(SYSMETRIC_SUPPORTED_METADATA).issubset(list(record.keys())) for record in cap_logs ) for record in cap_logs: # C6 assert record.get('logger') == sysmetric_logger.logger_name assert record.get('file_path') == sysmetric_logger.file_path level_name = logging.getLevelName(sysmetric_logger.logging_level) assert record.get('level') == level_name.lower() assert record.get('log_level') == level_name.lower() assert record.get('level_number') == sysmetric_logger.logging_level assert isinstance(record.get('timestamp'), str) assert isinstance(record.get('ID_path'), str) assert 
isinstance(record.get('ID_class'), str) assert isinstance(record.get('ID_function'), str) assert isinstance(record.get('cpu_percent'), float) assert isinstance(record.get('memory_total'), int) assert isinstance(record.get('memory_available'), int) assert isinstance(record.get('memory_used'), int) assert isinstance(record.get('memory_free'), int) assert isinstance(record.get('disk_read_counter'), int) assert isinstance(record.get('disk_write_counter'), int) assert isinstance(record.get('disk_read_bytes'), int) assert isinstance(record.get('disk_write_bytes'), int) assert isinstance(record.get('net_bytes_sent'), int) assert isinstance(record.get('net_bytes_recv'), int) assert isinstance(record.get('net_packets_sent'), int) assert isinstance(record.get('net_packets_recv'), int) # Manually clean up process (no dependency on .terminate()) sysmetric_logger.tracker.terminate() # send SIGTERM signal to the child sysmetric_logger.tracker.join() exit_code = sysmetric_logger.tracker.exitcode sysmetric_logger.tracker.close() sysmetric_logger.tracker = None # Reset state of tracker # assert exit_code == 0 # successful termination def test_SysmetricLogger_terminate(sysmetric_logger): sysmetric_logger.track( file_path=file_path, class_name=class_name, function_name=function_name, resolution=POLL_INTERVAL ) time.sleep(DURATION) # C1 assert sysmetric_logger.tracker is not None # C2 assert sysmetric_logger.tracker.is_alive() saved_tracker = sysmetric_logger.tracker exit_code = sysmetric_logger.terminate() # C3 assert sysmetric_logger.tracker is None # C4 assert saved_tracker._closed # C5 assert exit_code == 0 @pytest.mark.xfail(raises=RuntimeError) def test_SysmetricLogger_premature_termination(sysmetric_logger): sysmetric_logger.terminate()
true
true
f71384f7b1ad31496b13808388d1421a7c466c4e
2,707
py
Python
ic/identity.py
zkung/ic-py
426d0a95b5826ced58ec3164a99a6994f088957a
[ "MIT" ]
1
2022-03-19T23:23:53.000Z
2022-03-19T23:23:53.000Z
ic/identity.py
dfinity-lab/ic-py
3e6458e245c565339047847c70c5bac50700f9dd
[ "MIT" ]
null
null
null
ic/identity.py
dfinity-lab/ic-py
3e6458e245c565339047847c70c5bac50700f9dd
[ "MIT" ]
null
null
null
import hashlib from ecdsa.curves import Ed25519, SECP256k1 from .principal import Principal import ecdsa class Identity: def __init__(self, privkey = "", type = "ed25519", anonymous = False): privkey = bytes(bytearray.fromhex(privkey)) self.anonymous = anonymous if anonymous: return self.key_type = type if type == 'secp256k1': if len(privkey) > 0: self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256) else: self.sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256) self._privkey = self.sk.to_string().hex() self.vk = self.sk.get_verifying_key() self._pubkey = self.vk.to_string().hex() self._der_pubkey = self.vk.to_der() elif type == 'ed25519': if len(privkey) > 0: self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.Ed25519) else: self.sk = ecdsa.SigningKey.generate(curve=ecdsa.Ed25519) self._privkey = self.sk.to_string().hex() self.vk = self.sk.get_verifying_key() self._pubkey = self.vk.to_string().hex() self._der_pubkey = self.vk.to_der() else: raise 'unsupported identity type' @staticmethod def from_pem(pem: str): key = ecdsa.SigningKey.from_pem(pem) privkey = key.to_string().hex() type = "unknown" if key.curve == Ed25519: type = 'ed25519' elif key.curve == SECP256k1: type = 'secp256k1' return Identity(privkey=privkey, type=type) def to_pem(self): pem = self.sk.to_pem(format="pkcs8") return pem def sender(self): if self.anonymous: return Principal.anonymous() return Principal.self_authenticating(self._der_pubkey) def sign(self, msg: bytes): if self.anonymous: return (None, None) if self.key_type == 'ed25519': sig = self.sk.sign(msg) return (self._der_pubkey, sig) elif self.key_type == 'secp256k1': sig = self.sk.sign(msg) return (self._der_pubkey, sig) @property def privkey(self): return self._privkey @property def pubkey(self): return self._pubkey @property def der_pubkey(self): return self._der_pubkey def __repr__(self): return "Identity(" + self.key_type + ', ' + self._privkey + ", " + 
self._pubkey + ")" def __str__(self): return "(" + self.key_type + ', ' + self._privkey + ", " + self._pubkey + ")"
33.012195
111
0.579978
import hashlib from ecdsa.curves import Ed25519, SECP256k1 from .principal import Principal import ecdsa class Identity: def __init__(self, privkey = "", type = "ed25519", anonymous = False): privkey = bytes(bytearray.fromhex(privkey)) self.anonymous = anonymous if anonymous: return self.key_type = type if type == 'secp256k1': if len(privkey) > 0: self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256) else: self.sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256) self._privkey = self.sk.to_string().hex() self.vk = self.sk.get_verifying_key() self._pubkey = self.vk.to_string().hex() self._der_pubkey = self.vk.to_der() elif type == 'ed25519': if len(privkey) > 0: self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.Ed25519) else: self.sk = ecdsa.SigningKey.generate(curve=ecdsa.Ed25519) self._privkey = self.sk.to_string().hex() self.vk = self.sk.get_verifying_key() self._pubkey = self.vk.to_string().hex() self._der_pubkey = self.vk.to_der() else: raise 'unsupported identity type' @staticmethod def from_pem(pem: str): key = ecdsa.SigningKey.from_pem(pem) privkey = key.to_string().hex() type = "unknown" if key.curve == Ed25519: type = 'ed25519' elif key.curve == SECP256k1: type = 'secp256k1' return Identity(privkey=privkey, type=type) def to_pem(self): pem = self.sk.to_pem(format="pkcs8") return pem def sender(self): if self.anonymous: return Principal.anonymous() return Principal.self_authenticating(self._der_pubkey) def sign(self, msg: bytes): if self.anonymous: return (None, None) if self.key_type == 'ed25519': sig = self.sk.sign(msg) return (self._der_pubkey, sig) elif self.key_type == 'secp256k1': sig = self.sk.sign(msg) return (self._der_pubkey, sig) @property def privkey(self): return self._privkey @property def pubkey(self): return self._pubkey @property def der_pubkey(self): return self._der_pubkey def __repr__(self): return "Identity(" + self.key_type + ', ' + self._privkey + ", " + 
self._pubkey + ")" def __str__(self): return "(" + self.key_type + ', ' + self._privkey + ", " + self._pubkey + ")"
true
true
f713854f3b5d2bc3bace7fa4697551d52b3eeb02
98
py
Python
ludwig/__init__.py
phueb/Ludw
de426de1e396e700007869cda27dd5bc9b8f5d2d
[ "MIT" ]
null
null
null
ludwig/__init__.py
phueb/Ludw
de426de1e396e700007869cda27dd5bc9b8f5d2d
[ "MIT" ]
1
2022-03-30T14:07:13.000Z
2022-03-30T14:07:13.000Z
ludwig/__init__.py
phueb/Ludw
de426de1e396e700007869cda27dd5bc9b8f5d2d
[ "MIT" ]
2
2020-06-15T13:06:53.000Z
2021-02-12T00:33:29.000Z
__version__ = '4.0.6' def print_ludwig(s): print(f'Ludwig-{__version__}: {s}', flush=True)
14
51
0.642857
__version__ = '4.0.6' def print_ludwig(s): print(f'Ludwig-{__version__}: {s}', flush=True)
true
true
f71385a86952e71464ade64291844060b175054d
4,714
py
Python
indy_node/test/auth_rule/test_auth_map.py
Rob-S/indy-node
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
[ "Apache-2.0" ]
627
2017-07-06T12:38:08.000Z
2022-03-30T13:18:43.000Z
indy_node/test/auth_rule/test_auth_map.py
Rob-S/indy-node
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
[ "Apache-2.0" ]
580
2017-06-29T17:59:57.000Z
2022-03-29T21:37:52.000Z
indy_node/test/auth_rule/test_auth_map.py
Rob-S/indy-node
0aefbda62c5a7412d7e03b2fb9795c500ea67e9f
[ "Apache-2.0" ]
704
2017-06-29T17:45:34.000Z
2022-03-30T07:08:58.000Z
from indy_common.authorize import auth_map def test_auth_map_node(): node_rules = [(auth_map.adding_new_node, "0--ADD--services--*--['VALIDATOR']"), (auth_map.adding_new_node_with_empty_services, "0--ADD--services--*--[]"), (auth_map.demote_node, "0--EDIT--services--['VALIDATOR']--[]"), (auth_map.promote_node, "0--EDIT--services--[]--['VALIDATOR']"), (auth_map.change_node_ip, '0--EDIT--node_ip--*--*'), (auth_map.change_node_port, '0--EDIT--node_port--*--*'), (auth_map.change_client_ip, '0--EDIT--client_ip--*--*'), (auth_map.change_client_port, '0--EDIT--client_port--*--*'), (auth_map.change_bls_key, '0--EDIT--blskey--*--*')] for (rule, rule_str) in node_rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_nym(): nym_rules = [(auth_map.add_new_trustee, "1--ADD--role--*--0"), (auth_map.add_new_steward, "1--ADD--role--*--2"), (auth_map.add_new_endorser, "1--ADD--role--*--101"), (auth_map.add_new_network_monitor, "1--ADD--role--*--201"), (auth_map.add_new_identity_owner, '1--ADD--role--*--'), (auth_map.key_rotation, '1--EDIT--verkey--*--*')] for (rule, rule_str) in nym_rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_txn_author_agreement(): rules = [(auth_map.txn_author_agreement, "4--ADD--*--*--*"), ] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_txn_author_agreement_aml(): rules = [(auth_map.txn_author_agreement_aml, "5--ADD--*--*--*"), ] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_attrib(): rules = [(auth_map.add_attrib, "100--ADD--*--*--*"), (auth_map.edit_attrib, "100--EDIT--*--*--*")] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_schema(): rules = [(auth_map.add_schema, "101--ADD--*--*--*")] 
for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_schema_for_omitted(): rules = [(auth_map.edit_schema, "101--EDIT--*--*--*")] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_claim_def(): rules = [(auth_map.add_claim_def, "102--ADD--*--*--*"), (auth_map.edit_claim_def, "102--EDIT--*--*--*")] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_upgrade(): rules = [(auth_map.start_upgrade, "109--ADD--action--*--start"), (auth_map.cancel_upgrade, "109--EDIT--action--start--cancel")] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_config(): rules = [(auth_map.pool_config, "111--EDIT--action--*--*"), ] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_action(): nym_rules = [(auth_map.pool_restart, "118--ADD--action--*--*"), (auth_map.auth_rule, "120--EDIT--*--*--*"), (auth_map.auth_rules, "122--EDIT--*--*--*"), (auth_map.validator_info, "119--ADD--*--*--*")] for (rule, rule_str) in nym_rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_revoc_reg(): nym_rules = [(auth_map.add_revoc_reg_def, "113--ADD--*--*--*"), (auth_map.add_revoc_reg_entry, "114--ADD--*--*--*"), (auth_map.edit_revoc_reg_def, "113--EDIT--*--*--*"), (auth_map.edit_revoc_reg_entry, "114--EDIT--*--*--*")] for (rule, rule_str) in nym_rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_disable_taa(): rules = [(auth_map.disable_txn_author_agreement, '8--ADD--*--*--*')] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys()
36.828125
92
0.602885
from indy_common.authorize import auth_map def test_auth_map_node(): node_rules = [(auth_map.adding_new_node, "0--ADD--services--*--['VALIDATOR']"), (auth_map.adding_new_node_with_empty_services, "0--ADD--services--*--[]"), (auth_map.demote_node, "0--EDIT--services--['VALIDATOR']--[]"), (auth_map.promote_node, "0--EDIT--services--[]--['VALIDATOR']"), (auth_map.change_node_ip, '0--EDIT--node_ip--*--*'), (auth_map.change_node_port, '0--EDIT--node_port--*--*'), (auth_map.change_client_ip, '0--EDIT--client_ip--*--*'), (auth_map.change_client_port, '0--EDIT--client_port--*--*'), (auth_map.change_bls_key, '0--EDIT--blskey--*--*')] for (rule, rule_str) in node_rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_nym(): nym_rules = [(auth_map.add_new_trustee, "1--ADD--role--*--0"), (auth_map.add_new_steward, "1--ADD--role--*--2"), (auth_map.add_new_endorser, "1--ADD--role--*--101"), (auth_map.add_new_network_monitor, "1--ADD--role--*--201"), (auth_map.add_new_identity_owner, '1--ADD--role--*--'), (auth_map.key_rotation, '1--EDIT--verkey--*--*')] for (rule, rule_str) in nym_rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_txn_author_agreement(): rules = [(auth_map.txn_author_agreement, "4--ADD--*--*--*"), ] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_txn_author_agreement_aml(): rules = [(auth_map.txn_author_agreement_aml, "5--ADD--*--*--*"), ] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_attrib(): rules = [(auth_map.add_attrib, "100--ADD--*--*--*"), (auth_map.edit_attrib, "100--EDIT--*--*--*")] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_schema(): rules = [(auth_map.add_schema, "101--ADD--*--*--*")] 
for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_schema_for_omitted(): rules = [(auth_map.edit_schema, "101--EDIT--*--*--*")] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_claim_def(): rules = [(auth_map.add_claim_def, "102--ADD--*--*--*"), (auth_map.edit_claim_def, "102--EDIT--*--*--*")] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_upgrade(): rules = [(auth_map.start_upgrade, "109--ADD--action--*--start"), (auth_map.cancel_upgrade, "109--EDIT--action--start--cancel")] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_config(): rules = [(auth_map.pool_config, "111--EDIT--action--*--*"), ] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_action(): nym_rules = [(auth_map.pool_restart, "118--ADD--action--*--*"), (auth_map.auth_rule, "120--EDIT--*--*--*"), (auth_map.auth_rules, "122--EDIT--*--*--*"), (auth_map.validator_info, "119--ADD--*--*--*")] for (rule, rule_str) in nym_rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_revoc_reg(): nym_rules = [(auth_map.add_revoc_reg_def, "113--ADD--*--*--*"), (auth_map.add_revoc_reg_entry, "114--ADD--*--*--*"), (auth_map.edit_revoc_reg_def, "113--EDIT--*--*--*"), (auth_map.edit_revoc_reg_entry, "114--EDIT--*--*--*")] for (rule, rule_str) in nym_rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys() def test_auth_map_disable_taa(): rules = [(auth_map.disable_txn_author_agreement, '8--ADD--*--*--*')] for (rule, rule_str) in rules: assert rule.get_action_id() == rule_str assert rule_str in auth_map.auth_map.keys()
true
true
f713861bd53f0726e5bf5659818fa3fb95359fd7
173,565
py
Python
tests/unit/test_blob.py
yun-cn/python-storage
475c1f946439dc9df9ea9809aabc2f7847f0f590
[ "Apache-2.0" ]
null
null
null
tests/unit/test_blob.py
yun-cn/python-storage
475c1f946439dc9df9ea9809aabc2f7847f0f590
[ "Apache-2.0" ]
null
null
null
tests/unit/test_blob.py
yun-cn/python-storage
475c1f946439dc9df9ea9809aabc2f7847f0f590
[ "Apache-2.0" ]
null
null
null
# Copyright 2014 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import datetime import hashlib import io import json import os import tempfile import unittest import mock import pytest import six from six.moves import http_client from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED def _make_credentials(): import google.auth.credentials return mock.Mock(spec=google.auth.credentials.Credentials) class Test_Blob(unittest.TestCase): @staticmethod def _make_one(*args, **kw): from google.cloud.storage.blob import Blob properties = kw.pop("properties", {}) blob = Blob(*args, **kw) blob._properties.update(properties) return blob @staticmethod def _get_default_timeout(): from google.cloud.storage.constants import _DEFAULT_TIMEOUT return _DEFAULT_TIMEOUT def test_ctor_wo_encryption_key(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {"key": "value"} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertIs(blob.bucket, bucket) self.assertEqual(blob.name, BLOB_NAME) self.assertEqual(blob._properties, properties) self.assertFalse(blob._acl.loaded) self.assertIs(blob._acl.blob, blob) self.assertEqual(blob._encryption_key, None) self.assertEqual(blob.kms_key_name, None) def test_ctor_with_encoded_unicode(self): blob_name = b"wet \xe2\x9b\xb5" blob = self._make_one(blob_name, bucket=None) unicode_name = u"wet \N{sailboat}" self.assertNotIsInstance(blob.name, bytes) self.assertIsInstance(blob.name, 
six.text_type) self.assertEqual(blob.name, unicode_name) def test_ctor_w_encryption_key(self): KEY = b"01234567890123456789012345678901" # 32 bytes BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY) self.assertEqual(blob._encryption_key, KEY) self.assertEqual(blob.kms_key_name, None) def test_ctor_w_kms_key_name_and_encryption_key(self): KEY = b"01234567890123456789012345678901" # 32 bytes KMS_RESOURCE = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" "cryptoKeys/test-key" ) BLOB_NAME = "blob-name" bucket = _Bucket() with self.assertRaises(ValueError): self._make_one( BLOB_NAME, bucket=bucket, encryption_key=KEY, kms_key_name=KMS_RESOURCE ) def test_ctor_w_kms_key_name(self): KMS_RESOURCE = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" "cryptoKeys/test-key" ) BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket, kms_key_name=KMS_RESOURCE) self.assertEqual(blob._encryption_key, None) self.assertEqual(blob.kms_key_name, KMS_RESOURCE) def test_ctor_with_generation(self): BLOB_NAME = "blob-name" GENERATION = 12345 bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION) self.assertEqual(blob.generation, GENERATION) def _set_properties_helper(self, kms_key_name=None): import datetime from google.cloud._helpers import UTC from google.cloud._helpers import _RFC3339_MICROS now = datetime.datetime.utcnow().replace(tzinfo=UTC) NOW = now.strftime(_RFC3339_MICROS) BLOB_NAME = "blob-name" GENERATION = 12345 BLOB_ID = "name/{}/{}".format(BLOB_NAME, GENERATION) SELF_LINK = "http://example.com/self/" METAGENERATION = 23456 SIZE = 12345 MD5_HASH = "DEADBEEF" MEDIA_LINK = "http://example.com/media/" ENTITY = "project-owner-12345" ENTITY_ID = "23456" CRC32C = "FACE0DAC" COMPONENT_COUNT = 2 ETAG = "ETAG" resource = { "id": BLOB_ID, "selfLink": SELF_LINK, "generation": GENERATION, "metageneration": METAGENERATION, 
"contentType": "text/plain", "timeCreated": NOW, "updated": NOW, "timeDeleted": NOW, "storageClass": "NEARLINE", "timeStorageClassUpdated": NOW, "size": SIZE, "md5Hash": MD5_HASH, "mediaLink": MEDIA_LINK, "contentEncoding": "gzip", "contentDisposition": "inline", "contentLanguage": "en-US", "cacheControl": "private", "metadata": {"foo": "Foo"}, "owner": {"entity": ENTITY, "entityId": ENTITY_ID}, "crc32c": CRC32C, "componentCount": COMPONENT_COUNT, "etag": ETAG, "customTime": NOW, } if kms_key_name is not None: resource["kmsKeyName"] = kms_key_name bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) blob._set_properties(resource) self.assertEqual(blob.id, BLOB_ID) self.assertEqual(blob.self_link, SELF_LINK) self.assertEqual(blob.generation, GENERATION) self.assertEqual(blob.metageneration, METAGENERATION) self.assertEqual(blob.content_type, "text/plain") self.assertEqual(blob.time_created, now) self.assertEqual(blob.updated, now) self.assertEqual(blob.time_deleted, now) self.assertEqual(blob.storage_class, "NEARLINE") self.assertEqual(blob.size, SIZE) self.assertEqual(blob.md5_hash, MD5_HASH) self.assertEqual(blob.media_link, MEDIA_LINK) self.assertEqual(blob.content_encoding, "gzip") self.assertEqual(blob.content_disposition, "inline") self.assertEqual(blob.content_language, "en-US") self.assertEqual(blob.cache_control, "private") self.assertEqual(blob.metadata, {"foo": "Foo"}) self.assertEqual(blob.owner, {"entity": ENTITY, "entityId": ENTITY_ID}) self.assertEqual(blob.crc32c, CRC32C) self.assertEqual(blob.component_count, COMPONENT_COUNT) self.assertEqual(blob.etag, ETAG) self.assertEqual(blob.custom_time, now) if kms_key_name is not None: self.assertEqual(blob.kms_key_name, kms_key_name) else: self.assertIsNone(blob.kms_key_name) def test__set_properties_wo_kms_key_name(self): self._set_properties_helper() def test__set_properties_w_kms_key_name(self): kms_resource = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" 
"cryptoKeys/test-key" ) self._set_properties_helper(kms_key_name=kms_resource) def test_chunk_size_ctor(self): from google.cloud.storage.blob import Blob BLOB_NAME = "blob-name" BUCKET = object() chunk_size = 10 * Blob._CHUNK_SIZE_MULTIPLE blob = self._make_one(BLOB_NAME, bucket=BUCKET, chunk_size=chunk_size) self.assertEqual(blob._chunk_size, chunk_size) def test_chunk_size_getter(self): BLOB_NAME = "blob-name" BUCKET = object() blob = self._make_one(BLOB_NAME, bucket=BUCKET) self.assertIsNone(blob.chunk_size) VALUE = object() blob._chunk_size = VALUE self.assertIs(blob.chunk_size, VALUE) def test_chunk_size_setter(self): BLOB_NAME = "blob-name" BUCKET = object() blob = self._make_one(BLOB_NAME, bucket=BUCKET) self.assertIsNone(blob._chunk_size) blob._CHUNK_SIZE_MULTIPLE = 10 blob.chunk_size = 20 self.assertEqual(blob._chunk_size, 20) def test_chunk_size_setter_bad_value(self): BLOB_NAME = "blob-name" BUCKET = object() blob = self._make_one(BLOB_NAME, bucket=BUCKET) self.assertIsNone(blob._chunk_size) blob._CHUNK_SIZE_MULTIPLE = 10 with self.assertRaises(ValueError): blob.chunk_size = 11 def test_acl_property(self): from google.cloud.storage.acl import ObjectACL fake_bucket = _Bucket() blob = self._make_one(u"name", bucket=fake_bucket) acl = blob.acl self.assertIsInstance(acl, ObjectACL) self.assertIs(acl, blob._acl) def test_path_bad_bucket(self): fake_bucket = object() name = u"blob-name" blob = self._make_one(name, bucket=fake_bucket) self.assertRaises(AttributeError, getattr, blob, "path") def test_path_no_name(self): bucket = _Bucket() blob = self._make_one(u"", bucket=bucket) self.assertRaises(ValueError, getattr, blob, "path") def test_path_normal(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual(blob.path, "/b/name/o/%s" % BLOB_NAME) def test_path_w_slash_in_name(self): BLOB_NAME = "parent/child" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual(blob.path, 
"/b/name/o/parent%2Fchild") def test_path_with_non_ascii(self): blob_name = u"Caf\xe9" bucket = _Bucket() blob = self._make_one(blob_name, bucket=bucket) self.assertEqual(blob.path, "/b/name/o/Caf%C3%A9") def test_bucket_readonly_property(self): blob_name = "BLOB" bucket = _Bucket() other = _Bucket() blob = self._make_one(blob_name, bucket=bucket) with self.assertRaises(AttributeError): blob.bucket = other def test_client(self): blob_name = "BLOB" bucket = _Bucket() blob = self._make_one(blob_name, bucket=bucket) self.assertIs(blob.client, bucket.client) def test_user_project(self): user_project = "user-project-123" blob_name = "BLOB" bucket = _Bucket(user_project=user_project) blob = self._make_one(blob_name, bucket=bucket) self.assertEqual(blob.user_project, user_project) def test__encryption_headers_wo_encryption_key(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) expected = {} self.assertEqual(blob._encryption_headers(), expected) def test__encryption_headers_w_encryption_key(self): key = b"aa426195405adee2c8081bb9e7e74b19" header_key_value = "YWE0MjYxOTU0MDVhZGVlMmM4MDgxYmI5ZTdlNzRiMTk=" header_key_hash_value = "V3Kwe46nKc3xLv96+iJ707YfZfFvlObta8TQcx2gpm0=" BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=key) expected = { "X-Goog-Encryption-Algorithm": "AES256", "X-Goog-Encryption-Key": header_key_value, "X-Goog-Encryption-Key-Sha256": header_key_hash_value, } self.assertEqual(blob._encryption_headers(), expected) def test__query_params_default(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual(blob._query_params, {}) def test__query_params_w_user_project(self): user_project = "user-project-123" BLOB_NAME = "BLOB" bucket = _Bucket(user_project=user_project) blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual(blob._query_params, {"userProject": user_project}) def 
test__query_params_w_generation(self): generation = 123456 BLOB_NAME = "BLOB" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket, generation=generation) self.assertEqual(blob._query_params, {"generation": generation}) def test_public_url(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual( blob.public_url, "https://storage.googleapis.com/name/%s" % BLOB_NAME ) def test_public_url_w_slash_in_name(self): BLOB_NAME = "parent/child" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual( blob.public_url, "https://storage.googleapis.com/name/parent/child" ) def test_public_url_w_tilde_in_name(self): BLOB_NAME = "foo~bar" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual(blob.public_url, "https://storage.googleapis.com/name/foo~bar") def test_public_url_with_non_ascii(self): blob_name = u"winter \N{snowman}" bucket = _Bucket() blob = self._make_one(blob_name, bucket=bucket) expected_url = "https://storage.googleapis.com/name/winter%20%E2%98%83" self.assertEqual(blob.public_url, expected_url) def test_generate_signed_url_w_invalid_version(self): BLOB_NAME = "blob-name" EXPIRATION = "2014-10-16T20:34:37.000Z" connection = _Connection() client = _Client(connection) bucket = _Bucket(client) blob = self._make_one(BLOB_NAME, bucket=bucket) with self.assertRaises(ValueError): blob.generate_signed_url(EXPIRATION, version="nonesuch") def _generate_signed_url_helper( self, version=None, blob_name="blob-name", api_access_endpoint=None, method="GET", content_md5=None, content_type=None, response_type=None, response_disposition=None, generation=None, headers=None, query_parameters=None, credentials=None, expiration=None, encryption_key=None, access_token=None, service_account_email=None, virtual_hosted_style=False, bucket_bound_hostname=None, scheme="http", ): from six.moves.urllib import parse from google.cloud._helpers import UTC from 
google.cloud.storage._helpers import _bucket_bound_hostname_url from google.cloud.storage.blob import _API_ACCESS_ENDPOINT from google.cloud.storage.blob import _get_encryption_headers api_access_endpoint = api_access_endpoint or _API_ACCESS_ENDPOINT delta = datetime.timedelta(hours=1) if expiration is None: expiration = datetime.datetime.utcnow().replace(tzinfo=UTC) + delta connection = _Connection() client = _Client(connection) bucket = _Bucket(client) blob = self._make_one(blob_name, bucket=bucket, encryption_key=encryption_key) if version is None: effective_version = "v2" else: effective_version = version to_patch = "google.cloud.storage.blob.generate_signed_url_{}".format( effective_version ) with mock.patch(to_patch) as signer: signed_uri = blob.generate_signed_url( expiration=expiration, api_access_endpoint=api_access_endpoint, method=method, credentials=credentials, content_md5=content_md5, content_type=content_type, response_type=response_type, response_disposition=response_disposition, generation=generation, headers=headers, query_parameters=query_parameters, version=version, access_token=access_token, service_account_email=service_account_email, virtual_hosted_style=virtual_hosted_style, bucket_bound_hostname=bucket_bound_hostname, ) self.assertEqual(signed_uri, signer.return_value) if credentials is None: expected_creds = _Connection.credentials else: expected_creds = credentials encoded_name = blob_name.encode("utf-8") quoted_name = parse.quote(encoded_name, safe=b"/~") if virtual_hosted_style: expected_api_access_endpoint = "https://{}.storage.googleapis.com".format( bucket.name ) elif bucket_bound_hostname: expected_api_access_endpoint = _bucket_bound_hostname_url( bucket_bound_hostname, scheme ) else: expected_api_access_endpoint = api_access_endpoint expected_resource = "/{}/{}".format(bucket.name, quoted_name) if virtual_hosted_style or bucket_bound_hostname: expected_resource = "/{}".format(quoted_name) if encryption_key is not None: 
expected_headers = headers or {} if effective_version == "v2": expected_headers["X-Goog-Encryption-Algorithm"] = "AES256" else: expected_headers.update(_get_encryption_headers(encryption_key)) else: expected_headers = headers expected_kwargs = { "resource": expected_resource, "expiration": expiration, "api_access_endpoint": expected_api_access_endpoint, "method": method.upper(), "content_md5": content_md5, "content_type": content_type, "response_type": response_type, "response_disposition": response_disposition, "generation": generation, "headers": expected_headers, "query_parameters": query_parameters, "access_token": access_token, "service_account_email": service_account_email, } signer.assert_called_once_with(expected_creds, **expected_kwargs) def test_generate_signed_url_no_version_passed_warning(self): self._generate_signed_url_helper() def _generate_signed_url_v2_helper(self, **kw): version = "v2" self._generate_signed_url_helper(version, **kw) def test_generate_signed_url_v2_w_defaults(self): self._generate_signed_url_v2_helper() def test_generate_signed_url_v2_w_expiration(self): from google.cloud._helpers import UTC expiration = datetime.datetime.utcnow().replace(tzinfo=UTC) self._generate_signed_url_v2_helper(expiration=expiration) def test_generate_signed_url_v2_w_non_ascii_name(self): BLOB_NAME = u"\u0410\u043a\u043a\u043e\u0440\u0434\u044b.txt" self._generate_signed_url_v2_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v2_w_slash_in_name(self): BLOB_NAME = "parent/child" self._generate_signed_url_v2_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v2_w_tilde_in_name(self): BLOB_NAME = "foo~bar" self._generate_signed_url_v2_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v2_w_endpoint(self): self._generate_signed_url_v2_helper( api_access_endpoint="https://api.example.com/v1" ) def test_generate_signed_url_v2_w_method(self): self._generate_signed_url_v2_helper(method="POST") def 
test_generate_signed_url_v2_w_lowercase_method(self): self._generate_signed_url_v2_helper(method="get") def test_generate_signed_url_v2_w_content_md5(self): self._generate_signed_url_v2_helper(content_md5="FACEDACE") def test_generate_signed_url_v2_w_content_type(self): self._generate_signed_url_v2_helper(content_type="text.html") def test_generate_signed_url_v2_w_response_type(self): self._generate_signed_url_v2_helper(response_type="text.html") def test_generate_signed_url_v2_w_response_disposition(self): self._generate_signed_url_v2_helper(response_disposition="inline") def test_generate_signed_url_v2_w_generation(self): self._generate_signed_url_v2_helper(generation=12345) def test_generate_signed_url_v2_w_headers(self): self._generate_signed_url_v2_helper(headers={"x-goog-foo": "bar"}) def test_generate_signed_url_v2_w_csek(self): self._generate_signed_url_v2_helper(encryption_key=os.urandom(32)) def test_generate_signed_url_v2_w_csek_and_headers(self): self._generate_signed_url_v2_helper( encryption_key=os.urandom(32), headers={"x-goog-foo": "bar"} ) def test_generate_signed_url_v2_w_credentials(self): credentials = object() self._generate_signed_url_v2_helper(credentials=credentials) def _generate_signed_url_v4_helper(self, **kw): version = "v4" self._generate_signed_url_helper(version, **kw) def test_generate_signed_url_v4_w_defaults(self): self._generate_signed_url_v4_helper() def test_generate_signed_url_v4_w_non_ascii_name(self): BLOB_NAME = u"\u0410\u043a\u043a\u043e\u0440\u0434\u044b.txt" self._generate_signed_url_v4_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v4_w_slash_in_name(self): BLOB_NAME = "parent/child" self._generate_signed_url_v4_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v4_w_tilde_in_name(self): BLOB_NAME = "foo~bar" self._generate_signed_url_v4_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v4_w_endpoint(self): self._generate_signed_url_v4_helper( api_access_endpoint="https://api.example.com/v1" ) def 
# NOTE(review): this chunk of the file had its newlines stripped, collapsing
# many test methods onto a handful of unparseable mega-lines.  The code below
# is a token-for-token reconstruction with conventional formatting restored.
# These are unit-test methods of the Blob test case (the enclosing ``class``
# header lies before this chunk and is not visible here); re-indent them under
# that class when the surrounding file is repaired.

# NOTE(review): the leading ``def `` of this first method was lost with the
# line break preceding this chunk; it has been restored.
def test_generate_signed_url_v4_w_method(self):
    self._generate_signed_url_v4_helper(method="POST")

def test_generate_signed_url_v4_w_lowercase_method(self):
    self._generate_signed_url_v4_helper(method="get")

def test_generate_signed_url_v4_w_content_md5(self):
    self._generate_signed_url_v4_helper(content_md5="FACEDACE")

def test_generate_signed_url_v4_w_content_type(self):
    self._generate_signed_url_v4_helper(content_type="text.html")

def test_generate_signed_url_v4_w_response_type(self):
    self._generate_signed_url_v4_helper(response_type="text.html")

def test_generate_signed_url_v4_w_response_disposition(self):
    self._generate_signed_url_v4_helper(response_disposition="inline")

def test_generate_signed_url_v4_w_generation(self):
    self._generate_signed_url_v4_helper(generation=12345)

def test_generate_signed_url_v4_w_headers(self):
    self._generate_signed_url_v4_helper(headers={"x-goog-foo": "bar"})

def test_generate_signed_url_v4_w_csek(self):
    self._generate_signed_url_v4_helper(encryption_key=os.urandom(32))

def test_generate_signed_url_v4_w_csek_and_headers(self):
    self._generate_signed_url_v4_helper(
        encryption_key=os.urandom(32), headers={"x-goog-foo": "bar"}
    )

def test_generate_signed_url_v4_w_virtual_hostname(self):
    self._generate_signed_url_v4_helper(virtual_hosted_style=True)

def test_generate_signed_url_v4_w_bucket_bound_hostname_w_scheme(self):
    self._generate_signed_url_v4_helper(
        bucket_bound_hostname="http://cdn.example.com"
    )

def test_generate_signed_url_v4_w_bucket_bound_hostname_w_bare_hostname(self):
    self._generate_signed_url_v4_helper(bucket_bound_hostname="cdn.example.com")

def test_generate_signed_url_v4_w_credentials(self):
    credentials = object()
    self._generate_signed_url_v4_helper(credentials=credentials)

def test_exists_miss(self):
    # A GET for a nonexistent object must return False and pass the
    # custom timeout through to the connection.
    NONESUCH = "nonesuch"
    not_found_response = ({"status": http_client.NOT_FOUND}, b"")
    connection = _Connection(not_found_response)
    client = _Client(connection)
    bucket = _Bucket(client)
    blob = self._make_one(NONESUCH, bucket=bucket)
    self.assertFalse(blob.exists(timeout=42))
    self.assertEqual(len(connection._requested), 1)
    self.assertEqual(
        connection._requested[0],
        {
            "method": "GET",
            "path": "/b/name/o/{}".format(NONESUCH),
            "query_params": {"fields": "name"},
            "_target_object": None,
            "timeout": 42,
        },
    )

def test_exists_hit_w_user_project(self):
    BLOB_NAME = "blob-name"
    USER_PROJECT = "user-project-123"
    found_response = ({"status": http_client.OK}, b"")
    connection = _Connection(found_response)
    client = _Client(connection)
    bucket = _Bucket(client, user_project=USER_PROJECT)
    blob = self._make_one(BLOB_NAME, bucket=bucket)
    bucket._blobs[BLOB_NAME] = 1
    self.assertTrue(blob.exists())
    self.assertEqual(len(connection._requested), 1)
    self.assertEqual(
        connection._requested[0],
        {
            "method": "GET",
            "path": "/b/name/o/{}".format(BLOB_NAME),
            "query_params": {"fields": "name", "userProject": USER_PROJECT},
            "_target_object": None,
            "timeout": self._get_default_timeout(),
        },
    )

def test_exists_hit_w_generation(self):
    BLOB_NAME = "blob-name"
    GENERATION = 123456
    found_response = ({"status": http_client.OK}, b"")
    connection = _Connection(found_response)
    client = _Client(connection)
    bucket = _Bucket(client)
    blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION)
    bucket._blobs[BLOB_NAME] = 1
    self.assertTrue(blob.exists())
    self.assertEqual(len(connection._requested), 1)
    self.assertEqual(
        connection._requested[0],
        {
            "method": "GET",
            "path": "/b/name/o/{}".format(BLOB_NAME),
            "query_params": {"fields": "name", "generation": GENERATION},
            "_target_object": None,
            "timeout": self._get_default_timeout(),
        },
    )

def test_exists_w_generation_match(self):
    BLOB_NAME = "blob-name"
    GENERATION_NUMBER = 123456
    METAGENERATION_NUMBER = 6
    found_response = ({"status": http_client.OK}, b"")
    connection = _Connection(found_response)
    client = _Client(connection)
    bucket = _Bucket(client)
    blob = self._make_one(BLOB_NAME, bucket=bucket)
    bucket._blobs[BLOB_NAME] = 1
    self.assertTrue(
        blob.exists(
            if_generation_match=GENERATION_NUMBER,
            if_metageneration_match=METAGENERATION_NUMBER,
        )
    )
    self.assertEqual(len(connection._requested), 1)
    self.assertEqual(
        connection._requested[0],
        {
            "method": "GET",
            "path": "/b/name/o/{}".format(BLOB_NAME),
            "query_params": {
                "fields": "name",
                "ifGenerationMatch": GENERATION_NUMBER,
                "ifMetagenerationMatch": METAGENERATION_NUMBER,
            },
            "_target_object": None,
            "timeout": self._get_default_timeout(),
        },
    )

def test_delete_wo_generation(self):
    BLOB_NAME = "blob-name"
    not_found_response = ({"status": http_client.NOT_FOUND}, b"")
    connection = _Connection(not_found_response)
    client = _Client(connection)
    bucket = _Bucket(client)
    blob = self._make_one(BLOB_NAME, bucket=bucket)
    bucket._blobs[BLOB_NAME] = 1
    blob.delete()
    self.assertFalse(blob.exists())
    self.assertEqual(
        bucket._deleted,
        [
            (
                BLOB_NAME,
                None,
                None,
                self._get_default_timeout(),
                None,
                None,
                None,
                None,
            )
        ],
    )

def test_delete_w_generation(self):
    BLOB_NAME = "blob-name"
    GENERATION = 123456
    not_found_response = ({"status": http_client.NOT_FOUND}, b"")
    connection = _Connection(not_found_response)
    client = _Client(connection)
    bucket = _Bucket(client)
    blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION)
    bucket._blobs[BLOB_NAME] = 1
    blob.delete(timeout=42)
    self.assertFalse(blob.exists())
    self.assertEqual(
        bucket._deleted, [(BLOB_NAME, None, GENERATION, 42, None, None, None, None)]
    )

def test_delete_w_generation_match(self):
    BLOB_NAME = "blob-name"
    GENERATION = 123456
    not_found_response = ({"status": http_client.NOT_FOUND}, b"")
    connection = _Connection(not_found_response)
    client = _Client(connection)
    bucket = _Bucket(client)
    blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION)
    bucket._blobs[BLOB_NAME] = 1
    blob.delete(timeout=42, if_generation_match=GENERATION)
    self.assertFalse(blob.exists())
    self.assertEqual(
        bucket._deleted,
        [(BLOB_NAME, None, GENERATION, 42, GENERATION, None, None, None)],
    )

def test__get_transport(self):
    client = mock.Mock(spec=[u"_credentials", "_http"])
    client._http = mock.sentinel.transport
    blob = self._make_one(u"blob-name", bucket=None)
    transport = blob._get_transport(client)
    self.assertIs(transport, mock.sentinel.transport)

def test__get_download_url_with_media_link(self):
    blob_name = "something.txt"
    bucket = _Bucket(name="IRRELEVANT")
    blob = self._make_one(blob_name, bucket=bucket)
    media_link = "http://test.invalid"
    # Set the media link on the blob
    blob._properties["mediaLink"] = media_link
    client = mock.Mock(_connection=_Connection)
    client._connection.API_BASE_URL = "https://storage.googleapis.com"
    download_url = blob._get_download_url(client)
    self.assertEqual(download_url, media_link)

def test__get_download_url_with_generation_match(self):
    GENERATION_NUMBER = 6
    MEDIA_LINK = "http://test.invalid"

    blob = self._make_one("something.txt", bucket=_Bucket(name="IRRELEVANT"))
    # Set the media link on the blob
    blob._properties["mediaLink"] = MEDIA_LINK
    client = mock.Mock(_connection=_Connection)
    client._connection.API_BASE_URL = "https://storage.googleapis.com"
    download_url = blob._get_download_url(
        client, if_generation_match=GENERATION_NUMBER
    )
    self.assertEqual(
        download_url,
        "{}?ifGenerationMatch={}".format(MEDIA_LINK, GENERATION_NUMBER),
    )

def test__get_download_url_with_media_link_w_user_project(self):
    blob_name = "something.txt"
    user_project = "user-project-123"
    bucket = _Bucket(name="IRRELEVANT", user_project=user_project)
    blob = self._make_one(blob_name, bucket=bucket)
    media_link = "http://test.invalid"
    # Set the media link on the blob
    blob._properties["mediaLink"] = media_link
    client = mock.Mock(_connection=_Connection)
    client._connection.API_BASE_URL = "https://storage.googleapis.com"
    download_url = blob._get_download_url(client)
    self.assertEqual(
        download_url, "{}?userProject={}".format(media_link, user_project)
    )

def test__get_download_url_on_the_fly(self):
    blob_name = "bzzz-fly.txt"
    bucket = _Bucket(name="buhkit")
    blob = self._make_one(blob_name, bucket=bucket)
    self.assertIsNone(blob.media_link)
    client = mock.Mock(_connection=_Connection)
    client._connection.API_BASE_URL = "https://storage.googleapis.com"
    download_url = blob._get_download_url(client)
    expected_url = (
        "https://storage.googleapis.com/download/storage/v1/b/"
        "buhkit/o/bzzz-fly.txt?alt=media"
    )
    self.assertEqual(download_url, expected_url)

def test__get_download_url_on_the_fly_with_generation(self):
    blob_name = "pretend.txt"
    bucket = _Bucket(name="fictional")
    blob = self._make_one(blob_name, bucket=bucket)
    generation = 1493058489532987
    # Set the media link on the blob
    blob._properties["generation"] = str(generation)
    self.assertIsNone(blob.media_link)
    client = mock.Mock(_connection=_Connection)
    client._connection.API_BASE_URL = "https://storage.googleapis.com"
    download_url = blob._get_download_url(client)
    expected_url = (
        "https://storage.googleapis.com/download/storage/v1/b/"
        "fictional/o/pretend.txt?alt=media&generation=1493058489532987"
    )
    self.assertEqual(download_url, expected_url)

def test__get_download_url_on_the_fly_with_user_project(self):
    blob_name = "pretend.txt"
    user_project = "user-project-123"
    bucket = _Bucket(name="fictional", user_project=user_project)
    blob = self._make_one(blob_name, bucket=bucket)
    self.assertIsNone(blob.media_link)
    client = mock.Mock(_connection=_Connection)
    client._connection.API_BASE_URL = "https://storage.googleapis.com"
    download_url = blob._get_download_url(client)
    expected_url = (
        "https://storage.googleapis.com/download/storage/v1/b/"
        "fictional/o/pretend.txt?alt=media&userProject={}".format(user_project)
    )
    self.assertEqual(download_url, expected_url)

def test__get_download_url_on_the_fly_with_kms_key_name(self):
    kms_resource = (
        "projects/test-project-123/"
        "locations/us/"
        "keyRings/test-ring/"
        "cryptoKeys/test-key"
    )
    blob_name = "bzzz-fly.txt"
    bucket = _Bucket(name="buhkit")
    blob = self._make_one(blob_name, bucket=bucket, kms_key_name=kms_resource)
    self.assertIsNone(blob.media_link)
    client = mock.Mock(_connection=_Connection)
    client._connection.API_BASE_URL = "https://storage.googleapis.com"
    download_url = blob._get_download_url(client)
    expected_url = (
        "https://storage.googleapis.com/download/storage/v1/b/"
        "buhkit/o/bzzz-fly.txt?alt=media"
    )
    self.assertEqual(download_url, expected_url)

@staticmethod
def _mock_requests_response(status_code, headers, content=b""):
    # Build a real ``requests.Response`` carrying the given status,
    # headers and body, with a prepared request attached.
    import requests

    response = requests.Response()
    response.status_code = status_code
    response.headers.update(headers)
    response.raw = None
    response._content = content
    response.request = requests.Request("POST", "http://example.com").prepare()
    return response

def _do_download_helper_wo_chunks(self, w_range, raw_download, timeout=None):
    # Exercise ``Blob._do_download`` with chunking disabled, optionally
    # with a byte range and/or custom timeout.
    blob_name = "blob-name"
    client = mock.Mock()
    bucket = _Bucket(client)
    blob = self._make_one(blob_name, bucket=bucket)
    self.assertIsNone(blob.chunk_size)

    transport = object()
    file_obj = io.BytesIO()
    download_url = "http://test.invalid"
    headers = {}

    if raw_download:
        patch = mock.patch("google.cloud.storage.blob.RawDownload")
    else:
        patch = mock.patch("google.cloud.storage.blob.Download")

    if timeout is None:
        expected_timeout = self._get_default_timeout()
        timeout_kwarg = {}
    else:
        expected_timeout = timeout
        timeout_kwarg = {"timeout": timeout}

    with patch as patched:
        if w_range:
            blob._do_download(
                transport,
                file_obj,
                download_url,
                headers,
                start=1,
                end=3,
                raw_download=raw_download,
                **timeout_kwarg
            )
        else:
            blob._do_download(
                transport,
                file_obj,
                download_url,
                headers,
                raw_download=raw_download,
                **timeout_kwarg
            )

    if w_range:
        patched.assert_called_once_with(
            download_url,
            stream=file_obj,
            headers=headers,
            start=1,
            end=3,
            checksum="md5",
        )
    else:
        patched.assert_called_once_with(
            download_url,
            stream=file_obj,
            headers=headers,
            start=None,
            end=None,
            checksum="md5",
        )
    patched.return_value.consume.assert_called_once_with(
        transport, timeout=expected_timeout
    )

def test__do_download_wo_chunks_wo_range_wo_raw(self):
    self._do_download_helper_wo_chunks(w_range=False, raw_download=False)

def test__do_download_wo_chunks_w_range_wo_raw(self):
    self._do_download_helper_wo_chunks(w_range=True, raw_download=False)

def test__do_download_wo_chunks_wo_range_w_raw(self):
    self._do_download_helper_wo_chunks(w_range=False, raw_download=True)

def test__do_download_wo_chunks_w_range_w_raw(self):
    self._do_download_helper_wo_chunks(w_range=True, raw_download=True)

def test__do_download_wo_chunks_w_custom_timeout(self):
    self._do_download_helper_wo_chunks(
        w_range=False, raw_download=False, timeout=9.58
    )

def _do_download_helper_w_chunks(
    self, w_range, raw_download, timeout=None, checksum="md5"
):
    # Exercise ``Blob._do_download`` in chunked mode; the fake download
    # finishes after a single ``consume_next_chunk`` call.
    blob_name = "blob-name"
    client = mock.Mock(_credentials=_make_credentials(), spec=["_credentials"])
    bucket = _Bucket(client)
    blob = self._make_one(blob_name, bucket=bucket)
    blob._CHUNK_SIZE_MULTIPLE = 1
    chunk_size = blob.chunk_size = 3

    transport = object()
    file_obj = io.BytesIO()
    download_url = "http://test.invalid"
    headers = {}

    download = mock.Mock(finished=False, spec=["finished", "consume_next_chunk"])

    def side_effect(*args, **kwargs):
        download.finished = True

    download.consume_next_chunk.side_effect = side_effect

    if raw_download:
        patch = mock.patch("google.cloud.storage.blob.RawChunkedDownload")
    else:
        patch = mock.patch("google.cloud.storage.blob.ChunkedDownload")

    if timeout is None:
        expected_timeout = self._get_default_timeout()
        timeout_kwarg = {}
    else:
        expected_timeout = timeout
        timeout_kwarg = {"timeout": timeout}

    with patch as patched:
        patched.return_value = download
        if w_range:
            blob._do_download(
                transport,
                file_obj,
                download_url,
                headers,
                start=1,
                end=3,
                raw_download=raw_download,
                checksum=checksum,
                **timeout_kwarg
            )
        else:
            blob._do_download(
                transport,
                file_obj,
                download_url,
                headers,
                raw_download=raw_download,
                checksum=checksum,
                **timeout_kwarg
            )

    if w_range:
        patched.assert_called_once_with(
            download_url, chunk_size, file_obj, headers=headers, start=1, end=3
        )
    else:
        patched.assert_called_once_with(
            download_url, chunk_size, file_obj, headers=headers, start=0, end=None
        )
    download.consume_next_chunk.assert_called_once_with(
        transport, timeout=expected_timeout
    )

def test__do_download_w_chunks_wo_range_wo_raw(self):
    self._do_download_helper_w_chunks(w_range=False, raw_download=False)

def test__do_download_w_chunks_w_range_wo_raw(self):
    self._do_download_helper_w_chunks(w_range=True, raw_download=False)

def test__do_download_w_chunks_wo_range_w_raw(self):
    self._do_download_helper_w_chunks(w_range=False, raw_download=True)

def test__do_download_w_chunks_w_range_w_raw(self):
    self._do_download_helper_w_chunks(w_range=True, raw_download=True)

def test__do_download_w_chunks_w_custom_timeout(self):
    self._do_download_helper_w_chunks(w_range=True, raw_download=True, timeout=9.58)

def test__do_download_w_chunks_w_checksum(self):
    from google.cloud.storage import blob as blob_module

    with mock.patch("logging.info") as patch:
        self._do_download_helper_w_chunks(
            w_range=False, raw_download=False, checksum="md5"
        )
    patch.assert_called_once_with(
        blob_module._CHUNKED_DOWNLOAD_CHECKSUM_MESSAGE.format("md5")
    )

def test__do_download_w_chunks_wo_checksum(self):
    with mock.patch("logging.info") as patch:
        self._do_download_helper_w_chunks(
            w_range=False, raw_download=False, checksum=None
        )
    patch.assert_not_called()

def test_download_to_file_with_failure(self):
    import requests
    from google.resumable_media import InvalidResponse
    from google.cloud import exceptions

    raw_response = requests.Response()
    raw_response.status_code = http_client.NOT_FOUND
    raw_request = requests.Request("GET", "http://example.com")
    raw_response.request = raw_request.prepare()
    grmp_response = InvalidResponse(raw_response)

    blob_name = "blob-name"
    media_link = "http://test.invalid"
    client = mock.Mock(spec=[u"_http"])
    bucket = _Bucket(client)
    blob = self._make_one(blob_name, bucket=bucket)
    blob._properties["mediaLink"] = media_link
    blob._do_download = mock.Mock()
    blob._do_download.side_effect = grmp_response

    file_obj = io.BytesIO()
    with self.assertRaises(exceptions.NotFound):
        blob.download_to_file(file_obj)

    self.assertEqual(file_obj.tell(), 0)

    headers = {"accept-encoding": "gzip"}
    blob._do_download.assert_called_once_with(
        client._http,
        file_obj,
        media_link,
        headers,
        None,
        None,
        False,
        timeout=self._get_default_timeout(),
        checksum="md5",
    )

def test_download_to_file_wo_media_link(self):
    blob_name = "blob-name"
    client = mock.Mock(_connection=_Connection, spec=[u"_http"])
    client._connection.API_BASE_URL = "https://storage.googleapis.com"
    bucket = _Bucket(client)
    blob = self._make_one(blob_name, bucket=bucket)
    blob._do_download = mock.Mock()
    file_obj = io.BytesIO()

    blob.download_to_file(file_obj)

    # Make sure the media link is still unknown.
    self.assertIsNone(blob.media_link)

    expected_url = (
        "https://storage.googleapis.com/download/storage/v1/b/"
        "name/o/blob-name?alt=media"
    )
    headers = {"accept-encoding": "gzip"}
    blob._do_download.assert_called_once_with(
        client._http,
        file_obj,
        expected_url,
        headers,
        None,
        None,
        False,
        timeout=self._get_default_timeout(),
        checksum="md5",
    )

def test_download_to_file_w_generation_match(self):
    GENERATION_NUMBER = 6
    HEADERS = {"accept-encoding": "gzip"}
    EXPECTED_URL = (
        "https://storage.googleapis.com/download/storage/v1/b/"
        "name/o/blob-name?alt=media&ifGenerationNotMatch={}".format(
            GENERATION_NUMBER
        )
    )

    client = mock.Mock(_connection=_Connection, spec=[u"_http"])
    client._connection.API_BASE_URL = "https://storage.googleapis.com"
    blob = self._make_one("blob-name", bucket=_Bucket(client))
    blob._do_download = mock.Mock()
    file_obj = io.BytesIO()

    blob.download_to_file(file_obj, if_generation_not_match=GENERATION_NUMBER)

    blob._do_download.assert_called_once_with(
        client._http,
        file_obj,
        EXPECTED_URL,
        HEADERS,
        None,
        None,
        False,
        timeout=self._get_default_timeout(),
        checksum="md5",
    )

def _download_to_file_helper(self, use_chunks, raw_download, timeout=None):
    blob_name = "blob-name"
    client = mock.Mock(spec=[u"_http"])
    bucket = _Bucket(client)
    media_link = "http://example.com/media/"
    properties = {"mediaLink": media_link}
    blob = self._make_one(blob_name, bucket=bucket, properties=properties)
    if use_chunks:
        blob._CHUNK_SIZE_MULTIPLE = 1
        blob.chunk_size = 3
    blob._do_download = mock.Mock()

    if timeout is None:
        expected_timeout = self._get_default_timeout()
        timeout_kwarg = {}
    else:
        expected_timeout = timeout
        timeout_kwarg = {"timeout": timeout}

    file_obj = io.BytesIO()
    if raw_download:
        blob.download_to_file(file_obj, raw_download=True, **timeout_kwarg)
    else:
        blob.download_to_file(file_obj, **timeout_kwarg)

    headers = {"accept-encoding": "gzip"}
    blob._do_download.assert_called_once_with(
        client._http,
        file_obj,
        media_link,
        headers,
        None,
        None,
        raw_download,
        timeout=expected_timeout,
        checksum="md5",
    )

def test_download_to_file_wo_chunks_wo_raw(self):
    self._download_to_file_helper(use_chunks=False, raw_download=False)

def test_download_to_file_w_chunks_wo_raw(self):
    self._download_to_file_helper(use_chunks=True, raw_download=False)

def test_download_to_file_wo_chunks_w_raw(self):
    self._download_to_file_helper(use_chunks=False, raw_download=True)

def test_download_to_file_w_chunks_w_raw(self):
    self._download_to_file_helper(use_chunks=True, raw_download=True)

def test_download_to_file_w_custom_timeout(self):
    self._download_to_file_helper(
        use_chunks=False, raw_download=False, timeout=9.58
    )

def _download_to_filename_helper(self, updated, raw_download, timeout=None):
    import os
    from google.cloud.storage._helpers import _convert_to_timestamp
    from google.cloud._testing import _NamedTemporaryFile

    blob_name = "blob-name"
    client = mock.Mock(spec=["_http"])
    bucket = _Bucket(client)
    media_link = "http://example.com/media/"
    properties = {"mediaLink": media_link}
    if updated is not None:
        properties["updated"] = updated

    blob = self._make_one(blob_name, bucket=bucket, properties=properties)
    blob._do_download = mock.Mock()

    with _NamedTemporaryFile() as temp:
        if timeout is None:
            blob.download_to_filename(temp.name, raw_download=raw_download)
        else:
            blob.download_to_filename(
                temp.name, raw_download=raw_download, timeout=timeout,
            )

        if updated is None:
            self.assertIsNone(blob.updated)
        else:
            mtime = os.path.getmtime(temp.name)
            if six.PY2:
                updated_time = _convert_to_timestamp(blob.updated)
            else:
                updated_time = blob.updated.timestamp()
            self.assertEqual(mtime, updated_time)

    expected_timeout = self._get_default_timeout() if timeout is None else timeout
    headers = {"accept-encoding": "gzip"}
    blob._do_download.assert_called_once_with(
        client._http,
        mock.ANY,
        media_link,
        headers,
        None,
        None,
        raw_download,
        timeout=expected_timeout,
        checksum="md5",
    )
    stream = blob._do_download.mock_calls[0].args[1]
    self.assertEqual(stream.name, temp.name)

def test_download_to_filename_w_generation_match(self):
    from google.cloud._testing import _NamedTemporaryFile

    GENERATION_NUMBER = 6
    MEDIA_LINK = "http://example.com/media/"
    EXPECTED_LINK = MEDIA_LINK + "?ifGenerationMatch={}".format(GENERATION_NUMBER)
    HEADERS = {"accept-encoding": "gzip"}

    client = mock.Mock(spec=["_http"])
    blob = self._make_one(
        "blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
    )
    blob._do_download = mock.Mock()

    with _NamedTemporaryFile() as temp:
        blob.download_to_filename(temp.name, if_generation_match=GENERATION_NUMBER)

    blob._do_download.assert_called_once_with(
        client._http,
        mock.ANY,
        EXPECTED_LINK,
        HEADERS,
        None,
        None,
        False,
        timeout=self._get_default_timeout(),
        checksum="md5",
    )

def test_download_to_filename_w_updated_wo_raw(self):
    updated = "2014-12-06T13:13:50.690Z"
    self._download_to_filename_helper(updated=updated, raw_download=False)

def test_download_to_filename_wo_updated_wo_raw(self):
    self._download_to_filename_helper(updated=None, raw_download=False)

def test_download_to_filename_w_updated_w_raw(self):
    updated = "2014-12-06T13:13:50.690Z"
    self._download_to_filename_helper(updated=updated, raw_download=True)

def test_download_to_filename_wo_updated_w_raw(self):
    self._download_to_filename_helper(updated=None, raw_download=True)

def test_download_to_filename_w_custom_timeout(self):
    self._download_to_filename_helper(
        updated=None, raw_download=False, timeout=9.58
    )

def test_download_to_filename_corrupted(self):
    from google.resumable_media import DataCorruption

    blob_name = "blob-name"
    client = mock.Mock(spec=["_http"])
    bucket = _Bucket(client)
    media_link = "http://example.com/media/"
    properties = {"mediaLink": media_link}
    blob = self._make_one(blob_name, bucket=bucket, properties=properties)
    blob._do_download = mock.Mock()
    blob._do_download.side_effect = DataCorruption("testing")

    # Try to download into a temporary file (don't use
    # `_NamedTemporaryFile` it will try to remove after the file is
    # already removed)
    filehandle, filename = tempfile.mkstemp()
    os.close(filehandle)
    self.assertTrue(os.path.exists(filename))

    with self.assertRaises(DataCorruption):
        blob.download_to_filename(filename)

    # Make sure the file was cleaned up.
    self.assertFalse(os.path.exists(filename))

    headers = {"accept-encoding": "gzip"}
    blob._do_download.assert_called_once_with(
        client._http,
        mock.ANY,
        media_link,
        headers,
        None,
        None,
        False,
        timeout=self._get_default_timeout(),
        checksum="md5",
    )
    stream = blob._do_download.mock_calls[0].args[1]
    self.assertEqual(stream.name, filename)

def test_download_to_filename_w_key(self):
    from google.cloud._testing import _NamedTemporaryFile
    from google.cloud.storage.blob import _get_encryption_headers

    blob_name = "blob-name"
    # Create a fake client/bucket and use them in the Blob() constructor.
    client = mock.Mock(spec=["_http"])
    bucket = _Bucket(client)
    media_link = "http://example.com/media/"
    properties = {"mediaLink": media_link}
    key = b"aa426195405adee2c8081bb9e7e74b19"
    blob = self._make_one(
        blob_name, bucket=bucket, properties=properties, encryption_key=key
    )
    blob._do_download = mock.Mock()

    with _NamedTemporaryFile() as temp:
        blob.download_to_filename(temp.name)

    headers = {"accept-encoding": "gzip"}
    headers.update(_get_encryption_headers(key))
    blob._do_download.assert_called_once_with(
        client._http,
        mock.ANY,
        media_link,
        headers,
        None,
        None,
        False,
        timeout=self._get_default_timeout(),
        checksum="md5",
    )
    stream = blob._do_download.mock_calls[0].args[1]
    self.assertEqual(stream.name, temp.name)

def _download_as_bytes_helper(self, raw_download, timeout=None):
    blob_name = "blob-name"
    client = mock.Mock(spec=["_http"])
    bucket = _Bucket(client)
    media_link = "http://example.com/media/"
    properties = {"mediaLink": media_link}
    blob = self._make_one(blob_name, bucket=bucket, properties=properties)
    blob._do_download = mock.Mock()

    if timeout is None:
        expected_timeout = self._get_default_timeout()
        fetched = blob.download_as_bytes(raw_download=raw_download)
    else:
        expected_timeout = timeout
        fetched = blob.download_as_bytes(raw_download=raw_download, timeout=timeout)
    self.assertEqual(fetched, b"")

    headers = {"accept-encoding": "gzip"}
    blob._do_download.assert_called_once_with(
        client._http,
        mock.ANY,
        media_link,
        headers,
        None,
        None,
        raw_download,
        timeout=expected_timeout,
        checksum="md5",
    )
    stream = blob._do_download.mock_calls[0].args[1]
    self.assertIsInstance(stream, io.BytesIO)

def test_download_as_string_w_response_headers(self):
    blob_name = "blob-name"
    client = mock.Mock(spec=["_http"])
    bucket = _Bucket(client)
    media_link = "http://example.com/media/"
    properties = {"mediaLink": media_link}
    blob = self._make_one(blob_name, bucket=bucket, properties=properties)

    response = self._mock_requests_response(
        http_client.OK,
        headers={
            "Content-Type": "application/json",
            "Content-Language": "ko-kr",
            "Cache-Control": "max-age=1337;public",
            "Content-Encoding": "gzip",
            "X-Goog-Storage-Class": "STANDARD",
            "X-Goog-Hash": "crc32c=4gcgLQ==,md5=CS9tHYTtyFntzj7B9nkkJQ==",
        },
        # { "x": 5 } gzipped
        content=b"\x1f\x8b\x08\x00\xcfo\x17_\x02\xff\xabVP\xaaP\xb2R0U\xa8\x05\x00\xa1\xcaQ\x93\n\x00\x00\x00",
    )
    blob._extract_headers_from_download(response)

    self.assertEqual(blob.content_type, "application/json")
    self.assertEqual(blob.content_language, "ko-kr")
    self.assertEqual(blob.content_encoding, "gzip")
    self.assertEqual(blob.cache_control, "max-age=1337;public")
    self.assertEqual(blob.storage_class, "STANDARD")
    self.assertEqual(blob.md5_hash, "CS9tHYTtyFntzj7B9nkkJQ==")
    self.assertEqual(blob.crc32c, "4gcgLQ==")

    response = self._mock_requests_response(
        http_client.OK,
        headers={
            "Content-Type": "application/octet-stream",
            "Content-Language": "en-US",
            "Cache-Control": "max-age=1337;public",
            "Content-Encoding": "gzip",
            "X-Goog-Storage-Class": "STANDARD",
            "X-Goog-Hash": "crc32c=4/c+LQ==,md5=CS9tHYTt/+ntzj7B9nkkJQ==",
        },
        content=b"",
    )
    blob._extract_headers_from_download(response)
    self.assertEqual(blob.content_type, "application/octet-stream")
    self.assertEqual(blob.content_language, "en-US")
    self.assertEqual(blob.md5_hash, "CS9tHYTt/+ntzj7B9nkkJQ==")
    self.assertEqual(blob.crc32c, "4/c+LQ==")

def test_download_as_string_w_hash_response_header_none(self):
    blob_name = "blob-name"
    md5_hash = "CS9tHYTtyFntzj7B9nkkJQ=="
    crc32c = "4gcgLQ=="
    client = mock.Mock(spec=["_http"])
    bucket = _Bucket(client)
    media_link = "http://example.com/media/"
    properties = {
        "mediaLink": media_link,
        "md5Hash": md5_hash,
        "crc32c": crc32c,
    }
    blob = self._make_one(blob_name, bucket=bucket, properties=properties)

    response = self._mock_requests_response(
        http_client.OK,
        headers={"X-Goog-Hash": ""},
        # { "x": 5 } gzipped
        content=b"\x1f\x8b\x08\x00\xcfo\x17_\x02\xff\xabVP\xaaP\xb2R0U\xa8\x05\x00\xa1\xcaQ\x93\n\x00\x00\x00",
    )
    blob._extract_headers_from_download(response)

    self.assertEqual(blob.md5_hash, md5_hash)
    self.assertEqual(blob.crc32c, crc32c)

def test_download_as_bytes_w_generation_match(self):
    GENERATION_NUMBER = 6
    MEDIA_LINK = "http://example.com/media/"

    client = mock.Mock(spec=["_http"])
    blob = self._make_one(
        "blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
    )
    blob.download_to_file = mock.Mock()

    fetched = blob.download_as_bytes(if_generation_match=GENERATION_NUMBER)
    self.assertEqual(fetched, b"")

    blob.download_to_file.assert_called_once_with(
        mock.ANY,
        client=None,
        start=None,
        end=None,
        raw_download=False,
        if_generation_match=GENERATION_NUMBER,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=self._get_default_timeout(),
        checksum="md5",
    )

def test_download_as_bytes_wo_raw(self):
    self._download_as_bytes_helper(raw_download=False)

def test_download_as_bytes_w_raw(self):
    self._download_as_bytes_helper(raw_download=True)

def test_download_as_byte_w_custom_timeout(self):
    self._download_as_bytes_helper(raw_download=False, timeout=9.58)

def _download_as_text_helper(self, raw_download, encoding=None, timeout=None):
    blob_name = "blob-name"
    client = mock.Mock(spec=["_http"])
    bucket = _Bucket(client)
    media_link = "http://example.com/media/"
    properties = {"mediaLink": media_link}
    if encoding:
        properties["contentEncoding"] = encoding
    blob = self._make_one(blob_name, bucket=bucket, properties=properties)
    blob._do_download = mock.Mock()

    if timeout is None:
        expected_timeout = self._get_default_timeout()
        fetched = blob.download_as_text(raw_download=raw_download)
    else:
        expected_timeout = timeout
        fetched = blob.download_as_text(raw_download=raw_download, timeout=timeout)
    self.assertEqual(fetched, "")

    headers = {"accept-encoding": "gzip"}
    blob._do_download.assert_called_once_with(
        client._http,
        mock.ANY,
        media_link,
        headers,
        None,
        None,
        raw_download,
        timeout=expected_timeout,
        checksum="md5",
    )
    stream = blob._do_download.mock_calls[0].args[1]
    self.assertIsInstance(stream, io.BytesIO)

def test_download_as_text_w_generation_match(self):
    GENERATION_NUMBER = 6
    MEDIA_LINK = "http://example.com/media/"

    client = mock.Mock(spec=["_http"])
    blob = self._make_one(
        "blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
    )
    blob.download_to_file = mock.Mock()

    fetched = blob.download_as_text(if_generation_match=GENERATION_NUMBER)
    self.assertEqual(fetched, "")

    blob.download_to_file.assert_called_once_with(
        mock.ANY,
        client=None,
        start=None,
        end=None,
        raw_download=False,
        if_generation_match=GENERATION_NUMBER,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=self._get_default_timeout(),
        checksum="md5",
    )

def test_download_as_text_wo_raw(self):
    self._download_as_text_helper(raw_download=False)

def test_download_as_text_w_raw(self):
    self._download_as_text_helper(raw_download=True)

def test_download_as_text_w_custom_timeout(self):
    self._download_as_text_helper(raw_download=False, timeout=9.58)

def test_download_as_text_w_encoding(self):
    self._download_as_text_helper(raw_download=False, encoding="utf-8")

@mock.patch("warnings.warn")
def test_download_as_string(self, mock_warn):
    MEDIA_LINK = "http://example.com/media/"

    client = mock.Mock(spec=["_http"])
    blob = self._make_one(
        "blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
    )
    blob.download_to_file = mock.Mock()

    fetched = blob.download_as_string()
    self.assertEqual(fetched, b"")

    blob.download_to_file.assert_called_once_with(
        mock.ANY,
        client=None,
        start=None,
        end=None,
        raw_download=False,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=self._get_default_timeout(),
        checksum="md5",
    )
    mock_warn.assert_called_with(
        "Blob.download_as_string() is deprecated and will be removed in future."
        "Use Blob.download_as_bytes() instead.",
        PendingDeprecationWarning,
        stacklevel=1,
    )

def test__get_content_type_explicit(self):
    blob = self._make_one(u"blob-name", bucket=None)
    content_type = u"text/plain"
    return_value = blob._get_content_type(content_type)
    self.assertEqual(return_value, content_type)

def test__get_content_type_from_blob(self):
    blob = self._make_one(u"blob-name", bucket=None)
    blob.content_type = u"video/mp4"
    return_value = blob._get_content_type(None)
    self.assertEqual(return_value, blob.content_type)

def test__get_content_type_from_filename(self):
    blob = self._make_one(u"blob-name", bucket=None)
    return_value = blob._get_content_type(None, filename="archive.tar")
    self.assertEqual(return_value, "application/x-tar")

def test__get_content_type_default(self):
    blob = self._make_one(u"blob-name", bucket=None)
    return_value = blob._get_content_type(None)
    self.assertEqual(return_value, u"application/octet-stream")

def test__get_writable_metadata_no_changes(self):
    name = u"blob-name"
    blob = self._make_one(name, bucket=None)
    object_metadata = blob._get_writable_metadata()
    expected = {"name": name}
    self.assertEqual(object_metadata, expected)

def test__get_writable_metadata_with_changes(self):
    name = u"blob-name"
    blob = self._make_one(name, bucket=None)
    blob.storage_class = "NEARLINE"
    blob.cache_control = "max-age=3600"
    blob.metadata = {"color": "red"}
    object_metadata = blob._get_writable_metadata()
    expected = {
        "cacheControl": blob.cache_control,
        "metadata": blob.metadata,
        "name": name,
        "storageClass": blob.storage_class,
    }
    self.assertEqual(object_metadata, expected)

def test__get_writable_metadata_unwritable_field(self):
    name = u"blob-name"
    properties = {"updated": "2016-10-16T18:18:18.181Z"}
    blob = self._make_one(name, bucket=None, properties=properties)
    # Fake that `updated` is in changes.
    blob._changes.add("updated")
    object_metadata = blob._get_writable_metadata()
    expected = {"name": name}
    self.assertEqual(object_metadata, expected)

def test__set_metadata_to_none(self):
    name = u"blob-name"
    blob = self._make_one(name, bucket=None)
    blob.storage_class = "NEARLINE"
    blob.cache_control = "max-age=3600"

    with mock.patch("google.cloud.storage.blob.Blob._patch_property") as patch_prop:
        blob.metadata = None
        patch_prop.assert_called_once_with("metadata", None)

def test__get_upload_arguments(self):
    name = u"blob-name"
    key = b"[pXw@,p@@AfBfrR3x-2b2SCHR,.?YwRO"
    blob = self._make_one(name, bucket=None, encryption_key=key)
    blob.content_disposition = "inline"

    content_type = u"image/jpeg"
    info = blob._get_upload_arguments(content_type)

    headers, object_metadata, new_content_type = info
    header_key_value = "W3BYd0AscEBAQWZCZnJSM3gtMmIyU0NIUiwuP1l3Uk8="
    header_key_hash_value = "G0++dxF4q5rG4o9kE8gvEKn15RH6wLm0wXV1MgAlXOg="
    expected_headers = {
        "X-Goog-Encryption-Algorithm": "AES256",
        "X-Goog-Encryption-Key": header_key_value,
        "X-Goog-Encryption-Key-Sha256": header_key_hash_value,
    }
    self.assertEqual(headers, expected_headers)
    expected_metadata = {
        "contentDisposition": blob.content_disposition,
        "name": name,
    }
    self.assertEqual(object_metadata, expected_metadata)
    self.assertEqual(new_content_type, content_type)

def _mock_transport(self, status_code, headers, content=b""):
    # A transport whose single ``request`` call yields a canned response.
    fake_transport = mock.Mock(spec=["request"])
    fake_response = self._mock_requests_response(
        status_code, headers, content=content
    )
    fake_transport.request.return_value = fake_response
    return fake_transport

def _do_multipart_success(
    self,
    mock_get_boundary,
    size=None,
    num_retries=None,
    user_project=None,
    predefined_acl=None,
    if_generation_match=None,
    if_generation_not_match=None,
    if_metageneration_match=None,
    if_metageneration_not_match=None,
    kms_key_name=None,
    timeout=None,
):
    # Drive ``Blob._do_multipart_upload`` and verify the URL query string
    # and the multipart payload sent over the mocked transport.
    from six.moves.urllib.parse import urlencode

    bucket = _Bucket(name="w00t", user_project=user_project)
    blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name)
    self.assertIsNone(blob.chunk_size)

    # Create mocks to be checked for doing transport.
    transport = self._mock_transport(http_client.OK, {})

    # Create some mock arguments.
    client = mock.Mock(_http=transport, _connection=_Connection, spec=["_http"])
    client._connection.API_BASE_URL = "https://storage.googleapis.com"
    data = b"data here hear hier"
    stream = io.BytesIO(data)
    content_type = u"application/xml"

    if timeout is None:
        expected_timeout = self._get_default_timeout()
        timeout_kwarg = {}
    else:
        expected_timeout = timeout
        timeout_kwarg = {"timeout": timeout}

    response = blob._do_multipart_upload(
        client,
        stream,
        content_type,
        size,
        num_retries,
        predefined_acl,
        if_generation_match,
        if_generation_not_match,
        if_metageneration_match,
        if_metageneration_not_match,
        **timeout_kwarg
    )

    # Check the mocks and the returned value.
    self.assertIs(response, transport.request.return_value)
    if size is None:
        data_read = data
        self.assertEqual(stream.tell(), len(data))
    else:
        data_read = data[:size]
        self.assertEqual(stream.tell(), size)

    mock_get_boundary.assert_called_once_with()

    upload_url = (
        "https://storage.googleapis.com/upload/storage/v1" + bucket.path + "/o"
    )

    qs_params = [("uploadType", "multipart")]
    if user_project is not None:
        qs_params.append(("userProject", user_project))
    if predefined_acl is not None:
        qs_params.append(("predefinedAcl", predefined_acl))
    if kms_key_name is not None and "cryptoKeyVersions" not in kms_key_name:
        qs_params.append(("kmsKeyName", kms_key_name))
    if if_generation_match is not None:
        qs_params.append(("ifGenerationMatch", if_generation_match))
    if if_generation_not_match is not None:
        qs_params.append(("ifGenerationNotMatch", if_generation_not_match))
    if if_metageneration_match is not None:
        qs_params.append(("ifMetagenerationMatch", if_metageneration_match))
    if if_metageneration_not_match is not None:
        # NOTE(review): the capital "G" in "ifMetaGenerationNotMatch" differs
        # from the other params' casing — presumably mirroring the library's
        # own construction; verify against google.cloud.storage.blob.
        qs_params.append(("ifMetaGenerationNotMatch", if_metageneration_not_match))
    upload_url += "?" + urlencode(qs_params)

    payload = (
        b"--==0==\r\n"
        + b"content-type: application/json; charset=UTF-8\r\n\r\n"
        + b'{"name": "blob-name"}\r\n'
        + b"--==0==\r\n"
        + b"content-type: application/xml\r\n\r\n"
        + data_read
        + b"\r\n--==0==--"
    )
    headers = {"content-type": b'multipart/related; boundary="==0=="'}
    transport.request.assert_called_once_with(
        "POST", upload_url, data=payload, headers=headers, timeout=expected_timeout
    )

@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_no_size(self, mock_get_boundary):
    self._do_multipart_success(mock_get_boundary, predefined_acl="private")

@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_size(self, mock_get_boundary):
    self._do_multipart_success(mock_get_boundary, size=10)

@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_user_project(self, mock_get_boundary):
    user_project = "user-project-123"
    self._do_multipart_success(mock_get_boundary, user_project=user_project)

@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_kms(self, mock_get_boundary):
    kms_resource = (
        "projects/test-project-123/"
        "locations/us/"
        "keyRings/test-ring/"
        "cryptoKeys/test-key"
    )
    self._do_multipart_success(mock_get_boundary, kms_key_name=kms_resource)

@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_kms_with_version(self, mock_get_boundary):
    # NOTE(review): no "/" between "test-key" and "cryptoKeyVersions/1" in the
    # original — preserved byte-for-byte (the test only needs the substring
    # "cryptoKeyVersions" to be present).
    kms_resource = (
        "projects/test-project-123/"
        "locations/us/"
        "keyRings/test-ring/"
        "cryptoKeys/test-key"
        "cryptoKeyVersions/1"
    )
    self._do_multipart_success(mock_get_boundary, kms_key_name=kms_resource)

@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_retry(self, mock_get_boundary):
    self._do_multipart_success(mock_get_boundary, num_retries=8)

@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_generation_match(self, mock_get_boundary):
    self._do_multipart_success(
        mock_get_boundary, if_generation_match=4, if_metageneration_match=4
    )

@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_custom_timeout(self, mock_get_boundary):
    self._do_multipart_success(mock_get_boundary, timeout=9.58)

@mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
def test__do_multipart_upload_with_generation_not_match(self, mock_get_boundary):
    self._do_multipart_success(
        mock_get_boundary, if_generation_not_match=4, if_metageneration_not_match=4
    )

def test__do_multipart_upload_bad_size(self):
    blob = self._make_one(u"blob-name", bucket=None)

    data = b"data here hear hier"
    stream = io.BytesIO(data)
    size = 50
    self.assertGreater(size, len(data))

    with self.assertRaises(ValueError) as exc_info:
        blob._do_multipart_upload(
            None, stream, None, size, None, None, None, None, None, None
        )

    exc_contents = str(exc_info.exception)
    self.assertIn("was specified but the file-like object only had", exc_contents)
    self.assertEqual(stream.tell(), len(data))

def _initiate_resumable_helper(
    self,
    size=None,
    extra_headers=None,
    chunk_size=None,
    num_retries=None,
    user_project=None,
    predefined_acl=None,
    if_generation_match=None,
    if_generation_not_match=None,
    if_metageneration_match=None,
    if_metageneration_not_match=None,
    blob_chunk_size=786432,
    kms_key_name=None,
    timeout=None,
):
    from six.moves.urllib.parse import urlencode
    from google.resumable_media.requests import ResumableUpload
    from google.cloud.storage.blob import _DEFAULT_CHUNKSIZE

    bucket = _Bucket(name="whammy", user_project=user_project)
    blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name)
    blob.metadata = {"rook": "takes knight"}
    blob.chunk_size = blob_chunk_size
    # NOTE(review): this method is truncated at the boundary of this chunk.
    # The original continues with:
    #     if blob_chunk_size is not None:
    # followed by the remainder of the helper body, which lies beyond the
    # visible region and must be restored from the following chunk.
            self.assertIsNotNone(blob.chunk_size)
        else:
            self.assertIsNone(blob.chunk_size)

        # Need to make sure **same** dict is used because ``json.dumps()``
        # will depend on the hash order.
        object_metadata = blob._get_writable_metadata()
        blob._get_writable_metadata = mock.Mock(return_value=object_metadata, spec=[])

        # Create mocks to be checked for doing transport.  The initiation
        # response carries the session URL in its ``location`` header.
        resumable_url = "http://test.invalid?upload_id=hey-you"
        response_headers = {"location": resumable_url}
        transport = self._mock_transport(http_client.OK, response_headers)

        # Create some mock arguments and call the method under test.
        client = mock.Mock(_http=transport, _connection=_Connection, spec=[u"_http"])
        client._connection.API_BASE_URL = "https://storage.googleapis.com"
        data = b"hello hallo halo hi-low"
        stream = io.BytesIO(data)
        content_type = u"text/plain"

        # Only pass ``timeout`` through when the caller supplied one, so the
        # default-timeout code path is exercised as well.
        if timeout is None:
            expected_timeout = self._get_default_timeout()
            timeout_kwarg = {}
        else:
            expected_timeout = timeout
            timeout_kwarg = {"timeout": timeout}

        upload, transport = blob._initiate_resumable_upload(
            client,
            stream,
            content_type,
            size,
            num_retries,
            extra_headers=extra_headers,
            chunk_size=chunk_size,
            predefined_acl=predefined_acl,
            if_generation_match=if_generation_match,
            if_generation_not_match=if_generation_not_match,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
            **timeout_kwarg
        )

        # Check the returned values.
        self.assertIsInstance(upload, ResumableUpload)
        # Rebuild the expected upload URL, mirroring the query-parameter
        # assembly performed by the code under test.
        upload_url = (
            "https://storage.googleapis.com/upload/storage/v1" + bucket.path + "/o"
        )
        qs_params = [("uploadType", "resumable")]

        if user_project is not None:
            qs_params.append(("userProject", user_project))

        if predefined_acl is not None:
            qs_params.append(("predefinedAcl", predefined_acl))

        # A versioned KMS key name must NOT be forwarded as a query parameter.
        if kms_key_name is not None and "cryptoKeyVersions" not in kms_key_name:
            qs_params.append(("kmsKeyName", kms_key_name))

        if if_generation_match is not None:
            qs_params.append(("ifGenerationMatch", if_generation_match))

        if if_generation_not_match is not None:
            qs_params.append(("ifGenerationNotMatch", if_generation_not_match))

        if if_metageneration_match is not None:
            qs_params.append(("ifMetagenerationMatch", if_metageneration_match))

        # NOTE(review): "ifMetaGenerationNotMatch" capitalizes the "G" unlike
        # "ifMetagenerationMatch" above; this mirrors the code under test but
        # looks inconsistent — confirm against the JSON API parameter names.
        if if_metageneration_not_match is not None:
            qs_params.append(("ifMetaGenerationNotMatch", if_metageneration_not_match))

        upload_url += "?" + urlencode(qs_params)
        self.assertEqual(upload.upload_url, upload_url)
        if extra_headers is None:
            self.assertEqual(upload._headers, {})
        else:
            self.assertEqual(upload._headers, extra_headers)
            # The upload must copy, not alias, the caller's header dict.
            self.assertIsNot(upload._headers, extra_headers)
        self.assertFalse(upload.finished)
        # Chunk-size resolution: explicit argument wins, then the blob's own
        # chunk size, then the library default.
        if chunk_size is None:
            if blob_chunk_size is None:
                self.assertEqual(upload._chunk_size, _DEFAULT_CHUNKSIZE)
            else:
                self.assertEqual(upload._chunk_size, blob.chunk_size)
        else:
            self.assertNotEqual(blob.chunk_size, chunk_size)
            self.assertEqual(upload._chunk_size, chunk_size)
        self.assertIs(upload._stream, stream)
        if size is None:
            self.assertIsNone(upload._total_bytes)
        else:
            self.assertEqual(upload._total_bytes, size)
        self.assertEqual(upload._content_type, content_type)
        self.assertEqual(upload.resumable_url, resumable_url)
        # Retry strategy: a caller-supplied retry count replaces the default
        # cumulative-time budget (600s) with a fixed number of retries.
        retry_strategy = upload._retry_strategy
        self.assertEqual(retry_strategy.max_sleep, 64.0)
        if num_retries is None:
            self.assertEqual(retry_strategy.max_cumulative_retry, 600.0)
            self.assertIsNone(retry_strategy.max_retries)
        else:
            self.assertIsNone(retry_strategy.max_cumulative_retry)
            self.assertEqual(retry_strategy.max_retries, num_retries)
        # NOTE(review): comparing ``transport`` with itself is tautological;
        # presumably this was meant to compare against ``client._http`` —
        # confirm the intended assertion.
        self.assertIs(transport, transport)
        # Make sure we never read from the stream.
        self.assertEqual(stream.tell(), 0)
        # Check the mocks: metadata helper consulted exactly once, and the
        # initiation POST carries the JSON-encoded object metadata.
        blob._get_writable_metadata.assert_called_once_with()
        payload = json.dumps(object_metadata).encode("utf-8")
        expected_headers = {
            "content-type": "application/json; charset=UTF-8",
            "x-upload-content-type": content_type,
        }
        if size is not None:
            # The total size is only advertised when known up front.
            expected_headers["x-upload-content-length"] = str(size)
        if extra_headers is not None:
            expected_headers.update(extra_headers)
        transport.request.assert_called_once_with(
            "POST",
            upload_url,
            data=payload,
            headers=expected_headers,
            timeout=expected_timeout,
        )

    def test__initiate_resumable_upload_with_custom_timeout(self):
        # A caller-supplied timeout must be forwarded to the transport.
        self._initiate_resumable_helper(timeout=9.58)

    def test__initiate_resumable_upload_no_size(self):
        self._initiate_resumable_helper()

    def test__initiate_resumable_upload_with_size(self):
        self._initiate_resumable_helper(size=10000)

    def test__initiate_resumable_upload_with_user_project(self):
        user_project = "user-project-123"
        self._initiate_resumable_helper(user_project=user_project)

    def test__initiate_resumable_upload_with_kms(self):
        # Fully-qualified Cloud KMS key resource name (no version component).
        kms_resource = (
            "projects/test-project-123/"
            "locations/us/"
            "keyRings/test-ring/"
            "cryptoKeys/test-key"
        )
        self._initiate_resumable_helper(kms_key_name=kms_resource)

    def test__initiate_resumable_upload_with_kms_with_version(self):
        # NOTE(review): a "/" separator appears to be missing between
        # "test-key" and "cryptoKeyVersions/1" — the adjacent string literals
        # concatenate without one.  The helper only checks for the
        # "cryptoKeyVersions" substring, so the test still exercises the
        # intended branch; confirm whether this is intentional.
        kms_resource = (
            "projects/test-project-123/"
            "locations/us/"
            "keyRings/test-ring/"
            "cryptoKeys/test-key"
            "cryptoKeyVersions/1"
        )
        self._initiate_resumable_helper(kms_key_name=kms_resource)

    def test__initiate_resumable_upload_without_chunk_size(self):
        self._initiate_resumable_helper(blob_chunk_size=None)

    def test__initiate_resumable_upload_with_chunk_size(self):
        one_mb = 1048576
        self._initiate_resumable_helper(chunk_size=one_mb)

    def test__initiate_resumable_upload_with_extra_headers(self):
        extra_headers = {"origin": "http://not-in-kansas-anymore.invalid"}
        self._initiate_resumable_helper(extra_headers=extra_headers)

    def test__initiate_resumable_upload_with_retry(self):
        self._initiate_resumable_helper(num_retries=11)

    def test__initiate_resumable_upload_with_generation_match(self):
        self._initiate_resumable_helper(
            if_generation_match=4, if_metageneration_match=4
        )

    def test__initiate_resumable_upload_with_generation_not_match(self):
        self._initiate_resumable_helper(
            if_generation_not_match=4, if_metageneration_not_match=4
        )

    def test__initiate_resumable_upload_with_predefined_acl(self):
        self._initiate_resumable_helper(predefined_acl="private")

    def _make_resumable_transport(
        self, headers1, headers2, headers3, total_bytes, data_corruption=False
    ):
        """Build a mock transport scripted for a three-request resumable upload.

        The transport answers, in order: the upload-initiation request
        (``headers1``), the first chunk (``headers2``, a 308 PERMANENT_REDIRECT
        meaning "keep sending"), and the final chunk (``headers3`` plus a JSON
        body reporting ``total_bytes``).  When ``data_corruption`` is true the
        third response is replaced by a ``DataCorruption`` exception.

        Returns ``(fake_transport, responses)``.
        """
        from google import resumable_media

        fake_transport = mock.Mock(spec=["request"])

        fake_response1 = self._mock_requests_response(http_client.OK, headers1)
        fake_response2 = self._mock_requests_response(
            resumable_media.PERMANENT_REDIRECT, headers2
        )
        json_body = '{{"size": "{:d}"}}'.format(total_bytes)
        if data_corruption:
            fake_response3 = resumable_media.DataCorruption(None)
        else:
            fake_response3 = self._mock_requests_response(
                http_client.OK, headers3, content=json_body.encode("utf-8")
            )

        responses = [fake_response1, fake_response2, fake_response3]
        fake_transport.request.side_effect = responses
        return fake_transport, responses

    @staticmethod
    def _do_resumable_upload_call0(
        blob,
        content_type,
        size=None,
        predefined_acl=None,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=None,
    ):
        # First mock transport.request() initiates the upload.
        upload_url = (
            "https://storage.googleapis.com/upload/storage/v1"
            + blob.bucket.path
            + "/o?uploadType=resumable"
        )
        if predefined_acl is not None:
            upload_url += "&predefinedAcl={}".format(predefined_acl)
        expected_headers = {
            "content-type": "application/json; charset=UTF-8",
            "x-upload-content-type": content_type,
        }
        if size is not None:
            # The total size is only advertised when known up front.
            expected_headers["x-upload-content-length"] = str(size)
        payload = json.dumps({"name": blob.name}).encode("utf-8")
        return mock.call(
            "POST", upload_url, data=payload, headers=expected_headers, timeout=timeout
        )

    @staticmethod
    def _do_resumable_upload_call1(
        blob,
        content_type,
        data,
        resumable_url,
        size=None,
        predefined_acl=None,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=None,
    ):
        # Second mock transport.request() sends the first chunk.
        # With an unknown total size the content-range ends in "/*".
        if size is None:
            content_range = "bytes 0-{:d}/*".format(blob.chunk_size - 1)
        else:
            content_range = "bytes 0-{:d}/{:d}".format(blob.chunk_size - 1, size)

        expected_headers = {
            "content-type": content_type,
            "content-range": content_range,
        }
        payload = data[: blob.chunk_size]
        return mock.call(
            "PUT",
            resumable_url,
            data=payload,
            headers=expected_headers,
            timeout=timeout,
        )

    @staticmethod
    def _do_resumable_upload_call2(
        blob,
        content_type,
        data,
        resumable_url,
        total_bytes,
        predefined_acl=None,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=None,
    ):
        # Third mock transport.request() sends the last chunk.
        content_range = "bytes {:d}-{:d}/{:d}".format(
            blob.chunk_size, total_bytes - 1, total_bytes
        )
        expected_headers = {
            "content-type": content_type,
            "content-range": content_range,
        }
        payload = data[blob.chunk_size :]
        return mock.call(
            "PUT",
            resumable_url,
            data=payload,
            headers=expected_headers,
            timeout=timeout,
        )

    def _do_resumable_helper(
        self,
        use_size=False,
        num_retries=None,
        predefined_acl=None,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        timeout=None,
        data_corruption=False,
    ):
        """Run a full two-chunk upload through ``_do_resumable_upload``.

        Scripts the transport for initiate + two chunks, invokes the method
        under test, then verifies the returned response, the stream position,
        and the exact sequence of transport calls.
        """
        bucket = _Bucket(name="yesterday")
        blob = self._make_one(u"blob-name", bucket=bucket)
        blob.chunk_size = blob._CHUNK_SIZE_MULTIPLE
        self.assertIsNotNone(blob.chunk_size)

        # Data to be uploaded: slightly more than one chunk, so exactly two
        # chunk requests are needed.
        data = b"<html>" + (b"A" * blob.chunk_size) + b"</html>"
        total_bytes = len(data)
        if use_size:
            size = total_bytes
        else:
            size = None

        # Create mocks to be checked for doing transport.
        resumable_url = "http://test.invalid?upload_id=and-then-there-was-1"
        headers1 = {"location": resumable_url}
        headers2 = {"range": "bytes=0-{:d}".format(blob.chunk_size - 1)}
        transport, responses = self._make_resumable_transport(
            headers1, headers2, {}, total_bytes, data_corruption=data_corruption
        )

        # Create some mock arguments and call the method under test.
        client = mock.Mock(_http=transport, _connection=_Connection, spec=["_http"])
        client._connection.API_BASE_URL = "https://storage.googleapis.com"
        stream = io.BytesIO(data)
        content_type = u"text/html"

        if timeout is None:
            expected_timeout = self._get_default_timeout()
            timeout_kwarg = {}
        else:
            expected_timeout = timeout
            timeout_kwarg = {"timeout": timeout}

        response = blob._do_resumable_upload(
            client,
            stream,
            content_type,
            size,
            num_retries,
            predefined_acl,
            if_generation_match,
            if_generation_not_match,
            if_metageneration_match,
            if_metageneration_not_match,
            **timeout_kwarg
        )

        # Check the returned values: the final (third) scripted response is
        # surfaced, and the whole stream was consumed.
        self.assertIs(response, responses[2])
        self.assertEqual(stream.tell(), total_bytes)

        # Check the mocks.
        call0 = self._do_resumable_upload_call0(
            blob,
            content_type,
            size=size,
            predefined_acl=predefined_acl,
            if_generation_match=if_generation_match,
            if_generation_not_match=if_generation_not_match,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
            timeout=expected_timeout,
        )
        call1 = self._do_resumable_upload_call1(
            blob,
            content_type,
            data,
            resumable_url,
            size=size,
            predefined_acl=predefined_acl,
            if_generation_match=if_generation_match,
            if_generation_not_match=if_generation_not_match,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
            timeout=expected_timeout,
        )
        call2 = self._do_resumable_upload_call2(
            blob,
            content_type,
            data,
            resumable_url,
            total_bytes,
            predefined_acl=predefined_acl,
            if_generation_match=if_generation_match,
            if_generation_not_match=if_generation_not_match,
            if_metageneration_match=if_metageneration_match,
            if_metageneration_not_match=if_metageneration_not_match,
            timeout=expected_timeout,
        )
        # The transport must be hit exactly three times, in order:
        # initiate, first chunk, final chunk.
        self.assertEqual(transport.request.mock_calls, [call0, call1, call2])

    def test__do_resumable_upload_with_custom_timeout(self):
        self._do_resumable_helper(timeout=9.58)

    def test__do_resumable_upload_no_size(self):
        self._do_resumable_helper()

    def test__do_resumable_upload_with_size(self):
        self._do_resumable_helper(use_size=True)

    def test__do_resumable_upload_with_retry(self):
        self._do_resumable_helper(num_retries=6)

    def test__do_resumable_upload_with_predefined_acl(self):
        self._do_resumable_helper(predefined_acl="private")

    def test__do_resumable_upload_with_data_corruption(self):
        from google.resumable_media import DataCorruption

        # On checksum mismatch the partially-uploaded object should be
        # deleted and the DataCorruption error re-raised to the caller.
        # NOTE(review): if no exception is raised at all, this test silently
        # passes — the assertions live inside ``except``.  Consider
        # ``assertRaises(DataCorruption)`` instead; confirm intent.
        with mock.patch("google.cloud.storage.blob.Blob.delete") as patch:
            try:
                self._do_resumable_helper(data_corruption=True)
            except Exception as e:
                self.assertTrue(patch.called)
                self.assertIsInstance(e, DataCorruption)

    def _do_upload_helper(
        self,
        chunk_size=None,
        num_retries=None,
        predefined_acl=None,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        size=None,
        timeout=None,
    ):
        from google.cloud.storage.blob import _MAX_MULTIPART_SIZE

        blob = self._make_one(u"blob-name", bucket=None)

        # Create a fake response.
        response = mock.Mock(spec=[u"json"])
        response.json.return_value = mock.sentinel.json
        # Mock **both** helpers so we can assert which path was dispatched.
        blob._do_multipart_upload = mock.Mock(return_value=response, spec=[])
        blob._do_resumable_upload = mock.Mock(return_value=response, spec=[])

        if chunk_size is None:
            self.assertIsNone(blob.chunk_size)
        else:
            blob.chunk_size = chunk_size
            self.assertIsNotNone(blob.chunk_size)

        client = mock.sentinel.client
        stream = mock.sentinel.stream
        content_type = u"video/mp4"
        if size is None:
            # Default to a size well above the multipart threshold.
            size = 12345654321

        if timeout is None:
            expected_timeout = self._get_default_timeout()
            timeout_kwarg = {}
        else:
            expected_timeout = timeout
            timeout_kwarg = {"timeout": timeout}

        # Make the request and check the mocks.
        created_json = blob._do_upload(
            client,
            stream,
            content_type,
            size,
            num_retries,
            predefined_acl,
            if_generation_match,
            if_generation_not_match,
            if_metageneration_match,
            if_metageneration_not_match,
            **timeout_kwarg
        )

        self.assertIs(created_json, mock.sentinel.json)
        response.json.assert_called_once_with()
        if size is not None and size <= _MAX_MULTIPART_SIZE:
            # At or below the multipart threshold, the one-shot path is used.
            blob._do_multipart_upload.assert_called_once_with(
                client,
                stream,
                content_type,
                size,
                num_retries,
                predefined_acl,
                if_generation_match,
                if_generation_not_match,
                if_metageneration_match,
                if_metageneration_not_match,
                timeout=expected_timeout,
                checksum=None,
            )
            blob._do_resumable_upload.assert_not_called()
        else:
            # Larger (or unknown-size) payloads go through the resumable path.
            blob._do_multipart_upload.assert_not_called()
            blob._do_resumable_upload.assert_called_once_with(
                client,
                stream,
                content_type,
                size,
                num_retries,
                predefined_acl,
                if_generation_match,
                if_generation_not_match,
                if_metageneration_match,
                if_metageneration_not_match,
                timeout=expected_timeout,
                checksum=None,
            )

    def test__do_upload_uses_multipart(self):
        from google.cloud.storage.blob import _MAX_MULTIPART_SIZE

        self._do_upload_helper(size=_MAX_MULTIPART_SIZE)

    def test__do_upload_uses_multipart_w_custom_timeout(self):
        from google.cloud.storage.blob import _MAX_MULTIPART_SIZE

        self._do_upload_helper(size=_MAX_MULTIPART_SIZE, timeout=9.58)

    def test__do_upload_uses_resumable(self):
        from google.cloud.storage.blob import _MAX_MULTIPART_SIZE

        chunk_size = 256 * 1024  # 256KB
        self._do_upload_helper(chunk_size=chunk_size, size=_MAX_MULTIPART_SIZE + 1)

    def test__do_upload_uses_resumable_w_custom_timeout(self):
        from google.cloud.storage.blob import _MAX_MULTIPART_SIZE

        chunk_size = 256 * 1024  # 256KB
        self._do_upload_helper(
            chunk_size=chunk_size, size=_MAX_MULTIPART_SIZE + 1, timeout=9.58
        )

    def test__do_upload_with_retry(self):
        self._do_upload_helper(num_retries=20)

    def _upload_from_file_helper(self, side_effect=None, **kwargs):
        from google.cloud._helpers import UTC

        blob = self._make_one("blob-name", bucket=None)

        # Mock low-level upload helper on blob (it is tested elsewhere).
        created_json = {"updated": "2017-01-01T09:09:09.081Z"}
        blob._do_upload = mock.Mock(return_value=created_json, spec=[])
        if side_effect is not None:
            blob._do_upload.side_effect = side_effect
        # Make sure `updated` is empty before the request.
        self.assertIsNone(blob.updated)

        data = b"data is here"
        stream = io.BytesIO(data)
        stream.seek(2)  # Not at zero.
        content_type = u"font/woff"
        client = mock.sentinel.client
        predefined_acl = kwargs.get("predefined_acl", None)
        if_generation_match = kwargs.get("if_generation_match", None)
        if_generation_not_match = kwargs.get("if_generation_not_match", None)
        if_metageneration_match = kwargs.get("if_metageneration_match", None)
        if_metageneration_not_match = kwargs.get("if_metageneration_not_match", None)
        ret_val = blob.upload_from_file(
            stream, size=len(data), content_type=content_type, client=client, **kwargs
        )

        # Check the response and side-effects.
        # upload_from_file() mutates the blob in place and returns None.
        self.assertIsNone(ret_val)
        new_updated = datetime.datetime(2017, 1, 1, 9, 9, 9, 81000, tzinfo=UTC)
        # The `updated` property must be refreshed from the server response.
        self.assertEqual(blob.updated, new_updated)
        expected_timeout = kwargs.get("timeout", self._get_default_timeout())
        # Check the mock.
        num_retries = kwargs.get("num_retries")
        blob._do_upload.assert_called_once_with(
            client,
            stream,
            content_type,
            len(data),
            num_retries,
            predefined_acl,
            if_generation_match,
            if_generation_not_match,
            if_metageneration_match,
            if_metageneration_not_match,
            timeout=expected_timeout,
            checksum=None,
        )
        # Hand the stream back so callers can assert on its final position.
        return stream

    def test_upload_from_file_success(self):
        stream = self._upload_from_file_helper(predefined_acl="private")
        # Without rewind=True the stream position (2) is left untouched.
        assert stream.tell() == 2

    @mock.patch("warnings.warn")
    def test_upload_from_file_with_retries(self, mock_warn):
        from google.cloud.storage import blob as blob_module

        self._upload_from_file_helper(num_retries=20)
        # Passing num_retries is deprecated and must emit a warning.
        mock_warn.assert_called_once_with(
            blob_module._NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2
        )

    def test_upload_from_file_with_rewind(self):
        stream = self._upload_from_file_helper(rewind=True)
        # rewind=True must seek the stream back to the beginning first.
        assert stream.tell() == 0

    def test_upload_from_file_with_custom_timeout(self):
        self._upload_from_file_helper(timeout=9.58)

    def test_upload_from_file_failure(self):
        import requests

        from google.resumable_media import InvalidResponse
        from google.cloud import exceptions

        message = "Someone is already in this spot."
        response = requests.Response()
        response.status_code = http_client.CONFLICT
        response.request = requests.Request("POST", "http://example.com").prepare()
        side_effect = InvalidResponse(response, message)

        # A 409 from the transport must surface as a google.cloud Conflict.
        with self.assertRaises(exceptions.Conflict) as exc_info:
            self._upload_from_file_helper(side_effect=side_effect)

        self.assertIn(message, exc_info.exception.message)
        self.assertEqual(exc_info.exception.errors, [])

    def _do_upload_mock_call_helper(
        self, blob, client, content_type, size, timeout=None
    ):
        """Verify the single recorded ``_do_upload`` call; return its stream arg."""
        self.assertEqual(blob._do_upload.call_count, 1)
        mock_call = blob._do_upload.mock_calls[0]
        call_name, pos_args, kwargs = mock_call
        self.assertEqual(call_name, "")
        self.assertEqual(len(pos_args), 10)
        self.assertEqual(pos_args[0], client)
        self.assertEqual(pos_args[2], content_type)
        self.assertEqual(pos_args[3], size)
        self.assertIsNone(pos_args[4])  # num_retries
        self.assertIsNone(pos_args[5])  # predefined_acl
        self.assertIsNone(pos_args[6])  # if_generation_match
        self.assertIsNone(pos_args[7])  # if_generation_not_match
        self.assertIsNone(pos_args[8])  # if_metageneration_match
        self.assertIsNone(pos_args[9])  # if_metageneration_not_match

        expected_timeout = self._get_default_timeout() if timeout is None else timeout
        self.assertEqual(kwargs, {"timeout": expected_timeout, "checksum": None})

        # pos_args[1] is the stream handed to _do_upload.
        return pos_args[1]

    def test_upload_from_filename(self):
        from google.cloud._testing import _NamedTemporaryFile

        blob = self._make_one("blob-name", bucket=None)

        # Mock low-level upload helper on blob (it is tested elsewhere).
        created_json = {"metadata": {"mint": "ice-cream"}}
        blob._do_upload = mock.Mock(return_value=created_json, spec=[])
        # Make sure `metadata` is empty before the request.
        self.assertIsNone(blob.metadata)

        data = b"soooo much data"
        content_type = u"image/svg+xml"
        client = mock.sentinel.client
        with _NamedTemporaryFile() as temp:
            with open(temp.name, "wb") as file_obj:
                file_obj.write(data)

            ret_val = blob.upload_from_filename(
                temp.name, content_type=content_type, client=client
            )

        # Check the response and side-effects.
        self.assertIsNone(ret_val)
        self.assertEqual(blob.metadata, created_json["metadata"])

        # Check the mock.
        stream = self._do_upload_mock_call_helper(blob, client, content_type, len(data))
        # upload_from_filename must open the file in binary mode and close it.
        self.assertTrue(stream.closed)
        self.assertEqual(stream.mode, "rb")
        self.assertEqual(stream.name, temp.name)

    def test_upload_from_filename_w_custom_timeout(self):
        from google.cloud._testing import _NamedTemporaryFile

        blob = self._make_one("blob-name", bucket=None)

        # Mock low-level upload helper on blob (it is tested elsewhere).
        created_json = {"metadata": {"mint": "ice-cream"}}
        blob._do_upload = mock.Mock(return_value=created_json, spec=[])
        # Make sure `metadata` is empty before the request.
        self.assertIsNone(blob.metadata)

        data = b"soooo much data"
        content_type = u"image/svg+xml"
        client = mock.sentinel.client
        with _NamedTemporaryFile() as temp:
            with open(temp.name, "wb") as file_obj:
                file_obj.write(data)

            blob.upload_from_filename(
                temp.name, content_type=content_type, client=client, timeout=9.58
            )

        # Check the mock: the custom timeout must be forwarded to _do_upload.
        self._do_upload_mock_call_helper(
            blob, client, content_type, len(data), timeout=9.58
        )

    def _upload_from_string_helper(self, data, **kwargs):
        """Drive upload_from_string() with a mocked _do_upload and verify it.

        ``data`` may be bytes or text; extra ``kwargs`` (e.g. ``timeout``) are
        forwarded to ``upload_from_string``.
        """
        from google.cloud._helpers import _to_bytes

        blob = self._make_one("blob-name", bucket=None)

        # Mock low-level upload helper on blob (it is tested elsewhere).
        created_json = {"componentCount": "5"}
        blob._do_upload = mock.Mock(return_value=created_json, spec=[])

        # Make sure `component_count` is empty before the request.
        self.assertIsNone(blob.component_count)

        client = mock.sentinel.client
        ret_val = blob.upload_from_string(data, client=client, **kwargs)

        # Check the response and side-effects.
        self.assertIsNone(ret_val)
        # componentCount comes back as a JSON string and must be coerced to int.
        self.assertEqual(blob.component_count, 5)

        # Check the mock.
        payload = _to_bytes(data, encoding="utf-8")
        stream = self._do_upload_mock_call_helper(
            blob,
            client,
            "text/plain",
            len(payload),
            kwargs.get("timeout", self._get_default_timeout()),
        )
        self.assertIsInstance(stream, io.BytesIO)
        self.assertEqual(stream.getvalue(), payload)

    def test_upload_from_string_w_custom_timeout(self):
        data = b"XB]jb\xb8tad\xe0"
        self._upload_from_string_helper(data, timeout=9.58)

    def test_upload_from_string_w_bytes(self):
        data = b"XB]jb\xb8tad\xe0"
        self._upload_from_string_helper(data)

    def test_upload_from_string_w_text(self):
        data = u"\N{snowman} \N{sailboat}"
        self._upload_from_string_helper(data)

    def _create_resumable_upload_session_helper(
        self, origin=None, side_effect=None, timeout=None
    ):
        """Drive create_resumable_upload_session() against a scripted transport.

        ``origin`` adds a CORS Origin header; ``side_effect`` is installed on
        the mock transport to simulate failures; ``timeout`` overrides the
        default request timeout.
        """
        bucket = _Bucket(name="alex-trebek")
        blob = self._make_one("blob-name", bucket=bucket)
        chunk_size = 99 * blob._CHUNK_SIZE_MULTIPLE
        blob.chunk_size = chunk_size

        # Create mocks to be checked for doing transport.
        resumable_url = "http://test.invalid?upload_id=clean-up-everybody"
        response_headers = {"location": resumable_url}
        transport = self._mock_transport(http_client.OK, response_headers)
        if side_effect is not None:
            transport.request.side_effect = side_effect

        # Create some mock arguments and call the method under test.
        content_type = u"text/plain"
        size = 10000
        client = mock.Mock(_http=transport, _connection=_Connection, spec=[u"_http"])
        client._connection.API_BASE_URL = "https://storage.googleapis.com"

        if timeout is None:
            expected_timeout = self._get_default_timeout()
            timeout_kwarg = {}
        else:
            expected_timeout = timeout
            timeout_kwarg = {"timeout": timeout}

        new_url = blob.create_resumable_upload_session(
            content_type=content_type,
            size=size,
            origin=origin,
            client=client,
            **timeout_kwarg
        )

        # Check the returned value and (lack of) side-effect.
        self.assertEqual(new_url, resumable_url)
        self.assertEqual(blob.chunk_size, chunk_size)

        # Check the mocks.
        upload_url = (
            "https://storage.googleapis.com/upload/storage/v1"
            + bucket.path
            + "/o?uploadType=resumable"
        )
        payload = b'{"name": "blob-name"}'
        expected_headers = {
            "content-type": "application/json; charset=UTF-8",
            "x-upload-content-length": str(size),
            "x-upload-content-type": content_type,
        }
        if origin is not None:
            # The CORS origin, when given, rides along on the initiation POST.
            expected_headers["Origin"] = origin
        transport.request.assert_called_once_with(
            "POST",
            upload_url,
            data=payload,
            headers=expected_headers,
            timeout=expected_timeout,
        )

    def test_create_resumable_upload_session(self):
        self._create_resumable_upload_session_helper()

    def test_create_resumable_upload_session_with_custom_timeout(self):
        self._create_resumable_upload_session_helper(timeout=9.58)

    def test_create_resumable_upload_session_with_origin(self):
        self._create_resumable_upload_session_helper(origin="http://google.com")

    def test_create_resumable_upload_session_with_failure(self):
        from google.resumable_media import InvalidResponse
        from google.cloud import exceptions

        message = "5-oh-3 woe is me."
response = self._mock_requests_response( status_code=http_client.SERVICE_UNAVAILABLE, headers={} ) side_effect = InvalidResponse(response, message) with self.assertRaises(exceptions.ServiceUnavailable) as exc_info: self._create_resumable_upload_session_helper(side_effect=side_effect) self.assertIn(message, exc_info.exception.message) self.assertEqual(exc_info.exception.errors, []) def test_get_iam_policy(self): from google.cloud.storage.iam import STORAGE_OWNER_ROLE from google.cloud.storage.iam import STORAGE_EDITOR_ROLE from google.cloud.storage.iam import STORAGE_VIEWER_ROLE from google.api_core.iam import Policy BLOB_NAME = "blob-name" PATH = "/b/name/o/%s" % (BLOB_NAME,) ETAG = "DEADBEEF" VERSION = 1 OWNER1 = "user:phred@example.com" OWNER2 = "group:cloud-logs@google.com" EDITOR1 = "domain:google.com" EDITOR2 = "user:phred@example.com" VIEWER1 = "serviceAccount:1234-abcdef@service.example.com" VIEWER2 = "user:phred@example.com" RETURNED = { "resourceId": PATH, "etag": ETAG, "version": VERSION, "bindings": [ {"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}, {"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]}, {"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]}, ], } after = ({"status": http_client.OK}, RETURNED) EXPECTED = { binding["role"]: set(binding["members"]) for binding in RETURNED["bindings"] } connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) policy = blob.get_iam_policy(timeout=42) self.assertIsInstance(policy, Policy) self.assertEqual(policy.etag, RETURNED["etag"]) self.assertEqual(policy.version, RETURNED["version"]) self.assertEqual(dict(policy), EXPECTED) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "GET", "path": "%s/iam" % (PATH,), "query_params": {}, "_target_object": None, "timeout": 42, }, ) def test_get_iam_policy_w_requested_policy_version(self): from google.cloud.storage.iam 
import STORAGE_OWNER_ROLE BLOB_NAME = "blob-name" PATH = "/b/name/o/%s" % (BLOB_NAME,) ETAG = "DEADBEEF" VERSION = 1 OWNER1 = "user:phred@example.com" OWNER2 = "group:cloud-logs@google.com" RETURNED = { "resourceId": PATH, "etag": ETAG, "version": VERSION, "bindings": [{"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}], } after = ({"status": http_client.OK}, RETURNED) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) blob.get_iam_policy(requested_policy_version=3) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "GET", "path": "%s/iam" % (PATH,), "query_params": {"optionsRequestedPolicyVersion": 3}, "_target_object": None, "timeout": self._get_default_timeout(), }, ) def test_get_iam_policy_w_user_project(self): from google.api_core.iam import Policy BLOB_NAME = "blob-name" USER_PROJECT = "user-project-123" PATH = "/b/name/o/%s" % (BLOB_NAME,) ETAG = "DEADBEEF" VERSION = 1 RETURNED = { "resourceId": PATH, "etag": ETAG, "version": VERSION, "bindings": [], } after = ({"status": http_client.OK}, RETURNED) EXPECTED = {} connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client, user_project=USER_PROJECT) blob = self._make_one(BLOB_NAME, bucket=bucket) policy = blob.get_iam_policy() self.assertIsInstance(policy, Policy) self.assertEqual(policy.etag, RETURNED["etag"]) self.assertEqual(policy.version, RETURNED["version"]) self.assertEqual(dict(policy), EXPECTED) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "GET", "path": "%s/iam" % (PATH,), "query_params": {"userProject": USER_PROJECT}, "_target_object": None, "timeout": self._get_default_timeout(), }, ) def test_set_iam_policy(self): import operator from google.cloud.storage.iam import STORAGE_OWNER_ROLE from google.cloud.storage.iam import STORAGE_EDITOR_ROLE from google.cloud.storage.iam import 
STORAGE_VIEWER_ROLE from google.api_core.iam import Policy BLOB_NAME = "blob-name" PATH = "/b/name/o/%s" % (BLOB_NAME,) ETAG = "DEADBEEF" VERSION = 1 OWNER1 = "user:phred@example.com" OWNER2 = "group:cloud-logs@google.com" EDITOR1 = "domain:google.com" EDITOR2 = "user:phred@example.com" VIEWER1 = "serviceAccount:1234-abcdef@service.example.com" VIEWER2 = "user:phred@example.com" BINDINGS = [ {"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}, {"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]}, {"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]}, ] RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS} after = ({"status": http_client.OK}, RETURNED) policy = Policy() for binding in BINDINGS: policy[binding["role"]] = binding["members"] connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) returned = blob.set_iam_policy(policy, timeout=42) self.assertEqual(returned.etag, ETAG) self.assertEqual(returned.version, VERSION) self.assertEqual(dict(returned), dict(policy)) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "PUT") self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,)) self.assertEqual(kw[0]["query_params"], {}) self.assertEqual(kw[0]["timeout"], 42) sent = kw[0]["data"] self.assertEqual(sent["resourceId"], PATH) self.assertEqual(len(sent["bindings"]), len(BINDINGS)) key = operator.itemgetter("role") for found, expected in zip( sorted(sent["bindings"], key=key), sorted(BINDINGS, key=key) ): self.assertEqual(found["role"], expected["role"]) self.assertEqual(sorted(found["members"]), sorted(expected["members"])) def test_set_iam_policy_w_user_project(self): from google.api_core.iam import Policy BLOB_NAME = "blob-name" USER_PROJECT = "user-project-123" PATH = "/b/name/o/%s" % (BLOB_NAME,) ETAG = "DEADBEEF" VERSION = 1 BINDINGS = [] RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS} 
after = ({"status": http_client.OK}, RETURNED) policy = Policy() connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client, user_project=USER_PROJECT) blob = self._make_one(BLOB_NAME, bucket=bucket) returned = blob.set_iam_policy(policy) self.assertEqual(returned.etag, ETAG) self.assertEqual(returned.version, VERSION) self.assertEqual(dict(returned), dict(policy)) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "PUT") self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,)) self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) self.assertEqual(kw[0]["data"], {"resourceId": PATH}) def test_test_iam_permissions(self): from google.cloud.storage.iam import STORAGE_OBJECTS_LIST from google.cloud.storage.iam import STORAGE_BUCKETS_GET from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE BLOB_NAME = "blob-name" PATH = "/b/name/o/%s" % (BLOB_NAME,) PERMISSIONS = [ STORAGE_OBJECTS_LIST, STORAGE_BUCKETS_GET, STORAGE_BUCKETS_UPDATE, ] ALLOWED = PERMISSIONS[1:] RETURNED = {"permissions": ALLOWED} after = ({"status": http_client.OK}, RETURNED) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) allowed = blob.test_iam_permissions(PERMISSIONS, timeout=42) self.assertEqual(allowed, ALLOWED) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "GET") self.assertEqual(kw[0]["path"], "%s/iam/testPermissions" % (PATH,)) self.assertEqual(kw[0]["query_params"], {"permissions": PERMISSIONS}) self.assertEqual(kw[0]["timeout"], 42) def test_test_iam_permissions_w_user_project(self): from google.cloud.storage.iam import STORAGE_OBJECTS_LIST from google.cloud.storage.iam import STORAGE_BUCKETS_GET from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE BLOB_NAME = "blob-name" USER_PROJECT = "user-project-123" PATH = "/b/name/o/%s" % (BLOB_NAME,) PERMISSIONS = [ 
STORAGE_OBJECTS_LIST, STORAGE_BUCKETS_GET, STORAGE_BUCKETS_UPDATE, ] ALLOWED = PERMISSIONS[1:] RETURNED = {"permissions": ALLOWED} after = ({"status": http_client.OK}, RETURNED) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client, user_project=USER_PROJECT) blob = self._make_one(BLOB_NAME, bucket=bucket) allowed = blob.test_iam_permissions(PERMISSIONS) self.assertEqual(allowed, ALLOWED) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "GET") self.assertEqual(kw[0]["path"], "%s/iam/testPermissions" % (PATH,)) self.assertEqual( kw[0]["query_params"], {"permissions": PERMISSIONS, "userProject": USER_PROJECT}, ) self.assertEqual(kw[0]["timeout"], self._get_default_timeout()) def test_make_public(self): from google.cloud.storage.acl import _ACLEntity BLOB_NAME = "blob-name" permissive = [{"entity": "allUsers", "role": _ACLEntity.READER_ROLE}] after = ({"status": http_client.OK}, {"acl": permissive}) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) blob.acl.loaded = True blob.make_public() self.assertEqual(list(blob.acl), permissive) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "PATCH") self.assertEqual(kw[0]["path"], "/b/name/o/%s" % BLOB_NAME) self.assertEqual(kw[0]["data"], {"acl": permissive}) self.assertEqual(kw[0]["query_params"], {"projection": "full"}) def test_make_private(self): BLOB_NAME = "blob-name" no_permissions = [] after = ({"status": http_client.OK}, {"acl": no_permissions}) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) blob.acl.loaded = True blob.make_private() self.assertEqual(list(blob.acl), no_permissions) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "PATCH") self.assertEqual(kw[0]["path"], 
"/b/name/o/%s" % BLOB_NAME) self.assertEqual(kw[0]["data"], {"acl": no_permissions}) self.assertEqual(kw[0]["query_params"], {"projection": "full"}) def test_compose_wo_content_type_set(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" RESOURCE = {} after = ({"status": http_client.OK}, RESOURCE) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) # no destination.content_type set destination.compose(sources=[source_1, source_2]) self.assertIsNone(destination.content_type) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "POST", "path": "/b/name/o/%s/compose" % DESTINATION, "query_params": {}, "data": { "sourceObjects": [{"name": source_1.name}, {"name": source_2.name}], "destination": {}, }, "_target_object": destination, "timeout": self._get_default_timeout(), "retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED, }, ) def test_compose_minimal_w_user_project(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" RESOURCE = {"etag": "DEADBEEF"} USER_PROJECT = "user-project-123" after = ({"status": http_client.OK}, RESOURCE) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client, user_project=USER_PROJECT) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) destination.content_type = "text/plain" destination.compose(sources=[source_1, source_2], timeout=42) self.assertEqual(destination.etag, "DEADBEEF") kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "POST", "path": "/b/name/o/%s/compose" % DESTINATION, "query_params": {"userProject": USER_PROJECT}, "data": { "sourceObjects": [{"name": 
source_1.name}, {"name": source_2.name}], "destination": {"contentType": "text/plain"}, }, "_target_object": destination, "timeout": 42, "retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED, }, ) def test_compose_w_additional_property_changes(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" RESOURCE = {"etag": "DEADBEEF"} after = ({"status": http_client.OK}, RESOURCE) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) destination.content_type = "text/plain" destination.content_language = "en-US" destination.metadata = {"my-key": "my-value"} destination.compose(sources=[source_1, source_2]) self.assertEqual(destination.etag, "DEADBEEF") kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "POST", "path": "/b/name/o/%s/compose" % DESTINATION, "query_params": {}, "data": { "sourceObjects": [{"name": source_1.name}, {"name": source_2.name}], "destination": { "contentType": "text/plain", "contentLanguage": "en-US", "metadata": {"my-key": "my-value"}, }, }, "_target_object": destination, "timeout": self._get_default_timeout(), "retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED, }, ) def test_compose_w_generation_match(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" RESOURCE = {} GENERATION_NUMBERS = [6, 9] METAGENERATION_NUMBERS = [7, 1] after = ({"status": http_client.OK}, RESOURCE) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) destination.compose( sources=[source_1, source_2], if_generation_match=GENERATION_NUMBERS, if_metageneration_match=METAGENERATION_NUMBERS, ) kw = 
connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "POST", "path": "/b/name/o/%s/compose" % DESTINATION, "query_params": {}, "data": { "sourceObjects": [ { "name": source_1.name, "objectPreconditions": { "ifGenerationMatch": GENERATION_NUMBERS[0], "ifMetagenerationMatch": METAGENERATION_NUMBERS[0], }, }, { "name": source_2.name, "objectPreconditions": { "ifGenerationMatch": GENERATION_NUMBERS[1], "ifMetagenerationMatch": METAGENERATION_NUMBERS[1], }, }, ], "destination": {}, }, "_target_object": destination, "timeout": self._get_default_timeout(), "retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED, }, ) def test_compose_w_generation_match_bad_length(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" GENERATION_NUMBERS = [6] METAGENERATION_NUMBERS = [7] after = ({"status": http_client.OK}, {}) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) with self.assertRaises(ValueError): destination.compose( sources=[source_1, source_2], if_generation_match=GENERATION_NUMBERS ) with self.assertRaises(ValueError): destination.compose( sources=[source_1, source_2], if_metageneration_match=METAGENERATION_NUMBERS, ) def test_compose_w_generation_match_nones(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" GENERATION_NUMBERS = [6, None] after = ({"status": http_client.OK}, {}) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) destination.compose( sources=[source_1, source_2], if_generation_match=GENERATION_NUMBERS ) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], 
{ "method": "POST", "path": "/b/name/o/%s/compose" % DESTINATION, "query_params": {}, "data": { "sourceObjects": [ { "name": source_1.name, "objectPreconditions": { "ifGenerationMatch": GENERATION_NUMBERS[0] }, }, {"name": source_2.name}, ], "destination": {}, }, "_target_object": destination, "timeout": self._get_default_timeout(), "retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED, }, ) def test_rewrite_response_without_resource(self): SOURCE_BLOB = "source" DEST_BLOB = "dest" DEST_BUCKET = "other-bucket" TOKEN = "TOKEN" RESPONSE = { "totalBytesRewritten": 33, "objectSize": 42, "done": False, "rewriteToken": TOKEN, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) source_bucket = _Bucket(client=client) source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket) dest_bucket = _Bucket(client=client, name=DEST_BUCKET) dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket) token, rewritten, size = dest_blob.rewrite(source_blob) self.assertEqual(token, TOKEN) self.assertEqual(rewritten, 33) self.assertEqual(size, 42) def test_rewrite_w_generations(self): SOURCE_BLOB = "source" SOURCE_GENERATION = 42 DEST_BLOB = "dest" DEST_BUCKET = "other-bucket" DEST_GENERATION = 43 TOKEN = "TOKEN" RESPONSE = { "totalBytesRewritten": 33, "objectSize": 42, "done": False, "rewriteToken": TOKEN, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) source_bucket = _Bucket(client=client) source_blob = self._make_one( SOURCE_BLOB, bucket=source_bucket, generation=SOURCE_GENERATION ) dest_bucket = _Bucket(client=client, name=DEST_BUCKET) dest_blob = self._make_one( DEST_BLOB, bucket=dest_bucket, generation=DEST_GENERATION ) token, rewritten, size = dest_blob.rewrite(source_blob, timeout=42) self.assertEqual(token, TOKEN) self.assertEqual(rewritten, 33) self.assertEqual(size, 42) (kw,) = connection._requested self.assertEqual(kw["method"], "POST") 
self.assertEqual( kw["path"], "/b/%s/o/%s/rewriteTo/b/%s/o/%s" % ( (source_bucket.name, source_blob.name, dest_bucket.name, dest_blob.name) ), ) self.assertEqual(kw["query_params"], {"sourceGeneration": SOURCE_GENERATION}) self.assertEqual(kw["timeout"], 42) def test_rewrite_w_generation_match(self): SOURCE_BLOB = "source" SOURCE_GENERATION_NUMBER = 42 DEST_BLOB = "dest" DEST_BUCKET = "other-bucket" DEST_GENERATION_NUMBER = 16 TOKEN = "TOKEN" RESPONSE = { "totalBytesRewritten": 33, "objectSize": 42, "done": False, "rewriteToken": TOKEN, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) source_bucket = _Bucket(client=client) source_blob = self._make_one( SOURCE_BLOB, bucket=source_bucket, generation=SOURCE_GENERATION_NUMBER ) dest_bucket = _Bucket(client=client, name=DEST_BUCKET) dest_blob = self._make_one( DEST_BLOB, bucket=dest_bucket, generation=DEST_GENERATION_NUMBER ) token, rewritten, size = dest_blob.rewrite( source_blob, timeout=42, if_generation_match=dest_blob.generation, if_source_generation_match=source_blob.generation, ) (kw,) = connection._requested self.assertEqual(kw["method"], "POST") self.assertEqual( kw["path"], "/b/%s/o/%s/rewriteTo/b/%s/o/%s" % ( (source_bucket.name, source_blob.name, dest_bucket.name, dest_blob.name) ), ) self.assertEqual( kw["query_params"], { "ifSourceGenerationMatch": SOURCE_GENERATION_NUMBER, "ifGenerationMatch": DEST_GENERATION_NUMBER, "sourceGeneration": SOURCE_GENERATION_NUMBER, }, ) self.assertEqual(kw["timeout"], 42) def test_rewrite_other_bucket_other_name_no_encryption_partial(self): SOURCE_BLOB = "source" DEST_BLOB = "dest" DEST_BUCKET = "other-bucket" TOKEN = "TOKEN" RESPONSE = { "totalBytesRewritten": 33, "objectSize": 42, "done": False, "rewriteToken": TOKEN, "resource": {"etag": "DEADBEEF"}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) source_bucket = _Bucket(client=client) 
source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket) dest_bucket = _Bucket(client=client, name=DEST_BUCKET) dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket) token, rewritten, size = dest_blob.rewrite(source_blob) self.assertEqual(token, TOKEN) self.assertEqual(rewritten, 33) self.assertEqual(size, 42) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "POST") PATH = "/b/name/o/%s/rewriteTo/b/%s/o/%s" % ( SOURCE_BLOB, DEST_BUCKET, DEST_BLOB, ) self.assertEqual(kw[0]["path"], PATH) self.assertEqual(kw[0]["query_params"], {}) SENT = {} self.assertEqual(kw[0]["data"], SENT) self.assertEqual(kw[0]["timeout"], self._get_default_timeout()) headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers) self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers) self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers) self.assertNotIn("X-Goog-Encryption-Algorithm", headers) self.assertNotIn("X-Goog-Encryption-Key", headers) self.assertNotIn("X-Goog-Encryption-Key-Sha256", headers) def test_rewrite_same_name_no_old_key_new_key_done_w_user_project(self): KEY = b"01234567890123456789012345678901" # 32 bytes KEY_B64 = base64.b64encode(KEY).rstrip().decode("ascii") KEY_HASH = hashlib.sha256(KEY).digest() KEY_HASH_B64 = base64.b64encode(KEY_HASH).rstrip().decode("ascii") BLOB_NAME = "blob" USER_PROJECT = "user-project-123" RESPONSE = { "totalBytesRewritten": 42, "objectSize": 42, "done": True, "resource": {"etag": "DEADBEEF"}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) bucket = _Bucket(client=client, user_project=USER_PROJECT) plain = self._make_one(BLOB_NAME, bucket=bucket) encrypted = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY) token, rewritten, size = encrypted.rewrite(plain) self.assertIsNone(token) self.assertEqual(rewritten, 42) 
self.assertEqual(size, 42) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "POST") PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) self.assertEqual(kw[0]["path"], PATH) self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) SENT = {} self.assertEqual(kw[0]["data"], SENT) self.assertEqual(kw[0]["timeout"], self._get_default_timeout()) headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers) self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers) self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers) self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256") self.assertEqual(headers["X-Goog-Encryption-Key"], KEY_B64) self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], KEY_HASH_B64) def test_rewrite_same_name_no_key_new_key_w_token(self): SOURCE_KEY = b"01234567890123456789012345678901" # 32 bytes SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode("ascii") SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest() SOURCE_KEY_HASH_B64 = base64.b64encode(SOURCE_KEY_HASH).rstrip().decode("ascii") DEST_KEY = b"90123456789012345678901234567890" # 32 bytes DEST_KEY_B64 = base64.b64encode(DEST_KEY).rstrip().decode("ascii") DEST_KEY_HASH = hashlib.sha256(DEST_KEY).digest() DEST_KEY_HASH_B64 = base64.b64encode(DEST_KEY_HASH).rstrip().decode("ascii") BLOB_NAME = "blob" TOKEN = "TOKEN" RESPONSE = { "totalBytesRewritten": 42, "objectSize": 42, "done": True, "resource": {"etag": "DEADBEEF"}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) bucket = _Bucket(client=client) source = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY) dest = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=DEST_KEY) token, rewritten, size = dest.rewrite(source, token=TOKEN) self.assertIsNone(token) 
self.assertEqual(rewritten, 42) self.assertEqual(size, 42) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "POST") PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) self.assertEqual(kw[0]["path"], PATH) self.assertEqual(kw[0]["query_params"], {"rewriteToken": TOKEN}) SENT = {} self.assertEqual(kw[0]["data"], SENT) self.assertEqual(kw[0]["timeout"], self._get_default_timeout()) headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256") self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], SOURCE_KEY_B64) self.assertEqual( headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], SOURCE_KEY_HASH_B64 ) self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256") self.assertEqual(headers["X-Goog-Encryption-Key"], DEST_KEY_B64) self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], DEST_KEY_HASH_B64) def test_rewrite_same_name_w_old_key_new_kms_key(self): SOURCE_KEY = b"01234567890123456789012345678901" # 32 bytes SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode("ascii") SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest() SOURCE_KEY_HASH_B64 = base64.b64encode(SOURCE_KEY_HASH).rstrip().decode("ascii") DEST_KMS_RESOURCE = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" "cryptoKeys/test-key" ) BLOB_NAME = "blob" RESPONSE = { "totalBytesRewritten": 42, "objectSize": 42, "done": True, "resource": {"etag": "DEADBEEF"}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) bucket = _Bucket(client=client) source = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY) dest = self._make_one(BLOB_NAME, bucket=bucket, kms_key_name=DEST_KMS_RESOURCE) token, rewritten, size = dest.rewrite(source) self.assertIsNone(token) self.assertEqual(rewritten, 42) self.assertEqual(size, 42) kw = 
connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "POST") PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) self.assertEqual(kw[0]["path"], PATH) self.assertEqual( kw[0]["query_params"], {"destinationKmsKeyName": DEST_KMS_RESOURCE} ) self.assertEqual(kw[0]["timeout"], self._get_default_timeout()) SENT = {"kmsKeyName": DEST_KMS_RESOURCE} self.assertEqual(kw[0]["data"], SENT) headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256") self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], SOURCE_KEY_B64) self.assertEqual( headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], SOURCE_KEY_HASH_B64 ) def test_update_storage_class_invalid(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) with self.assertRaises(ValueError): blob.update_storage_class(u"BOGUS") def test_update_storage_class_large_file(self): BLOB_NAME = "blob-name" STORAGE_CLASS = u"NEARLINE" TOKEN = "TOKEN" INCOMPLETE_RESPONSE = { "totalBytesRewritten": 42, "objectSize": 84, "done": False, "rewriteToken": TOKEN, "resource": {"storageClass": STORAGE_CLASS}, } COMPLETE_RESPONSE = { "totalBytesRewritten": 84, "objectSize": 84, "done": True, "resource": {"storageClass": STORAGE_CLASS}, } response_1 = ({"status": http_client.OK}, INCOMPLETE_RESPONSE) response_2 = ({"status": http_client.OK}, COMPLETE_RESPONSE) connection = _Connection(response_1, response_2) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) blob.update_storage_class("NEARLINE") self.assertEqual(blob.storage_class, "NEARLINE") def test_update_storage_class_with_custom_timeout(self): BLOB_NAME = "blob-name" STORAGE_CLASS = u"NEARLINE" TOKEN = "TOKEN" INCOMPLETE_RESPONSE = { "totalBytesRewritten": 42, "objectSize": 84, "done": False, "rewriteToken": TOKEN, "resource": {"storageClass": 
STORAGE_CLASS}, } COMPLETE_RESPONSE = { "totalBytesRewritten": 84, "objectSize": 84, "done": True, "resource": {"storageClass": STORAGE_CLASS}, } response_1 = ({"status": http_client.OK}, INCOMPLETE_RESPONSE) response_2 = ({"status": http_client.OK}, COMPLETE_RESPONSE) connection = _Connection(response_1, response_2) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) blob.update_storage_class("NEARLINE", timeout=9.58) self.assertEqual(blob.storage_class, "NEARLINE") kw = connection._requested self.assertEqual(len(kw), 2) for kw_item in kw: self.assertIn("timeout", kw_item) self.assertEqual(kw_item["timeout"], 9.58) def test_update_storage_class_wo_encryption_key(self): BLOB_NAME = "blob-name" STORAGE_CLASS = u"NEARLINE" RESPONSE = { "totalBytesRewritten": 42, "objectSize": 42, "done": True, "resource": {"storageClass": STORAGE_CLASS}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) blob.update_storage_class("NEARLINE") self.assertEqual(blob.storage_class, "NEARLINE") kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "POST") PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) self.assertEqual(kw[0]["path"], PATH) self.assertEqual(kw[0]["query_params"], {}) SENT = {"storageClass": STORAGE_CLASS} self.assertEqual(kw[0]["data"], SENT) headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} # Blob has no key, and therefore the relevant headers are not sent. 
self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers) self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers) self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers) self.assertNotIn("X-Goog-Encryption-Algorithm", headers) self.assertNotIn("X-Goog-Encryption-Key", headers) self.assertNotIn("X-Goog-Encryption-Key-Sha256", headers) def test_update_storage_class_w_encryption_key_w_user_project(self): BLOB_NAME = "blob-name" BLOB_KEY = b"01234567890123456789012345678901" # 32 bytes BLOB_KEY_B64 = base64.b64encode(BLOB_KEY).rstrip().decode("ascii") BLOB_KEY_HASH = hashlib.sha256(BLOB_KEY).digest() BLOB_KEY_HASH_B64 = base64.b64encode(BLOB_KEY_HASH).rstrip().decode("ascii") STORAGE_CLASS = u"NEARLINE" USER_PROJECT = "user-project-123" RESPONSE = { "totalBytesRewritten": 42, "objectSize": 42, "done": True, "resource": {"storageClass": STORAGE_CLASS}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) bucket = _Bucket(client=client, user_project=USER_PROJECT) blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=BLOB_KEY) blob.update_storage_class("NEARLINE") self.assertEqual(blob.storage_class, "NEARLINE") kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "POST") PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) self.assertEqual(kw[0]["path"], PATH) self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) SENT = {"storageClass": STORAGE_CLASS} self.assertEqual(kw[0]["data"], SENT) headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} # Blob has key, and therefore the relevant headers are sent. 
self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256") self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], BLOB_KEY_B64) self.assertEqual( headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], BLOB_KEY_HASH_B64 ) self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256") self.assertEqual(headers["X-Goog-Encryption-Key"], BLOB_KEY_B64) self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], BLOB_KEY_HASH_B64) def test_update_storage_class_w_generation_match(self): BLOB_NAME = "blob-name" STORAGE_CLASS = u"NEARLINE" GENERATION_NUMBER = 6 SOURCE_GENERATION_NUMBER = 9 RESPONSE = { "totalBytesRewritten": 42, "objectSize": 42, "done": True, "resource": {"storageClass": STORAGE_CLASS}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) blob.update_storage_class( "NEARLINE", if_generation_match=GENERATION_NUMBER, if_source_generation_match=SOURCE_GENERATION_NUMBER, ) self.assertEqual(blob.storage_class, "NEARLINE") kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "POST") PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) self.assertEqual(kw[0]["path"], PATH) self.assertEqual( kw[0]["query_params"], { "ifGenerationMatch": GENERATION_NUMBER, "ifSourceGenerationMatch": SOURCE_GENERATION_NUMBER, }, ) SENT = {"storageClass": STORAGE_CLASS} self.assertEqual(kw[0]["data"], SENT) def test_cache_control_getter(self): BLOB_NAME = "blob-name" bucket = _Bucket() CACHE_CONTROL = "no-cache" properties = {"cacheControl": CACHE_CONTROL} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.cache_control, CACHE_CONTROL) def test_cache_control_setter(self): BLOB_NAME = "blob-name" CACHE_CONTROL = "no-cache" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) 
self.assertIsNone(blob.cache_control) blob.cache_control = CACHE_CONTROL self.assertEqual(blob.cache_control, CACHE_CONTROL) def test_component_count(self): BUCKET = object() COMPONENT_COUNT = 42 blob = self._make_one( "blob-name", bucket=BUCKET, properties={"componentCount": COMPONENT_COUNT} ) self.assertEqual(blob.component_count, COMPONENT_COUNT) def test_component_count_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.component_count) def test_component_count_string_val(self): BUCKET = object() COMPONENT_COUNT = 42 blob = self._make_one( "blob-name", bucket=BUCKET, properties={"componentCount": str(COMPONENT_COUNT)}, ) self.assertEqual(blob.component_count, COMPONENT_COUNT) def test_content_disposition_getter(self): BLOB_NAME = "blob-name" bucket = _Bucket() CONTENT_DISPOSITION = "Attachment; filename=example.jpg" properties = {"contentDisposition": CONTENT_DISPOSITION} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION) def test_content_disposition_setter(self): BLOB_NAME = "blob-name" CONTENT_DISPOSITION = "Attachment; filename=example.jpg" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.content_disposition) blob.content_disposition = CONTENT_DISPOSITION self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION) def test_content_encoding_getter(self): BLOB_NAME = "blob-name" bucket = _Bucket() CONTENT_ENCODING = "gzip" properties = {"contentEncoding": CONTENT_ENCODING} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.content_encoding, CONTENT_ENCODING) def test_content_encoding_setter(self): BLOB_NAME = "blob-name" CONTENT_ENCODING = "gzip" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.content_encoding) blob.content_encoding = CONTENT_ENCODING self.assertEqual(blob.content_encoding, 
CONTENT_ENCODING) def test_content_language_getter(self): BLOB_NAME = "blob-name" bucket = _Bucket() CONTENT_LANGUAGE = "pt-BR" properties = {"contentLanguage": CONTENT_LANGUAGE} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.content_language, CONTENT_LANGUAGE) def test_content_language_setter(self): BLOB_NAME = "blob-name" CONTENT_LANGUAGE = "pt-BR" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.content_language) blob.content_language = CONTENT_LANGUAGE self.assertEqual(blob.content_language, CONTENT_LANGUAGE) def test_content_type_getter(self): BLOB_NAME = "blob-name" bucket = _Bucket() CONTENT_TYPE = "image/jpeg" properties = {"contentType": CONTENT_TYPE} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.content_type, CONTENT_TYPE) def test_content_type_setter(self): BLOB_NAME = "blob-name" CONTENT_TYPE = "image/jpeg" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.content_type) blob.content_type = CONTENT_TYPE self.assertEqual(blob.content_type, CONTENT_TYPE) def test_crc32c_getter(self): BLOB_NAME = "blob-name" bucket = _Bucket() CRC32C = "DEADBEEF" properties = {"crc32c": CRC32C} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.crc32c, CRC32C) def test_crc32c_setter(self): BLOB_NAME = "blob-name" CRC32C = "DEADBEEF" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.crc32c) blob.crc32c = CRC32C self.assertEqual(blob.crc32c, CRC32C) def test_etag(self): BLOB_NAME = "blob-name" bucket = _Bucket() ETAG = "ETAG" properties = {"etag": ETAG} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.etag, ETAG) def test_event_based_hold_getter_missing(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {} blob = self._make_one(BLOB_NAME, bucket=bucket, 
properties=properties) self.assertIsNone(blob.event_based_hold) def test_event_based_hold_getter_false(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {"eventBasedHold": False} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertFalse(blob.event_based_hold) def test_event_based_hold_getter_true(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {"eventBasedHold": True} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertTrue(blob.event_based_hold) def test_event_based_hold_setter(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.event_based_hold) blob.event_based_hold = True self.assertEqual(blob.event_based_hold, True) def test_generation(self): BUCKET = object() GENERATION = 42 blob = self._make_one( "blob-name", bucket=BUCKET, properties={"generation": GENERATION} ) self.assertEqual(blob.generation, GENERATION) def test_generation_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.generation) def test_generation_string_val(self): BUCKET = object() GENERATION = 42 blob = self._make_one( "blob-name", bucket=BUCKET, properties={"generation": str(GENERATION)} ) self.assertEqual(blob.generation, GENERATION) def test_id(self): BLOB_NAME = "blob-name" bucket = _Bucket() ID = "ID" properties = {"id": ID} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.id, ID) def test_md5_hash_getter(self): BLOB_NAME = "blob-name" bucket = _Bucket() MD5_HASH = "DEADBEEF" properties = {"md5Hash": MD5_HASH} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.md5_hash, MD5_HASH) def test_md5_hash_setter(self): BLOB_NAME = "blob-name" MD5_HASH = "DEADBEEF" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.md5_hash) blob.md5_hash = MD5_HASH 
self.assertEqual(blob.md5_hash, MD5_HASH) def test_media_link(self): BLOB_NAME = "blob-name" bucket = _Bucket() MEDIA_LINK = "http://example.com/media/" properties = {"mediaLink": MEDIA_LINK} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.media_link, MEDIA_LINK) def test_metadata_getter(self): BLOB_NAME = "blob-name" bucket = _Bucket() METADATA = {"foo": "Foo"} properties = {"metadata": METADATA} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.metadata, METADATA) def test_metadata_setter(self): BLOB_NAME = "blob-name" METADATA = {"foo": "Foo"} bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.metadata) blob.metadata = METADATA self.assertEqual(blob.metadata, METADATA) def test_metadata_setter_w_nan(self): BLOB_NAME = "blob-name" METADATA = {"foo": float("nan")} bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.metadata) blob.metadata = METADATA value = blob.metadata["foo"] self.assertIsInstance(value, str) def test_metageneration(self): BUCKET = object() METAGENERATION = 42 blob = self._make_one( "blob-name", bucket=BUCKET, properties={"metageneration": METAGENERATION} ) self.assertEqual(blob.metageneration, METAGENERATION) def test_metageneration_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.metageneration) def test_metageneration_string_val(self): BUCKET = object() METAGENERATION = 42 blob = self._make_one( "blob-name", bucket=BUCKET, properties={"metageneration": str(METAGENERATION)}, ) self.assertEqual(blob.metageneration, METAGENERATION) def test_owner(self): BLOB_NAME = "blob-name" bucket = _Bucket() OWNER = {"entity": "project-owner-12345", "entityId": "23456"} properties = {"owner": OWNER} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) owner = blob.owner self.assertEqual(owner["entity"], 
"project-owner-12345") self.assertEqual(owner["entityId"], "23456") def test_retention_expiration_time(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"retentionExpirationTime": TIME_CREATED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.retention_expiration_time, TIMESTAMP) def test_retention_expiration_time_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.retention_expiration_time) def test_self_link(self): BLOB_NAME = "blob-name" bucket = _Bucket() SELF_LINK = "http://example.com/self/" properties = {"selfLink": SELF_LINK} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.self_link, SELF_LINK) def test_size(self): BUCKET = object() SIZE = 42 blob = self._make_one("blob-name", bucket=BUCKET, properties={"size": SIZE}) self.assertEqual(blob.size, SIZE) def test_size_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.size) def test_size_string_val(self): BUCKET = object() SIZE = 42 blob = self._make_one( "blob-name", bucket=BUCKET, properties={"size": str(SIZE)} ) self.assertEqual(blob.size, SIZE) def test_storage_class_getter(self): blob_name = "blob-name" bucket = _Bucket() storage_class = "COLDLINE" properties = {"storageClass": storage_class} blob = self._make_one(blob_name, bucket=bucket, properties=properties) self.assertEqual(blob.storage_class, storage_class) def test_storage_class_setter(self): blob_name = "blob-name" bucket = _Bucket() storage_class = "COLDLINE" blob = self._make_one(blob_name, bucket=bucket) self.assertIsNone(blob.storage_class) blob.storage_class = storage_class self.assertEqual(blob.storage_class, storage_class) 
self.assertEqual(blob._properties, {"storageClass": storage_class}) def test_temporary_hold_getter_missing(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertIsNone(blob.temporary_hold) def test_temporary_hold_getter_false(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {"temporaryHold": False} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertFalse(blob.temporary_hold) def test_temporary_hold_getter_true(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {"temporaryHold": True} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertTrue(blob.temporary_hold) def test_temporary_hold_setter(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.temporary_hold) blob.temporary_hold = True self.assertEqual(blob.temporary_hold, True) def test_time_deleted(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) TIME_DELETED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"timeDeleted": TIME_DELETED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.time_deleted, TIMESTAMP) def test_time_deleted_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.time_deleted) def test_time_created(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"timeCreated": TIME_CREATED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.time_created, 
TIMESTAMP) def test_time_created_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.time_created) def test_updated(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) UPDATED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"updated": UPDATED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.updated, TIMESTAMP) def test_updated_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.updated) def test_custom_time_getter(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"customTime": TIME_CREATED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.custom_time, TIMESTAMP) def test_custom_time_setter(self): from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.custom_time) blob.custom_time = TIMESTAMP self.assertEqual(blob.custom_time, TIMESTAMP) def test_custom_time_setter_none_value(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"customTime": TIME_CREATED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.custom_time, TIMESTAMP) blob.custom_time = None self.assertIsNone(blob.custom_time) def 
test_custom_time_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.custom_time) def test_from_string_w_valid_uri(self): from google.cloud.storage.blob import Blob connection = _Connection() client = _Client(connection) uri = "gs://BUCKET_NAME/b" blob = Blob.from_string(uri, client) self.assertIsInstance(blob, Blob) self.assertIs(blob.client, client) self.assertEqual(blob.name, "b") self.assertEqual(blob.bucket.name, "BUCKET_NAME") def test_from_string_w_invalid_uri(self): from google.cloud.storage.blob import Blob connection = _Connection() client = _Client(connection) with pytest.raises(ValueError, match="URI scheme must be gs"): Blob.from_string("http://bucket_name/b", client) def test_from_string_w_domain_name_bucket(self): from google.cloud.storage.blob import Blob connection = _Connection() client = _Client(connection) uri = "gs://buckets.example.com/b" blob = Blob.from_string(uri, client) self.assertIsInstance(blob, Blob) self.assertIs(blob.client, client) self.assertEqual(blob.name, "b") self.assertEqual(blob.bucket.name, "buckets.example.com") class Test__quote(unittest.TestCase): @staticmethod def _call_fut(*args, **kw): from google.cloud.storage.blob import _quote return _quote(*args, **kw) def test_bytes(self): quoted = self._call_fut(b"\xDE\xAD\xBE\xEF") self.assertEqual(quoted, "%DE%AD%BE%EF") def test_unicode(self): helicopter = u"\U0001f681" quoted = self._call_fut(helicopter) self.assertEqual(quoted, "%F0%9F%9A%81") def test_bad_type(self): with self.assertRaises(TypeError): self._call_fut(None) def test_w_slash_default(self): with_slash = "foo/bar/baz" quoted = self._call_fut(with_slash) self.assertEqual(quoted, "foo%2Fbar%2Fbaz") def test_w_slash_w_safe(self): with_slash = "foo/bar/baz" quoted_safe = self._call_fut(with_slash, safe=b"/") self.assertEqual(quoted_safe, with_slash) def test_w_tilde(self): with_tilde = "bam~qux" quoted = self._call_fut(with_tilde, safe=b"~") 
self.assertEqual(quoted, with_tilde) class Test__maybe_rewind(unittest.TestCase): @staticmethod def _call_fut(*args, **kwargs): from google.cloud.storage.blob import _maybe_rewind return _maybe_rewind(*args, **kwargs) def test_default(self): stream = mock.Mock(spec=[u"seek"]) ret_val = self._call_fut(stream) self.assertIsNone(ret_val) stream.seek.assert_not_called() def test_do_not_rewind(self): stream = mock.Mock(spec=[u"seek"]) ret_val = self._call_fut(stream, rewind=False) self.assertIsNone(ret_val) stream.seek.assert_not_called() def test_do_rewind(self): stream = mock.Mock(spec=[u"seek"]) ret_val = self._call_fut(stream, rewind=True) self.assertIsNone(ret_val) stream.seek.assert_called_once_with(0, os.SEEK_SET) class Test__raise_from_invalid_response(unittest.TestCase): @staticmethod def _call_fut(error): from google.cloud.storage.blob import _raise_from_invalid_response return _raise_from_invalid_response(error) def _helper(self, message, code=http_client.BAD_REQUEST, reason=None, args=()): import requests from google.resumable_media import InvalidResponse from google.api_core import exceptions response = requests.Response() response.request = requests.Request("GET", "http://example.com").prepare() response._content = reason response.status_code = code error = InvalidResponse(response, message, *args) with self.assertRaises(exceptions.GoogleAPICallError) as exc_info: self._call_fut(error) return exc_info def test_default(self): message = "Failure" exc_info = self._helper(message) expected = "GET http://example.com/: {}".format(message) self.assertEqual(exc_info.exception.message, expected) self.assertEqual(exc_info.exception.errors, []) def test_w_206_and_args(self): message = "Failure" reason = b"Not available" args = ("one", "two") exc_info = self._helper( message, code=http_client.PARTIAL_CONTENT, reason=reason, args=args ) expected = "GET http://example.com/: {}: {}".format( reason.decode("utf-8"), (message,) + args ) 
self.assertEqual(exc_info.exception.message, expected) self.assertEqual(exc_info.exception.errors, []) class Test__add_query_parameters(unittest.TestCase): @staticmethod def _call_fut(*args, **kwargs): from google.cloud.storage.blob import _add_query_parameters return _add_query_parameters(*args, **kwargs) def test_w_empty_list(self): BASE_URL = "https://test.example.com/base" self.assertEqual(self._call_fut(BASE_URL, []), BASE_URL) def test_wo_existing_qs(self): BASE_URL = "https://test.example.com/base" NV_LIST = [("one", "One"), ("two", "Two")] expected = "&".join(["{}={}".format(name, value) for name, value in NV_LIST]) self.assertEqual( self._call_fut(BASE_URL, NV_LIST), "{}?{}".format(BASE_URL, expected) ) def test_w_existing_qs(self): BASE_URL = "https://test.example.com/base?one=Three" NV_LIST = [("one", "One"), ("two", "Two")] expected = "&".join(["{}={}".format(name, value) for name, value in NV_LIST]) self.assertEqual( self._call_fut(BASE_URL, NV_LIST), "{}&{}".format(BASE_URL, expected) ) class _Connection(object): API_BASE_URL = "http://example.com" USER_AGENT = "testing 1.2.3" credentials = object() def __init__(self, *responses): self._responses = responses[:] self._requested = [] self._signed = [] def _respond(self, **kw): self._requested.append(kw) response, self._responses = self._responses[0], self._responses[1:] return response def api_request(self, **kw): from google.cloud.exceptions import NotFound info, content = self._respond(**kw) if info.get("status") == http_client.NOT_FOUND: raise NotFound(info) return content class _Bucket(object): def __init__(self, client=None, name="name", user_project=None): if client is None: connection = _Connection() client = _Client(connection) self.client = client self._blobs = {} self._copied = [] self._deleted = [] self.name = name self.path = "/b/" + name self.user_project = user_project def delete_blob( self, blob_name, client=None, generation=None, timeout=None, if_generation_match=None, 
if_generation_not_match=None, if_metageneration_match=None, if_metageneration_not_match=None, ): del self._blobs[blob_name] self._deleted.append( ( blob_name, client, generation, timeout, if_generation_match, if_generation_not_match, if_metageneration_match, if_metageneration_not_match, ) ) class _Client(object): def __init__(self, connection): self._base_connection = connection @property def _connection(self): return self._base_connection @property def _credentials(self): return self._base_connection.credentials
37.414313
115
0.62716
import base64 import datetime import hashlib import io import json import os import tempfile import unittest import mock import pytest import six from six.moves import http_client from google.cloud.storage.retry import DEFAULT_RETRY_IF_GENERATION_SPECIFIED def _make_credentials(): import google.auth.credentials return mock.Mock(spec=google.auth.credentials.Credentials) class Test_Blob(unittest.TestCase): @staticmethod def _make_one(*args, **kw): from google.cloud.storage.blob import Blob properties = kw.pop("properties", {}) blob = Blob(*args, **kw) blob._properties.update(properties) return blob @staticmethod def _get_default_timeout(): from google.cloud.storage.constants import _DEFAULT_TIMEOUT return _DEFAULT_TIMEOUT def test_ctor_wo_encryption_key(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {"key": "value"} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertIs(blob.bucket, bucket) self.assertEqual(blob.name, BLOB_NAME) self.assertEqual(blob._properties, properties) self.assertFalse(blob._acl.loaded) self.assertIs(blob._acl.blob, blob) self.assertEqual(blob._encryption_key, None) self.assertEqual(blob.kms_key_name, None) def test_ctor_with_encoded_unicode(self): blob_name = b"wet \xe2\x9b\xb5" blob = self._make_one(blob_name, bucket=None) unicode_name = u"wet \N{sailboat}" self.assertNotIsInstance(blob.name, bytes) self.assertIsInstance(blob.name, six.text_type) self.assertEqual(blob.name, unicode_name) def test_ctor_w_encryption_key(self): KEY = b"01234567890123456789012345678901" BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY) self.assertEqual(blob._encryption_key, KEY) self.assertEqual(blob.kms_key_name, None) def test_ctor_w_kms_key_name_and_encryption_key(self): KEY = b"01234567890123456789012345678901" KMS_RESOURCE = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" "cryptoKeys/test-key" ) BLOB_NAME = "blob-name" bucket = 
_Bucket() with self.assertRaises(ValueError): self._make_one( BLOB_NAME, bucket=bucket, encryption_key=KEY, kms_key_name=KMS_RESOURCE ) def test_ctor_w_kms_key_name(self): KMS_RESOURCE = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" "cryptoKeys/test-key" ) BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket, kms_key_name=KMS_RESOURCE) self.assertEqual(blob._encryption_key, None) self.assertEqual(blob.kms_key_name, KMS_RESOURCE) def test_ctor_with_generation(self): BLOB_NAME = "blob-name" GENERATION = 12345 bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION) self.assertEqual(blob.generation, GENERATION) def _set_properties_helper(self, kms_key_name=None): import datetime from google.cloud._helpers import UTC from google.cloud._helpers import _RFC3339_MICROS now = datetime.datetime.utcnow().replace(tzinfo=UTC) NOW = now.strftime(_RFC3339_MICROS) BLOB_NAME = "blob-name" GENERATION = 12345 BLOB_ID = "name/{}/{}".format(BLOB_NAME, GENERATION) SELF_LINK = "http://example.com/self/" METAGENERATION = 23456 SIZE = 12345 MD5_HASH = "DEADBEEF" MEDIA_LINK = "http://example.com/media/" ENTITY = "project-owner-12345" ENTITY_ID = "23456" CRC32C = "FACE0DAC" COMPONENT_COUNT = 2 ETAG = "ETAG" resource = { "id": BLOB_ID, "selfLink": SELF_LINK, "generation": GENERATION, "metageneration": METAGENERATION, "contentType": "text/plain", "timeCreated": NOW, "updated": NOW, "timeDeleted": NOW, "storageClass": "NEARLINE", "timeStorageClassUpdated": NOW, "size": SIZE, "md5Hash": MD5_HASH, "mediaLink": MEDIA_LINK, "contentEncoding": "gzip", "contentDisposition": "inline", "contentLanguage": "en-US", "cacheControl": "private", "metadata": {"foo": "Foo"}, "owner": {"entity": ENTITY, "entityId": ENTITY_ID}, "crc32c": CRC32C, "componentCount": COMPONENT_COUNT, "etag": ETAG, "customTime": NOW, } if kms_key_name is not None: resource["kmsKeyName"] = kms_key_name bucket = _Bucket() blob = 
self._make_one(BLOB_NAME, bucket=bucket) blob._set_properties(resource) self.assertEqual(blob.id, BLOB_ID) self.assertEqual(blob.self_link, SELF_LINK) self.assertEqual(blob.generation, GENERATION) self.assertEqual(blob.metageneration, METAGENERATION) self.assertEqual(blob.content_type, "text/plain") self.assertEqual(blob.time_created, now) self.assertEqual(blob.updated, now) self.assertEqual(blob.time_deleted, now) self.assertEqual(blob.storage_class, "NEARLINE") self.assertEqual(blob.size, SIZE) self.assertEqual(blob.md5_hash, MD5_HASH) self.assertEqual(blob.media_link, MEDIA_LINK) self.assertEqual(blob.content_encoding, "gzip") self.assertEqual(blob.content_disposition, "inline") self.assertEqual(blob.content_language, "en-US") self.assertEqual(blob.cache_control, "private") self.assertEqual(blob.metadata, {"foo": "Foo"}) self.assertEqual(blob.owner, {"entity": ENTITY, "entityId": ENTITY_ID}) self.assertEqual(blob.crc32c, CRC32C) self.assertEqual(blob.component_count, COMPONENT_COUNT) self.assertEqual(blob.etag, ETAG) self.assertEqual(blob.custom_time, now) if kms_key_name is not None: self.assertEqual(blob.kms_key_name, kms_key_name) else: self.assertIsNone(blob.kms_key_name) def test__set_properties_wo_kms_key_name(self): self._set_properties_helper() def test__set_properties_w_kms_key_name(self): kms_resource = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" "cryptoKeys/test-key" ) self._set_properties_helper(kms_key_name=kms_resource) def test_chunk_size_ctor(self): from google.cloud.storage.blob import Blob BLOB_NAME = "blob-name" BUCKET = object() chunk_size = 10 * Blob._CHUNK_SIZE_MULTIPLE blob = self._make_one(BLOB_NAME, bucket=BUCKET, chunk_size=chunk_size) self.assertEqual(blob._chunk_size, chunk_size) def test_chunk_size_getter(self): BLOB_NAME = "blob-name" BUCKET = object() blob = self._make_one(BLOB_NAME, bucket=BUCKET) self.assertIsNone(blob.chunk_size) VALUE = object() blob._chunk_size = VALUE self.assertIs(blob.chunk_size, 
VALUE) def test_chunk_size_setter(self): BLOB_NAME = "blob-name" BUCKET = object() blob = self._make_one(BLOB_NAME, bucket=BUCKET) self.assertIsNone(blob._chunk_size) blob._CHUNK_SIZE_MULTIPLE = 10 blob.chunk_size = 20 self.assertEqual(blob._chunk_size, 20) def test_chunk_size_setter_bad_value(self): BLOB_NAME = "blob-name" BUCKET = object() blob = self._make_one(BLOB_NAME, bucket=BUCKET) self.assertIsNone(blob._chunk_size) blob._CHUNK_SIZE_MULTIPLE = 10 with self.assertRaises(ValueError): blob.chunk_size = 11 def test_acl_property(self): from google.cloud.storage.acl import ObjectACL fake_bucket = _Bucket() blob = self._make_one(u"name", bucket=fake_bucket) acl = blob.acl self.assertIsInstance(acl, ObjectACL) self.assertIs(acl, blob._acl) def test_path_bad_bucket(self): fake_bucket = object() name = u"blob-name" blob = self._make_one(name, bucket=fake_bucket) self.assertRaises(AttributeError, getattr, blob, "path") def test_path_no_name(self): bucket = _Bucket() blob = self._make_one(u"", bucket=bucket) self.assertRaises(ValueError, getattr, blob, "path") def test_path_normal(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual(blob.path, "/b/name/o/%s" % BLOB_NAME) def test_path_w_slash_in_name(self): BLOB_NAME = "parent/child" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual(blob.path, "/b/name/o/parent%2Fchild") def test_path_with_non_ascii(self): blob_name = u"Caf\xe9" bucket = _Bucket() blob = self._make_one(blob_name, bucket=bucket) self.assertEqual(blob.path, "/b/name/o/Caf%C3%A9") def test_bucket_readonly_property(self): blob_name = "BLOB" bucket = _Bucket() other = _Bucket() blob = self._make_one(blob_name, bucket=bucket) with self.assertRaises(AttributeError): blob.bucket = other def test_client(self): blob_name = "BLOB" bucket = _Bucket() blob = self._make_one(blob_name, bucket=bucket) self.assertIs(blob.client, bucket.client) def test_user_project(self): 
user_project = "user-project-123" blob_name = "BLOB" bucket = _Bucket(user_project=user_project) blob = self._make_one(blob_name, bucket=bucket) self.assertEqual(blob.user_project, user_project) def test__encryption_headers_wo_encryption_key(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) expected = {} self.assertEqual(blob._encryption_headers(), expected) def test__encryption_headers_w_encryption_key(self): key = b"aa426195405adee2c8081bb9e7e74b19" header_key_value = "YWE0MjYxOTU0MDVhZGVlMmM4MDgxYmI5ZTdlNzRiMTk=" header_key_hash_value = "V3Kwe46nKc3xLv96+iJ707YfZfFvlObta8TQcx2gpm0=" BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=key) expected = { "X-Goog-Encryption-Algorithm": "AES256", "X-Goog-Encryption-Key": header_key_value, "X-Goog-Encryption-Key-Sha256": header_key_hash_value, } self.assertEqual(blob._encryption_headers(), expected) def test__query_params_default(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual(blob._query_params, {}) def test__query_params_w_user_project(self): user_project = "user-project-123" BLOB_NAME = "BLOB" bucket = _Bucket(user_project=user_project) blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual(blob._query_params, {"userProject": user_project}) def test__query_params_w_generation(self): generation = 123456 BLOB_NAME = "BLOB" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket, generation=generation) self.assertEqual(blob._query_params, {"generation": generation}) def test_public_url(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual( blob.public_url, "https://storage.googleapis.com/name/%s" % BLOB_NAME ) def test_public_url_w_slash_in_name(self): BLOB_NAME = "parent/child" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual( 
blob.public_url, "https://storage.googleapis.com/name/parent/child" ) def test_public_url_w_tilde_in_name(self): BLOB_NAME = "foo~bar" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertEqual(blob.public_url, "https://storage.googleapis.com/name/foo~bar") def test_public_url_with_non_ascii(self): blob_name = u"winter \N{snowman}" bucket = _Bucket() blob = self._make_one(blob_name, bucket=bucket) expected_url = "https://storage.googleapis.com/name/winter%20%E2%98%83" self.assertEqual(blob.public_url, expected_url) def test_generate_signed_url_w_invalid_version(self): BLOB_NAME = "blob-name" EXPIRATION = "2014-10-16T20:34:37.000Z" connection = _Connection() client = _Client(connection) bucket = _Bucket(client) blob = self._make_one(BLOB_NAME, bucket=bucket) with self.assertRaises(ValueError): blob.generate_signed_url(EXPIRATION, version="nonesuch") def _generate_signed_url_helper( self, version=None, blob_name="blob-name", api_access_endpoint=None, method="GET", content_md5=None, content_type=None, response_type=None, response_disposition=None, generation=None, headers=None, query_parameters=None, credentials=None, expiration=None, encryption_key=None, access_token=None, service_account_email=None, virtual_hosted_style=False, bucket_bound_hostname=None, scheme="http", ): from six.moves.urllib import parse from google.cloud._helpers import UTC from google.cloud.storage._helpers import _bucket_bound_hostname_url from google.cloud.storage.blob import _API_ACCESS_ENDPOINT from google.cloud.storage.blob import _get_encryption_headers api_access_endpoint = api_access_endpoint or _API_ACCESS_ENDPOINT delta = datetime.timedelta(hours=1) if expiration is None: expiration = datetime.datetime.utcnow().replace(tzinfo=UTC) + delta connection = _Connection() client = _Client(connection) bucket = _Bucket(client) blob = self._make_one(blob_name, bucket=bucket, encryption_key=encryption_key) if version is None: effective_version = "v2" else: 
effective_version = version to_patch = "google.cloud.storage.blob.generate_signed_url_{}".format( effective_version ) with mock.patch(to_patch) as signer: signed_uri = blob.generate_signed_url( expiration=expiration, api_access_endpoint=api_access_endpoint, method=method, credentials=credentials, content_md5=content_md5, content_type=content_type, response_type=response_type, response_disposition=response_disposition, generation=generation, headers=headers, query_parameters=query_parameters, version=version, access_token=access_token, service_account_email=service_account_email, virtual_hosted_style=virtual_hosted_style, bucket_bound_hostname=bucket_bound_hostname, ) self.assertEqual(signed_uri, signer.return_value) if credentials is None: expected_creds = _Connection.credentials else: expected_creds = credentials encoded_name = blob_name.encode("utf-8") quoted_name = parse.quote(encoded_name, safe=b"/~") if virtual_hosted_style: expected_api_access_endpoint = "https://{}.storage.googleapis.com".format( bucket.name ) elif bucket_bound_hostname: expected_api_access_endpoint = _bucket_bound_hostname_url( bucket_bound_hostname, scheme ) else: expected_api_access_endpoint = api_access_endpoint expected_resource = "/{}/{}".format(bucket.name, quoted_name) if virtual_hosted_style or bucket_bound_hostname: expected_resource = "/{}".format(quoted_name) if encryption_key is not None: expected_headers = headers or {} if effective_version == "v2": expected_headers["X-Goog-Encryption-Algorithm"] = "AES256" else: expected_headers.update(_get_encryption_headers(encryption_key)) else: expected_headers = headers expected_kwargs = { "resource": expected_resource, "expiration": expiration, "api_access_endpoint": expected_api_access_endpoint, "method": method.upper(), "content_md5": content_md5, "content_type": content_type, "response_type": response_type, "response_disposition": response_disposition, "generation": generation, "headers": expected_headers, "query_parameters": 
query_parameters, "access_token": access_token, "service_account_email": service_account_email, } signer.assert_called_once_with(expected_creds, **expected_kwargs) def test_generate_signed_url_no_version_passed_warning(self): self._generate_signed_url_helper() def _generate_signed_url_v2_helper(self, **kw): version = "v2" self._generate_signed_url_helper(version, **kw) def test_generate_signed_url_v2_w_defaults(self): self._generate_signed_url_v2_helper() def test_generate_signed_url_v2_w_expiration(self): from google.cloud._helpers import UTC expiration = datetime.datetime.utcnow().replace(tzinfo=UTC) self._generate_signed_url_v2_helper(expiration=expiration) def test_generate_signed_url_v2_w_non_ascii_name(self): BLOB_NAME = u"\u0410\u043a\u043a\u043e\u0440\u0434\u044b.txt" self._generate_signed_url_v2_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v2_w_slash_in_name(self): BLOB_NAME = "parent/child" self._generate_signed_url_v2_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v2_w_tilde_in_name(self): BLOB_NAME = "foo~bar" self._generate_signed_url_v2_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v2_w_endpoint(self): self._generate_signed_url_v2_helper( api_access_endpoint="https://api.example.com/v1" ) def test_generate_signed_url_v2_w_method(self): self._generate_signed_url_v2_helper(method="POST") def test_generate_signed_url_v2_w_lowercase_method(self): self._generate_signed_url_v2_helper(method="get") def test_generate_signed_url_v2_w_content_md5(self): self._generate_signed_url_v2_helper(content_md5="FACEDACE") def test_generate_signed_url_v2_w_content_type(self): self._generate_signed_url_v2_helper(content_type="text.html") def test_generate_signed_url_v2_w_response_type(self): self._generate_signed_url_v2_helper(response_type="text.html") def test_generate_signed_url_v2_w_response_disposition(self): self._generate_signed_url_v2_helper(response_disposition="inline") def test_generate_signed_url_v2_w_generation(self): 
self._generate_signed_url_v2_helper(generation=12345) def test_generate_signed_url_v2_w_headers(self): self._generate_signed_url_v2_helper(headers={"x-goog-foo": "bar"}) def test_generate_signed_url_v2_w_csek(self): self._generate_signed_url_v2_helper(encryption_key=os.urandom(32)) def test_generate_signed_url_v2_w_csek_and_headers(self): self._generate_signed_url_v2_helper( encryption_key=os.urandom(32), headers={"x-goog-foo": "bar"} ) def test_generate_signed_url_v2_w_credentials(self): credentials = object() self._generate_signed_url_v2_helper(credentials=credentials) def _generate_signed_url_v4_helper(self, **kw): version = "v4" self._generate_signed_url_helper(version, **kw) def test_generate_signed_url_v4_w_defaults(self): self._generate_signed_url_v4_helper() def test_generate_signed_url_v4_w_non_ascii_name(self): BLOB_NAME = u"\u0410\u043a\u043a\u043e\u0440\u0434\u044b.txt" self._generate_signed_url_v4_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v4_w_slash_in_name(self): BLOB_NAME = "parent/child" self._generate_signed_url_v4_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v4_w_tilde_in_name(self): BLOB_NAME = "foo~bar" self._generate_signed_url_v4_helper(blob_name=BLOB_NAME) def test_generate_signed_url_v4_w_endpoint(self): self._generate_signed_url_v4_helper( api_access_endpoint="https://api.example.com/v1" ) def test_generate_signed_url_v4_w_method(self): self._generate_signed_url_v4_helper(method="POST") def test_generate_signed_url_v4_w_lowercase_method(self): self._generate_signed_url_v4_helper(method="get") def test_generate_signed_url_v4_w_content_md5(self): self._generate_signed_url_v4_helper(content_md5="FACEDACE") def test_generate_signed_url_v4_w_content_type(self): self._generate_signed_url_v4_helper(content_type="text.html") def test_generate_signed_url_v4_w_response_type(self): self._generate_signed_url_v4_helper(response_type="text.html") def test_generate_signed_url_v4_w_response_disposition(self): 
self._generate_signed_url_v4_helper(response_disposition="inline") def test_generate_signed_url_v4_w_generation(self): self._generate_signed_url_v4_helper(generation=12345) def test_generate_signed_url_v4_w_headers(self): self._generate_signed_url_v4_helper(headers={"x-goog-foo": "bar"}) def test_generate_signed_url_v4_w_csek(self): self._generate_signed_url_v4_helper(encryption_key=os.urandom(32)) def test_generate_signed_url_v4_w_csek_and_headers(self): self._generate_signed_url_v4_helper( encryption_key=os.urandom(32), headers={"x-goog-foo": "bar"} ) def test_generate_signed_url_v4_w_virtual_hostname(self): self._generate_signed_url_v4_helper(virtual_hosted_style=True) def test_generate_signed_url_v4_w_bucket_bound_hostname_w_scheme(self): self._generate_signed_url_v4_helper( bucket_bound_hostname="http://cdn.example.com" ) def test_generate_signed_url_v4_w_bucket_bound_hostname_w_bare_hostname(self): self._generate_signed_url_v4_helper(bucket_bound_hostname="cdn.example.com") def test_generate_signed_url_v4_w_credentials(self): credentials = object() self._generate_signed_url_v4_helper(credentials=credentials) def test_exists_miss(self): NONESUCH = "nonesuch" not_found_response = ({"status": http_client.NOT_FOUND}, b"") connection = _Connection(not_found_response) client = _Client(connection) bucket = _Bucket(client) blob = self._make_one(NONESUCH, bucket=bucket) self.assertFalse(blob.exists(timeout=42)) self.assertEqual(len(connection._requested), 1) self.assertEqual( connection._requested[0], { "method": "GET", "path": "/b/name/o/{}".format(NONESUCH), "query_params": {"fields": "name"}, "_target_object": None, "timeout": 42, }, ) def test_exists_hit_w_user_project(self): BLOB_NAME = "blob-name" USER_PROJECT = "user-project-123" found_response = ({"status": http_client.OK}, b"") connection = _Connection(found_response) client = _Client(connection) bucket = _Bucket(client, user_project=USER_PROJECT) blob = self._make_one(BLOB_NAME, bucket=bucket) 
bucket._blobs[BLOB_NAME] = 1 self.assertTrue(blob.exists()) self.assertEqual(len(connection._requested), 1) self.assertEqual( connection._requested[0], { "method": "GET", "path": "/b/name/o/{}".format(BLOB_NAME), "query_params": {"fields": "name", "userProject": USER_PROJECT}, "_target_object": None, "timeout": self._get_default_timeout(), }, ) def test_exists_hit_w_generation(self): BLOB_NAME = "blob-name" GENERATION = 123456 found_response = ({"status": http_client.OK}, b"") connection = _Connection(found_response) client = _Client(connection) bucket = _Bucket(client) blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION) bucket._blobs[BLOB_NAME] = 1 self.assertTrue(blob.exists()) self.assertEqual(len(connection._requested), 1) self.assertEqual( connection._requested[0], { "method": "GET", "path": "/b/name/o/{}".format(BLOB_NAME), "query_params": {"fields": "name", "generation": GENERATION}, "_target_object": None, "timeout": self._get_default_timeout(), }, ) def test_exists_w_generation_match(self): BLOB_NAME = "blob-name" GENERATION_NUMBER = 123456 METAGENERATION_NUMBER = 6 found_response = ({"status": http_client.OK}, b"") connection = _Connection(found_response) client = _Client(connection) bucket = _Bucket(client) blob = self._make_one(BLOB_NAME, bucket=bucket) bucket._blobs[BLOB_NAME] = 1 self.assertTrue( blob.exists( if_generation_match=GENERATION_NUMBER, if_metageneration_match=METAGENERATION_NUMBER, ) ) self.assertEqual(len(connection._requested), 1) self.assertEqual( connection._requested[0], { "method": "GET", "path": "/b/name/o/{}".format(BLOB_NAME), "query_params": { "fields": "name", "ifGenerationMatch": GENERATION_NUMBER, "ifMetagenerationMatch": METAGENERATION_NUMBER, }, "_target_object": None, "timeout": self._get_default_timeout(), }, ) def test_delete_wo_generation(self): BLOB_NAME = "blob-name" not_found_response = ({"status": http_client.NOT_FOUND}, b"") connection = _Connection(not_found_response) client = _Client(connection) 
bucket = _Bucket(client) blob = self._make_one(BLOB_NAME, bucket=bucket) bucket._blobs[BLOB_NAME] = 1 blob.delete() self.assertFalse(blob.exists()) self.assertEqual( bucket._deleted, [ ( BLOB_NAME, None, None, self._get_default_timeout(), None, None, None, None, ) ], ) def test_delete_w_generation(self): BLOB_NAME = "blob-name" GENERATION = 123456 not_found_response = ({"status": http_client.NOT_FOUND}, b"") connection = _Connection(not_found_response) client = _Client(connection) bucket = _Bucket(client) blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION) bucket._blobs[BLOB_NAME] = 1 blob.delete(timeout=42) self.assertFalse(blob.exists()) self.assertEqual( bucket._deleted, [(BLOB_NAME, None, GENERATION, 42, None, None, None, None)] ) def test_delete_w_generation_match(self): BLOB_NAME = "blob-name" GENERATION = 123456 not_found_response = ({"status": http_client.NOT_FOUND}, b"") connection = _Connection(not_found_response) client = _Client(connection) bucket = _Bucket(client) blob = self._make_one(BLOB_NAME, bucket=bucket, generation=GENERATION) bucket._blobs[BLOB_NAME] = 1 blob.delete(timeout=42, if_generation_match=GENERATION) self.assertFalse(blob.exists()) self.assertEqual( bucket._deleted, [(BLOB_NAME, None, GENERATION, 42, GENERATION, None, None, None)], ) def test__get_transport(self): client = mock.Mock(spec=[u"_credentials", "_http"]) client._http = mock.sentinel.transport blob = self._make_one(u"blob-name", bucket=None) transport = blob._get_transport(client) self.assertIs(transport, mock.sentinel.transport) def test__get_download_url_with_media_link(self): blob_name = "something.txt" bucket = _Bucket(name="IRRELEVANT") blob = self._make_one(blob_name, bucket=bucket) media_link = "http://test.invalid" blob._properties["mediaLink"] = media_link client = mock.Mock(_connection=_Connection) client._connection.API_BASE_URL = "https://storage.googleapis.com" download_url = blob._get_download_url(client) self.assertEqual(download_url, 
media_link) def test__get_download_url_with_generation_match(self): GENERATION_NUMBER = 6 MEDIA_LINK = "http://test.invalid" blob = self._make_one("something.txt", bucket=_Bucket(name="IRRELEVANT")) blob._properties["mediaLink"] = MEDIA_LINK client = mock.Mock(_connection=_Connection) client._connection.API_BASE_URL = "https://storage.googleapis.com" download_url = blob._get_download_url( client, if_generation_match=GENERATION_NUMBER ) self.assertEqual( download_url, "{}?ifGenerationMatch={}".format(MEDIA_LINK, GENERATION_NUMBER), ) def test__get_download_url_with_media_link_w_user_project(self): blob_name = "something.txt" user_project = "user-project-123" bucket = _Bucket(name="IRRELEVANT", user_project=user_project) blob = self._make_one(blob_name, bucket=bucket) media_link = "http://test.invalid" blob._properties["mediaLink"] = media_link client = mock.Mock(_connection=_Connection) client._connection.API_BASE_URL = "https://storage.googleapis.com" download_url = blob._get_download_url(client) self.assertEqual( download_url, "{}?userProject={}".format(media_link, user_project) ) def test__get_download_url_on_the_fly(self): blob_name = "bzzz-fly.txt" bucket = _Bucket(name="buhkit") blob = self._make_one(blob_name, bucket=bucket) self.assertIsNone(blob.media_link) client = mock.Mock(_connection=_Connection) client._connection.API_BASE_URL = "https://storage.googleapis.com" download_url = blob._get_download_url(client) expected_url = ( "https://storage.googleapis.com/download/storage/v1/b/" "buhkit/o/bzzz-fly.txt?alt=media" ) self.assertEqual(download_url, expected_url) def test__get_download_url_on_the_fly_with_generation(self): blob_name = "pretend.txt" bucket = _Bucket(name="fictional") blob = self._make_one(blob_name, bucket=bucket) generation = 1493058489532987 blob._properties["generation"] = str(generation) self.assertIsNone(blob.media_link) client = mock.Mock(_connection=_Connection) client._connection.API_BASE_URL = "https://storage.googleapis.com" 
download_url = blob._get_download_url(client) expected_url = ( "https://storage.googleapis.com/download/storage/v1/b/" "fictional/o/pretend.txt?alt=media&generation=1493058489532987" ) self.assertEqual(download_url, expected_url) def test__get_download_url_on_the_fly_with_user_project(self): blob_name = "pretend.txt" user_project = "user-project-123" bucket = _Bucket(name="fictional", user_project=user_project) blob = self._make_one(blob_name, bucket=bucket) self.assertIsNone(blob.media_link) client = mock.Mock(_connection=_Connection) client._connection.API_BASE_URL = "https://storage.googleapis.com" download_url = blob._get_download_url(client) expected_url = ( "https://storage.googleapis.com/download/storage/v1/b/" "fictional/o/pretend.txt?alt=media&userProject={}".format(user_project) ) self.assertEqual(download_url, expected_url) def test__get_download_url_on_the_fly_with_kms_key_name(self): kms_resource = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" "cryptoKeys/test-key" ) blob_name = "bzzz-fly.txt" bucket = _Bucket(name="buhkit") blob = self._make_one(blob_name, bucket=bucket, kms_key_name=kms_resource) self.assertIsNone(blob.media_link) client = mock.Mock(_connection=_Connection) client._connection.API_BASE_URL = "https://storage.googleapis.com" download_url = blob._get_download_url(client) expected_url = ( "https://storage.googleapis.com/download/storage/v1/b/" "buhkit/o/bzzz-fly.txt?alt=media" ) self.assertEqual(download_url, expected_url) @staticmethod def _mock_requests_response(status_code, headers, content=b""): import requests response = requests.Response() response.status_code = status_code response.headers.update(headers) response.raw = None response._content = content response.request = requests.Request("POST", "http://example.com").prepare() return response def _do_download_helper_wo_chunks(self, w_range, raw_download, timeout=None): blob_name = "blob-name" client = mock.Mock() bucket = _Bucket(client) blob = 
self._make_one(blob_name, bucket=bucket) self.assertIsNone(blob.chunk_size) transport = object() file_obj = io.BytesIO() download_url = "http://test.invalid" headers = {} if raw_download: patch = mock.patch("google.cloud.storage.blob.RawDownload") else: patch = mock.patch("google.cloud.storage.blob.Download") if timeout is None: expected_timeout = self._get_default_timeout() timeout_kwarg = {} else: expected_timeout = timeout timeout_kwarg = {"timeout": timeout} with patch as patched: if w_range: blob._do_download( transport, file_obj, download_url, headers, start=1, end=3, raw_download=raw_download, **timeout_kwarg ) else: blob._do_download( transport, file_obj, download_url, headers, raw_download=raw_download, **timeout_kwarg ) if w_range: patched.assert_called_once_with( download_url, stream=file_obj, headers=headers, start=1, end=3, checksum="md5", ) else: patched.assert_called_once_with( download_url, stream=file_obj, headers=headers, start=None, end=None, checksum="md5", ) patched.return_value.consume.assert_called_once_with( transport, timeout=expected_timeout ) def test__do_download_wo_chunks_wo_range_wo_raw(self): self._do_download_helper_wo_chunks(w_range=False, raw_download=False) def test__do_download_wo_chunks_w_range_wo_raw(self): self._do_download_helper_wo_chunks(w_range=True, raw_download=False) def test__do_download_wo_chunks_wo_range_w_raw(self): self._do_download_helper_wo_chunks(w_range=False, raw_download=True) def test__do_download_wo_chunks_w_range_w_raw(self): self._do_download_helper_wo_chunks(w_range=True, raw_download=True) def test__do_download_wo_chunks_w_custom_timeout(self): self._do_download_helper_wo_chunks( w_range=False, raw_download=False, timeout=9.58 ) def _do_download_helper_w_chunks( self, w_range, raw_download, timeout=None, checksum="md5" ): blob_name = "blob-name" client = mock.Mock(_credentials=_make_credentials(), spec=["_credentials"]) bucket = _Bucket(client) blob = self._make_one(blob_name, bucket=bucket) 
        # (Tail of _do_download_helper_w_chunks, whose `def` line precedes this
        # chunk.) Force chunked mode with a tiny chunk size so the chunked
        # download classes are exercised.
        blob._CHUNK_SIZE_MULTIPLE = 1
        chunk_size = blob.chunk_size = 3
        transport = object()
        file_obj = io.BytesIO()
        download_url = "http://test.invalid"
        headers = {}

        # Fake download object: pretends to be unfinished until its
        # consume_next_chunk() is called once.
        download = mock.Mock(finished=False, spec=["finished", "consume_next_chunk"])

        def side_effect(*args, **kwargs):
            download.finished = True

        download.consume_next_chunk.side_effect = side_effect

        # Patch whichever chunked-download class _do_download is expected
        # to instantiate for this mode.
        if raw_download:
            patch = mock.patch("google.cloud.storage.blob.RawChunkedDownload")
        else:
            patch = mock.patch("google.cloud.storage.blob.ChunkedDownload")

        if timeout is None:
            expected_timeout = self._get_default_timeout()
            timeout_kwarg = {}
        else:
            expected_timeout = timeout
            timeout_kwarg = {"timeout": timeout}

        with patch as patched:
            patched.return_value = download
            if w_range:
                blob._do_download(
                    transport,
                    file_obj,
                    download_url,
                    headers,
                    start=1,
                    end=3,
                    raw_download=raw_download,
                    checksum=checksum,
                    **timeout_kwarg
                )
            else:
                blob._do_download(
                    transport,
                    file_obj,
                    download_url,
                    headers,
                    raw_download=raw_download,
                    checksum=checksum,
                    **timeout_kwarg
                )

        # Without an explicit range the full object (start=0, end=None) is
        # requested; the timeout must be forwarded to each chunk request.
        if w_range:
            patched.assert_called_once_with(
                download_url, chunk_size, file_obj, headers=headers, start=1, end=3
            )
        else:
            patched.assert_called_once_with(
                download_url, chunk_size, file_obj, headers=headers, start=0, end=None
            )
        download.consume_next_chunk.assert_called_once_with(
            transport, timeout=expected_timeout
        )

    def test__do_download_w_chunks_wo_range_wo_raw(self):
        self._do_download_helper_w_chunks(w_range=False, raw_download=False)

    def test__do_download_w_chunks_w_range_wo_raw(self):
        self._do_download_helper_w_chunks(w_range=True, raw_download=False)

    def test__do_download_w_chunks_wo_range_w_raw(self):
        self._do_download_helper_w_chunks(w_range=False, raw_download=True)

    def test__do_download_w_chunks_w_range_w_raw(self):
        self._do_download_helper_w_chunks(w_range=True, raw_download=True)

    def test__do_download_w_chunks_w_custom_timeout(self):
        self._do_download_helper_w_chunks(w_range=True, raw_download=True, timeout=9.58)

    def test__do_download_w_chunks_w_checksum(self):
        """Chunked downloads can't checksum; a log message must be emitted."""
        from google.cloud.storage import blob as blob_module

        with mock.patch("logging.info") as patch:
            self._do_download_helper_w_chunks(
                w_range=False, raw_download=False, checksum="md5"
            )
        patch.assert_called_once_with(
            blob_module._CHUNKED_DOWNLOAD_CHECKSUM_MESSAGE.format("md5")
        )

    def test__do_download_w_chunks_wo_checksum(self):
        """No checksum requested -> no informational log message."""
        with mock.patch("logging.info") as patch:
            self._do_download_helper_w_chunks(
                w_range=False, raw_download=False, checksum=None
            )
        patch.assert_not_called()

    def test_download_to_file_with_failure(self):
        """An InvalidResponse from the transport surfaces as cloud NotFound."""
        import requests
        from google.resumable_media import InvalidResponse
        from google.cloud import exceptions

        # Build a real (but unsent) 404 response for InvalidResponse to wrap.
        raw_response = requests.Response()
        raw_response.status_code = http_client.NOT_FOUND
        raw_request = requests.Request("GET", "http://example.com")
        raw_response.request = raw_request.prepare()
        grmp_response = InvalidResponse(raw_response)

        blob_name = "blob-name"
        media_link = "http://test.invalid"
        client = mock.Mock(spec=[u"_http"])
        bucket = _Bucket(client)
        blob = self._make_one(blob_name, bucket=bucket)
        blob._properties["mediaLink"] = media_link
        blob._do_download = mock.Mock()
        blob._do_download.side_effect = grmp_response

        file_obj = io.BytesIO()
        with self.assertRaises(exceptions.NotFound):
            blob.download_to_file(file_obj)

        # Nothing should have been written to the stream on failure.
        self.assertEqual(file_obj.tell(), 0)

        headers = {"accept-encoding": "gzip"}
        blob._do_download.assert_called_once_with(
            client._http,
            file_obj,
            media_link,
            headers,
            None,
            None,
            False,
            timeout=self._get_default_timeout(),
            checksum="md5",
        )

    def test_download_to_file_wo_media_link(self):
        """Without a cached media link, the download URL is synthesized."""
        blob_name = "blob-name"
        client = mock.Mock(_connection=_Connection, spec=[u"_http"])
        client._connection.API_BASE_URL = "https://storage.googleapis.com"
        bucket = _Bucket(client)
        blob = self._make_one(blob_name, bucket=bucket)
        blob._do_download = mock.Mock()

        file_obj = io.BytesIO()
        blob.download_to_file(file_obj)
        # The media link stays unset; the URL is built from the bucket path.
        self.assertIsNone(blob.media_link)

        expected_url = (
            "https://storage.googleapis.com/download/storage/v1/b/"
            "name/o/blob-name?alt=media"
        )
        headers = {"accept-encoding": "gzip"}
        blob._do_download.assert_called_once_with(
            client._http,
            file_obj,
            expected_url,
            headers,
            None,
            None,
            False,
            timeout=self._get_default_timeout(),
            checksum="md5",
        )

    def test_download_to_file_w_generation_match(self):
        """`if_generation_not_match` becomes a query parameter on the URL."""
        GENERATION_NUMBER = 6
        HEADERS = {"accept-encoding": "gzip"}
        EXPECTED_URL = (
            "https://storage.googleapis.com/download/storage/v1/b/"
            "name/o/blob-name?alt=media&ifGenerationNotMatch={}".format(
                GENERATION_NUMBER
            )
        )

        client = mock.Mock(_connection=_Connection, spec=[u"_http"])
        client._connection.API_BASE_URL = "https://storage.googleapis.com"
        blob = self._make_one("blob-name", bucket=_Bucket(client))
        blob._do_download = mock.Mock()
        file_obj = io.BytesIO()

        blob.download_to_file(file_obj, if_generation_not_match=GENERATION_NUMBER)
        blob._do_download.assert_called_once_with(
            client._http,
            file_obj,
            EXPECTED_URL,
            HEADERS,
            None,
            None,
            False,
            timeout=self._get_default_timeout(),
            checksum="md5",
        )

    def _download_to_file_helper(self, use_chunks, raw_download, timeout=None):
        """Shared driver for download_to_file tests (chunked/raw/timeout)."""
        blob_name = "blob-name"
        client = mock.Mock(spec=[u"_http"])
        bucket = _Bucket(client)
        media_link = "http://example.com/media/"
        properties = {"mediaLink": media_link}
        blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        if use_chunks:
            blob._CHUNK_SIZE_MULTIPLE = 1
            blob.chunk_size = 3
        blob._do_download = mock.Mock()

        if timeout is None:
            expected_timeout = self._get_default_timeout()
            timeout_kwarg = {}
        else:
            expected_timeout = timeout
            timeout_kwarg = {"timeout": timeout}

        file_obj = io.BytesIO()
        if raw_download:
            blob.download_to_file(file_obj, raw_download=True, **timeout_kwarg)
        else:
            blob.download_to_file(file_obj, **timeout_kwarg)

        headers = {"accept-encoding": "gzip"}
        blob._do_download.assert_called_once_with(
            client._http,
            file_obj,
            media_link,
            headers,
            None,
            None,
            raw_download,
            timeout=expected_timeout,
            checksum="md5",
        )

    def test_download_to_file_wo_chunks_wo_raw(self):
        self._download_to_file_helper(use_chunks=False, raw_download=False)

    def test_download_to_file_w_chunks_wo_raw(self):
        self._download_to_file_helper(use_chunks=True, raw_download=False)

    def test_download_to_file_wo_chunks_w_raw(self):
        self._download_to_file_helper(use_chunks=False, raw_download=True)

    def test_download_to_file_w_chunks_w_raw(self):
        self._download_to_file_helper(use_chunks=True, raw_download=True)

    def test_download_to_file_w_custom_timeout(self):
        self._download_to_file_helper(
            use_chunks=False, raw_download=False, timeout=9.58
        )

    def _download_to_filename_helper(self, updated, raw_download, timeout=None):
        """Shared driver for download_to_filename tests.

        When ``updated`` is given, the downloaded file's mtime must be set
        to the blob's ``updated`` timestamp.
        """
        import os
        from google.cloud.storage._helpers import _convert_to_timestamp
        from google.cloud._testing import _NamedTemporaryFile

        blob_name = "blob-name"
        client = mock.Mock(spec=["_http"])
        bucket = _Bucket(client)
        media_link = "http://example.com/media/"
        properties = {"mediaLink": media_link}
        if updated is not None:
            properties["updated"] = updated
        blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        blob._do_download = mock.Mock()

        with _NamedTemporaryFile() as temp:
            if timeout is None:
                blob.download_to_filename(temp.name, raw_download=raw_download)
            else:
                blob.download_to_filename(
                    temp.name, raw_download=raw_download, timeout=timeout,
                )

            if updated is None:
                self.assertIsNone(blob.updated)
            else:
                # mtime must mirror the blob's `updated` property.
                mtime = os.path.getmtime(temp.name)
                if six.PY2:
                    updated_time = _convert_to_timestamp(blob.updated)
                else:
                    updated_time = blob.updated.timestamp()
                self.assertEqual(mtime, updated_time)

        expected_timeout = self._get_default_timeout() if timeout is None else timeout
        headers = {"accept-encoding": "gzip"}
        blob._do_download.assert_called_once_with(
            client._http,
            mock.ANY,
            media_link,
            headers,
            None,
            None,
            raw_download,
            timeout=expected_timeout,
            checksum="md5",
        )
        # The stream argument is created internally; verify it targeted
        # the requested filename.
        stream = blob._do_download.mock_calls[0].args[1]
        self.assertEqual(stream.name, temp.name)

    def test_download_to_filename_w_generation_match(self):
        from google.cloud._testing import _NamedTemporaryFile

        GENERATION_NUMBER = 6
        MEDIA_LINK = "http://example.com/media/"
EXPECTED_LINK = MEDIA_LINK + "?ifGenerationMatch={}".format(GENERATION_NUMBER) HEADERS = {"accept-encoding": "gzip"} client = mock.Mock(spec=["_http"]) blob = self._make_one( "blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK} ) blob._do_download = mock.Mock() with _NamedTemporaryFile() as temp: blob.download_to_filename(temp.name, if_generation_match=GENERATION_NUMBER) blob._do_download.assert_called_once_with( client._http, mock.ANY, EXPECTED_LINK, HEADERS, None, None, False, timeout=self._get_default_timeout(), checksum="md5", ) def test_download_to_filename_w_updated_wo_raw(self): updated = "2014-12-06T13:13:50.690Z" self._download_to_filename_helper(updated=updated, raw_download=False) def test_download_to_filename_wo_updated_wo_raw(self): self._download_to_filename_helper(updated=None, raw_download=False) def test_download_to_filename_w_updated_w_raw(self): updated = "2014-12-06T13:13:50.690Z" self._download_to_filename_helper(updated=updated, raw_download=True) def test_download_to_filename_wo_updated_w_raw(self): self._download_to_filename_helper(updated=None, raw_download=True) def test_download_to_filename_w_custom_timeout(self): self._download_to_filename_helper( updated=None, raw_download=False, timeout=9.58 ) def test_download_to_filename_corrupted(self): from google.resumable_media import DataCorruption blob_name = "blob-name" client = mock.Mock(spec=["_http"]) bucket = _Bucket(client) media_link = "http://example.com/media/" properties = {"mediaLink": media_link} blob = self._make_one(blob_name, bucket=bucket, properties=properties) blob._do_download = mock.Mock() blob._do_download.side_effect = DataCorruption("testing") # `_NamedTemporaryFile` it will try to remove after the file is # already removed) filehandle, filename = tempfile.mkstemp() os.close(filehandle) self.assertTrue(os.path.exists(filename)) with self.assertRaises(DataCorruption): blob.download_to_filename(filename) # Make sure the file was cleaned up. 
        # (Continues test_download_to_filename_corrupted: the partial file
        # must be gone after the failed download.)
        self.assertFalse(os.path.exists(filename))

        headers = {"accept-encoding": "gzip"}
        blob._do_download.assert_called_once_with(
            client._http,
            mock.ANY,
            media_link,
            headers,
            None,
            None,
            False,
            timeout=self._get_default_timeout(),
            checksum="md5",
        )
        stream = blob._do_download.mock_calls[0].args[1]
        self.assertEqual(stream.name, filename)

    def test_download_to_filename_w_key(self):
        """Customer-supplied encryption key adds X-Goog-Encryption-* headers."""
        from google.cloud._testing import _NamedTemporaryFile
        from google.cloud.storage.blob import _get_encryption_headers

        blob_name = "blob-name"
        # Create a fake client/bucket and use them in the Blob() constructor.
        client = mock.Mock(spec=["_http"])
        bucket = _Bucket(client)
        media_link = "http://example.com/media/"
        properties = {"mediaLink": media_link}
        key = b"aa426195405adee2c8081bb9e7e74b19"
        blob = self._make_one(
            blob_name, bucket=bucket, properties=properties, encryption_key=key
        )
        blob._do_download = mock.Mock()

        with _NamedTemporaryFile() as temp:
            blob.download_to_filename(temp.name)

        headers = {"accept-encoding": "gzip"}
        headers.update(_get_encryption_headers(key))
        blob._do_download.assert_called_once_with(
            client._http,
            mock.ANY,
            media_link,
            headers,
            None,
            None,
            False,
            timeout=self._get_default_timeout(),
            checksum="md5",
        )
        stream = blob._do_download.mock_calls[0].args[1]
        self.assertEqual(stream.name, temp.name)

    def _download_as_bytes_helper(self, raw_download, timeout=None):
        """Shared driver for download_as_bytes tests."""
        blob_name = "blob-name"
        client = mock.Mock(spec=["_http"])
        bucket = _Bucket(client)
        media_link = "http://example.com/media/"
        properties = {"mediaLink": media_link}
        blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        blob._do_download = mock.Mock()

        if timeout is None:
            expected_timeout = self._get_default_timeout()
            fetched = blob.download_as_bytes(raw_download=raw_download)
        else:
            expected_timeout = timeout
            fetched = blob.download_as_bytes(raw_download=raw_download, timeout=timeout)
        # The mocked _do_download writes nothing, so the result is empty.
        self.assertEqual(fetched, b"")

        headers = {"accept-encoding": "gzip"}
        blob._do_download.assert_called_once_with(
            client._http,
            mock.ANY,
            media_link,
            headers,
            None,
            None,
            raw_download,
            timeout=expected_timeout,
            checksum="md5",
        )
        # An in-memory buffer must be created internally to hold the bytes.
        stream = blob._do_download.mock_calls[0].args[1]
        self.assertIsInstance(stream, io.BytesIO)

    def test_download_as_string_w_response_headers(self):
        """Response headers must be reflected into the blob's properties."""
        blob_name = "blob-name"
        client = mock.Mock(spec=["_http"])
        bucket = _Bucket(client)
        media_link = "http://example.com/media/"
        properties = {"mediaLink": media_link}
        blob = self._make_one(blob_name, bucket=bucket, properties=properties)

        response = self._mock_requests_response(
            http_client.OK,
            headers={
                "Content-Type": "application/json",
                "Content-Language": "ko-kr",
                "Cache-Control": "max-age=1337;public",
                "Content-Encoding": "gzip",
                "X-Goog-Storage-Class": "STANDARD",
                "X-Goog-Hash": "crc32c=4gcgLQ==,md5=CS9tHYTtyFntzj7B9nkkJQ==",
            },
            # { "x": 5 } gzipped
            content=b"\x1f\x8b\x08\x00\xcfo\x17_\x02\xff\xabVP\xaaP\xb2R0U\xa8\x05\x00\xa1\xcaQ\x93\n\x00\x00\x00",
        )
        blob._extract_headers_from_download(response)

        self.assertEqual(blob.content_type, "application/json")
        self.assertEqual(blob.content_language, "ko-kr")
        self.assertEqual(blob.content_encoding, "gzip")
        self.assertEqual(blob.cache_control, "max-age=1337;public")
        self.assertEqual(blob.storage_class, "STANDARD")
        # The combined X-Goog-Hash header must be split into md5 + crc32c.
        self.assertEqual(blob.md5_hash, "CS9tHYTtyFntzj7B9nkkJQ==")
        self.assertEqual(blob.crc32c, "4gcgLQ==")

        # Second response: hash values containing '/' and '+' (base64
        # alphabet) must still be parsed correctly.
        response = self._mock_requests_response(
            http_client.OK,
            headers={
                "Content-Type": "application/octet-stream",
                "Content-Language": "en-US",
                "Cache-Control": "max-age=1337;public",
                "Content-Encoding": "gzip",
                "X-Goog-Storage-Class": "STANDARD",
                "X-Goog-Hash": "crc32c=4/c+LQ==,md5=CS9tHYTt/+ntzj7B9nkkJQ==",
            },
            content=b"",
        )
        blob._extract_headers_from_download(response)
        self.assertEqual(blob.content_type, "application/octet-stream")
        self.assertEqual(blob.content_language, "en-US")
        self.assertEqual(blob.md5_hash, "CS9tHYTt/+ntzj7B9nkkJQ==")
        self.assertEqual(blob.crc32c, "4/c+LQ==")

    def test_download_as_string_w_hash_response_header_none(self):
        """An empty X-Goog-Hash header must not clobber existing hashes."""
        blob_name = "blob-name"
        # (Continues test_download_as_string_w_hash_response_header_none.)
        md5_hash = "CS9tHYTtyFntzj7B9nkkJQ=="
        crc32c = "4gcgLQ=="
        client = mock.Mock(spec=["_http"])
        bucket = _Bucket(client)
        media_link = "http://example.com/media/"
        properties = {
            "mediaLink": media_link,
            "md5Hash": md5_hash,
            "crc32c": crc32c,
        }
        blob = self._make_one(blob_name, bucket=bucket, properties=properties)

        response = self._mock_requests_response(
            http_client.OK,
            headers={"X-Goog-Hash": ""},
            # { "x": 5 } gzipped
            content=b"\x1f\x8b\x08\x00\xcfo\x17_\x02\xff\xabVP\xaaP\xb2R0U\xa8\x05\x00\xa1\xcaQ\x93\n\x00\x00\x00",
        )
        blob._extract_headers_from_download(response)

        # Pre-existing property values must survive the empty header.
        self.assertEqual(blob.md5_hash, md5_hash)
        self.assertEqual(blob.crc32c, crc32c)

    def test_download_as_bytes_w_generation_match(self):
        """Precondition kwargs must be forwarded to download_to_file."""
        GENERATION_NUMBER = 6
        MEDIA_LINK = "http://example.com/media/"

        client = mock.Mock(spec=["_http"])
        blob = self._make_one(
            "blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
        )
        blob.download_to_file = mock.Mock()

        fetched = blob.download_as_bytes(if_generation_match=GENERATION_NUMBER)
        self.assertEqual(fetched, b"")

        blob.download_to_file.assert_called_once_with(
            mock.ANY,
            client=None,
            start=None,
            end=None,
            raw_download=False,
            if_generation_match=GENERATION_NUMBER,
            if_generation_not_match=None,
            if_metageneration_match=None,
            if_metageneration_not_match=None,
            timeout=self._get_default_timeout(),
            checksum="md5",
        )

    def test_download_as_bytes_wo_raw(self):
        self._download_as_bytes_helper(raw_download=False)

    def test_download_as_bytes_w_raw(self):
        self._download_as_bytes_helper(raw_download=True)

    def test_download_as_byte_w_custom_timeout(self):
        self._download_as_bytes_helper(raw_download=False, timeout=9.58)

    def _download_as_text_helper(self, raw_download, encoding=None, timeout=None):
        """Shared driver for download_as_text tests (optional encoding)."""
        blob_name = "blob-name"
        client = mock.Mock(spec=["_http"])
        bucket = _Bucket(client)
        media_link = "http://example.com/media/"
        properties = {"mediaLink": media_link}
        if encoding:
            properties["contentEncoding"] = encoding
        blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        blob._do_download = mock.Mock()

        if timeout is None:
            expected_timeout = self._get_default_timeout()
            fetched = blob.download_as_text(raw_download=raw_download)
        else:
            expected_timeout = timeout
            fetched = blob.download_as_text(raw_download=raw_download, timeout=timeout)
        # Mocked download writes nothing -> decoded result is empty text.
        self.assertEqual(fetched, "")

        headers = {"accept-encoding": "gzip"}
        blob._do_download.assert_called_once_with(
            client._http,
            mock.ANY,
            media_link,
            headers,
            None,
            None,
            raw_download,
            timeout=expected_timeout,
            checksum="md5",
        )
        stream = blob._do_download.mock_calls[0].args[1]
        self.assertIsInstance(stream, io.BytesIO)

    def test_download_as_text_w_generation_match(self):
        GENERATION_NUMBER = 6
        MEDIA_LINK = "http://example.com/media/"

        client = mock.Mock(spec=["_http"])
        blob = self._make_one(
            "blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
        )
        blob.download_to_file = mock.Mock()

        fetched = blob.download_as_text(if_generation_match=GENERATION_NUMBER)
        self.assertEqual(fetched, "")

        blob.download_to_file.assert_called_once_with(
            mock.ANY,
            client=None,
            start=None,
            end=None,
            raw_download=False,
            if_generation_match=GENERATION_NUMBER,
            if_generation_not_match=None,
            if_metageneration_match=None,
            if_metageneration_not_match=None,
            timeout=self._get_default_timeout(),
            checksum="md5",
        )

    def test_download_as_text_wo_raw(self):
        self._download_as_text_helper(raw_download=False)

    def test_download_as_text_w_raw(self):
        self._download_as_text_helper(raw_download=True)

    def test_download_as_text_w_custom_timeout(self):
        self._download_as_text_helper(raw_download=False, timeout=9.58)

    def test_download_as_text_w_encoding(self):
        self._download_as_text_helper(raw_download=False, encoding="utf-8")

    @mock.patch("warnings.warn")
    def test_download_as_string(self, mock_warn):
        """Deprecated alias: delegates to download_to_file and warns."""
        MEDIA_LINK = "http://example.com/media/"

        client = mock.Mock(spec=["_http"])
        blob = self._make_one(
            "blob-name", bucket=_Bucket(client), properties={"mediaLink": MEDIA_LINK}
        )
        blob.download_to_file = mock.Mock()

        fetched = blob.download_as_string()
        self.assertEqual(fetched, b"")

        blob.download_to_file.assert_called_once_with(
            mock.ANY,
            client=None,
            start=None,
            end=None,
            raw_download=False,
            if_generation_match=None,
            if_generation_not_match=None,
            if_metageneration_match=None,
            if_metageneration_not_match=None,
            timeout=self._get_default_timeout(),
            checksum="md5",
        )

        # NOTE(review): the expected message lacks a space between the two
        # sentences ("future." + "Use") -- it must match the library's
        # string byte-for-byte, so confirm against the implementation
        # before "fixing" it here.
        mock_warn.assert_called_with(
            "Blob.download_as_string() is deprecated and will be removed in future."
            "Use Blob.download_as_bytes() instead.",
            PendingDeprecationWarning,
            stacklevel=1,
        )

    def test__get_content_type_explicit(self):
        """An explicitly-passed content type wins."""
        blob = self._make_one(u"blob-name", bucket=None)

        content_type = u"text/plain"
        return_value = blob._get_content_type(content_type)
        self.assertEqual(return_value, content_type)

    def test__get_content_type_from_blob(self):
        """Falls back to the blob's own content_type property."""
        blob = self._make_one(u"blob-name", bucket=None)
        blob.content_type = u"video/mp4"

        return_value = blob._get_content_type(None)
        self.assertEqual(return_value, blob.content_type)

    def test__get_content_type_from_filename(self):
        """Falls back to guessing from the filename extension."""
        blob = self._make_one(u"blob-name", bucket=None)

        return_value = blob._get_content_type(None, filename="archive.tar")
        self.assertEqual(return_value, "application/x-tar")

    def test__get_content_type_default(self):
        """Final fallback is application/octet-stream."""
        blob = self._make_one(u"blob-name", bucket=None)

        return_value = blob._get_content_type(None)
        self.assertEqual(return_value, u"application/octet-stream")

    def test__get_writable_metadata_no_changes(self):
        name = u"blob-name"
        blob = self._make_one(name, bucket=None)

        object_metadata = blob._get_writable_metadata()
        # Only the name is present when nothing has been modified.
        expected = {"name": name}
        self.assertEqual(object_metadata, expected)

    def test__get_writable_metadata_with_changes(self):
        name = u"blob-name"
        blob = self._make_one(name, bucket=None)
        blob.storage_class = "NEARLINE"
        blob.cache_control = "max-age=3600"
        blob.metadata = {"color": "red"}

        object_metadata = blob._get_writable_metadata()
        expected = {
            "cacheControl": blob.cache_control,
            "metadata": blob.metadata,
            "name": name,
            "storageClass": blob.storage_class,
        }
        # (Continues test__get_writable_metadata_with_changes.)
        self.assertEqual(object_metadata, expected)

    def test__get_writable_metadata_unwritable_field(self):
        """Changed-but-unwritable fields (e.g. `updated`) are excluded."""
        name = u"blob-name"
        properties = {"updated": "2016-10-16T18:18:18.181Z"}
        blob = self._make_one(name, bucket=None, properties=properties)
        # Fake that `updated` is in changes.
        blob._changes.add("updated")

        object_metadata = blob._get_writable_metadata()
        expected = {"name": name}
        self.assertEqual(object_metadata, expected)

    def test__set_metadata_to_none(self):
        """Assigning None to metadata still goes through _patch_property."""
        name = u"blob-name"
        blob = self._make_one(name, bucket=None)
        blob.storage_class = "NEARLINE"
        blob.cache_control = "max-age=3600"

        with mock.patch("google.cloud.storage.blob.Blob._patch_property") as patch_prop:
            blob.metadata = None
            patch_prop.assert_called_once_with("metadata", None)

    def test__get_upload_arguments(self):
        """Upload args include CSEK headers, writable metadata, content type."""
        name = u"blob-name"
        key = b"[pXw@,p@@AfBfrR3x-2b2SCHR,.?YwRO"
        blob = self._make_one(name, bucket=None, encryption_key=key)
        blob.content_disposition = "inline"

        content_type = u"image/jpeg"
        info = blob._get_upload_arguments(content_type)

        headers, object_metadata, new_content_type = info
        # base64 of the raw key / of its SHA-256 digest, respectively.
        header_key_value = "W3BYd0AscEBAQWZCZnJSM3gtMmIyU0NIUiwuP1l3Uk8="
        header_key_hash_value = "G0++dxF4q5rG4o9kE8gvEKn15RH6wLm0wXV1MgAlXOg="
        expected_headers = {
            "X-Goog-Encryption-Algorithm": "AES256",
            "X-Goog-Encryption-Key": header_key_value,
            "X-Goog-Encryption-Key-Sha256": header_key_hash_value,
        }
        self.assertEqual(headers, expected_headers)
        expected_metadata = {
            "contentDisposition": blob.content_disposition,
            "name": name,
        }
        self.assertEqual(object_metadata, expected_metadata)
        self.assertEqual(new_content_type, content_type)

    def _mock_transport(self, status_code, headers, content=b""):
        """Build a fake transport whose request() returns a canned response."""
        fake_transport = mock.Mock(spec=["request"])
        fake_response = self._mock_requests_response(
            status_code, headers, content=content
        )
        fake_transport.request.return_value = fake_response
        return fake_transport

    def _do_multipart_success(
        self,
        mock_get_boundary,
        size=None,
        num_retries=None,
        user_project=None,
        predefined_acl=None,
        if_generation_match=None,
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        kms_key_name=None,
        timeout=None,
    ):
        """Shared driver asserting _do_multipart_upload's URL, payload, headers."""
        from six.moves.urllib.parse import urlencode

        bucket = _Bucket(name="w00t", user_project=user_project)
        blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name)
        self.assertIsNone(blob.chunk_size)

        # Create mocks to be checked for doing transport.
        transport = self._mock_transport(http_client.OK, {})

        # Create some mock arguments.
        client = mock.Mock(_http=transport, _connection=_Connection, spec=["_http"])
        client._connection.API_BASE_URL = "https://storage.googleapis.com"
        data = b"data here hear hier"
        stream = io.BytesIO(data)
        content_type = u"application/xml"

        if timeout is None:
            expected_timeout = self._get_default_timeout()
            timeout_kwarg = {}
        else:
            expected_timeout = timeout
            timeout_kwarg = {"timeout": timeout}

        response = blob._do_multipart_upload(
            client,
            stream,
            content_type,
            size,
            num_retries,
            predefined_acl,
            if_generation_match,
            if_generation_not_match,
            if_metageneration_match,
            if_metageneration_not_match,
            **timeout_kwarg
        )

        # Check the mocks and the returned value.
        self.assertIs(response, transport.request.return_value)
        if size is None:
            # No explicit size: the whole stream is consumed.
            data_read = data
            self.assertEqual(stream.tell(), len(data))
        else:
            data_read = data[:size]
            self.assertEqual(stream.tell(), size)

        mock_get_boundary.assert_called_once_with()

        upload_url = (
            "https://storage.googleapis.com/upload/storage/v1" + bucket.path + "/o"
        )

        qs_params = [("uploadType", "multipart")]

        if user_project is not None:
            qs_params.append(("userProject", user_project))

        if predefined_acl is not None:
            qs_params.append(("predefinedAcl", predefined_acl))

        # A key name that already pins a version must NOT be sent as a
        # query parameter (the version is part of the resource name).
        if kms_key_name is not None and "cryptoKeyVersions" not in kms_key_name:
            qs_params.append(("kmsKeyName", kms_key_name))

        if if_generation_match is not None:
            qs_params.append(("ifGenerationMatch", if_generation_match))

        if if_generation_not_match is not None:
            qs_params.append(("ifGenerationNotMatch", if_generation_not_match))

        if if_metageneration_match is not None:
            qs_params.append(("ifMetagenerationMatch", if_metageneration_match))

        # NOTE(review): "ifMetaGenerationNotMatch" (capital G) is
        # inconsistent with "ifMetagenerationMatch" above; it matches what
        # the implementation emits, so verify against the library before
        # changing either side.
        if if_metageneration_not_match is not None:
            qs_params.append(("ifMetaGenerationNotMatch", if_metageneration_not_match))

        upload_url += "?" + urlencode(qs_params)
        payload = (
            b"--==0==\r\n"
            + b"content-type: application/json; charset=UTF-8\r\n\r\n"
            + b'{"name": "blob-name"}\r\n'
            + b"--==0==\r\n"
            + b"content-type: application/xml\r\n\r\n"
            + data_read
            + b"\r\n--==0==--"
        )
        headers = {"content-type": b'multipart/related; boundary="==0=="'}
        transport.request.assert_called_once_with(
            "POST", upload_url, data=payload, headers=headers, timeout=expected_timeout
        )

    @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
    def test__do_multipart_upload_no_size(self, mock_get_boundary):
        self._do_multipart_success(mock_get_boundary, predefined_acl="private")

    @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
    def test__do_multipart_upload_with_size(self, mock_get_boundary):
        self._do_multipart_success(mock_get_boundary, size=10)

    @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
    def test__do_multipart_upload_with_user_project(self, mock_get_boundary):
        user_project = "user-project-123"
        self._do_multipart_success(mock_get_boundary, user_project=user_project)

    @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
    def test__do_multipart_upload_with_kms(self, mock_get_boundary):
        kms_resource = (
            "projects/test-project-123/"
            "locations/us/"
            "keyRings/test-ring/"
            "cryptoKeys/test-key"
        )
        self._do_multipart_success(mock_get_boundary, kms_key_name=kms_resource)

    @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
    def test__do_multipart_upload_with_kms_with_version(self, mock_get_boundary):
        # NOTE(review): there is no "/" between "test-key" and
        # "cryptoKeyVersions/1" -- the adjacent string literals concatenate.
        # The branch under test only checks for the "cryptoKeyVersions"
        # substring, so the test still passes; confirm whether the missing
        # separator is intentional.
        kms_resource = (
            "projects/test-project-123/"
            "locations/us/"
            "keyRings/test-ring/"
            "cryptoKeys/test-key"
            "cryptoKeyVersions/1"
        )
        self._do_multipart_success(mock_get_boundary, kms_key_name=kms_resource)

    @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==")
    def test__do_multipart_upload_with_retry(self, mock_get_boundary):
        self._do_multipart_success(mock_get_boundary,
num_retries=8) @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==") def test__do_multipart_upload_with_generation_match(self, mock_get_boundary): self._do_multipart_success( mock_get_boundary, if_generation_match=4, if_metageneration_match=4 ) @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==") def test__do_multipart_upload_with_custom_timeout(self, mock_get_boundary): self._do_multipart_success(mock_get_boundary, timeout=9.58) @mock.patch(u"google.resumable_media._upload.get_boundary", return_value=b"==0==") def test__do_multipart_upload_with_generation_not_match(self, mock_get_boundary): self._do_multipart_success( mock_get_boundary, if_generation_not_match=4, if_metageneration_not_match=4 ) def test__do_multipart_upload_bad_size(self): blob = self._make_one(u"blob-name", bucket=None) data = b"data here hear hier" stream = io.BytesIO(data) size = 50 self.assertGreater(size, len(data)) with self.assertRaises(ValueError) as exc_info: blob._do_multipart_upload( None, stream, None, size, None, None, None, None, None, None ) exc_contents = str(exc_info.exception) self.assertIn("was specified but the file-like object only had", exc_contents) self.assertEqual(stream.tell(), len(data)) def _initiate_resumable_helper( self, size=None, extra_headers=None, chunk_size=None, num_retries=None, user_project=None, predefined_acl=None, if_generation_match=None, if_generation_not_match=None, if_metageneration_match=None, if_metageneration_not_match=None, blob_chunk_size=786432, kms_key_name=None, timeout=None, ): from six.moves.urllib.parse import urlencode from google.resumable_media.requests import ResumableUpload from google.cloud.storage.blob import _DEFAULT_CHUNKSIZE bucket = _Bucket(name="whammy", user_project=user_project) blob = self._make_one(u"blob-name", bucket=bucket, kms_key_name=kms_key_name) blob.metadata = {"rook": "takes knight"} blob.chunk_size = blob_chunk_size if blob_chunk_size is not None: 
self.assertIsNotNone(blob.chunk_size) else: self.assertIsNone(blob.chunk_size) # Need to make sure **same** dict is used because ``json.dumps()`` # will depend on the hash order. object_metadata = blob._get_writable_metadata() blob._get_writable_metadata = mock.Mock(return_value=object_metadata, spec=[]) # Create mocks to be checked for doing transport. resumable_url = "http://test.invalid?upload_id=hey-you" response_headers = {"location": resumable_url} transport = self._mock_transport(http_client.OK, response_headers) # Create some mock arguments and call the method under test. client = mock.Mock(_http=transport, _connection=_Connection, spec=[u"_http"]) client._connection.API_BASE_URL = "https://storage.googleapis.com" data = b"hello hallo halo hi-low" stream = io.BytesIO(data) content_type = u"text/plain" if timeout is None: expected_timeout = self._get_default_timeout() timeout_kwarg = {} else: expected_timeout = timeout timeout_kwarg = {"timeout": timeout} upload, transport = blob._initiate_resumable_upload( client, stream, content_type, size, num_retries, extra_headers=extra_headers, chunk_size=chunk_size, predefined_acl=predefined_acl, if_generation_match=if_generation_match, if_generation_not_match=if_generation_not_match, if_metageneration_match=if_metageneration_match, if_metageneration_not_match=if_metageneration_not_match, **timeout_kwarg ) # Check the returned values. 
self.assertIsInstance(upload, ResumableUpload) upload_url = ( "https://storage.googleapis.com/upload/storage/v1" + bucket.path + "/o" ) qs_params = [("uploadType", "resumable")] if user_project is not None: qs_params.append(("userProject", user_project)) if predefined_acl is not None: qs_params.append(("predefinedAcl", predefined_acl)) if kms_key_name is not None and "cryptoKeyVersions" not in kms_key_name: qs_params.append(("kmsKeyName", kms_key_name)) if if_generation_match is not None: qs_params.append(("ifGenerationMatch", if_generation_match)) if if_generation_not_match is not None: qs_params.append(("ifGenerationNotMatch", if_generation_not_match)) if if_metageneration_match is not None: qs_params.append(("ifMetagenerationMatch", if_metageneration_match)) if if_metageneration_not_match is not None: qs_params.append(("ifMetaGenerationNotMatch", if_metageneration_not_match)) upload_url += "?" + urlencode(qs_params) self.assertEqual(upload.upload_url, upload_url) if extra_headers is None: self.assertEqual(upload._headers, {}) else: self.assertEqual(upload._headers, extra_headers) self.assertIsNot(upload._headers, extra_headers) self.assertFalse(upload.finished) if chunk_size is None: if blob_chunk_size is None: self.assertEqual(upload._chunk_size, _DEFAULT_CHUNKSIZE) else: self.assertEqual(upload._chunk_size, blob.chunk_size) else: self.assertNotEqual(blob.chunk_size, chunk_size) self.assertEqual(upload._chunk_size, chunk_size) self.assertIs(upload._stream, stream) if size is None: self.assertIsNone(upload._total_bytes) else: self.assertEqual(upload._total_bytes, size) self.assertEqual(upload._content_type, content_type) self.assertEqual(upload.resumable_url, resumable_url) retry_strategy = upload._retry_strategy self.assertEqual(retry_strategy.max_sleep, 64.0) if num_retries is None: self.assertEqual(retry_strategy.max_cumulative_retry, 600.0) self.assertIsNone(retry_strategy.max_retries) else: self.assertIsNone(retry_strategy.max_cumulative_retry) 
self.assertEqual(retry_strategy.max_retries, num_retries) self.assertIs(transport, transport) # Make sure we never read from the stream. self.assertEqual(stream.tell(), 0) # Check the mocks. blob._get_writable_metadata.assert_called_once_with() payload = json.dumps(object_metadata).encode("utf-8") expected_headers = { "content-type": "application/json; charset=UTF-8", "x-upload-content-type": content_type, } if size is not None: expected_headers["x-upload-content-length"] = str(size) if extra_headers is not None: expected_headers.update(extra_headers) transport.request.assert_called_once_with( "POST", upload_url, data=payload, headers=expected_headers, timeout=expected_timeout, ) def test__initiate_resumable_upload_with_custom_timeout(self): self._initiate_resumable_helper(timeout=9.58) def test__initiate_resumable_upload_no_size(self): self._initiate_resumable_helper() def test__initiate_resumable_upload_with_size(self): self._initiate_resumable_helper(size=10000) def test__initiate_resumable_upload_with_user_project(self): user_project = "user-project-123" self._initiate_resumable_helper(user_project=user_project) def test__initiate_resumable_upload_with_kms(self): kms_resource = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" "cryptoKeys/test-key" ) self._initiate_resumable_helper(kms_key_name=kms_resource) def test__initiate_resumable_upload_with_kms_with_version(self): kms_resource = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" "cryptoKeys/test-key" "cryptoKeyVersions/1" ) self._initiate_resumable_helper(kms_key_name=kms_resource) def test__initiate_resumable_upload_without_chunk_size(self): self._initiate_resumable_helper(blob_chunk_size=None) def test__initiate_resumable_upload_with_chunk_size(self): one_mb = 1048576 self._initiate_resumable_helper(chunk_size=one_mb) def test__initiate_resumable_upload_with_extra_headers(self): extra_headers = {"origin": "http://not-in-kansas-anymore.invalid"} 
self._initiate_resumable_helper(extra_headers=extra_headers) def test__initiate_resumable_upload_with_retry(self): self._initiate_resumable_helper(num_retries=11) def test__initiate_resumable_upload_with_generation_match(self): self._initiate_resumable_helper( if_generation_match=4, if_metageneration_match=4 ) def test__initiate_resumable_upload_with_generation_not_match(self): self._initiate_resumable_helper( if_generation_not_match=4, if_metageneration_not_match=4 ) def test__initiate_resumable_upload_with_predefined_acl(self): self._initiate_resumable_helper(predefined_acl="private") def _make_resumable_transport( self, headers1, headers2, headers3, total_bytes, data_corruption=False ): from google import resumable_media fake_transport = mock.Mock(spec=["request"]) fake_response1 = self._mock_requests_response(http_client.OK, headers1) fake_response2 = self._mock_requests_response( resumable_media.PERMANENT_REDIRECT, headers2 ) json_body = '{{"size": "{:d}"}}'.format(total_bytes) if data_corruption: fake_response3 = resumable_media.DataCorruption(None) else: fake_response3 = self._mock_requests_response( http_client.OK, headers3, content=json_body.encode("utf-8") ) responses = [fake_response1, fake_response2, fake_response3] fake_transport.request.side_effect = responses return fake_transport, responses @staticmethod def _do_resumable_upload_call0( blob, content_type, size=None, predefined_acl=None, if_generation_match=None, if_generation_not_match=None, if_metageneration_match=None, if_metageneration_not_match=None, timeout=None, ): # First mock transport.request() does initiates upload. 
upload_url = ( "https://storage.googleapis.com/upload/storage/v1" + blob.bucket.path + "/o?uploadType=resumable" ) if predefined_acl is not None: upload_url += "&predefinedAcl={}".format(predefined_acl) expected_headers = { "content-type": "application/json; charset=UTF-8", "x-upload-content-type": content_type, } if size is not None: expected_headers["x-upload-content-length"] = str(size) payload = json.dumps({"name": blob.name}).encode("utf-8") return mock.call( "POST", upload_url, data=payload, headers=expected_headers, timeout=timeout ) @staticmethod def _do_resumable_upload_call1( blob, content_type, data, resumable_url, size=None, predefined_acl=None, if_generation_match=None, if_generation_not_match=None, if_metageneration_match=None, if_metageneration_not_match=None, timeout=None, ): # Second mock transport.request() does sends first chunk. if size is None: content_range = "bytes 0-{:d}/*".format(blob.chunk_size - 1) else: content_range = "bytes 0-{:d}/{:d}".format(blob.chunk_size - 1, size) expected_headers = { "content-type": content_type, "content-range": content_range, } payload = data[: blob.chunk_size] return mock.call( "PUT", resumable_url, data=payload, headers=expected_headers, timeout=timeout, ) @staticmethod def _do_resumable_upload_call2( blob, content_type, data, resumable_url, total_bytes, predefined_acl=None, if_generation_match=None, if_generation_not_match=None, if_metageneration_match=None, if_metageneration_not_match=None, timeout=None, ): # Third mock transport.request() does sends last chunk. 
content_range = "bytes {:d}-{:d}/{:d}".format( blob.chunk_size, total_bytes - 1, total_bytes ) expected_headers = { "content-type": content_type, "content-range": content_range, } payload = data[blob.chunk_size :] return mock.call( "PUT", resumable_url, data=payload, headers=expected_headers, timeout=timeout, ) def _do_resumable_helper( self, use_size=False, num_retries=None, predefined_acl=None, if_generation_match=None, if_generation_not_match=None, if_metageneration_match=None, if_metageneration_not_match=None, timeout=None, data_corruption=False, ): bucket = _Bucket(name="yesterday") blob = self._make_one(u"blob-name", bucket=bucket) blob.chunk_size = blob._CHUNK_SIZE_MULTIPLE self.assertIsNotNone(blob.chunk_size) # Data to be uploaded. data = b"<html>" + (b"A" * blob.chunk_size) + b"</html>" total_bytes = len(data) if use_size: size = total_bytes else: size = None # Create mocks to be checked for doing transport. resumable_url = "http://test.invalid?upload_id=and-then-there-was-1" headers1 = {"location": resumable_url} headers2 = {"range": "bytes=0-{:d}".format(blob.chunk_size - 1)} transport, responses = self._make_resumable_transport( headers1, headers2, {}, total_bytes, data_corruption=data_corruption ) # Create some mock arguments and call the method under test. client = mock.Mock(_http=transport, _connection=_Connection, spec=["_http"]) client._connection.API_BASE_URL = "https://storage.googleapis.com" stream = io.BytesIO(data) content_type = u"text/html" if timeout is None: expected_timeout = self._get_default_timeout() timeout_kwarg = {} else: expected_timeout = timeout timeout_kwarg = {"timeout": timeout} response = blob._do_resumable_upload( client, stream, content_type, size, num_retries, predefined_acl, if_generation_match, if_generation_not_match, if_metageneration_match, if_metageneration_not_match, **timeout_kwarg ) # Check the returned values. self.assertIs(response, responses[2]) self.assertEqual(stream.tell(), total_bytes) # Check the mocks. 
call0 = self._do_resumable_upload_call0( blob, content_type, size=size, predefined_acl=predefined_acl, if_generation_match=if_generation_match, if_generation_not_match=if_generation_not_match, if_metageneration_match=if_metageneration_match, if_metageneration_not_match=if_metageneration_not_match, timeout=expected_timeout, ) call1 = self._do_resumable_upload_call1( blob, content_type, data, resumable_url, size=size, predefined_acl=predefined_acl, if_generation_match=if_generation_match, if_generation_not_match=if_generation_not_match, if_metageneration_match=if_metageneration_match, if_metageneration_not_match=if_metageneration_not_match, timeout=expected_timeout, ) call2 = self._do_resumable_upload_call2( blob, content_type, data, resumable_url, total_bytes, predefined_acl=predefined_acl, if_generation_match=if_generation_match, if_generation_not_match=if_generation_not_match, if_metageneration_match=if_metageneration_match, if_metageneration_not_match=if_metageneration_not_match, timeout=expected_timeout, ) self.assertEqual(transport.request.mock_calls, [call0, call1, call2]) def test__do_resumable_upload_with_custom_timeout(self): self._do_resumable_helper(timeout=9.58) def test__do_resumable_upload_no_size(self): self._do_resumable_helper() def test__do_resumable_upload_with_size(self): self._do_resumable_helper(use_size=True) def test__do_resumable_upload_with_retry(self): self._do_resumable_helper(num_retries=6) def test__do_resumable_upload_with_predefined_acl(self): self._do_resumable_helper(predefined_acl="private") def test__do_resumable_upload_with_data_corruption(self): from google.resumable_media import DataCorruption with mock.patch("google.cloud.storage.blob.Blob.delete") as patch: try: self._do_resumable_helper(data_corruption=True) except Exception as e: self.assertTrue(patch.called) self.assertIsInstance(e, DataCorruption) def _do_upload_helper( self, chunk_size=None, num_retries=None, predefined_acl=None, if_generation_match=None, 
        if_generation_not_match=None,
        if_metageneration_match=None,
        if_metageneration_not_match=None,
        size=None,
        timeout=None,
    ):
        from google.cloud.storage.blob import _MAX_MULTIPART_SIZE

        blob = self._make_one(u"blob-name", bucket=None)

        # Create a fake response.
        response = mock.Mock(spec=[u"json"])
        response.json.return_value = mock.sentinel.json
        # Mock **both** helpers; _do_upload must dispatch to exactly one.
        blob._do_multipart_upload = mock.Mock(return_value=response, spec=[])
        blob._do_resumable_upload = mock.Mock(return_value=response, spec=[])

        if chunk_size is None:
            self.assertIsNone(blob.chunk_size)
        else:
            blob.chunk_size = chunk_size
            self.assertIsNotNone(blob.chunk_size)

        client = mock.sentinel.client
        stream = mock.sentinel.stream
        content_type = u"video/mp4"
        if size is None:
            # Larger than _MAX_MULTIPART_SIZE, forcing the resumable path.
            size = 12345654321

        if timeout is None:
            expected_timeout = self._get_default_timeout()
            timeout_kwarg = {}
        else:
            expected_timeout = timeout
            timeout_kwarg = {"timeout": timeout}

        # Make the request and check the mocks.
        created_json = blob._do_upload(
            client,
            stream,
            content_type,
            size,
            num_retries,
            predefined_acl,
            if_generation_match,
            if_generation_not_match,
            if_metageneration_match,
            if_metageneration_not_match,
            **timeout_kwarg
        )
        self.assertIs(created_json, mock.sentinel.json)
        response.json.assert_called_once_with()
        if size is not None and size <= _MAX_MULTIPART_SIZE:
            blob._do_multipart_upload.assert_called_once_with(
                client,
                stream,
                content_type,
                size,
                num_retries,
                predefined_acl,
                if_generation_match,
                if_generation_not_match,
                if_metageneration_match,
                if_metageneration_not_match,
                timeout=expected_timeout,
                checksum=None,
            )
            blob._do_resumable_upload.assert_not_called()
        else:
            blob._do_multipart_upload.assert_not_called()
            blob._do_resumable_upload.assert_called_once_with(
                client,
                stream,
                content_type,
                size,
                num_retries,
                predefined_acl,
                if_generation_match,
                if_generation_not_match,
                if_metageneration_match,
                if_metageneration_not_match,
                timeout=expected_timeout,
                checksum=None,
            )

    def test__do_upload_uses_multipart(self):
        from google.cloud.storage.blob import _MAX_MULTIPART_SIZE

        self._do_upload_helper(size=_MAX_MULTIPART_SIZE)

    def test__do_upload_uses_multipart_w_custom_timeout(self):
        from google.cloud.storage.blob import _MAX_MULTIPART_SIZE

        self._do_upload_helper(size=_MAX_MULTIPART_SIZE, timeout=9.58)

    def test__do_upload_uses_resumable(self):
        from google.cloud.storage.blob import _MAX_MULTIPART_SIZE

        chunk_size = 256 * 1024  # 256KB
        self._do_upload_helper(chunk_size=chunk_size, size=_MAX_MULTIPART_SIZE + 1)

    def test__do_upload_uses_resumable_w_custom_timeout(self):
        from google.cloud.storage.blob import _MAX_MULTIPART_SIZE

        chunk_size = 256 * 1024  # 256KB
        self._do_upload_helper(
            chunk_size=chunk_size, size=_MAX_MULTIPART_SIZE + 1, timeout=9.58
        )

    def test__do_upload_with_retry(self):
        self._do_upload_helper(num_retries=20)

    def _upload_from_file_helper(self, side_effect=None, **kwargs):
        from google.cloud._helpers import UTC

        blob = self._make_one("blob-name", bucket=None)

        # Mock low-level upload helper on blob (it is tested elsewhere).
        created_json = {"updated": "2017-01-01T09:09:09.081Z"}
        blob._do_upload = mock.Mock(return_value=created_json, spec=[])
        if side_effect is not None:
            blob._do_upload.side_effect = side_effect
        # Make sure `updated` is empty before the request.
        self.assertIsNone(blob.updated)

        data = b"data is here"
        stream = io.BytesIO(data)
        stream.seek(2)  # Not at zero.
        content_type = u"font/woff"
        client = mock.sentinel.client
        predefined_acl = kwargs.get("predefined_acl", None)
        if_generation_match = kwargs.get("if_generation_match", None)
        if_generation_not_match = kwargs.get("if_generation_not_match", None)
        if_metageneration_match = kwargs.get("if_metageneration_match", None)
        if_metageneration_not_match = kwargs.get("if_metageneration_not_match", None)
        ret_val = blob.upload_from_file(
            stream, size=len(data), content_type=content_type, client=client, **kwargs
        )

        # Check the response and side-effects.
        self.assertIsNone(ret_val)
        new_updated = datetime.datetime(2017, 1, 1, 9, 9, 9, 81000, tzinfo=UTC)
        self.assertEqual(blob.updated, new_updated)

        expected_timeout = kwargs.get("timeout", self._get_default_timeout())
        # Check the mock.
        num_retries = kwargs.get("num_retries")
        blob._do_upload.assert_called_once_with(
            client,
            stream,
            content_type,
            len(data),
            num_retries,
            predefined_acl,
            if_generation_match,
            if_generation_not_match,
            if_metageneration_match,
            if_metageneration_not_match,
            timeout=expected_timeout,
            checksum=None,
        )
        return stream

    def test_upload_from_file_success(self):
        stream = self._upload_from_file_helper(predefined_acl="private")
        # Without rewind=True the stream position (2) is preserved.
        assert stream.tell() == 2

    @mock.patch("warnings.warn")
    def test_upload_from_file_with_retries(self, mock_warn):
        from google.cloud.storage import blob as blob_module

        # num_retries is deprecated; passing it must emit the warning.
        self._upload_from_file_helper(num_retries=20)
        mock_warn.assert_called_once_with(
            blob_module._NUM_RETRIES_MESSAGE, DeprecationWarning, stacklevel=2
        )

    def test_upload_from_file_with_rewind(self):
        stream = self._upload_from_file_helper(rewind=True)
        # rewind=True seeks the stream back to the start before uploading.
        assert stream.tell() == 0

    def test_upload_from_file_with_custom_timeout(self):
        self._upload_from_file_helper(timeout=9.58)

    def test_upload_from_file_failure(self):
        import requests

        from google.resumable_media import InvalidResponse
        from google.cloud import exceptions

        message = "Someone is already in this spot."
        response = requests.Response()
        response.status_code = http_client.CONFLICT
        response.request = requests.Request("POST", "http://example.com").prepare()
        side_effect = InvalidResponse(response, message)

        # A CONFLICT from the transport must surface as exceptions.Conflict.
        with self.assertRaises(exceptions.Conflict) as exc_info:
            self._upload_from_file_helper(side_effect=side_effect)

        self.assertIn(message, exc_info.exception.message)
        self.assertEqual(exc_info.exception.errors, [])

    def _do_upload_mock_call_helper(
        self, blob, client, content_type, size, timeout=None
    ):
        """Assert blob._do_upload was called once with the expected positional
        arguments and return the stream (positional argument 1)."""
        self.assertEqual(blob._do_upload.call_count, 1)
        mock_call = blob._do_upload.mock_calls[0]
        call_name, pos_args, kwargs = mock_call
        self.assertEqual(call_name, "")
        self.assertEqual(len(pos_args), 10)
        self.assertEqual(pos_args[0], client)
        self.assertEqual(pos_args[2], content_type)
        self.assertEqual(pos_args[3], size)
        self.assertIsNone(pos_args[4])  # num_retries
        self.assertIsNone(pos_args[5])  # predefined_acl
        self.assertIsNone(pos_args[6])  # if_generation_match
        self.assertIsNone(pos_args[7])  # if_generation_not_match
        self.assertIsNone(pos_args[8])  # if_metageneration_match
        self.assertIsNone(pos_args[9])  # if_metageneration_not_match

        expected_timeout = self._get_default_timeout() if timeout is None else timeout
        self.assertEqual(kwargs, {"timeout": expected_timeout, "checksum": None})

        return pos_args[1]

    def test_upload_from_filename(self):
        from google.cloud._testing import _NamedTemporaryFile

        blob = self._make_one("blob-name", bucket=None)
        # Mock low-level upload helper on blob (it is tested elsewhere).
        created_json = {"metadata": {"mint": "ice-cream"}}
        blob._do_upload = mock.Mock(return_value=created_json, spec=[])
        # Make sure `metadata` is empty before the request.
        self.assertIsNone(blob.metadata)

        data = b"soooo much data"
        content_type = u"image/svg+xml"
        client = mock.sentinel.client
        with _NamedTemporaryFile() as temp:
            with open(temp.name, "wb") as file_obj:
                file_obj.write(data)

            ret_val = blob.upload_from_filename(
                temp.name, content_type=content_type, client=client
            )

        # Check the response and side-effects.
        self.assertIsNone(ret_val)
        self.assertEqual(blob.metadata, created_json["metadata"])

        # Check the mock: the file was opened in binary mode and closed after.
        stream = self._do_upload_mock_call_helper(blob, client, content_type, len(data))
        self.assertTrue(stream.closed)
        self.assertEqual(stream.mode, "rb")
        self.assertEqual(stream.name, temp.name)

    def test_upload_from_filename_w_custom_timeout(self):
        from google.cloud._testing import _NamedTemporaryFile

        blob = self._make_one("blob-name", bucket=None)
        # Mock low-level upload helper on blob (it is tested elsewhere).
        created_json = {"metadata": {"mint": "ice-cream"}}
        blob._do_upload = mock.Mock(return_value=created_json, spec=[])
        # Make sure `metadata` is empty before the request.
        self.assertIsNone(blob.metadata)

        data = b"soooo much data"
        content_type = u"image/svg+xml"
        client = mock.sentinel.client
        with _NamedTemporaryFile() as temp:
            with open(temp.name, "wb") as file_obj:
                file_obj.write(data)

            blob.upload_from_filename(
                temp.name, content_type=content_type, client=client, timeout=9.58
            )

        # Check the mock.
        self._do_upload_mock_call_helper(
            blob, client, content_type, len(data), timeout=9.58
        )

    def _upload_from_string_helper(self, data, **kwargs):
        from google.cloud._helpers import _to_bytes

        blob = self._make_one("blob-name", bucket=None)

        # Mock low-level upload helper on blob (it is tested elsewhere).
        created_json = {"componentCount": "5"}
        blob._do_upload = mock.Mock(return_value=created_json, spec=[])
        # Make sure `component_count` is empty before the request.
        self.assertIsNone(blob.component_count)

        client = mock.sentinel.client
        ret_val = blob.upload_from_string(data, client=client, **kwargs)

        # Check the response and side-effects.
        self.assertIsNone(ret_val)
        self.assertEqual(blob.component_count, 5)

        # Check the mock: text input is encoded to UTF-8 bytes before upload.
        payload = _to_bytes(data, encoding="utf-8")
        stream = self._do_upload_mock_call_helper(
            blob,
            client,
            "text/plain",
            len(payload),
            kwargs.get("timeout", self._get_default_timeout()),
        )
        self.assertIsInstance(stream, io.BytesIO)
        self.assertEqual(stream.getvalue(), payload)

    def test_upload_from_string_w_custom_timeout(self):
        data = b"XB]jb\xb8tad\xe0"
        self._upload_from_string_helper(data, timeout=9.58)

    def test_upload_from_string_w_bytes(self):
        data = b"XB]jb\xb8tad\xe0"
        self._upload_from_string_helper(data)

    def test_upload_from_string_w_text(self):
        data = u"\N{snowman} \N{sailboat}"
        self._upload_from_string_helper(data)

    def _create_resumable_upload_session_helper(
        self, origin=None, side_effect=None, timeout=None
    ):
        """Drive create_resumable_upload_session() against a fake transport
        and return after asserting the session URL round-trips."""
        bucket = _Bucket(name="alex-trebek")
        blob = self._make_one("blob-name", bucket=bucket)
        chunk_size = 99 * blob._CHUNK_SIZE_MULTIPLE
        blob.chunk_size = chunk_size

        # Create mocks to be checked for doing transport.
        resumable_url = "http://test.invalid?upload_id=clean-up-everybody"
        response_headers = {"location": resumable_url}
        transport = self._mock_transport(http_client.OK, response_headers)
        if side_effect is not None:
            transport.request.side_effect = side_effect

        # Create some mock arguments and call the method under test.
        content_type = u"text/plain"
        size = 10000
        client = mock.Mock(_http=transport, _connection=_Connection, spec=[u"_http"])
        client._connection.API_BASE_URL = "https://storage.googleapis.com"

        if timeout is None:
            expected_timeout = self._get_default_timeout()
            timeout_kwarg = {}
        else:
            expected_timeout = timeout
            timeout_kwarg = {"timeout": timeout}

        new_url = blob.create_resumable_upload_session(
            content_type=content_type,
            size=size,
            origin=origin,
            client=client,
            **timeout_kwarg
        )

        # Check the returned value and (lack of) side-effect.
        self.assertEqual(new_url, resumable_url)
        self.assertEqual(blob.chunk_size, chunk_size)

        # Check the mocks.
        upload_url = (
            "https://storage.googleapis.com/upload/storage/v1"
            + bucket.path
            + "/o?uploadType=resumable"
        )
        payload = b'{"name": "blob-name"}'
        expected_headers = {
            "content-type": "application/json; charset=UTF-8",
            "x-upload-content-length": str(size),
            "x-upload-content-type": content_type,
        }
        if origin is not None:
            # An explicit origin is forwarded for CORS on the session.
            expected_headers["Origin"] = origin
        transport.request.assert_called_once_with(
            "POST",
            upload_url,
            data=payload,
            headers=expected_headers,
            timeout=expected_timeout,
        )

    def test_create_resumable_upload_session(self):
        self._create_resumable_upload_session_helper()

    def test_create_resumable_upload_session_with_custom_timeout(self):
        self._create_resumable_upload_session_helper(timeout=9.58)

    def test_create_resumable_upload_session_with_origin(self):
        self._create_resumable_upload_session_helper(origin="http://google.com")

    def test_create_resumable_upload_session_with_failure(self):
        from google.resumable_media import InvalidResponse
        from google.cloud import exceptions

        message = "5-oh-3 woe is me."
        response = self._mock_requests_response(
            status_code=http_client.SERVICE_UNAVAILABLE, headers={}
        )
        side_effect = InvalidResponse(response, message)

        # A 503 from the transport must surface as exceptions.ServiceUnavailable.
        with self.assertRaises(exceptions.ServiceUnavailable) as exc_info:
            self._create_resumable_upload_session_helper(side_effect=side_effect)

        self.assertIn(message, exc_info.exception.message)
        self.assertEqual(exc_info.exception.errors, [])

    def test_get_iam_policy(self):
        from google.cloud.storage.iam import STORAGE_OWNER_ROLE
        from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
        from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
        from google.api_core.iam import Policy

        BLOB_NAME = "blob-name"
        PATH = "/b/name/o/%s" % (BLOB_NAME,)
        ETAG = "DEADBEEF"
        VERSION = 1
        OWNER1 = "user:phred@example.com"
        OWNER2 = "group:cloud-logs@google.com"
        EDITOR1 = "domain:google.com"
        EDITOR2 = "user:phred@example.com"
        VIEWER1 = "serviceAccount:1234-abcdef@service.example.com"
        VIEWER2 = "user:phred@example.com"
        # Canned IAM policy resource returned by the fake connection.
        RETURNED = {
            "resourceId": PATH,
            "etag": ETAG,
            "version": VERSION,
            "bindings": [
                {"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]},
                {"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]},
                {"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]},
            ],
        }
        after = ({"status": http_client.OK}, RETURNED)
        EXPECTED = {
            binding["role"]: set(binding["members"]) for binding in RETURNED["bindings"]
        }
        connection = _Connection(after)
        client = _Client(connection)
        bucket = _Bucket(client=client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)

        policy = blob.get_iam_policy(timeout=42)

        self.assertIsInstance(policy, Policy)
        self.assertEqual(policy.etag, RETURNED["etag"])
        self.assertEqual(policy.version, RETURNED["version"])
        self.assertEqual(dict(policy), EXPECTED)

        kw = connection._requested
        self.assertEqual(len(kw), 1)
        self.assertEqual(
            kw[0],
            {
                "method": "GET",
                "path": "%s/iam" % (PATH,),
                "query_params": {},
                "_target_object": None,
                "timeout": 42,
            },
        )

    def test_get_iam_policy_w_requested_policy_version(self):
        from google.cloud.storage.iam import STORAGE_OWNER_ROLE

        BLOB_NAME = "blob-name"
        PATH = "/b/name/o/%s" % (BLOB_NAME,)
        ETAG = "DEADBEEF"
        VERSION = 1
        OWNER1 = "user:phred@example.com"
        OWNER2 = "group:cloud-logs@google.com"
        RETURNED = {
            "resourceId": PATH,
            "etag": ETAG,
            "version": VERSION,
            "bindings": [{"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]}],
        }
        after = ({"status": http_client.OK}, RETURNED)
        connection = _Connection(after)
        client = _Client(connection)
        bucket = _Bucket(client=client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)

        blob.get_iam_policy(requested_policy_version=3)

        # The requested policy version must be forwarded as a query parameter.
        kw = connection._requested
        self.assertEqual(len(kw), 1)
        self.assertEqual(
            kw[0],
            {
                "method": "GET",
                "path": "%s/iam" % (PATH,),
                "query_params": {"optionsRequestedPolicyVersion": 3},
                "_target_object": None,
                "timeout": self._get_default_timeout(),
            },
        )

    def test_get_iam_policy_w_user_project(self):
        from google.api_core.iam import Policy

        BLOB_NAME = "blob-name"
        USER_PROJECT = "user-project-123"
        PATH = "/b/name/o/%s" % (BLOB_NAME,)
        ETAG = "DEADBEEF"
        VERSION = 1
        RETURNED = {
            "resourceId": PATH,
            "etag": ETAG,
            "version": VERSION,
            "bindings": [],
        }
        after = ({"status": http_client.OK}, RETURNED)
        EXPECTED = {}
        connection = _Connection(after)
        client = _Client(connection)
        bucket = _Bucket(client=client, user_project=USER_PROJECT)
        blob = self._make_one(BLOB_NAME, bucket=bucket)

        policy = blob.get_iam_policy()

        self.assertIsInstance(policy, Policy)
        self.assertEqual(policy.etag, RETURNED["etag"])
        self.assertEqual(policy.version, RETURNED["version"])
        self.assertEqual(dict(policy), EXPECTED)

        # The bucket's user_project must be forwarded for requester-pays billing.
        kw = connection._requested
        self.assertEqual(len(kw), 1)
        self.assertEqual(
            kw[0],
            {
                "method": "GET",
                "path": "%s/iam" % (PATH,),
                "query_params": {"userProject": USER_PROJECT},
                "_target_object": None,
                "timeout": self._get_default_timeout(),
            },
        )

    def test_set_iam_policy(self):
        import operator
        from google.cloud.storage.iam import STORAGE_OWNER_ROLE
        from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
        from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
        from google.api_core.iam import Policy

        BLOB_NAME = "blob-name"
        PATH = "/b/name/o/%s" % (BLOB_NAME,)
        ETAG = "DEADBEEF"
        VERSION = 1
        OWNER1 = "user:phred@example.com"
        OWNER2 = "group:cloud-logs@google.com"
        EDITOR1 = "domain:google.com"
        EDITOR2 = "user:phred@example.com"
        VIEWER1 = "serviceAccount:1234-abcdef@service.example.com"
        VIEWER2 = "user:phred@example.com"
        BINDINGS = [
            {"role": STORAGE_OWNER_ROLE, "members": [OWNER1, OWNER2]},
            {"role": STORAGE_EDITOR_ROLE, "members": [EDITOR1, EDITOR2]},
            {"role": STORAGE_VIEWER_ROLE, "members": [VIEWER1, VIEWER2]},
        ]
        RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS}
        after = ({"status": http_client.OK}, RETURNED)
        policy = Policy()
        for binding in BINDINGS:
            policy[binding["role"]] = binding["members"]

        connection = _Connection(after)
        client = _Client(connection)
        bucket = _Bucket(client=client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)

        returned = blob.set_iam_policy(policy, timeout=42)

        self.assertEqual(returned.etag, ETAG)
        self.assertEqual(returned.version, VERSION)
        self.assertEqual(dict(returned), dict(policy))

        kw = connection._requested
        self.assertEqual(len(kw), 1)
        self.assertEqual(kw[0]["method"], "PUT")
        self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,))
        self.assertEqual(kw[0]["query_params"], {})
        self.assertEqual(kw[0]["timeout"], 42)
        sent = kw[0]["data"]
        self.assertEqual(sent["resourceId"], PATH)
        # Compare bindings order-insensitively: sort by role, then by member.
        self.assertEqual(len(sent["bindings"]), len(BINDINGS))
        key = operator.itemgetter("role")
        for found, expected in zip(
            sorted(sent["bindings"], key=key), sorted(BINDINGS, key=key)
        ):
            self.assertEqual(found["role"], expected["role"])
            self.assertEqual(sorted(found["members"]), sorted(expected["members"]))

    def test_set_iam_policy_w_user_project(self):
        from google.api_core.iam import Policy

        BLOB_NAME = "blob-name"
        USER_PROJECT = "user-project-123"
        PATH = "/b/name/o/%s" % (BLOB_NAME,)
        ETAG = "DEADBEEF"
        VERSION = 1
        BINDINGS = []
        RETURNED = {"etag": ETAG, "version": VERSION, "bindings": BINDINGS}
after = ({"status": http_client.OK}, RETURNED) policy = Policy() connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client, user_project=USER_PROJECT) blob = self._make_one(BLOB_NAME, bucket=bucket) returned = blob.set_iam_policy(policy) self.assertEqual(returned.etag, ETAG) self.assertEqual(returned.version, VERSION) self.assertEqual(dict(returned), dict(policy)) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "PUT") self.assertEqual(kw[0]["path"], "%s/iam" % (PATH,)) self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) self.assertEqual(kw[0]["data"], {"resourceId": PATH}) def test_test_iam_permissions(self): from google.cloud.storage.iam import STORAGE_OBJECTS_LIST from google.cloud.storage.iam import STORAGE_BUCKETS_GET from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE BLOB_NAME = "blob-name" PATH = "/b/name/o/%s" % (BLOB_NAME,) PERMISSIONS = [ STORAGE_OBJECTS_LIST, STORAGE_BUCKETS_GET, STORAGE_BUCKETS_UPDATE, ] ALLOWED = PERMISSIONS[1:] RETURNED = {"permissions": ALLOWED} after = ({"status": http_client.OK}, RETURNED) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) allowed = blob.test_iam_permissions(PERMISSIONS, timeout=42) self.assertEqual(allowed, ALLOWED) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "GET") self.assertEqual(kw[0]["path"], "%s/iam/testPermissions" % (PATH,)) self.assertEqual(kw[0]["query_params"], {"permissions": PERMISSIONS}) self.assertEqual(kw[0]["timeout"], 42) def test_test_iam_permissions_w_user_project(self): from google.cloud.storage.iam import STORAGE_OBJECTS_LIST from google.cloud.storage.iam import STORAGE_BUCKETS_GET from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE BLOB_NAME = "blob-name" USER_PROJECT = "user-project-123" PATH = "/b/name/o/%s" % (BLOB_NAME,) PERMISSIONS = [ 
STORAGE_OBJECTS_LIST, STORAGE_BUCKETS_GET, STORAGE_BUCKETS_UPDATE, ] ALLOWED = PERMISSIONS[1:] RETURNED = {"permissions": ALLOWED} after = ({"status": http_client.OK}, RETURNED) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client, user_project=USER_PROJECT) blob = self._make_one(BLOB_NAME, bucket=bucket) allowed = blob.test_iam_permissions(PERMISSIONS) self.assertEqual(allowed, ALLOWED) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "GET") self.assertEqual(kw[0]["path"], "%s/iam/testPermissions" % (PATH,)) self.assertEqual( kw[0]["query_params"], {"permissions": PERMISSIONS, "userProject": USER_PROJECT}, ) self.assertEqual(kw[0]["timeout"], self._get_default_timeout()) def test_make_public(self): from google.cloud.storage.acl import _ACLEntity BLOB_NAME = "blob-name" permissive = [{"entity": "allUsers", "role": _ACLEntity.READER_ROLE}] after = ({"status": http_client.OK}, {"acl": permissive}) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) blob.acl.loaded = True blob.make_public() self.assertEqual(list(blob.acl), permissive) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "PATCH") self.assertEqual(kw[0]["path"], "/b/name/o/%s" % BLOB_NAME) self.assertEqual(kw[0]["data"], {"acl": permissive}) self.assertEqual(kw[0]["query_params"], {"projection": "full"}) def test_make_private(self): BLOB_NAME = "blob-name" no_permissions = [] after = ({"status": http_client.OK}, {"acl": no_permissions}) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) blob = self._make_one(BLOB_NAME, bucket=bucket) blob.acl.loaded = True blob.make_private() self.assertEqual(list(blob.acl), no_permissions) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "PATCH") self.assertEqual(kw[0]["path"], 
"/b/name/o/%s" % BLOB_NAME) self.assertEqual(kw[0]["data"], {"acl": no_permissions}) self.assertEqual(kw[0]["query_params"], {"projection": "full"}) def test_compose_wo_content_type_set(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" RESOURCE = {} after = ({"status": http_client.OK}, RESOURCE) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) # no destination.content_type set destination.compose(sources=[source_1, source_2]) self.assertIsNone(destination.content_type) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "POST", "path": "/b/name/o/%s/compose" % DESTINATION, "query_params": {}, "data": { "sourceObjects": [{"name": source_1.name}, {"name": source_2.name}], "destination": {}, }, "_target_object": destination, "timeout": self._get_default_timeout(), "retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED, }, ) def test_compose_minimal_w_user_project(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" RESOURCE = {"etag": "DEADBEEF"} USER_PROJECT = "user-project-123" after = ({"status": http_client.OK}, RESOURCE) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client, user_project=USER_PROJECT) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) destination.content_type = "text/plain" destination.compose(sources=[source_1, source_2], timeout=42) self.assertEqual(destination.etag, "DEADBEEF") kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "POST", "path": "/b/name/o/%s/compose" % DESTINATION, "query_params": {"userProject": USER_PROJECT}, "data": { "sourceObjects": [{"name": 
source_1.name}, {"name": source_2.name}], "destination": {"contentType": "text/plain"}, }, "_target_object": destination, "timeout": 42, "retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED, }, ) def test_compose_w_additional_property_changes(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" RESOURCE = {"etag": "DEADBEEF"} after = ({"status": http_client.OK}, RESOURCE) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) destination.content_type = "text/plain" destination.content_language = "en-US" destination.metadata = {"my-key": "my-value"} destination.compose(sources=[source_1, source_2]) self.assertEqual(destination.etag, "DEADBEEF") kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "POST", "path": "/b/name/o/%s/compose" % DESTINATION, "query_params": {}, "data": { "sourceObjects": [{"name": source_1.name}, {"name": source_2.name}], "destination": { "contentType": "text/plain", "contentLanguage": "en-US", "metadata": {"my-key": "my-value"}, }, }, "_target_object": destination, "timeout": self._get_default_timeout(), "retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED, }, ) def test_compose_w_generation_match(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" RESOURCE = {} GENERATION_NUMBERS = [6, 9] METAGENERATION_NUMBERS = [7, 1] after = ({"status": http_client.OK}, RESOURCE) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) destination.compose( sources=[source_1, source_2], if_generation_match=GENERATION_NUMBERS, if_metageneration_match=METAGENERATION_NUMBERS, ) kw = 
connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], { "method": "POST", "path": "/b/name/o/%s/compose" % DESTINATION, "query_params": {}, "data": { "sourceObjects": [ { "name": source_1.name, "objectPreconditions": { "ifGenerationMatch": GENERATION_NUMBERS[0], "ifMetagenerationMatch": METAGENERATION_NUMBERS[0], }, }, { "name": source_2.name, "objectPreconditions": { "ifGenerationMatch": GENERATION_NUMBERS[1], "ifMetagenerationMatch": METAGENERATION_NUMBERS[1], }, }, ], "destination": {}, }, "_target_object": destination, "timeout": self._get_default_timeout(), "retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED, }, ) def test_compose_w_generation_match_bad_length(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" GENERATION_NUMBERS = [6] METAGENERATION_NUMBERS = [7] after = ({"status": http_client.OK}, {}) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) with self.assertRaises(ValueError): destination.compose( sources=[source_1, source_2], if_generation_match=GENERATION_NUMBERS ) with self.assertRaises(ValueError): destination.compose( sources=[source_1, source_2], if_metageneration_match=METAGENERATION_NUMBERS, ) def test_compose_w_generation_match_nones(self): SOURCE_1 = "source-1" SOURCE_2 = "source-2" DESTINATION = "destination" GENERATION_NUMBERS = [6, None] after = ({"status": http_client.OK}, {}) connection = _Connection(after) client = _Client(connection) bucket = _Bucket(client=client) source_1 = self._make_one(SOURCE_1, bucket=bucket) source_2 = self._make_one(SOURCE_2, bucket=bucket) destination = self._make_one(DESTINATION, bucket=bucket) destination.compose( sources=[source_1, source_2], if_generation_match=GENERATION_NUMBERS ) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual( kw[0], 
{ "method": "POST", "path": "/b/name/o/%s/compose" % DESTINATION, "query_params": {}, "data": { "sourceObjects": [ { "name": source_1.name, "objectPreconditions": { "ifGenerationMatch": GENERATION_NUMBERS[0] }, }, {"name": source_2.name}, ], "destination": {}, }, "_target_object": destination, "timeout": self._get_default_timeout(), "retry": DEFAULT_RETRY_IF_GENERATION_SPECIFIED, }, ) def test_rewrite_response_without_resource(self): SOURCE_BLOB = "source" DEST_BLOB = "dest" DEST_BUCKET = "other-bucket" TOKEN = "TOKEN" RESPONSE = { "totalBytesRewritten": 33, "objectSize": 42, "done": False, "rewriteToken": TOKEN, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) source_bucket = _Bucket(client=client) source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket) dest_bucket = _Bucket(client=client, name=DEST_BUCKET) dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket) token, rewritten, size = dest_blob.rewrite(source_blob) self.assertEqual(token, TOKEN) self.assertEqual(rewritten, 33) self.assertEqual(size, 42) def test_rewrite_w_generations(self): SOURCE_BLOB = "source" SOURCE_GENERATION = 42 DEST_BLOB = "dest" DEST_BUCKET = "other-bucket" DEST_GENERATION = 43 TOKEN = "TOKEN" RESPONSE = { "totalBytesRewritten": 33, "objectSize": 42, "done": False, "rewriteToken": TOKEN, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) source_bucket = _Bucket(client=client) source_blob = self._make_one( SOURCE_BLOB, bucket=source_bucket, generation=SOURCE_GENERATION ) dest_bucket = _Bucket(client=client, name=DEST_BUCKET) dest_blob = self._make_one( DEST_BLOB, bucket=dest_bucket, generation=DEST_GENERATION ) token, rewritten, size = dest_blob.rewrite(source_blob, timeout=42) self.assertEqual(token, TOKEN) self.assertEqual(rewritten, 33) self.assertEqual(size, 42) (kw,) = connection._requested self.assertEqual(kw["method"], "POST") 
self.assertEqual( kw["path"], "/b/%s/o/%s/rewriteTo/b/%s/o/%s" % ( (source_bucket.name, source_blob.name, dest_bucket.name, dest_blob.name) ), ) self.assertEqual(kw["query_params"], {"sourceGeneration": SOURCE_GENERATION}) self.assertEqual(kw["timeout"], 42) def test_rewrite_w_generation_match(self): SOURCE_BLOB = "source" SOURCE_GENERATION_NUMBER = 42 DEST_BLOB = "dest" DEST_BUCKET = "other-bucket" DEST_GENERATION_NUMBER = 16 TOKEN = "TOKEN" RESPONSE = { "totalBytesRewritten": 33, "objectSize": 42, "done": False, "rewriteToken": TOKEN, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) source_bucket = _Bucket(client=client) source_blob = self._make_one( SOURCE_BLOB, bucket=source_bucket, generation=SOURCE_GENERATION_NUMBER ) dest_bucket = _Bucket(client=client, name=DEST_BUCKET) dest_blob = self._make_one( DEST_BLOB, bucket=dest_bucket, generation=DEST_GENERATION_NUMBER ) token, rewritten, size = dest_blob.rewrite( source_blob, timeout=42, if_generation_match=dest_blob.generation, if_source_generation_match=source_blob.generation, ) (kw,) = connection._requested self.assertEqual(kw["method"], "POST") self.assertEqual( kw["path"], "/b/%s/o/%s/rewriteTo/b/%s/o/%s" % ( (source_bucket.name, source_blob.name, dest_bucket.name, dest_blob.name) ), ) self.assertEqual( kw["query_params"], { "ifSourceGenerationMatch": SOURCE_GENERATION_NUMBER, "ifGenerationMatch": DEST_GENERATION_NUMBER, "sourceGeneration": SOURCE_GENERATION_NUMBER, }, ) self.assertEqual(kw["timeout"], 42) def test_rewrite_other_bucket_other_name_no_encryption_partial(self): SOURCE_BLOB = "source" DEST_BLOB = "dest" DEST_BUCKET = "other-bucket" TOKEN = "TOKEN" RESPONSE = { "totalBytesRewritten": 33, "objectSize": 42, "done": False, "rewriteToken": TOKEN, "resource": {"etag": "DEADBEEF"}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) source_bucket = _Bucket(client=client) 
source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket) dest_bucket = _Bucket(client=client, name=DEST_BUCKET) dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket) token, rewritten, size = dest_blob.rewrite(source_blob) self.assertEqual(token, TOKEN) self.assertEqual(rewritten, 33) self.assertEqual(size, 42) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "POST") PATH = "/b/name/o/%s/rewriteTo/b/%s/o/%s" % ( SOURCE_BLOB, DEST_BUCKET, DEST_BLOB, ) self.assertEqual(kw[0]["path"], PATH) self.assertEqual(kw[0]["query_params"], {}) SENT = {} self.assertEqual(kw[0]["data"], SENT) self.assertEqual(kw[0]["timeout"], self._get_default_timeout()) headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers) self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers) self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers) self.assertNotIn("X-Goog-Encryption-Algorithm", headers) self.assertNotIn("X-Goog-Encryption-Key", headers) self.assertNotIn("X-Goog-Encryption-Key-Sha256", headers) def test_rewrite_same_name_no_old_key_new_key_done_w_user_project(self): KEY = b"01234567890123456789012345678901" # 32 bytes KEY_B64 = base64.b64encode(KEY).rstrip().decode("ascii") KEY_HASH = hashlib.sha256(KEY).digest() KEY_HASH_B64 = base64.b64encode(KEY_HASH).rstrip().decode("ascii") BLOB_NAME = "blob" USER_PROJECT = "user-project-123" RESPONSE = { "totalBytesRewritten": 42, "objectSize": 42, "done": True, "resource": {"etag": "DEADBEEF"}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) bucket = _Bucket(client=client, user_project=USER_PROJECT) plain = self._make_one(BLOB_NAME, bucket=bucket) encrypted = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY) token, rewritten, size = encrypted.rewrite(plain) self.assertIsNone(token) self.assertEqual(rewritten, 42) 
self.assertEqual(size, 42) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "POST") PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) self.assertEqual(kw[0]["path"], PATH) self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT}) SENT = {} self.assertEqual(kw[0]["data"], SENT) self.assertEqual(kw[0]["timeout"], self._get_default_timeout()) headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers) self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers) self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers) self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256") self.assertEqual(headers["X-Goog-Encryption-Key"], KEY_B64) self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], KEY_HASH_B64) def test_rewrite_same_name_no_key_new_key_w_token(self): SOURCE_KEY = b"01234567890123456789012345678901" # 32 bytes SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode("ascii") SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest() SOURCE_KEY_HASH_B64 = base64.b64encode(SOURCE_KEY_HASH).rstrip().decode("ascii") DEST_KEY = b"90123456789012345678901234567890" # 32 bytes DEST_KEY_B64 = base64.b64encode(DEST_KEY).rstrip().decode("ascii") DEST_KEY_HASH = hashlib.sha256(DEST_KEY).digest() DEST_KEY_HASH_B64 = base64.b64encode(DEST_KEY_HASH).rstrip().decode("ascii") BLOB_NAME = "blob" TOKEN = "TOKEN" RESPONSE = { "totalBytesRewritten": 42, "objectSize": 42, "done": True, "resource": {"etag": "DEADBEEF"}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) bucket = _Bucket(client=client) source = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY) dest = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=DEST_KEY) token, rewritten, size = dest.rewrite(source, token=TOKEN) self.assertIsNone(token) 
self.assertEqual(rewritten, 42) self.assertEqual(size, 42) kw = connection._requested self.assertEqual(len(kw), 1) self.assertEqual(kw[0]["method"], "POST") PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME) self.assertEqual(kw[0]["path"], PATH) self.assertEqual(kw[0]["query_params"], {"rewriteToken": TOKEN}) SENT = {} self.assertEqual(kw[0]["data"], SENT) self.assertEqual(kw[0]["timeout"], self._get_default_timeout()) headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()} self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256") self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], SOURCE_KEY_B64) self.assertEqual( headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], SOURCE_KEY_HASH_B64 ) self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256") self.assertEqual(headers["X-Goog-Encryption-Key"], DEST_KEY_B64) self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], DEST_KEY_HASH_B64) def test_rewrite_same_name_w_old_key_new_kms_key(self): SOURCE_KEY = b"01234567890123456789012345678901" # 32 bytes SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode("ascii") SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest() SOURCE_KEY_HASH_B64 = base64.b64encode(SOURCE_KEY_HASH).rstrip().decode("ascii") DEST_KMS_RESOURCE = ( "projects/test-project-123/" "locations/us/" "keyRings/test-ring/" "cryptoKeys/test-key" ) BLOB_NAME = "blob" RESPONSE = { "totalBytesRewritten": 42, "objectSize": 42, "done": True, "resource": {"etag": "DEADBEEF"}, } response = ({"status": http_client.OK}, RESPONSE) connection = _Connection(response) client = _Client(connection) bucket = _Bucket(client=client) source = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY) dest = self._make_one(BLOB_NAME, bucket=bucket, kms_key_name=DEST_KMS_RESOURCE) token, rewritten, size = dest.rewrite(source) self.assertIsNone(token) self.assertEqual(rewritten, 42) self.assertEqual(size, 42) kw = 
        # (tail of test_rewrite_same_name_w_old_key_new_kms_key; 'kw =' precedes
        # this chunk) — verify the single rewrite POST carried the KMS key.
        connection._requested
        self.assertEqual(len(kw), 1)
        self.assertEqual(kw[0]["method"], "POST")
        PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME)
        self.assertEqual(kw[0]["path"], PATH)
        self.assertEqual(
            kw[0]["query_params"], {"destinationKmsKeyName": DEST_KMS_RESOURCE}
        )
        self.assertEqual(kw[0]["timeout"], self._get_default_timeout())
        SENT = {"kmsKeyName": DEST_KMS_RESOURCE}
        self.assertEqual(kw[0]["data"], SENT)

        # Source used a CSEK, so copy-source encryption headers must be present.
        headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()}
        self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256")
        self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], SOURCE_KEY_B64)
        self.assertEqual(
            headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], SOURCE_KEY_HASH_B64
        )

    def test_update_storage_class_invalid(self):
        """An unknown storage class raises ValueError before any request."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)

        with self.assertRaises(ValueError):
            blob.update_storage_class(u"BOGUS")

    def test_update_storage_class_large_file(self):
        """A not-'done' rewrite response triggers a follow-up rewrite call."""
        BLOB_NAME = "blob-name"
        STORAGE_CLASS = u"NEARLINE"
        TOKEN = "TOKEN"
        # First response is incomplete and carries a rewrite token.
        INCOMPLETE_RESPONSE = {
            "totalBytesRewritten": 42,
            "objectSize": 84,
            "done": False,
            "rewriteToken": TOKEN,
            "resource": {"storageClass": STORAGE_CLASS},
        }
        COMPLETE_RESPONSE = {
            "totalBytesRewritten": 84,
            "objectSize": 84,
            "done": True,
            "resource": {"storageClass": STORAGE_CLASS},
        }
        response_1 = ({"status": http_client.OK}, INCOMPLETE_RESPONSE)
        response_2 = ({"status": http_client.OK}, COMPLETE_RESPONSE)
        connection = _Connection(response_1, response_2)
        client = _Client(connection)
        bucket = _Bucket(client=client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)

        blob.update_storage_class("NEARLINE")

        self.assertEqual(blob.storage_class, "NEARLINE")

    def test_update_storage_class_with_custom_timeout(self):
        """The caller's timeout is applied to every rewrite round trip."""
        BLOB_NAME = "blob-name"
        STORAGE_CLASS = u"NEARLINE"
        TOKEN = "TOKEN"
        INCOMPLETE_RESPONSE = {
            "totalBytesRewritten": 42,
            "objectSize": 84,
            "done": False,
            "rewriteToken": TOKEN,
            "resource": {"storageClass": STORAGE_CLASS},
        }
        COMPLETE_RESPONSE = {
            "totalBytesRewritten": 84,
            "objectSize": 84,
            "done": True,
            "resource": {"storageClass": STORAGE_CLASS},
        }
        response_1 = ({"status": http_client.OK}, INCOMPLETE_RESPONSE)
        response_2 = ({"status": http_client.OK}, COMPLETE_RESPONSE)
        connection = _Connection(response_1, response_2)
        client = _Client(connection)
        bucket = _Bucket(client=client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)

        blob.update_storage_class("NEARLINE", timeout=9.58)

        self.assertEqual(blob.storage_class, "NEARLINE")

        # Two requests (incomplete + completion), both with the custom timeout.
        kw = connection._requested
        self.assertEqual(len(kw), 2)

        for kw_item in kw:
            self.assertIn("timeout", kw_item)
            self.assertEqual(kw_item["timeout"], 9.58)

    def test_update_storage_class_wo_encryption_key(self):
        """Without a CSEK, no encryption headers are sent with the rewrite."""
        BLOB_NAME = "blob-name"
        STORAGE_CLASS = u"NEARLINE"
        RESPONSE = {
            "totalBytesRewritten": 42,
            "objectSize": 42,
            "done": True,
            "resource": {"storageClass": STORAGE_CLASS},
        }
        response = ({"status": http_client.OK}, RESPONSE)
        connection = _Connection(response)
        client = _Client(connection)
        bucket = _Bucket(client=client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)

        blob.update_storage_class("NEARLINE")

        self.assertEqual(blob.storage_class, "NEARLINE")

        kw = connection._requested
        self.assertEqual(len(kw), 1)
        self.assertEqual(kw[0]["method"], "POST")
        PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME)
        self.assertEqual(kw[0]["path"], PATH)
        self.assertEqual(kw[0]["query_params"], {})
        SENT = {"storageClass": STORAGE_CLASS}
        self.assertEqual(kw[0]["data"], SENT)

        headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()}
        # Blob has no key, and therefore the relevant headers are not sent.
        self.assertNotIn("X-Goog-Copy-Source-Encryption-Algorithm", headers)
        self.assertNotIn("X-Goog-Copy-Source-Encryption-Key", headers)
        self.assertNotIn("X-Goog-Copy-Source-Encryption-Key-Sha256", headers)
        self.assertNotIn("X-Goog-Encryption-Algorithm", headers)
        self.assertNotIn("X-Goog-Encryption-Key", headers)
        self.assertNotIn("X-Goog-Encryption-Key-Sha256", headers)

    def test_update_storage_class_w_encryption_key_w_user_project(self):
        """With a CSEK + user project: headers and userProject param are sent."""
        BLOB_NAME = "blob-name"
        BLOB_KEY = b"01234567890123456789012345678901"  # 32 bytes
        BLOB_KEY_B64 = base64.b64encode(BLOB_KEY).rstrip().decode("ascii")
        BLOB_KEY_HASH = hashlib.sha256(BLOB_KEY).digest()
        BLOB_KEY_HASH_B64 = base64.b64encode(BLOB_KEY_HASH).rstrip().decode("ascii")
        STORAGE_CLASS = u"NEARLINE"
        USER_PROJECT = "user-project-123"
        RESPONSE = {
            "totalBytesRewritten": 42,
            "objectSize": 42,
            "done": True,
            "resource": {"storageClass": STORAGE_CLASS},
        }
        response = ({"status": http_client.OK}, RESPONSE)
        connection = _Connection(response)
        client = _Client(connection)
        bucket = _Bucket(client=client, user_project=USER_PROJECT)
        blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=BLOB_KEY)

        blob.update_storage_class("NEARLINE")

        self.assertEqual(blob.storage_class, "NEARLINE")

        kw = connection._requested
        self.assertEqual(len(kw), 1)
        self.assertEqual(kw[0]["method"], "POST")
        PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME)
        self.assertEqual(kw[0]["path"], PATH)
        self.assertEqual(kw[0]["query_params"], {"userProject": USER_PROJECT})
        SENT = {"storageClass": STORAGE_CLASS}
        self.assertEqual(kw[0]["data"], SENT)

        headers = {key.title(): str(value) for key, value in kw[0]["headers"].items()}
        # Blob has key, and therefore the relevant headers are sent.
        # (tail of test_update_storage_class_w_encryption_key_w_user_project)
        # Both copy-source and destination encryption headers reflect the CSEK.
        self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Algorithm"], "AES256")
        self.assertEqual(headers["X-Goog-Copy-Source-Encryption-Key"], BLOB_KEY_B64)
        self.assertEqual(
            headers["X-Goog-Copy-Source-Encryption-Key-Sha256"], BLOB_KEY_HASH_B64
        )
        self.assertEqual(headers["X-Goog-Encryption-Algorithm"], "AES256")
        self.assertEqual(headers["X-Goog-Encryption-Key"], BLOB_KEY_B64)
        self.assertEqual(headers["X-Goog-Encryption-Key-Sha256"], BLOB_KEY_HASH_B64)

    def test_update_storage_class_w_generation_match(self):
        """Generation preconditions become if[Source]GenerationMatch query params."""
        BLOB_NAME = "blob-name"
        STORAGE_CLASS = u"NEARLINE"
        GENERATION_NUMBER = 6
        SOURCE_GENERATION_NUMBER = 9
        RESPONSE = {
            "totalBytesRewritten": 42,
            "objectSize": 42,
            "done": True,
            "resource": {"storageClass": STORAGE_CLASS},
        }
        response = ({"status": http_client.OK}, RESPONSE)
        connection = _Connection(response)
        client = _Client(connection)
        bucket = _Bucket(client=client)
        blob = self._make_one(BLOB_NAME, bucket=bucket)

        blob.update_storage_class(
            "NEARLINE",
            if_generation_match=GENERATION_NUMBER,
            if_source_generation_match=SOURCE_GENERATION_NUMBER,
        )

        self.assertEqual(blob.storage_class, "NEARLINE")

        kw = connection._requested
        self.assertEqual(len(kw), 1)
        self.assertEqual(kw[0]["method"], "POST")
        PATH = "/b/name/o/%s/rewriteTo/b/name/o/%s" % (BLOB_NAME, BLOB_NAME)
        self.assertEqual(kw[0]["path"], PATH)
        self.assertEqual(
            kw[0]["query_params"],
            {
                "ifGenerationMatch": GENERATION_NUMBER,
                "ifSourceGenerationMatch": SOURCE_GENERATION_NUMBER,
            },
        )
        SENT = {"storageClass": STORAGE_CLASS}
        self.assertEqual(kw[0]["data"], SENT)

    # ------------------------------------------------------------------
    # Simple property accessor tests: each getter reads the API resource
    # property, each setter stages a value on a fresh (unset) blob.
    # ------------------------------------------------------------------

    def test_cache_control_getter(self):
        """cache_control reflects the 'cacheControl' resource property."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        CACHE_CONTROL = "no-cache"
        properties = {"cacheControl": CACHE_CONTROL}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.cache_control, CACHE_CONTROL)

    def test_cache_control_setter(self):
        """cache_control starts unset and accepts assignment."""
        BLOB_NAME = "blob-name"
        CACHE_CONTROL = "no-cache"
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.cache_control)
        blob.cache_control = CACHE_CONTROL
        self.assertEqual(blob.cache_control, CACHE_CONTROL)

    def test_component_count(self):
        """component_count reflects the integer 'componentCount' property."""
        BUCKET = object()
        COMPONENT_COUNT = 42
        blob = self._make_one(
            "blob-name", bucket=BUCKET, properties={"componentCount": COMPONENT_COUNT}
        )
        self.assertEqual(blob.component_count, COMPONENT_COUNT)

    def test_component_count_unset(self):
        """component_count is None when the property is absent."""
        BUCKET = object()
        blob = self._make_one("blob-name", bucket=BUCKET)
        self.assertIsNone(blob.component_count)

    def test_component_count_string_val(self):
        """A string-valued 'componentCount' is coerced to int."""
        BUCKET = object()
        COMPONENT_COUNT = 42
        blob = self._make_one(
            "blob-name",
            bucket=BUCKET,
            properties={"componentCount": str(COMPONENT_COUNT)},
        )
        self.assertEqual(blob.component_count, COMPONENT_COUNT)

    def test_content_disposition_getter(self):
        """content_disposition reflects the 'contentDisposition' property."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        CONTENT_DISPOSITION = "Attachment; filename=example.jpg"
        properties = {"contentDisposition": CONTENT_DISPOSITION}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)

    def test_content_disposition_setter(self):
        """content_disposition starts unset and accepts assignment."""
        BLOB_NAME = "blob-name"
        CONTENT_DISPOSITION = "Attachment; filename=example.jpg"
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.content_disposition)
        blob.content_disposition = CONTENT_DISPOSITION
        self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)

    def test_content_encoding_getter(self):
        """content_encoding reflects the 'contentEncoding' property."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        CONTENT_ENCODING = "gzip"
        properties = {"contentEncoding": CONTENT_ENCODING}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.content_encoding, CONTENT_ENCODING)

    def test_content_encoding_setter(self):
        """content_encoding starts unset and accepts assignment."""
        BLOB_NAME = "blob-name"
        CONTENT_ENCODING = "gzip"
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.content_encoding)
        blob.content_encoding = CONTENT_ENCODING
        self.assertEqual(blob.content_encoding, CONTENT_ENCODING)

    def test_content_language_getter(self):
        """content_language reflects the 'contentLanguage' property."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        CONTENT_LANGUAGE = "pt-BR"
        properties = {"contentLanguage": CONTENT_LANGUAGE}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.content_language, CONTENT_LANGUAGE)

    def test_content_language_setter(self):
        """content_language starts unset and accepts assignment."""
        BLOB_NAME = "blob-name"
        CONTENT_LANGUAGE = "pt-BR"
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.content_language)
        blob.content_language = CONTENT_LANGUAGE
        self.assertEqual(blob.content_language, CONTENT_LANGUAGE)

    def test_content_type_getter(self):
        """content_type reflects the 'contentType' property."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        CONTENT_TYPE = "image/jpeg"
        properties = {"contentType": CONTENT_TYPE}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.content_type, CONTENT_TYPE)

    def test_content_type_setter(self):
        """content_type starts unset and accepts assignment."""
        BLOB_NAME = "blob-name"
        CONTENT_TYPE = "image/jpeg"
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.content_type)
        blob.content_type = CONTENT_TYPE
        self.assertEqual(blob.content_type, CONTENT_TYPE)

    def test_crc32c_getter(self):
        """crc32c reflects the 'crc32c' checksum property."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        CRC32C = "DEADBEEF"
        properties = {"crc32c": CRC32C}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.crc32c, CRC32C)

    def test_crc32c_setter(self):
        """crc32c starts unset and accepts assignment."""
        BLOB_NAME = "blob-name"
        CRC32C = "DEADBEEF"
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.crc32c)
        blob.crc32c = CRC32C
        self.assertEqual(blob.crc32c, CRC32C)

    def test_etag(self):
        """etag (read-only) reflects the 'etag' property."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        ETAG = "ETAG"
        properties = {"etag": ETAG}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.etag, ETAG)

    def test_event_based_hold_getter_missing(self):
        """event_based_hold is None when 'eventBasedHold' is absent."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        properties = {}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertIsNone(blob.event_based_hold)

    def test_event_based_hold_getter_false(self):
        """event_based_hold passes through an explicit False."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        properties = {"eventBasedHold": False}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertFalse(blob.event_based_hold)

    def test_event_based_hold_getter_true(self):
        """event_based_hold passes through an explicit True."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        properties = {"eventBasedHold": True}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertTrue(blob.event_based_hold)

    def test_event_based_hold_setter(self):
        """event_based_hold starts unset and accepts assignment."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.event_based_hold)
        blob.event_based_hold = True
        self.assertEqual(blob.event_based_hold, True)

    def test_generation(self):
        """generation reflects the integer 'generation' property."""
        BUCKET = object()
        GENERATION = 42
        blob = self._make_one(
            "blob-name", bucket=BUCKET, properties={"generation": GENERATION}
        )
        self.assertEqual(blob.generation, GENERATION)

    def test_generation_unset(self):
        """generation is None when the property is absent."""
        BUCKET = object()
        blob = self._make_one("blob-name", bucket=BUCKET)
        self.assertIsNone(blob.generation)

    def test_generation_string_val(self):
        """A string-valued 'generation' is coerced to int."""
        BUCKET = object()
        GENERATION = 42
        blob = self._make_one(
            "blob-name", bucket=BUCKET, properties={"generation": str(GENERATION)}
        )
        self.assertEqual(blob.generation, GENERATION)

    def test_id(self):
        """id (read-only) reflects the 'id' property."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        ID = "ID"
        properties = {"id": ID}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.id, ID)

    def test_md5_hash_getter(self):
        """md5_hash reflects the 'md5Hash' property."""
        BLOB_NAME = "blob-name"
        bucket = _Bucket()
        MD5_HASH = "DEADBEEF"
        properties = {"md5Hash": MD5_HASH}
        blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
        self.assertEqual(blob.md5_hash, MD5_HASH)

    def test_md5_hash_setter(self):
        """md5_hash starts unset and accepts assignment."""
        BLOB_NAME = "blob-name"
        MD5_HASH = "DEADBEEF"
        bucket = _Bucket()
        blob = self._make_one(BLOB_NAME, bucket=bucket)
        self.assertIsNone(blob.md5_hash)
        blob.md5_hash = MD5_HASH
self.assertEqual(blob.md5_hash, MD5_HASH) def test_media_link(self): BLOB_NAME = "blob-name" bucket = _Bucket() MEDIA_LINK = "http://example.com/media/" properties = {"mediaLink": MEDIA_LINK} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.media_link, MEDIA_LINK) def test_metadata_getter(self): BLOB_NAME = "blob-name" bucket = _Bucket() METADATA = {"foo": "Foo"} properties = {"metadata": METADATA} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.metadata, METADATA) def test_metadata_setter(self): BLOB_NAME = "blob-name" METADATA = {"foo": "Foo"} bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.metadata) blob.metadata = METADATA self.assertEqual(blob.metadata, METADATA) def test_metadata_setter_w_nan(self): BLOB_NAME = "blob-name" METADATA = {"foo": float("nan")} bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.metadata) blob.metadata = METADATA value = blob.metadata["foo"] self.assertIsInstance(value, str) def test_metageneration(self): BUCKET = object() METAGENERATION = 42 blob = self._make_one( "blob-name", bucket=BUCKET, properties={"metageneration": METAGENERATION} ) self.assertEqual(blob.metageneration, METAGENERATION) def test_metageneration_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.metageneration) def test_metageneration_string_val(self): BUCKET = object() METAGENERATION = 42 blob = self._make_one( "blob-name", bucket=BUCKET, properties={"metageneration": str(METAGENERATION)}, ) self.assertEqual(blob.metageneration, METAGENERATION) def test_owner(self): BLOB_NAME = "blob-name" bucket = _Bucket() OWNER = {"entity": "project-owner-12345", "entityId": "23456"} properties = {"owner": OWNER} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) owner = blob.owner self.assertEqual(owner["entity"], 
"project-owner-12345") self.assertEqual(owner["entityId"], "23456") def test_retention_expiration_time(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"retentionExpirationTime": TIME_CREATED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.retention_expiration_time, TIMESTAMP) def test_retention_expiration_time_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.retention_expiration_time) def test_self_link(self): BLOB_NAME = "blob-name" bucket = _Bucket() SELF_LINK = "http://example.com/self/" properties = {"selfLink": SELF_LINK} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.self_link, SELF_LINK) def test_size(self): BUCKET = object() SIZE = 42 blob = self._make_one("blob-name", bucket=BUCKET, properties={"size": SIZE}) self.assertEqual(blob.size, SIZE) def test_size_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.size) def test_size_string_val(self): BUCKET = object() SIZE = 42 blob = self._make_one( "blob-name", bucket=BUCKET, properties={"size": str(SIZE)} ) self.assertEqual(blob.size, SIZE) def test_storage_class_getter(self): blob_name = "blob-name" bucket = _Bucket() storage_class = "COLDLINE" properties = {"storageClass": storage_class} blob = self._make_one(blob_name, bucket=bucket, properties=properties) self.assertEqual(blob.storage_class, storage_class) def test_storage_class_setter(self): blob_name = "blob-name" bucket = _Bucket() storage_class = "COLDLINE" blob = self._make_one(blob_name, bucket=bucket) self.assertIsNone(blob.storage_class) blob.storage_class = storage_class self.assertEqual(blob.storage_class, storage_class) 
self.assertEqual(blob._properties, {"storageClass": storage_class}) def test_temporary_hold_getter_missing(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertIsNone(blob.temporary_hold) def test_temporary_hold_getter_false(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {"temporaryHold": False} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertFalse(blob.temporary_hold) def test_temporary_hold_getter_true(self): BLOB_NAME = "blob-name" bucket = _Bucket() properties = {"temporaryHold": True} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertTrue(blob.temporary_hold) def test_temporary_hold_setter(self): BLOB_NAME = "blob-name" bucket = _Bucket() blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.temporary_hold) blob.temporary_hold = True self.assertEqual(blob.temporary_hold, True) def test_time_deleted(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) TIME_DELETED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"timeDeleted": TIME_DELETED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.time_deleted, TIMESTAMP) def test_time_deleted_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.time_deleted) def test_time_created(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"timeCreated": TIME_CREATED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.time_created, 
TIMESTAMP) def test_time_created_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.time_created) def test_updated(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) UPDATED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"updated": UPDATED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.updated, TIMESTAMP) def test_updated_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.updated) def test_custom_time_getter(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"customTime": TIME_CREATED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.custom_time, TIMESTAMP) def test_custom_time_setter(self): from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) blob = self._make_one(BLOB_NAME, bucket=bucket) self.assertIsNone(blob.custom_time) blob.custom_time = TIMESTAMP self.assertEqual(blob.custom_time, TIMESTAMP) def test_custom_time_setter_none_value(self): from google.cloud._helpers import _RFC3339_MICROS from google.cloud._helpers import UTC BLOB_NAME = "blob-name" bucket = _Bucket() TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC) TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS) properties = {"customTime": TIME_CREATED} blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties) self.assertEqual(blob.custom_time, TIMESTAMP) blob.custom_time = None self.assertIsNone(blob.custom_time) def 
test_custom_time_unset(self): BUCKET = object() blob = self._make_one("blob-name", bucket=BUCKET) self.assertIsNone(blob.custom_time) def test_from_string_w_valid_uri(self): from google.cloud.storage.blob import Blob connection = _Connection() client = _Client(connection) uri = "gs://BUCKET_NAME/b" blob = Blob.from_string(uri, client) self.assertIsInstance(blob, Blob) self.assertIs(blob.client, client) self.assertEqual(blob.name, "b") self.assertEqual(blob.bucket.name, "BUCKET_NAME") def test_from_string_w_invalid_uri(self): from google.cloud.storage.blob import Blob connection = _Connection() client = _Client(connection) with pytest.raises(ValueError, match="URI scheme must be gs"): Blob.from_string("http://bucket_name/b", client) def test_from_string_w_domain_name_bucket(self): from google.cloud.storage.blob import Blob connection = _Connection() client = _Client(connection) uri = "gs://buckets.example.com/b" blob = Blob.from_string(uri, client) self.assertIsInstance(blob, Blob) self.assertIs(blob.client, client) self.assertEqual(blob.name, "b") self.assertEqual(blob.bucket.name, "buckets.example.com") class Test__quote(unittest.TestCase): @staticmethod def _call_fut(*args, **kw): from google.cloud.storage.blob import _quote return _quote(*args, **kw) def test_bytes(self): quoted = self._call_fut(b"\xDE\xAD\xBE\xEF") self.assertEqual(quoted, "%DE%AD%BE%EF") def test_unicode(self): helicopter = u"\U0001f681" quoted = self._call_fut(helicopter) self.assertEqual(quoted, "%F0%9F%9A%81") def test_bad_type(self): with self.assertRaises(TypeError): self._call_fut(None) def test_w_slash_default(self): with_slash = "foo/bar/baz" quoted = self._call_fut(with_slash) self.assertEqual(quoted, "foo%2Fbar%2Fbaz") def test_w_slash_w_safe(self): with_slash = "foo/bar/baz" quoted_safe = self._call_fut(with_slash, safe=b"/") self.assertEqual(quoted_safe, with_slash) def test_w_tilde(self): with_tilde = "bam~qux" quoted = self._call_fut(with_tilde, safe=b"~") 
self.assertEqual(quoted, with_tilde) class Test__maybe_rewind(unittest.TestCase): @staticmethod def _call_fut(*args, **kwargs): from google.cloud.storage.blob import _maybe_rewind return _maybe_rewind(*args, **kwargs) def test_default(self): stream = mock.Mock(spec=[u"seek"]) ret_val = self._call_fut(stream) self.assertIsNone(ret_val) stream.seek.assert_not_called() def test_do_not_rewind(self): stream = mock.Mock(spec=[u"seek"]) ret_val = self._call_fut(stream, rewind=False) self.assertIsNone(ret_val) stream.seek.assert_not_called() def test_do_rewind(self): stream = mock.Mock(spec=[u"seek"]) ret_val = self._call_fut(stream, rewind=True) self.assertIsNone(ret_val) stream.seek.assert_called_once_with(0, os.SEEK_SET) class Test__raise_from_invalid_response(unittest.TestCase): @staticmethod def _call_fut(error): from google.cloud.storage.blob import _raise_from_invalid_response return _raise_from_invalid_response(error) def _helper(self, message, code=http_client.BAD_REQUEST, reason=None, args=()): import requests from google.resumable_media import InvalidResponse from google.api_core import exceptions response = requests.Response() response.request = requests.Request("GET", "http://example.com").prepare() response._content = reason response.status_code = code error = InvalidResponse(response, message, *args) with self.assertRaises(exceptions.GoogleAPICallError) as exc_info: self._call_fut(error) return exc_info def test_default(self): message = "Failure" exc_info = self._helper(message) expected = "GET http://example.com/: {}".format(message) self.assertEqual(exc_info.exception.message, expected) self.assertEqual(exc_info.exception.errors, []) def test_w_206_and_args(self): message = "Failure" reason = b"Not available" args = ("one", "two") exc_info = self._helper( message, code=http_client.PARTIAL_CONTENT, reason=reason, args=args ) expected = "GET http://example.com/: {}: {}".format( reason.decode("utf-8"), (message,) + args ) 
self.assertEqual(exc_info.exception.message, expected) self.assertEqual(exc_info.exception.errors, []) class Test__add_query_parameters(unittest.TestCase): @staticmethod def _call_fut(*args, **kwargs): from google.cloud.storage.blob import _add_query_parameters return _add_query_parameters(*args, **kwargs) def test_w_empty_list(self): BASE_URL = "https://test.example.com/base" self.assertEqual(self._call_fut(BASE_URL, []), BASE_URL) def test_wo_existing_qs(self): BASE_URL = "https://test.example.com/base" NV_LIST = [("one", "One"), ("two", "Two")] expected = "&".join(["{}={}".format(name, value) for name, value in NV_LIST]) self.assertEqual( self._call_fut(BASE_URL, NV_LIST), "{}?{}".format(BASE_URL, expected) ) def test_w_existing_qs(self): BASE_URL = "https://test.example.com/base?one=Three" NV_LIST = [("one", "One"), ("two", "Two")] expected = "&".join(["{}={}".format(name, value) for name, value in NV_LIST]) self.assertEqual( self._call_fut(BASE_URL, NV_LIST), "{}&{}".format(BASE_URL, expected) ) class _Connection(object): API_BASE_URL = "http://example.com" USER_AGENT = "testing 1.2.3" credentials = object() def __init__(self, *responses): self._responses = responses[:] self._requested = [] self._signed = [] def _respond(self, **kw): self._requested.append(kw) response, self._responses = self._responses[0], self._responses[1:] return response def api_request(self, **kw): from google.cloud.exceptions import NotFound info, content = self._respond(**kw) if info.get("status") == http_client.NOT_FOUND: raise NotFound(info) return content class _Bucket(object): def __init__(self, client=None, name="name", user_project=None): if client is None: connection = _Connection() client = _Client(connection) self.client = client self._blobs = {} self._copied = [] self._deleted = [] self.name = name self.path = "/b/" + name self.user_project = user_project def delete_blob( self, blob_name, client=None, generation=None, timeout=None, if_generation_match=None, 
if_generation_not_match=None, if_metageneration_match=None, if_metageneration_not_match=None, ): del self._blobs[blob_name] self._deleted.append( ( blob_name, client, generation, timeout, if_generation_match, if_generation_not_match, if_metageneration_match, if_metageneration_not_match, ) ) class _Client(object): def __init__(self, connection): self._base_connection = connection @property def _connection(self): return self._base_connection @property def _credentials(self): return self._base_connection.credentials
true
true
f7138682a10089e0ddf292ce21c018b8f71c8f42
18,123
py
Python
lite/tests/unittest_py/auto_scan_base.py
xiebaiyuan/PaddleLite
6f7280a91741d1c63fcb0296ac5c08c4e81c2a90
[ "Apache-2.0" ]
null
null
null
lite/tests/unittest_py/auto_scan_base.py
xiebaiyuan/PaddleLite
6f7280a91741d1c63fcb0296ac5c08c4e81c2a90
[ "Apache-2.0" ]
null
null
null
lite/tests/unittest_py/auto_scan_base.py
xiebaiyuan/PaddleLite
6f7280a91741d1c63fcb0296ac5c08c4e81c2a90
[ "Apache-2.0" ]
null
null
null
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import unittest import abc import os import enum import time import logging import shutil import paddle import paddle.fluid as fluid from paddle.fluid.initializer import NumpyArrayInitializer from paddle.fluid.core import PassVersionChecker import paddle.fluid.core as core from paddle import compat as cpt import paddle.inference as paddle_infer from typing import Optional, List, Callable, Dict, Any, Set from program_config import TensorConfig, OpConfig, ProgramConfig, create_fake_model, create_quant_model from itertools import product from program_config import CxxConfig, TargetType, PrecisionType, DataLayoutType, Place import hypothesis from hypothesis import given, settings, seed import hypothesis.strategies as st import argparse parser = argparse.ArgumentParser() parser.add_argument("--target", choices=['Host', 'X86','CUDA','ARM','OpenCL','FPGA','NPU','MLU','RKNPU','APU','HUAWEI_ASCEND_NPU','INTEL_FPGA'], required=True) logging.basicConfig(level=logging.INFO, format="%(message)s") settings.register_profile( "ci", max_examples=10, suppress_health_check=hypothesis.HealthCheck.all(), deadline=None, print_blob=True, derandomize=True, report_multiple_bugs=False) settings.load_profile("ci") class IgnoreReasonsBase(enum.Enum): # Paddle not support, but paddlelite support, we need to add the feature. PADDLE_NOT_IMPLEMENTED = 0 # paddlelite not support. 
PADDLELITE_NOT_SUPPORT = 1 # Accuracy is abnormal after enabling pass. ACCURACY_ERROR = 2 class AutoScanBaseTest(unittest.TestCase): def __init__(self, *args, **kwargs): self.valid_places = [] self.thread_num = [1] np.random.seed(1024) paddle.enable_static() super(AutoScanBaseTest, self).__init__(*args, **kwargs) self.ignore_cases = [] abs_dir = os.path.abspath(os.path.dirname(__file__)) self.cache_dir = os.path.join(abs_dir, str(self.__module__) + '_cache_dir') self.available_passes_in_framework = set() self.num_ran_programs = 0 self.num_invalid_programs = 0 self.num_ignore_tests = 0 self.num_predictor_kinds = 0 args = parser.parse_args() self.args = args @abc.abstractmethod def sample_program_configs(self, draw): ''' Generate all config with the combination of different Input tensor shape and different Attr values. ''' raise NotImplementedError @abc.abstractmethod def sample_predictor_configs(self): raise NotImplementedError @abc.abstractmethod def add_ignore_check_case( self, teller: [Callable[[ProgramConfig, CxxConfig], bool]], reason: IgnoreReasonsBase, note: str): self.ignore_cases.append((teller, reason, note)) @abc.abstractmethod def is_program_valid(self, program_config: ProgramConfig, predictor_config: CxxConfig) -> bool: return True def run_test_config(self, model, params, prog_config, pred_config, feed_data) -> Dict[str, np.ndarray]: ''' Test a single case. 
''' pred_config.set_model_buffer(model, len(model), params, len(params)) predictor = paddle_infer.create_predictor(pred_config) self.available_passes_in_framework = self.available_passes_in_framework | set( pred_config.pass_builder().all_passes()) for name, _ in prog_config.inputs.items(): input_tensor = predictor.get_input_handle(name) input_tensor.copy_from_cpu(feed_data[name]['data']) if feed_data[name]['lod'] is not None: input_tensor.set_lod(feed_data[name]['lod']) predictor.run() result = {} for out_name, o_name in zip(prog_config.outputs, predictor.get_output_names()): result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu() return result @abc.abstractmethod def assert_tensors_near(self, atol: float, rtol: float, tensor: Dict[str, np.array], baseline: Dict[str, np.array]): if len(tensor) == 1 and len(baseline) == 1: tensor_key = list(tensor.keys()) arr = np.array(tensor[tensor_key[0]]) base_key = list(baseline.keys()) base = np.array(baseline[base_key[0]]) self.assertTrue( base.shape == arr.shape, "The output shapes are not equal, the baseline shape is " + str(base.shape) + ', but got ' + str(arr.shape)) self.assertTrue( np.allclose( base, arr, atol=atol, rtol=rtol), "Output has diff. ") else: for key in tensor: opencl_str = "/target_trans" index = key.rfind(opencl_str) paddlekey=key if index > 0: paddlekey = key[0: index] if (key == "saved_mean" or key == "saved_variance"): # training using data continue arr = np.array(tensor[key]) self.assertTrue( baseline[paddlekey].shape == arr.shape, "The output shapes are not equal, the baseline shape is " + str(baseline[paddlekey].shape) + ', but got ' + str(arr.shape)) self.assertTrue( np.allclose( baseline[paddlekey], arr, atol=atol, rtol=rtol), "Output has diff. 
") def generate_op_config(self, ops_config: List[Dict[str, Any]]) -> List[OpConfig]: ops = [] for i in range(len(ops_config)): op_config = ops_config[i] ops.append( OpConfig( type=op_config['op_type'], inputs=op_config['op_inputs'], outputs=op_config['op_outputs'], attrs=op_config['op_attrs'])) return ops @abc.abstractmethod def ignore_log(self, msg: str): logging.warning("SKIP: " + msg) @abc.abstractmethod def fail_log(self, msg: str): logging.fatal("FAILE: " + msg) @abc.abstractmethod def success_log(self, msg: str): logging.info("SUCCESS: " + msg) @abc.abstractmethod def create_inference_config(self, passes: Optional[List[str]]=None, use_gpu: bool=False, use_mkldnn: bool=False, ir_optim: Optional[bool]=None): config = paddle_infer.Config() config.switch_ir_debug(True) config.disable_glog_info() if ir_optim is not None: config.switch_ir_optim(ir_optim) if use_gpu: config.enable_use_gpu(100, 0) if use_mkldnn: config.enable_mkldnn() if passes is not None: config.pass_builder().set_passes(passes) self.passes = passes return config def run_test(self, quant=False, prog_configs=None): status = True paddlelite_configs, op_list_, (atol_, rtol_) = self.sample_predictor_configs() for prog_config in prog_configs: # if program is invalid, we should ignore this cases. 
program_valid_ = False for paddlelite_config in paddlelite_configs: # judge validity of program if self.is_program_valid(prog_config, paddlelite_config): program_valid_ = True if not program_valid_: self.num_invalid_programs += 1 continue self.num_ran_programs += 1 model, params = create_fake_model(prog_config) if quant: model, params = create_quant_model(model, params) feed_data = {} for name, tensor_config in prog_config.inputs.items(): feed_data[name] = { 'data': tensor_config.data, 'lod': tensor_config.lod } results: List[Dict[str, np.ndarray]] = [] # baseline: cpu no ir_optim run base_config = self.create_inference_config(ir_optim=False) logging.info('[ProgramConfig]: ' + str(prog_config)) results.append( self.run_test_config(model, params, prog_config, base_config, feed_data)) for paddlelite_config in paddlelite_configs: # judge validity of program if not self.is_program_valid(prog_config, paddlelite_config): continue self.num_predictor_kinds += 1 # ignore info ignore_flag = False pred_config = paddlelite_config.value() for ignore_info in self.ignore_cases: if ignore_info[0](prog_config, paddlelite_config): ignore_flag = True self.num_ignore_tests += 1 if ignore_info[1] == IgnoreReasonsBase.ACCURACY_ERROR: self.ignore_log("[ACCURACY_ERROR] " + ignore_info[2] + ' ' + ' vs ' + self. 
paddlelite_config_str(pred_config)) else: raise NotImplementedError break if os.path.exists(self.cache_dir): shutil.rmtree(self.cache_dir) if not os.path.exists(self.cache_dir): os.mkdir(self.cache_dir) try: result, opt_model_bytes = self.run_lite_config(model, params, feed_data, pred_config) results.append(result) self.assert_tensors_near(atol_, rtol_, results[-1], results[0]) if not ignore_flag and self.passes is not None: self.assert_op_list(opt_model_bytes, op_list_) except Exception as e: self.fail_log( self.paddlelite_config_str(pred_config) + '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e))) if not ignore_flag: status = False continue self.success_log('PredictorConfig: ' + self. paddlelite_config_str(pred_config)) self.assertTrue(status) def inference_config_str(self, config) -> bool: dic = {} enable_mkldnn = config.mkldnn_enabled() dic['use_mkldnn'] = enable_mkldnn enable_gpu = config.use_gpu() return str(dic) def paddlelite_config_str(self, config) -> bool: return str(config) # method for ignoring def add_ignore_pass_case(self): return # judge if program contain op_list def assert_op_list(self, model_bytes, op_list_after_fusion): if not self.passes: raise ValueError( "In PassAutoScan you should give a valid pass name.") pg = paddle.static.deserialize_program(model_bytes) main_block = pg.desc.block(0) after_op_list = list() for i in range(main_block.op_size()): if main_block.op(i).type() in ["feed", "fetch"]: continue after_op_list.append(main_block.op(i).type()) self.assertTrue( op_list_after_fusion == after_op_list, "Expected operator list after fusion is {}, but now it's {}".format( op_list_after_fusion, after_op_list), ) def run_and_statis( self, quant=False, max_examples=100, reproduce=None, min_success_num=25, max_duration=180, passes=None ): if os.getenv('HYPOTHESIS_TEST_PROFILE', 'ci') == "dev": max_examples *= 10 min_success_num *= 10 # while at ce phase, there's no limit on time max_duration = -1 start_time = time.time() 
settings.register_profile( "ci", max_examples=max_examples, suppress_health_check=hypothesis.HealthCheck.all(), deadline=None, print_blob=True, derandomize=True, report_multiple_bugs=False, ) settings.load_profile("ci") self.passes = passes self.add_ignore_pass_case() def program_generator(draw): return self.sample_program_configs(draw) def run_test(prog_config): return self.run_test(quant=quant, prog_configs=[prog_config]) # if current unittest is not active on the input target, we will exit directly. if not self.is_actived(): logging.info("Error: This test is not actived on " + self.get_target()) return generator = st.composite(program_generator) loop_func = given(generator())(run_test) if reproduce is not None: loop_func = reproduce(loop_func) logging.info("Start to running test of {}".format(type(self))) loop_func() logging.info( "===================Statistical Information===================") logging.info("Number of Generated Programs: {}".format( self.num_ran_programs + self.num_invalid_programs)) logging.info("Number of Invalid Programs: {}".format( self.num_invalid_programs)) logging.info("Number of Ran Programs: {}".format(self.num_ran_programs)) logging.info("Number of Ignored Tests: {}".format( self.num_ignore_tests)) if self.num_predictor_kinds == 0: successful_ran_programs = int(self.num_ran_programs) min_success_num = 0 else: successful_ran_programs = int(self.num_ran_programs - self.num_ignore_tests / self.num_predictor_kinds) logging.info( "Number of successfully ran programs approximately equal to {}". format(successful_ran_programs)) if successful_ran_programs < min_success_num: logging.warning( "satisfied_programs = ran_programs - num_ignore_tests / num_predictor_kinds" ) logging.fatal( "At least {} programs need to ran successfully, but now only about {} programs satisfied.". 
format(min_success_num, successful_ran_programs)) assert False used_time = time.time() - start_time if max_duration > 0 and used_time > max_duration: logging.fatal( "The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`.". format(max_duration)) assert False @abc.abstractmethod def run_lite_config(self, model, params, feed_data, pred_config) -> Dict[str, np.ndarray]: raise NotImplementedError # enable a predictor config # configs will be generated automatically according to inputs def enable_testing_on_place(self, target=None, precision=None, layout=None, thread=None, places=None) -> None: # set thread_num if isinstance(thread,list): self.thread_num = list(set(self.thread_num + thread)) if isinstance(thread,int): self.thread_num.append(thread) self.thread_num = list(self.thread_num) # if list[Place] is inputed, this will be used directly if places is not None: assert isinstance(places, list) self.valid_places.append(places) return # otherwise we will generate a list[Place] from the inputed[target\precision\layout] assert (target is not None) target_ = target if isinstance(target,list) else [target] precision_ = precision if isinstance(precision, list) else [precision] layout_ = layout if isinstance(layout,list) else [layout] for tar_, pre_, lay_ in product(target_, precision_, layout_): self.valid_places.append([Place(tar_, pre_, lay_)]) return def get_target(self) -> str: return self.args.target def is_actived(self) -> bool: for valid_place_ in self.valid_places: if self.get_target() in valid_place_[0]: return True return False def get_predictor_configs(self) -> List[CxxConfig]: return self.target_to_predictor_configs(self, self.get_target()) # get valid test configs @staticmethod def target_to_predictor_configs(self,target:str) -> List[CxxConfig]: configs_ = [] for elem_ in self.valid_places: if target in elem_[0]: for thread_ in self.thread_num: config_ = CxxConfig() config_.set_valid_places(elem_) 
config_.set_threads(thread_) configs_.append(config_) return configs_
39.227273
159
0.587154
import numpy as np import unittest import abc import os import enum import time import logging import shutil import paddle import paddle.fluid as fluid from paddle.fluid.initializer import NumpyArrayInitializer from paddle.fluid.core import PassVersionChecker import paddle.fluid.core as core from paddle import compat as cpt import paddle.inference as paddle_infer from typing import Optional, List, Callable, Dict, Any, Set from program_config import TensorConfig, OpConfig, ProgramConfig, create_fake_model, create_quant_model from itertools import product from program_config import CxxConfig, TargetType, PrecisionType, DataLayoutType, Place import hypothesis from hypothesis import given, settings, seed import hypothesis.strategies as st import argparse parser = argparse.ArgumentParser() parser.add_argument("--target", choices=['Host', 'X86','CUDA','ARM','OpenCL','FPGA','NPU','MLU','RKNPU','APU','HUAWEI_ASCEND_NPU','INTEL_FPGA'], required=True) logging.basicConfig(level=logging.INFO, format="%(message)s") settings.register_profile( "ci", max_examples=10, suppress_health_check=hypothesis.HealthCheck.all(), deadline=None, print_blob=True, derandomize=True, report_multiple_bugs=False) settings.load_profile("ci") class IgnoreReasonsBase(enum.Enum): PADDLE_NOT_IMPLEMENTED = 0 PADDLELITE_NOT_SUPPORT = 1 ACCURACY_ERROR = 2 class AutoScanBaseTest(unittest.TestCase): def __init__(self, *args, **kwargs): self.valid_places = [] self.thread_num = [1] np.random.seed(1024) paddle.enable_static() super(AutoScanBaseTest, self).__init__(*args, **kwargs) self.ignore_cases = [] abs_dir = os.path.abspath(os.path.dirname(__file__)) self.cache_dir = os.path.join(abs_dir, str(self.__module__) + '_cache_dir') self.available_passes_in_framework = set() self.num_ran_programs = 0 self.num_invalid_programs = 0 self.num_ignore_tests = 0 self.num_predictor_kinds = 0 args = parser.parse_args() self.args = args @abc.abstractmethod def sample_program_configs(self, draw): raise NotImplementedError 
@abc.abstractmethod def sample_predictor_configs(self): raise NotImplementedError @abc.abstractmethod def add_ignore_check_case( self, teller: [Callable[[ProgramConfig, CxxConfig], bool]], reason: IgnoreReasonsBase, note: str): self.ignore_cases.append((teller, reason, note)) @abc.abstractmethod def is_program_valid(self, program_config: ProgramConfig, predictor_config: CxxConfig) -> bool: return True def run_test_config(self, model, params, prog_config, pred_config, feed_data) -> Dict[str, np.ndarray]: pred_config.set_model_buffer(model, len(model), params, len(params)) predictor = paddle_infer.create_predictor(pred_config) self.available_passes_in_framework = self.available_passes_in_framework | set( pred_config.pass_builder().all_passes()) for name, _ in prog_config.inputs.items(): input_tensor = predictor.get_input_handle(name) input_tensor.copy_from_cpu(feed_data[name]['data']) if feed_data[name]['lod'] is not None: input_tensor.set_lod(feed_data[name]['lod']) predictor.run() result = {} for out_name, o_name in zip(prog_config.outputs, predictor.get_output_names()): result[out_name] = predictor.get_output_handle(o_name).copy_to_cpu() return result @abc.abstractmethod def assert_tensors_near(self, atol: float, rtol: float, tensor: Dict[str, np.array], baseline: Dict[str, np.array]): if len(tensor) == 1 and len(baseline) == 1: tensor_key = list(tensor.keys()) arr = np.array(tensor[tensor_key[0]]) base_key = list(baseline.keys()) base = np.array(baseline[base_key[0]]) self.assertTrue( base.shape == arr.shape, "The output shapes are not equal, the baseline shape is " + str(base.shape) + ', but got ' + str(arr.shape)) self.assertTrue( np.allclose( base, arr, atol=atol, rtol=rtol), "Output has diff. 
") else: for key in tensor: opencl_str = "/target_trans" index = key.rfind(opencl_str) paddlekey=key if index > 0: paddlekey = key[0: index] if (key == "saved_mean" or key == "saved_variance"): continue arr = np.array(tensor[key]) self.assertTrue( baseline[paddlekey].shape == arr.shape, "The output shapes are not equal, the baseline shape is " + str(baseline[paddlekey].shape) + ', but got ' + str(arr.shape)) self.assertTrue( np.allclose( baseline[paddlekey], arr, atol=atol, rtol=rtol), "Output has diff. ") def generate_op_config(self, ops_config: List[Dict[str, Any]]) -> List[OpConfig]: ops = [] for i in range(len(ops_config)): op_config = ops_config[i] ops.append( OpConfig( type=op_config['op_type'], inputs=op_config['op_inputs'], outputs=op_config['op_outputs'], attrs=op_config['op_attrs'])) return ops @abc.abstractmethod def ignore_log(self, msg: str): logging.warning("SKIP: " + msg) @abc.abstractmethod def fail_log(self, msg: str): logging.fatal("FAILE: " + msg) @abc.abstractmethod def success_log(self, msg: str): logging.info("SUCCESS: " + msg) @abc.abstractmethod def create_inference_config(self, passes: Optional[List[str]]=None, use_gpu: bool=False, use_mkldnn: bool=False, ir_optim: Optional[bool]=None): config = paddle_infer.Config() config.switch_ir_debug(True) config.disable_glog_info() if ir_optim is not None: config.switch_ir_optim(ir_optim) if use_gpu: config.enable_use_gpu(100, 0) if use_mkldnn: config.enable_mkldnn() if passes is not None: config.pass_builder().set_passes(passes) self.passes = passes return config def run_test(self, quant=False, prog_configs=None): status = True paddlelite_configs, op_list_, (atol_, rtol_) = self.sample_predictor_configs() for prog_config in prog_configs: program_valid_ = False for paddlelite_config in paddlelite_configs: if self.is_program_valid(prog_config, paddlelite_config): program_valid_ = True if not program_valid_: self.num_invalid_programs += 1 continue self.num_ran_programs += 1 model, params = 
create_fake_model(prog_config) if quant: model, params = create_quant_model(model, params) feed_data = {} for name, tensor_config in prog_config.inputs.items(): feed_data[name] = { 'data': tensor_config.data, 'lod': tensor_config.lod } results: List[Dict[str, np.ndarray]] = [] base_config = self.create_inference_config(ir_optim=False) logging.info('[ProgramConfig]: ' + str(prog_config)) results.append( self.run_test_config(model, params, prog_config, base_config, feed_data)) for paddlelite_config in paddlelite_configs: if not self.is_program_valid(prog_config, paddlelite_config): continue self.num_predictor_kinds += 1 ignore_flag = False pred_config = paddlelite_config.value() for ignore_info in self.ignore_cases: if ignore_info[0](prog_config, paddlelite_config): ignore_flag = True self.num_ignore_tests += 1 if ignore_info[1] == IgnoreReasonsBase.ACCURACY_ERROR: self.ignore_log("[ACCURACY_ERROR] " + ignore_info[2] + ' ' + ' vs ' + self. paddlelite_config_str(pred_config)) else: raise NotImplementedError break if os.path.exists(self.cache_dir): shutil.rmtree(self.cache_dir) if not os.path.exists(self.cache_dir): os.mkdir(self.cache_dir) try: result, opt_model_bytes = self.run_lite_config(model, params, feed_data, pred_config) results.append(result) self.assert_tensors_near(atol_, rtol_, results[-1], results[0]) if not ignore_flag and self.passes is not None: self.assert_op_list(opt_model_bytes, op_list_) except Exception as e: self.fail_log( self.paddlelite_config_str(pred_config) + '\033[1;31m \nERROR INFO: {}\033[0m'.format(str(e))) if not ignore_flag: status = False continue self.success_log('PredictorConfig: ' + self. 
paddlelite_config_str(pred_config)) self.assertTrue(status) def inference_config_str(self, config) -> bool: dic = {} enable_mkldnn = config.mkldnn_enabled() dic['use_mkldnn'] = enable_mkldnn enable_gpu = config.use_gpu() return str(dic) def paddlelite_config_str(self, config) -> bool: return str(config) def add_ignore_pass_case(self): return def assert_op_list(self, model_bytes, op_list_after_fusion): if not self.passes: raise ValueError( "In PassAutoScan you should give a valid pass name.") pg = paddle.static.deserialize_program(model_bytes) main_block = pg.desc.block(0) after_op_list = list() for i in range(main_block.op_size()): if main_block.op(i).type() in ["feed", "fetch"]: continue after_op_list.append(main_block.op(i).type()) self.assertTrue( op_list_after_fusion == after_op_list, "Expected operator list after fusion is {}, but now it's {}".format( op_list_after_fusion, after_op_list), ) def run_and_statis( self, quant=False, max_examples=100, reproduce=None, min_success_num=25, max_duration=180, passes=None ): if os.getenv('HYPOTHESIS_TEST_PROFILE', 'ci') == "dev": max_examples *= 10 min_success_num *= 10 # while at ce phase, there's no limit on time max_duration = -1 start_time = time.time() settings.register_profile( "ci", max_examples=max_examples, suppress_health_check=hypothesis.HealthCheck.all(), deadline=None, print_blob=True, derandomize=True, report_multiple_bugs=False, ) settings.load_profile("ci") self.passes = passes self.add_ignore_pass_case() def program_generator(draw): return self.sample_program_configs(draw) def run_test(prog_config): return self.run_test(quant=quant, prog_configs=[prog_config]) if not self.is_actived(): logging.info("Error: This test is not actived on " + self.get_target()) return generator = st.composite(program_generator) loop_func = given(generator())(run_test) if reproduce is not None: loop_func = reproduce(loop_func) logging.info("Start to running test of {}".format(type(self))) loop_func() logging.info( 
"===================Statistical Information===================") logging.info("Number of Generated Programs: {}".format( self.num_ran_programs + self.num_invalid_programs)) logging.info("Number of Invalid Programs: {}".format( self.num_invalid_programs)) logging.info("Number of Ran Programs: {}".format(self.num_ran_programs)) logging.info("Number of Ignored Tests: {}".format( self.num_ignore_tests)) if self.num_predictor_kinds == 0: successful_ran_programs = int(self.num_ran_programs) min_success_num = 0 else: successful_ran_programs = int(self.num_ran_programs - self.num_ignore_tests / self.num_predictor_kinds) logging.info( "Number of successfully ran programs approximately equal to {}". format(successful_ran_programs)) if successful_ran_programs < min_success_num: logging.warning( "satisfied_programs = ran_programs - num_ignore_tests / num_predictor_kinds" ) logging.fatal( "At least {} programs need to ran successfully, but now only about {} programs satisfied.". format(min_success_num, successful_ran_programs)) assert False used_time = time.time() - start_time if max_duration > 0 and used_time > max_duration: logging.fatal( "The duration exceeds {} seconds, if this is neccessary, try to set a larger number for parameter `max_duration`.". 
format(max_duration)) assert False @abc.abstractmethod def run_lite_config(self, model, params, feed_data, pred_config) -> Dict[str, np.ndarray]: raise NotImplementedError def enable_testing_on_place(self, target=None, precision=None, layout=None, thread=None, places=None) -> None: if isinstance(thread,list): self.thread_num = list(set(self.thread_num + thread)) if isinstance(thread,int): self.thread_num.append(thread) self.thread_num = list(self.thread_num) if places is not None: assert isinstance(places, list) self.valid_places.append(places) return assert (target is not None) target_ = target if isinstance(target,list) else [target] precision_ = precision if isinstance(precision, list) else [precision] layout_ = layout if isinstance(layout,list) else [layout] for tar_, pre_, lay_ in product(target_, precision_, layout_): self.valid_places.append([Place(tar_, pre_, lay_)]) return def get_target(self) -> str: return self.args.target def is_actived(self) -> bool: for valid_place_ in self.valid_places: if self.get_target() in valid_place_[0]: return True return False def get_predictor_configs(self) -> List[CxxConfig]: return self.target_to_predictor_configs(self, self.get_target()) @staticmethod def target_to_predictor_configs(self,target:str) -> List[CxxConfig]: configs_ = [] for elem_ in self.valid_places: if target in elem_[0]: for thread_ in self.thread_num: config_ = CxxConfig() config_.set_valid_places(elem_) config_.set_threads(thread_) configs_.append(config_) return configs_
true
true
f71386d08e0198f828a61f421605596b20d2de39
861
py
Python
banme/banme.py
reo-ar/reo-cogs
1424270a41891822e3a6e68bd43a92abe75fa6fc
[ "Unlicense" ]
1
2020-10-22T21:28:55.000Z
2020-10-22T21:28:55.000Z
banme/banme.py
reo-ar/reo-cogs
1424270a41891822e3a6e68bd43a92abe75fa6fc
[ "Unlicense" ]
null
null
null
banme/banme.py
reo-ar/reo-cogs
1424270a41891822e3a6e68bd43a92abe75fa6fc
[ "Unlicense" ]
null
null
null
from typing import Any import discord from redbot.core import commands from redbot.core.utils.predicates import MessagePredicate Cog: Any = getattr(commands, "Cog", object) class BanMe(Cog): """ Ridiculous cog for a ridiculous request """ def __init__(self, bot): self.bot = bot @commands.command() async def banme(self, ctx): """ Does what it says on the tin, bans yourself. """ await ctx.send("Are you sure about banning yourself? Answer with a `Yes` or a `No`") pred = MessagePredicate.yes_or_no(ctx) await self.bot.wait_for("message", check=pred) if pred.result is True: try: await ctx.guild.ban(ctx.author, reason="You literally banned yourself") except Exception as e: await ctx.send(f"Error: ***{e}***")
27.774194
92
0.614402
from typing import Any import discord from redbot.core import commands from redbot.core.utils.predicates import MessagePredicate Cog: Any = getattr(commands, "Cog", object) class BanMe(Cog): def __init__(self, bot): self.bot = bot @commands.command() async def banme(self, ctx): await ctx.send("Are you sure about banning yourself? Answer with a `Yes` or a `No`") pred = MessagePredicate.yes_or_no(ctx) await self.bot.wait_for("message", check=pred) if pred.result is True: try: await ctx.guild.ban(ctx.author, reason="You literally banned yourself") except Exception as e: await ctx.send(f"Error: ***{e}***")
true
true
f7138833d6bb52401ccd4e503b1eb3526852629c
3,335
py
Python
tensorflow/python/summary/impl/io_wrapper.py
jdehotin/TensorFlow
a6c5f8e4e013e54fed8dfcf49fb6de365f018022
[ "Apache-2.0" ]
5
2018-03-22T06:56:15.000Z
2018-09-04T02:41:35.000Z
tensorflow/python/summary/impl/io_wrapper.py
jdehotin/TensorFlow
a6c5f8e4e013e54fed8dfcf49fb6de365f018022
[ "Apache-2.0" ]
1
2021-04-12T03:51:59.000Z
2021-04-12T03:51:59.000Z
tensorflow/python/summary/impl/io_wrapper.py
jdehotin/TensorFlow
a6c5f8e4e013e54fed8dfcf49fb6de365f018022
[ "Apache-2.0" ]
5
2018-02-27T00:34:23.000Z
2022-02-28T16:38:08.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functions that wrap both gfile and gcs. This module is *not* intended to be a general-purpose IO wrapper library; it only implements the operations that are necessary for loading event files. The functions either dispatch to the gcs library or to gfile, depending on whether the path is a GCS 'pseudo-path' (i.e., it satisfies gcs.IsGCSPath) or not. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.platform import gfile from tensorflow.python.summary.impl import event_file_loader from tensorflow.python.summary.impl import gcs from tensorflow.python.summary.impl import gcs_file_loader def CreateFileLoader(path): """Creates a file loader for the given path. Args: path: A string representing either a normal path or a GCS Returns: An object with a Load() method that yields event_pb2.Event protos. """ if gcs.IsGCSPath(path): return gcs_file_loader.GCSFileLoader(path) else: return event_file_loader.EventFileLoader(path) def ListDirectoryAbsolute(directory): """Yields all files in the given directory. 
The paths are absolute.""" if gcs.IsGCSPath(directory): return gcs.ListDirectory(directory) else: return (os.path.join(directory, path) for path in gfile.ListDirectory(directory)) def ListRecursively(top): """Walks a directory tree, yielding (dir_path, file_paths) tuples. For each of `top` and its subdirectories, yields a tuple containing the path to the directory and the path to each of the contained files. Note that unlike os.Walk()/gfile.Walk(), this does not list subdirectories and the file paths are all absolute. If the directory does not exist, this yields nothing. Args: top: A path to a directory.. Yields: A list of (dir_path, file_paths) tuples. """ if gcs.IsGCSPath(top): for x in gcs.ListRecursively(top): yield x else: for dir_path, _, filenames in gfile.Walk(top): yield (dir_path, (os.path.join(dir_path, filename) for filename in filenames)) def IsDirectory(path): """Returns true if path exists and is a directory.""" if gcs.IsGCSPath(path): return gcs.IsDirectory(path) else: return gfile.IsDirectory(path) def Exists(path): if gcs.IsGCSPath(path): return gcs.Exists(path) else: return gfile.Exists(path) def Size(path): """Returns the number of bytes in the given file. Doesn't work on GCS.""" if gcs.IsGCSPath(path): raise NotImplementedError("io_wrapper.Size doesn't support GCS paths") else: return gfile.Open(path).size()
32.696078
80
0.71994
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.platform import gfile from tensorflow.python.summary.impl import event_file_loader from tensorflow.python.summary.impl import gcs from tensorflow.python.summary.impl import gcs_file_loader def CreateFileLoader(path): if gcs.IsGCSPath(path): return gcs_file_loader.GCSFileLoader(path) else: return event_file_loader.EventFileLoader(path) def ListDirectoryAbsolute(directory): if gcs.IsGCSPath(directory): return gcs.ListDirectory(directory) else: return (os.path.join(directory, path) for path in gfile.ListDirectory(directory)) def ListRecursively(top): if gcs.IsGCSPath(top): for x in gcs.ListRecursively(top): yield x else: for dir_path, _, filenames in gfile.Walk(top): yield (dir_path, (os.path.join(dir_path, filename) for filename in filenames)) def IsDirectory(path): if gcs.IsGCSPath(path): return gcs.IsDirectory(path) else: return gfile.IsDirectory(path) def Exists(path): if gcs.IsGCSPath(path): return gcs.Exists(path) else: return gfile.Exists(path) def Size(path): if gcs.IsGCSPath(path): raise NotImplementedError("io_wrapper.Size doesn't support GCS paths") else: return gfile.Open(path).size()
true
true
f7138874e490e4bbf693bf32a7ec22b4a4ec1221
503
py
Python
netbox_topology_views/api/serializers.py
spalmesano/netbox-topology-views
3b4209223e31cc96adf4b133eea36bc982d25d7e
[ "Apache-2.0" ]
201
2020-04-16T16:08:45.000Z
2022-03-28T02:20:34.000Z
netbox_topology_views/api/serializers.py
spalmesano/netbox-topology-views
3b4209223e31cc96adf4b133eea36bc982d25d7e
[ "Apache-2.0" ]
63
2020-04-15T16:20:17.000Z
2022-03-27T01:05:09.000Z
netbox_topology_views/api/serializers.py
spalmesano/netbox-topology-views
3b4209223e31cc96adf4b133eea36bc982d25d7e
[ "Apache-2.0" ]
26
2020-05-14T13:42:23.000Z
2022-03-28T02:20:37.000Z
from rest_framework.serializers import ModelSerializer from dcim.models import DeviceRole, Device from extras.models import Tag class PreDeviceRoleSerializer(ModelSerializer): class Meta: model = DeviceRole fields = ('id', 'name') class PreTagSerializer(ModelSerializer): class Meta: model = Tag fields = ('id', 'name') class TopologyDummySerializer(ModelSerializer): class Meta: model = Device fields = ('id', 'name')
20.958333
54
0.656064
from rest_framework.serializers import ModelSerializer from dcim.models import DeviceRole, Device from extras.models import Tag class PreDeviceRoleSerializer(ModelSerializer): class Meta: model = DeviceRole fields = ('id', 'name') class PreTagSerializer(ModelSerializer): class Meta: model = Tag fields = ('id', 'name') class TopologyDummySerializer(ModelSerializer): class Meta: model = Device fields = ('id', 'name')
true
true
f713888e2eb6650e6c8c4d7d10f378838c57d92c
648
py
Python
compiled/construct/switch_manual_str.py
smarek/ci_targets
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
[ "MIT" ]
4
2017-04-08T12:55:11.000Z
2020-12-05T21:09:31.000Z
compiled/construct/switch_manual_str.py
smarek/ci_targets
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
[ "MIT" ]
7
2018-04-23T01:30:33.000Z
2020-10-30T23:56:14.000Z
compiled/construct/switch_manual_str.py
smarek/ci_targets
c5edee7b0901fd8e7f75f85245ea4209b38e0cb3
[ "MIT" ]
6
2017-04-08T11:41:14.000Z
2020-10-30T22:47:31.000Z
from construct import * from construct.lib import * switch_manual_str__opcode__intval = Struct( 'value' / Int8ub, ) switch_manual_str__opcode__strval = Struct( 'value' / NullTerminated(GreedyString(encoding='ASCII'), term=b'\x00', include=False, consume=True), ) switch_manual_str__opcode = Struct( 'code' / FixedSized(1, GreedyString(encoding='ASCII')), 'body' / Switch(this.code, {u"I": LazyBound(lambda: switch_manual_str__opcode__intval), u"S": LazyBound(lambda: switch_manual_str__opcode__strval), }), ) switch_manual_str = Struct( 'opcodes' / GreedyRange(LazyBound(lambda: switch_manual_str__opcode)), ) _schema = switch_manual_str
29.454545
152
0.770062
from construct import * from construct.lib import * switch_manual_str__opcode__intval = Struct( 'value' / Int8ub, ) switch_manual_str__opcode__strval = Struct( 'value' / NullTerminated(GreedyString(encoding='ASCII'), term=b'\x00', include=False, consume=True), ) switch_manual_str__opcode = Struct( 'code' / FixedSized(1, GreedyString(encoding='ASCII')), 'body' / Switch(this.code, {u"I": LazyBound(lambda: switch_manual_str__opcode__intval), u"S": LazyBound(lambda: switch_manual_str__opcode__strval), }), ) switch_manual_str = Struct( 'opcodes' / GreedyRange(LazyBound(lambda: switch_manual_str__opcode)), ) _schema = switch_manual_str
true
true
f71388bcab372f438de6790334669998d9e80989
4,156
py
Python
intersight/models/hyperflex_cluster_network_policy_list.py
ategaw-cisco/intersight-python
9d6476620507281b1dc358e29ac452d56081bbb0
[ "Apache-2.0" ]
null
null
null
intersight/models/hyperflex_cluster_network_policy_list.py
ategaw-cisco/intersight-python
9d6476620507281b1dc358e29ac452d56081bbb0
[ "Apache-2.0" ]
null
null
null
intersight/models/hyperflex_cluster_network_policy_list.py
ategaw-cisco/intersight-python
9d6476620507281b1dc358e29ac452d56081bbb0
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ Intersight REST API This is Intersight REST API OpenAPI spec version: 1.0.9-262 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class HyperflexClusterNetworkPolicyList(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'count': 'int', 'results': 'list[HyperflexClusterNetworkPolicy]' } attribute_map = { 'count': 'Count', 'results': 'Results' } def __init__(self, count=None, results=None): """ HyperflexClusterNetworkPolicyList - a model defined in Swagger """ self._count = None self._results = None if count is not None: self.count = count if results is not None: self.results = results @property def count(self): """ Gets the count of this HyperflexClusterNetworkPolicyList. The number of hyperflexClusterNetworkPolicies matching your request in total for all pages. :return: The count of this HyperflexClusterNetworkPolicyList. :rtype: int """ return self._count @count.setter def count(self, count): """ Sets the count of this HyperflexClusterNetworkPolicyList. The number of hyperflexClusterNetworkPolicies matching your request in total for all pages. :param count: The count of this HyperflexClusterNetworkPolicyList. :type: int """ self._count = count @property def results(self): """ Gets the results of this HyperflexClusterNetworkPolicyList. The array of hyperflexClusterNetworkPolicies matching your request. :return: The results of this HyperflexClusterNetworkPolicyList. :rtype: list[HyperflexClusterNetworkPolicy] """ return self._results @results.setter def results(self, results): """ Sets the results of this HyperflexClusterNetworkPolicyList. 
The array of hyperflexClusterNetworkPolicies matching your request. :param results: The results of this HyperflexClusterNetworkPolicyList. :type: list[HyperflexClusterNetworkPolicy] """ self._results = results def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, HyperflexClusterNetworkPolicyList): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
26.987013
99
0.582531
from pprint import pformat from six import iteritems import re class HyperflexClusterNetworkPolicyList(object): swagger_types = { 'count': 'int', 'results': 'list[HyperflexClusterNetworkPolicy]' } attribute_map = { 'count': 'Count', 'results': 'Results' } def __init__(self, count=None, results=None): self._count = None self._results = None if count is not None: self.count = count if results is not None: self.results = results @property def count(self): return self._count @count.setter def count(self, count): self._count = count @property def results(self): return self._results @results.setter def results(self, results): self._results = results def to_dict(self): result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): return pformat(self.to_dict()) def __repr__(self): return self.to_str() def __eq__(self, other): if not isinstance(other, HyperflexClusterNetworkPolicyList): return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
true
true
f71388cadb94d8215dddb8bc4a7cb2d38d7823a4
95,078
py
Python
tensorflow/python/keras/engine/training.py
ahoneybun/tensorflow
5134e65300d1ac384eeb1f4ca72a011ad7225bc8
[ "Apache-2.0" ]
5
2018-10-20T03:54:49.000Z
2021-01-02T07:19:53.000Z
tensorflow/python/keras/engine/training.py
liufengdb/tensorflow
51100a8de57ef53e36a8a9f5a9829cbd33fbed04
[ "Apache-2.0" ]
null
null
null
tensorflow/python/keras/engine/training.py
liufengdb/tensorflow
51100a8de57ef53e36a8a9f5a9829cbd33fbed04
[ "Apache-2.0" ]
2
2018-11-03T01:19:26.000Z
2021-04-23T02:34:07.000Z
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Training-related part of the Keras engine. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import weakref import numpy as np from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.eager import context from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend as K from tensorflow.python.keras import losses from tensorflow.python.keras import metrics as metrics_module from tensorflow.python.keras import optimizers from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import distributed_training_utils from tensorflow.python.keras.engine import training_arrays from tensorflow.python.keras.engine import training_distributed from tensorflow.python.keras.engine import training_eager from tensorflow.python.keras.engine import training_generator from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.engine.network import Network from tensorflow.python.keras.utils.generic_utils import slice_arrays from tensorflow.python.ops import math_ops from tensorflow.python.ops import 
weights_broadcast_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import optimizer as tf_optimizer_module from tensorflow.python.training.checkpointable import base as checkpointable from tensorflow.python.util.tf_export import tf_export @tf_export('keras.models.Model', 'keras.Model') class Model(Network): """`Model` groups layers into an object with training and inference features. There are two ways to instantiate a `Model`: 1 - With the "functional API", where you start from `Input`, you chain layer calls to specify the model's forward pass, and finally you create your model from inputs and outputs: ```python import tensorflow as tf inputs = tf.keras.Input(shape=(3,)) x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs) outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x) model = tf.keras.Model(inputs=inputs, outputs=outputs) ``` 2 - By subclassing the `Model` class: in that case, you should define your layers in `__init__` and you should implement the model's forward pass in `call`. 
```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) def call(self, inputs): x = self.dense1(inputs) return self.dense2(x) model = MyModel() ``` If you subclass `Model`, you can optionally have a `training` argument (boolean) in `call`, which you can use to specify a different behavior in training and inference: ```python import tensorflow as tf class MyModel(tf.keras.Model): def __init__(self): super(MyModel, self).__init__() self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu) self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax) self.dropout = tf.keras.layers.Dropout(0.5) def call(self, inputs, training=False): x = self.dense1(inputs) if training: x = self.dropout(x, training=training) return self.dense2(x) model = MyModel() ``` """ def __init__(self, *args, **kwargs): super(Model, self).__init__(*args, **kwargs) # Create a cache for iterator get_next op. self._iterator_get_next = weakref.WeakKeyDictionary() # Create a cache for dataset - uninitialized iterators self._dataset_iterator_cache = weakref.WeakKeyDictionary() # initializing _distribution_strategy here since it is possible to call # predict on a model without compiling it. 
    self._distribution_strategy = None

  def _set_sample_weight_attributes(self, sample_weight_mode,
                                    skip_target_weighing_indices):
    """Sets sample weight related attributes on the model."""
    sample_weights, sample_weight_modes = training_utils.prepare_sample_weights(
        self.output_names, sample_weight_mode, skip_target_weighing_indices)
    self.sample_weights = sample_weights
    self.sample_weight_modes = sample_weight_modes
    # The `_feed_*` variants keep only the entries that will actually be fed
    # to the model: outputs whose index is not listed in
    # `skip_target_weighing_indices`.
    self._feed_sample_weight_modes = [
        sample_weight_modes[i]
        for i in range(len(self.outputs))
        if i not in skip_target_weighing_indices
    ]
    self._feed_sample_weights = [
        sample_weights[i]
        for i in range(len(sample_weights))
        if i not in skip_target_weighing_indices
    ]

  def _get_metric_name(self, metric, output_index, weighted=False):
    """Returns the metric name corresponding to the given metric input.

    Arguments:
      metric: Metric function name or reference.
      output_index: Index of the current output.
      weighted: Boolean indicating if the given metric is weighted.

    Returns:
        A metric name.
    """
    metric_name_prefix = 'weighted_' if weighted else ''
    if metric in ('accuracy', 'acc', 'crossentropy', 'ce'):
      # Short aliases for the common accuracy / crossentropy metrics.
      if metric in ('accuracy', 'acc'):
        suffix = 'acc'
      elif metric in ('crossentropy', 'ce'):
        suffix = 'ce'
    else:
      metric_fn = metrics_module.get(metric)
      # Get metric name as string
      if hasattr(metric_fn, 'name'):
        suffix = metric_fn.name
      else:
        suffix = metric_fn.__name__
    metric_name = metric_name_prefix + suffix

    if len(self.output_names) > 1:
      # Prefix with the output's name to disambiguate multi-output models.
      metric_name = '%s_%s' % (self.output_names[output_index], metric_name)
    j = 1
    base_metric_name = metric_name
    # Deduplicate: append `_1`, `_2`, ... until the name is unique among the
    # metric names already registered on this model.
    while metric_name in self.metrics_names:
      metric_name = '%s_%d' % (base_metric_name, j)
      j += 1

    return metric_name

  def _handle_per_output_metrics(self,
                                 metrics,
                                 y_true,
                                 y_pred,
                                 output_index,
                                 output_shape,
                                 loss_fn,
                                 mask,
                                 weights=None):
    """Calls metric functions and sets metric attributes for a single output.

    Arguments:
      metrics: List of metrics.
      y_true: Target output.
      y_pred: Predicted output.
      output_index: Index of the current output.
output_shape: Shape of the current output. loss_fn: Loss function corresponding to the current output. mask: Computed mask value for the current output. weights: Weights to be applied on the current output. Returns: A list of metric result tensors. """ metric_results = [] for metric in metrics: metric_fn = training_utils.get_metric_function( metric, output_shape=output_shape, loss_fn=loss_fn) metric_name = self._get_metric_name( metric, output_index, weighted=weights is not None) with K.name_scope(metric_name): # If both outputs and targets are available, call the metric function. if y_true is not None and y_pred is not None: if isinstance(metric_fn, metrics_module.Metric): # Call the stateful metric function. if mask is not None: mask = math_ops.cast(mask, y_pred.dtype) # Update weights with mask. if weights is None: weights = mask else: # Update shape of weights if possible before adding mask. # Update dimensions of weights to match with mask if possible. mask, _, weights = metrics_module.squeeze_or_expand_dimensions( mask, None, weights) try: # Broadcast weights if possible. weights = weights_broadcast_ops.broadcast_weights( weights, mask) except ValueError: pass # TODO(psv): Handle case when mask and weight shapes are not # compatible. weights *= mask metric_result = metric_fn(y_true, y_pred, weights) else: # Call the stateless metric function. weighted_metric_fn = training_utils.weighted_masked_objective( metric_fn) metric_result = weighted_metric_fn( y_true, y_pred, weights=weights, mask=mask) if not context.executing_eagerly(): # Keep track of metric result tensor. self.metrics_tensors.append(metric_result) metric_results.append(metric_result) # Keep track of metric name. self.metrics_names.append(metric_name) # Keep track of stateful metric attributes (name and metric function). 
if isinstance(metric_fn, base_layer.Layer) and metric_fn.stateful: self.stateful_metric_names.append(metric_name) self.stateful_metric_functions.append(metric_fn) if not context.executing_eagerly(): # Keep track of updates created by stateful metrics. self.metrics_updates += metric_fn.updates return metric_results def _handle_metrics(self, outputs, skip_target_indices=None, targets=None, sample_weights=None, masks=None): """Handles calling metric functions and setting model metric attributes. Arguments: outputs: List of outputs (predictions). skip_target_indices: Optional. List of target ids to skip. targets: List of targets. sample_weights: Optional list of sample weight arrays. masks: List of computed output mask values. Returns: A list of metric result tensors. """ skip_target_indices = skip_target_indices or [] metric_results = [] with K.name_scope('metrics'): for i in range(len(outputs)): if i in skip_target_indices: continue output = outputs[i] if outputs else None target = targets[i] if targets else None output_shape = None if output is None else output.get_shape().as_list() output_mask = masks[i] if masks else None metric_results.extend( self._handle_per_output_metrics( self.nested_metrics[i], target, output, i, output_shape, self.loss_functions[i], output_mask)) metric_results.extend( self._handle_per_output_metrics( self.nested_weighted_metrics[i], target, output, i, output_shape, self.loss_functions[i], output_mask, weights=sample_weights[i])) return metric_results @checkpointable.no_automatic_dependency_tracking def compile(self, optimizer, loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None, distribute=None, **kwargs): """Configures the model for training. Arguments: optimizer: String (name of optimizer) or optimizer instance. See [optimizers](/api_docs/python/tf/keras/optimizers). loss: String (name of objective function) or objective function. See [losses](/api_docs/python/tf/losses). 
If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then be the sum of all individual losses. metrics: List of metrics to be evaluated by the model during training and testing. Typically you will use `metrics=['accuracy']`. To specify different metrics for different outputs of a multi-output model, you could also pass a dictionary, such as `metrics={'output_a': 'accuracy'}`. loss_weights: Optional list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then be the *weighted sum* of all individual losses, weighted by the `loss_weights` coefficients. If a list, it is expected to have a 1:1 mapping to the model's outputs. If a tensor, it is expected to map output names (strings) to scalar coefficients. sample_weight_mode: If you need to do timestep-wise sample weighting (2D weights), set this to `"temporal"`. `None` defaults to sample-wise weights (1D). If the model has multiple outputs, you can use a different `sample_weight_mode` on each output by passing a dictionary or a list of modes. weighted_metrics: List of metrics to be evaluated and weighted by sample_weight or class_weight during training and testing. target_tensors: By default, Keras will create placeholders for the model's target, which will be fed with the target data during training. If instead you would like to use your own target tensors (in turn, Keras will not expect external Numpy data for these targets at training time), you can specify them via the `target_tensors` argument. It can be a single tensor (for a single-output model), a list of tensors, or a dict mapping output names to target tensors. distribute: The DistributionStrategy instance that we want to use to distribute the training of the model. 
**kwargs: These arguments are passed to `tf.Session.run`. Raises: ValueError: In case of invalid arguments for `optimizer`, `loss`, `metrics` or `sample_weight_mode`. """ # Validate that arguments passed by the user to `compile` are supported by # DistributionStrategy. if distribute and not isinstance( optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)): raise NotImplementedError('Only TF native optimizers are supported with ' 'DistributionStrategy.') if distribute and context.executing_eagerly(): raise NotImplementedError('DistributionStrategy is not supported in ' 'Eager mode.') if distribute and sample_weight_mode: raise NotImplementedError('sample_weight_mode is not supported with ' 'DistributionStrategy.') if distribute and weighted_metrics: raise NotImplementedError('weighted_metrics is not supported with ' 'DistributionStrategy.') if distribute and target_tensors: raise ValueError('target_tensors is not supported with ' 'DistributionStrategy.') loss = loss or {} if context.executing_eagerly() and not isinstance( optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)): raise ValueError('Only TF native optimizers are supported in Eager mode.') self.optimizer = optimizers.get(optimizer) # We've disabled automatic dependency tracking for this method, but do want # to add a checkpoint dependency on the optimizer if it's checkpointable. if isinstance(self.optimizer, checkpointable.CheckpointableBase): self._track_checkpointable( self.optimizer, name='optimizer', overwrite=True) self.loss = loss self.metrics = metrics or [] self.loss_weights = loss_weights self.sample_weight_mode = sample_weight_mode self.weighted_metrics = weighted_metrics if context.executing_eagerly() and target_tensors is not None: raise ValueError('target_tensors is not supported in Eager mode.') self.target_tensors = target_tensors # Set DistributionStrategy specific parameters. 
self._distribution_strategy = distribute if self._distribution_strategy is not None: self._grouped_model = self._compile_distributed_model( self._distribution_strategy) with self._distribution_strategy.scope(): first_replicated_model = self._distribution_strategy.unwrap( self._grouped_model)[0] # If the specified metrics in `compile` are stateful, raise an error # since we currently don't support stateful metrics. if first_replicated_model.stateful_metric_names: raise NotImplementedError('Stateful metrics are not supported with ' 'DistributionStrategy.') # We initialize the callback model with the first replicated model. self._replicated_model = DistributedCallbackModel(first_replicated_model) self._replicated_model.set_original_model(self) if not self.built: # Model is not compilable because it does not know its number of inputs # and outputs, nor their shapes and names. We will compile after the first # time the model gets called on training data. return self._is_compiled = True # Prepare loss functions. if isinstance(loss, dict): for name in loss: if name not in self.output_names: raise ValueError( 'Unknown entry in loss ' 'dictionary: "' + name + '". ' 'Only expected the following keys: ' + str(self.output_names)) loss_functions = [] for name in self.output_names: if name not in loss: logging.warning( 'Output "' + name + '" missing from loss dictionary. We assume ' 'this was done on purpose. The fit and evaluate APIs will not be ' 'expecting any data to be passed to "' + name + '".') loss_functions.append(losses.get(loss.get(name))) elif isinstance(loss, list): if len(loss) != len(self.outputs): raise ValueError('When passing a list as loss, ' 'it should have one entry per model outputs. 
' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed loss=' + str(loss)) loss_functions = [losses.get(l) for l in loss] else: loss_function = losses.get(loss) loss_functions = [loss_function for _ in range(len(self.outputs))] self.loss_functions = loss_functions weighted_losses = [training_utils.weighted_masked_objective(fn) for fn in loss_functions] skip_target_indices = [] skip_target_weighing_indices = [] self._feed_outputs = [] self._feed_output_names = [] self._feed_output_shapes = [] self._feed_loss_fns = [] for i in range(len(weighted_losses)): if weighted_losses[i] is None: skip_target_indices.append(i) skip_target_weighing_indices.append(i) # Prepare output masks. if not context.executing_eagerly(): masks = [getattr(x, '_keras_mask', None) for x in self.outputs] if not isinstance(masks, list): masks = [masks] # Prepare loss weights. if loss_weights is None: loss_weights_list = [1. for _ in range(len(self.outputs))] elif isinstance(loss_weights, dict): for name in loss_weights: if name not in self.output_names: raise ValueError( 'Unknown entry in loss_weights ' 'dictionary: "' + name + '". ' 'Only expected the following keys: ' + str(self.output_names)) loss_weights_list = [] for name in self.output_names: loss_weights_list.append(loss_weights.get(name, 1.)) elif isinstance(loss_weights, list): if len(loss_weights) != len(self.outputs): raise ValueError( 'When passing a list as loss_weights, ' 'it should have one entry per model output. ' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed loss_weights=' + str(loss_weights)) loss_weights_list = loss_weights else: raise TypeError('Could not interpret loss_weights argument: ' + str(loss_weights) + ' - expected a list of dicts.') self.loss_weights_list = loss_weights_list # Initialize model metric attributes. 
self.metrics_names = ['loss'] self.metrics_tensors = [] self.metrics_updates = [] self.stateful_metric_names = [] self.stateful_metric_functions = [] # Nested metrics is a list of list of metrics. # One list per output of the model. self.nested_metrics = training_utils.collect_metrics( metrics, self.output_names) self.nested_weighted_metrics = training_utils.collect_metrics( weighted_metrics, self.output_names) # Initialization for Eager mode execution. if context.executing_eagerly(): # Prepare sample weights. self._set_sample_weight_attributes(sample_weight_mode, skip_target_weighing_indices) if target_tensors is not None: raise ValueError('target_tensors are not currently supported in Eager ' 'mode.') self.total_loss = None for i in range(len(self.outputs)): if len(self.outputs) > 1: self.metrics_names.append(self.output_names[i] + '_loss') # Set metric attributes on model. self._handle_metrics( self.outputs, skip_target_indices=skip_target_indices, sample_weights=self.sample_weights) self.targets = [] for i in range(len(self.outputs)): self._feed_output_names.append(self.output_names[i]) self._collected_trainable_weights = self.trainable_weights return # Prepare targets of model. self.targets = [] self._feed_targets = [] if target_tensors not in (None, []): if isinstance(target_tensors, list): if len(target_tensors) != len(self.outputs): raise ValueError( 'When passing a list as `target_tensors`, ' 'it should have one entry per model output. ' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed target_tensors=' + str(target_tensors)) elif isinstance(target_tensors, dict): for name in target_tensors: if name not in self.output_names: raise ValueError( 'Unknown entry in `target_tensors` ' 'dictionary: "' + name + '". 
' 'Only expected the following keys: ' + str(self.output_names)) tmp_target_tensors = [] for name in self.output_names: tmp_target_tensors.append(target_tensors.get(name, None)) target_tensors = tmp_target_tensors else: raise TypeError('Expected `target_tensors` to be ' 'a list or dict, but got:', target_tensors) for i in range(len(self.outputs)): if i in skip_target_indices: self.targets.append(None) else: shape = K.int_shape(self.outputs[i]) name = self.output_names[i] if target_tensors not in (None, []): target = target_tensors[i] else: target = None if target is None or K.is_placeholder(target): if target is None: target = K.placeholder( ndim=len(shape), name=name + '_target', sparse=K.is_sparse(self.outputs[i]), dtype=K.dtype(self.outputs[i])) self._feed_targets.append(target) self._feed_outputs.append(self.outputs[i]) self._feed_output_names.append(name) self._feed_output_shapes.append(shape) self._feed_loss_fns.append(self.loss_functions[i]) else: skip_target_weighing_indices.append(i) self.targets.append(target) # Prepare sample weights. self._set_sample_weight_attributes(sample_weight_mode, skip_target_weighing_indices) # Compute total loss. total_loss = None with K.name_scope('loss'): for i in range(len(self.outputs)): if i in skip_target_indices: continue y_true = self.targets[i] y_pred = self.outputs[i] weighted_loss = weighted_losses[i] sample_weight = self.sample_weights[i] mask = masks[i] loss_weight = loss_weights_list[i] with K.name_scope(self.output_names[i] + '_loss'): output_loss = weighted_loss(y_true, y_pred, sample_weight, mask) if len(self.outputs) > 1: self.metrics_tensors.append(output_loss) self.metrics_names.append(self.output_names[i] + '_loss') if total_loss is None: total_loss = loss_weight * output_loss else: total_loss += loss_weight * output_loss if total_loss is None: if not self.losses: raise ValueError('The model cannot be compiled ' 'because it has no loss to optimize.') else: total_loss = 0. 
# Add regularization penalties # and other layer-specific losses. for loss_tensor in self.losses: total_loss += loss_tensor # Invoke metric functions for all the outputs. self._handle_metrics( self.outputs, masks=masks, targets=self.targets, skip_target_indices=skip_target_indices, sample_weights=self.sample_weights) # Prepare gradient updates and state updates. self.total_loss = total_loss # Functions for train, test and predict will # be compiled lazily when required. # This saves time when the user is not using all functions. self._function_kwargs = kwargs self.train_function = None self.test_function = None self.predict_function = None # Collected trainable weights, sorted in topological order. trainable_weights = self.trainable_weights self._collected_trainable_weights = trainable_weights def _compile_distributed_model(self, distribution_strategy): # TODO(anjalisridhar): Can we move the clone_and_build_model to outside the # model? def _clone_model_per_tower(model): new_model = training_distributed.clone_and_build_model(model) return new_model with distribution_strategy.scope(): # Create a copy of this model on each of the devices. grouped_models = distribution_strategy.call_for_each_tower( _clone_model_per_tower, self) return grouped_models def _check_trainable_weights_consistency(self): """Check trainable weights count consistency. This will raise a warning if `trainable_weights` and `_collected_trainable_weights` are inconsistent (i.e. have different number of parameters). Inconsistency will typically arise when one modifies `model.trainable` without calling `model.compile` again. 
""" if not hasattr(self, '_collected_trainable_weights'): return if len(self.trainable_weights) != len(self._collected_trainable_weights): logging.warning( UserWarning( 'Discrepancy between trainable weights and collected trainable' ' weights, did you set `model.trainable` without calling' ' `model.compile` after ?')) def _make_train_function(self): if not hasattr(self, 'train_function'): raise RuntimeError('You must compile your model before using it.') self._check_trainable_weights_consistency() if self.train_function is None: inputs = (self._feed_inputs + self._feed_targets + self._feed_sample_weights) if self.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs += [K.learning_phase()] with K.name_scope('training'): with K.name_scope(self.optimizer.__class__.__name__): # Training updates updates = self.optimizer.get_updates( params=self._collected_trainable_weights, loss=self.total_loss) # Unconditional updates updates += self.get_updates_for(None) # Conditional updates relevant to this model updates += self.get_updates_for(self.inputs) # Stateful metrics updates updates += self.metrics_updates # Gets loss and metrics. Updates weights at each call. self.train_function = K.function( inputs, [self.total_loss] + self.metrics_tensors, updates=updates, name='train_function', **self._function_kwargs) def _make_test_function(self): if not hasattr(self, 'test_function'): raise RuntimeError('You must compile your model before using it.') if self.test_function is None: inputs = (self._feed_inputs + self._feed_targets + self._feed_sample_weights) if self.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs += [K.learning_phase()] # Return loss and metrics, no gradient updates. # Does update the network states. 
self.test_function = K.function( inputs, [self.total_loss] + self.metrics_tensors, updates=self.state_updates + self.metrics_updates, name='test_function', **self._function_kwargs) def _make_predict_function(self): if not hasattr(self, 'predict_function'): self.predict_function = None if self.predict_function is None: if self.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs = self._feed_inputs + [K.learning_phase()] else: inputs = self._feed_inputs # Gets network outputs. Does not update weights. # Does update the network states. kwargs = getattr(self, '_function_kwargs', {}) self.predict_function = K.function( inputs, self.outputs, updates=self.state_updates, name='predict_function', **kwargs) def _get_iterator_get_next_tensors(self, iterator): get_next_op = self._iterator_get_next.get(iterator, None) if get_next_op is None: get_next_op = iterator.get_next() self._iterator_get_next[iterator] = get_next_op return get_next_op def _distribution_standardize_user_data(self, x, y=None, sample_weight=None, class_weight=None, batch_size=None, check_steps=False, steps_name='steps', steps=None, validation_split=0): """Runs validation checks on input and target data passed by the user. This is called when using DistributionStrategy to train, evaluate or serve the model. Args: x: Input data. A `tf.data` dataset. y: Since `x` is a dataset, `y` should not be specified (since targets will be obtained from the iterator). sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`. class_weight: An optional class-weight array by the user to weight the importance of samples in `x` based on the class they belong to, as conveyed by `y`. batch_size: Integer batch size. If provided, it is used to run additional validation checks on stateful models. check_steps: boolean, True if we want to check for validity of `steps` and False, otherwise. steps_name: The public API's parameter name for `steps`. 
steps: Integer or `None`. Total number of steps (batches of samples) to execute. validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. Returns: A tuple of 3 lists: input arrays, target arrays, sample-weight arrays. If the model's input and targets are symbolic, these lists are empty (since the model takes no user-provided data, instead the data comes from the symbolic inputs/targets). Raises: ValueError: In case of invalid user-provided data. RuntimeError: If the model was never compiled. """ if sample_weight is not None and sample_weight.all(): raise NotImplementedError('sample_weight is currently not supported when ' 'using DistributionStrategy.') if class_weight: raise NotImplementedError('class_weight is currently not supported when ' 'using DistributionStrategy.') # TODO(anjalisridhar): Can we use the iterator and getnext op cache? # We require users to pass Datasets since we distribute the dataset across # multiple devices. if not isinstance(x, dataset_ops.Dataset): raise ValueError('When using DistributionStrategy you must specify a ' 'Dataset object instead of a %s.' % type(x)) # TODO(anjalisridhar): We want distribute_dataset() to accept a Dataset or a # function which returns a Dataset. Currently distribute_dataset() only # accepts a function that returns a Dataset. Once we add support for being # able to clone a Dataset on multiple workers we can remove this lambda. result = self._distribution_strategy.distribute_dataset(lambda: x) iterator = result.make_initializable_iterator() K.get_session().run(iterator.initializer) # Validates `steps` argument based on x's type. if check_steps: if steps is None: raise ValueError('When using a Dataset instance as input to a model, ' 'you should specify the `{steps_name}` argument.' 
.format(steps_name=steps_name)) training_utils.validate_iterator_input(x, y, sample_weight, validation_split) # x an y may be PerDevice objects with an input and output tensor # corresponding to each device. For example, x could be # PerDevice:{device: get_next tensor,...}. next_element = iterator.get_next() if not isinstance(next_element, (list, tuple)) or len(next_element) != 2: raise ValueError('Please provide data as a list or tuple of 2 elements ' ' - input and target pair. Received %s' % next_element) x, y = next_element # Validate that all the elements in x and y are of the same type and shape. # We can then pass the first element of x and y to `_standardize_weights` # below and be confident of the output. We need to reopen the scope since # we unwrap values when we validate x and y. with self._distribution_strategy.scope(): x_values, y_values = distributed_training_utils.\ validate_distributed_dataset_inputs(self._distribution_strategy, x, y) _, _, sample_weights = self._standardize_weights(x_values, y_values, sample_weight, class_weight, batch_size) return x, y, sample_weights def _standardize_user_data(self, x, y=None, sample_weight=None, class_weight=None, batch_size=None, check_steps=False, steps_name='steps', steps=None, validation_split=0): """Runs validation checks on input and target data passed by the user. Also standardizes the data to lists of arrays, in order. Also builds and compiles the model on the fly if it is a subclassed model that has never been called before (and thus has no inputs/outputs). This is a purely internal method, subject to refactoring at any time. Args: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset or a dataset iterator. y: Target data. 
Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset or a dataset iterator, `y` should not be specified (since targets will be obtained from the iterator). sample_weight: An optional sample-weight array passed by the user to weight the importance of each sample in `x`. class_weight: An optional class-weight array by the user to weight the importance of samples in `x` based on the class they belong to, as conveyed by `y`. batch_size: Integer batch size. If provided, it is used to run additional validation checks on stateful models. check_steps: boolean, True if we want to check for validity of `steps` and False, otherwise. For example, when we are standardizing one batch of data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps` value is not required and we should not check for its validity in these cases. steps_name: The public API's parameter name for `steps`. steps: Integer or `None`. Total number of steps (batches of samples) to execute. validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. Returns: A tuple of 3 lists: input arrays, target arrays, sample-weight arrays. If the model's input and targets are symbolic, these lists are empty (since the model takes no user-provided data, instead the data comes from the symbolic inputs/targets). Raises: ValueError: In case of invalid user-provided data. RuntimeError: If the model was never compiled. 
""" if self._distribution_strategy: return self._distribution_standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, batch_size=batch_size, check_steps=check_steps, steps_name=steps_name, steps=steps, validation_split=validation_split) if isinstance(x, dataset_ops.Dataset): if context.executing_eagerly(): x = x.make_one_shot_iterator() else: if x in self._dataset_iterator_cache: x = self._dataset_iterator_cache[x] else: iterator = x.make_initializable_iterator() self._dataset_iterator_cache[x] = iterator x = iterator K.get_session().run(x.initializer) # Validates `steps` argument based on x's type. if check_steps: training_utils.check_steps_argument(x, steps, steps_name) is_x_eager_iterator = isinstance(x, iterator_ops.EagerIterator) is_x_iterator = isinstance(x, iterator_ops.Iterator) # Validate user inputs when data is given as a dataset or dataset iterator. if is_x_iterator or is_x_eager_iterator: training_utils.validate_iterator_input(x, y, sample_weight, validation_split) # For eager iterators, when we have to process multiple batches of samples, # we will standardize the data when we actually loop over iterator and get # the batches. For now, we just return the iterator as is. if is_x_eager_iterator and steps is not None: return x, y, sample_weight # If input data is a dataset iterator in graph mode or if it is an eager # iterator and only one batch of samples is required, we fetch the data # tensors from the iterator and then standardize them. if is_x_iterator or is_x_eager_iterator: try: if is_x_iterator: next_element = self._get_iterator_get_next_tensors(x) else: next_element = x.get_next() except errors.OutOfRangeError: raise RuntimeError('Your dataset iterator ran out of data; ' 'Make sure that your dataset can generate ' 'required number of samples.') if not isinstance(next_element, (list, tuple)) or len(next_element) != 2: raise ValueError('Please provide data as a list or tuple of 2 elements ' ' - input and target pair. 
Received %s' % next_element) x, y = next_element x, y, sample_weights = self._standardize_weights(x, y, sample_weight, class_weight, batch_size) return x, y, sample_weights def _standardize_weights(self, x, y, sample_weight=None, class_weight=None, batch_size=None,): # First, we build/compile the model on the fly if necessary. all_inputs = [] is_build_called = False is_compile_called = False if not self.inputs: # We need to use `x` to set the model inputs. # We type-check that `x` and `y` are either single arrays # or lists of arrays. if isinstance(x, (list, tuple)): if not all(isinstance(v, np.ndarray) or tensor_util.is_tensor(v) for v in x): raise ValueError('Please provide as model inputs either a single ' 'array or a list of arrays. You passed: x=' + str(x)) all_inputs += list(x) elif isinstance(x, dict): raise ValueError('Please do not pass a dictionary as model inputs.') else: if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x): raise ValueError('Please provide as model inputs either a single ' 'array or a list of arrays. You passed: x=' + str(x)) all_inputs.append(x) # Build the model using the retrieved inputs (value or symbolic). # If values, then in symbolic-mode placeholders will be created # to match the value shapes. if not self.inputs: is_build_called = True self._set_inputs(x) if y is not None: if not self.optimizer: raise RuntimeError('You must compile a model before ' 'training/testing. ' 'Use `model.compile(optimizer, loss)`.') if not self._is_compiled: # On-the-fly compilation of the model. # We need to use `y` to set the model targets. if isinstance(y, (list, tuple)): if not all(isinstance(v, np.ndarray) or tensor_util.is_tensor(v) for v in y): raise ValueError('Please provide as model targets either a single ' 'array or a list of arrays. 
' 'You passed: y=' + str(y)) all_inputs += list(y) elif isinstance(y, dict): raise ValueError('Please do not pass a dictionary as model targets.') else: if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y): raise ValueError('Please provide as model targets either a single ' 'array or a list of arrays. ' 'You passed: y=' + str(y)) all_inputs.append(y) # Typecheck that all inputs are *either* value *or* symbolic. # TODO(fchollet): this check could be removed in Eager mode? if any(tensor_util.is_tensor(v) for v in all_inputs): if not all(tensor_util.is_tensor(v) for v in all_inputs): raise ValueError('Do not pass inputs that mix Numpy arrays and ' 'TensorFlow tensors. ' 'You passed: x=' + str(x) + '; y=' + str(y)) if context.executing_eagerly(): target_tensors = None else: # Handle target tensors if any passed. if not isinstance(y, (list, tuple)): y = [y] target_tensors = [v for v in y if tensor_util.is_tensor(v)] is_compile_called = True self.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics, loss_weights=self.loss_weights, target_tensors=target_tensors) # In graph mode, if we had just set inputs and targets as symbolic tensors # by invoking build and compile on the model respectively, we do not have to # feed anything to the model. Model already has input and target data as # part of the graph. # Note: in this case, `any` and `all` are equivalent since we disallow # mixed symbolic/value inputs. if (not context.executing_eagerly() and is_build_called and is_compile_called and any(tensor_util.is_tensor(v) for v in all_inputs)): return [], [], [] # What follows is input validation and standardization to list format, # in the case where all inputs are value arrays. if context.executing_eagerly(): # In eager mode, do not do shape validation # since the network has no input nodes (placeholders) to be fed. feed_input_names = self.input_names feed_input_shapes = None elif not self._is_graph_network: # Case: symbolic-mode subclassed network. 
Do not do shape validation. feed_input_names = self._feed_input_names feed_input_shapes = None else: # Case: symbolic-mode graph network. # In this case, we run extensive shape validation checks. feed_input_names = self._feed_input_names feed_input_shapes = self._feed_input_shapes # Standardize the inputs. x = training_utils.standardize_input_data( x, feed_input_names, feed_input_shapes, check_batch_axis=False, # Don't enforce the batch size. exception_prefix='input') if y is not None: if not self._is_graph_network: feed_output_names = self._feed_output_names feed_output_shapes = None # Sample weighting not supported in this case. # TODO(fchollet): consider supporting it. feed_sample_weight_modes = [None for _ in self.outputs] else: feed_output_names = self._feed_output_names feed_sample_weight_modes = self._feed_sample_weight_modes feed_output_shapes = [] for output_shape, loss_fn in zip(self._feed_output_shapes, self._feed_loss_fns): if loss_fn is losses.sparse_categorical_crossentropy: if K.image_data_format() == 'channels_first': feed_output_shapes.append( (output_shape[0], 1) + output_shape[2:]) else: feed_output_shapes.append(output_shape[:-1] + (1,)) elif (not hasattr(loss_fn, '__name__') or getattr(losses, loss_fn.__name__, None) is None): # If `loss_fn` is not a function (e.g. callable class) # or if it not in the `losses` module, then # it is a user-defined loss and we make no assumptions # about it. feed_output_shapes.append(None) else: feed_output_shapes.append(output_shape) # Standardize the outputs. y = training_utils.standardize_input_data( y, feed_output_names, feed_output_shapes, check_batch_axis=False, # Don't enforce the batch size. exception_prefix='target') # Generate sample-wise weight values given the `sample_weight` and # `class_weight` arguments. 
sample_weights = training_utils.standardize_sample_weights( sample_weight, feed_output_names) class_weights = training_utils.standardize_class_weights( class_weight, feed_output_names) sample_weights = [ training_utils.standardize_weights(ref, sw, cw, mode) for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights, feed_sample_weight_modes) ] # Check that all arrays have the same length. if not self._distribution_strategy: training_utils.check_array_lengths(x, y, sample_weights) if self._is_graph_network and not context.executing_eagerly(): # Additional checks to avoid users mistakenly using improper loss fns. training_utils.check_loss_and_target_compatibility( y, self._feed_loss_fns, feed_output_shapes) else: y = [] sample_weights = [] if self.stateful and batch_size: # Check that for stateful networks, number of samples is a multiple # of the static batch size. if x[0].shape[0] % batch_size != 0: raise ValueError('In a stateful network, ' 'you should only pass inputs with ' 'a number of samples that can be ' 'divided by the batch size. Found: ' + str(x[0].shape[0]) + ' samples') return x, y, sample_weights @checkpointable.no_automatic_dependency_tracking def _set_inputs(self, inputs, training=None): """Set model's input and output specs based on the input data received. This is to be used for Model subclasses, which do not know at instantiation time what their inputs look like. Args: inputs: Single array, or list of arrays. The arrays could be placeholders, Numpy arrays, or data tensors. - if placeholders: the model is built on top of these placeholders, and we expect Numpy data to be fed for them when calling `fit`/etc. - if Numpy data: we create placeholders matching the shape of the Numpy arrays. We expect Numpy data to be fed for these placeholders when calling `fit`/etc. - if data tensors: the model is built on top of these tensors. We do not expect any Numpy data to be provided when calling `fit`/etc. training: Boolean or None. 
Only relevant in symbolic mode. Specifies whether to build the model's graph in inference mode (False), training mode (True), or using the Keras learning phase (None). """ call_convention = getattr( self, '_call_convention', base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT) if call_convention not in ( base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT, base_layer.CallConvention.SINGLE_POSITIONAL_ARGUMENT): raise NotImplementedError( 'Subclassed Models without "inputs" (or single positional arguments) ' 'in their call() signatures do not yet support shape inference. File ' 'a feature request if this limitation bothers you.') if self.__class__.__name__ == 'Sequential': if tensor_util.is_tensor(inputs): input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:]) self.build(input_shape=input_shape) else: input_shape = (None,) + inputs.shape[1:] self.build(input_shape=input_shape) if context.executing_eagerly(): self._eager_set_inputs(inputs) else: self._symbolic_set_inputs(inputs, training=training) @checkpointable.no_automatic_dependency_tracking def _eager_set_inputs(self, inputs): """Set model's input and output specs based on the input data received. This is to be used for Model subclasses, which do not know at instantiation time what their inputs look like. We assume the number and ndim of outputs does not change over different calls. Args: inputs: Argument `x` (input data) passed by the user upon first model use. Raises: ValueError: If the model's inputs are already set. """ assert context.executing_eagerly() if self.inputs: raise ValueError('Model inputs are already set.') # On-the-fly setting of model inputs/outputs as DeferredTensors, # to keep track of number of inputs and outputs and their ndim. 
if isinstance(inputs, (list, tuple)): if tensor_util.is_tensor(inputs[0]): dummy_output_values = self.call( training_utils.cast_if_floating_dtype(inputs)) else: dummy_output_values = self.call( [ops.convert_to_tensor(v, dtype=K.floatx()) for v in inputs]) dummy_input_values = list(inputs) else: if tensor_util.is_tensor(inputs): dummy_output_values = self.call( training_utils.cast_if_floating_dtype(inputs)) else: dummy_output_values = self.call( ops.convert_to_tensor(inputs, dtype=K.floatx())) dummy_input_values = [inputs] if isinstance(dummy_output_values, (list, tuple)): dummy_output_values = list(dummy_output_values) else: dummy_output_values = [dummy_output_values] self.outputs = [ base_layer.DeferredTensor(shape=(None for _ in v.shape), dtype=v.dtype) for v in dummy_output_values] self.inputs = [ base_layer.DeferredTensor(shape=(None for _ in v.shape), dtype=v.dtype) for v in dummy_input_values] self.input_names = [ 'input_%d' % (i + 1) for i in range(len(dummy_input_values))] self.output_names = [ 'output_%d' % (i + 1) for i in range(len(dummy_output_values))] self.built = True @checkpointable.no_automatic_dependency_tracking def _symbolic_set_inputs(self, inputs, outputs=None, training=None): """Set model's inputs and output specs based. This is to be used for Model subclasses, which do not know at instantiation time what their inputs look like. Args: inputs: Argument `x` (input data) passed by the user upon first model use. outputs: None, a data tensor, or a list of data tensors. If None, the outputs will be determined by invoking self.call(), otherwise the provided value will be used. training: Boolean or None. Only relevant in symbolic mode. Specifies whether to build the model's graph in inference mode (False), training mode (True), or using the Keras learning phase (None). Raises: ValueError: If the model's inputs are already set. 
""" assert not context.executing_eagerly() if self.inputs: raise ValueError('Model inputs are already set.') # On-the-fly setting of symbolic model inputs (either by using the tensor # provided, or by creating a placeholder if Numpy data was provided). self.inputs = [] self.input_names = [] self._feed_inputs = [] self._feed_input_names = [] self._feed_input_shapes = [] if isinstance(inputs, (list, tuple)): inputs = list(inputs) else: inputs = [inputs] for i, v in enumerate(inputs): name = 'input_%d' % (i + 1) self.input_names.append(name) if isinstance(v, list): v = np.asarray(v) if v.ndim == 1: v = np.expand_dims(v, 1) if isinstance(v, (np.ndarray)): # We fix the placeholder shape except the batch size. # This is suboptimal, but it is the best we can do with the info # we have. The user should call `model._set_inputs(placeholders)` # to specify custom placeholders if the need arises. shape = (None,) + v.shape[1:] placeholder = K.placeholder(shape=shape, name=name) self.inputs.append(placeholder) self._feed_inputs.append(placeholder) self._feed_input_names.append(name) self._feed_input_shapes.append(shape) else: # Assumed tensor - TODO(fchollet) additional type check? self.inputs.append(v) if K.is_placeholder(v): self._feed_inputs.append(v) self._feed_input_names.append(name) self._feed_input_shapes.append(K.int_shape(v)) if outputs is None: # Obtain symbolic outputs by calling the model. 
if len(self.inputs) == 1: if self._expects_training_arg: outputs = self.call(self.inputs[0], training=training) else: outputs = self.call(self.inputs[0]) else: if self._expects_training_arg: outputs = self.call(self.inputs, training=training) else: outputs = self.call(self.inputs) if isinstance(outputs, (list, tuple)): outputs = list(outputs) else: outputs = [outputs] self.outputs = outputs self.output_names = [ 'output_%d' % (i + 1) for i in range(len(self.outputs))] self.built = True def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, **kwargs): """Trains the model for a fixed number of epochs (iterations on a dataset). Arguments: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset or a dataset iterator. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset or dataset iterator, `y` should not be specified (since targets will be obtained from the iterator). batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` if your data is in the form of symbolic tensors, datasets, or dataset iterators (since they generate batches). epochs: Integer. Number of epochs to train the model. An epoch is an iteration over the entire `x` and `y` data provided. Note that in conjunction with `initial_epoch`, `epochs` is to be understood as "final epoch". 
The model is not trained for a number of iterations given by `epochs`, but merely until the epoch of index `epochs` is reached. verbose: Integer. 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. callbacks: List of `keras.callbacks.Callback` instances. List of callbacks to apply during training. See [callbacks](/api_docs/python/tf/keras/callbacks). validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data. The model will set apart this fraction of the training data, will not train on it, and will evaluate the loss and any model metrics on this data at the end of each epoch. The validation data is selected from the last samples in the `x` and `y` data provided, before shuffling. This argument is not supported when `x` is a dataset or a dataset iterator. validation_data: Data on which to evaluate the loss and any model metrics at the end of each epoch. The model will not be trained on this data. `validation_data` will override `validation_split`. `validation_data` could be: - tuple `(x_val, y_val)` of Numpy arrays or tensors - tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays - dataset or a dataset iterator shuffle: Boolean (whether to shuffle the training data before each epoch) or str (for 'batch'). 'batch' is a special option for dealing with the limitations of HDF5 data; it shuffles in batch-sized chunks. Has no effect when `steps_per_epoch` is not `None`. class_weight: Optional dictionary mapping class indices (integers) to a weight (float) value, used for weighting the loss function (during training only). This can be useful to tell the model to "pay more attention" to samples from an under-represented class. sample_weight: Optional Numpy array of weights for the training samples, used for weighting the loss function (during training only). 
You can either pass a flat (1D) Numpy array with the same length as the input samples (1:1 mapping between weights and samples), or in the case of temporal data, you can pass a 2D array with shape `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. In this case you should make sure to specify `sample_weight_mode="temporal"` in `compile()`. This argument is not supported when `x` is a dataset or a dataset iterator. initial_epoch: Integer. Epoch at which to start training (useful for resuming a previous training run). steps_per_epoch: Integer or `None`. Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. When training with input tensors such as TensorFlow data tensors, the default `None` is equal to the number of samples in your dataset divided by the batch size, or 1 if that cannot be determined. validation_steps: Only relevant if `steps_per_epoch` is specified. Total number of steps (batches of samples) to validate before stopping. **kwargs: Used for backwards compatibility. Returns: A `History` object. Its `History.history` attribute is a record of training loss values and metrics values at successive epochs, as well as validation loss values and validation metrics values (if applicable). Raises: RuntimeError: If the model was never compiled. ValueError: In case of mismatch between the provided input data and what the model expects. """ # TODO(fchollet): this method may be creating reference cycles, which would # lead to accumulating garbage in memory when called in a loop. Investigate. # Backwards compatibility if batch_size is None and steps_per_epoch is None: batch_size = 32 # Legacy support if 'nb_epoch' in kwargs: logging.warning( 'The `nb_epoch` argument in `fit` ' 'has been renamed `epochs`.') epochs = kwargs.pop('nb_epoch') if kwargs: raise TypeError('Unrecognized keyword arguments: ' + str(kwargs)) # Validate and standardize user data. 
if self._distribution_strategy: distributed_training_utils.validate_callbacks(callbacks) x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, batch_size=batch_size, check_steps=True, steps_name='steps_per_epoch', steps=steps_per_epoch, validation_split=validation_split) # Prepare validation data. if validation_data: if (isinstance(validation_data, iterator_ops.Iterator) or isinstance(validation_data, iterator_ops.EagerIterator) or isinstance(validation_data, dataset_ops.Dataset)): val_x = validation_data val_y = None val_sample_weight = None elif len(validation_data) == 2: val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence val_sample_weight = None elif len(validation_data) == 3: val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence else: raise ValueError( 'When passing a `validation_data` argument, ' 'it must contain either 2 items (x_val, y_val), ' 'or 3 items (x_val, y_val, val_sample_weights), ' 'or alternatively it could be a dataset or a ' 'dataset or a dataset iterator. ' 'However we received `validation_data=%s`' % validation_data) # Validate and standardize validation data. val_x, val_y, val_sample_weights = self._standardize_user_data( val_x, val_y, sample_weight=val_sample_weight, batch_size=batch_size, steps=validation_steps) elif validation_split and 0. < validation_split < 1.: if training_utils.has_symbolic_tensors(x): raise ValueError('If your data is in the form of symbolic tensors, ' 'you cannot use `validation_split`.') if hasattr(x[0], 'shape'): split_at = int(x[0].shape[0] * (1. - validation_split)) else: split_at = int(len(x[0]) * (1. 
- validation_split)) x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at)) y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at)) sample_weights, val_sample_weights = (slice_arrays( sample_weights, 0, split_at), slice_arrays(sample_weights, split_at)) elif validation_steps: val_x = [] val_y = [] val_sample_weights = [] else: val_x = None val_y = None val_sample_weights = None if context.executing_eagerly(): return training_eager.fit_loop( self, inputs=x, targets=y, sample_weights=sample_weights, class_weight=class_weight, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_x, val_targets=val_y, val_sample_weights=val_sample_weights, shuffle=shuffle, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps) elif self._distribution_strategy: return training_distributed.fit_loop( self, x, y, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_x, val_targets=val_y, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps) else: return training_arrays.fit_loop( self, x, y, sample_weights=sample_weights, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_x, val_targets=val_y, val_sample_weights=val_sample_weights, shuffle=shuffle, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps) def evaluate(self, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None): """Returns the loss value & metrics values for the model in test mode. Computation is done in batches. Arguments: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset or a dataset iterator. 
y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset or a dataset iterator, `y` should not be specified (since targets will be obtained from the iterator/dataset). batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` is your data is in the form of symbolic tensors, datasets, or dataset iterators (since they generate batches). verbose: 0 or 1. Verbosity mode. 0 = silent, 1 = progress bar. sample_weight: Optional Numpy array of weights for the test samples, used for weighting the loss function. You can either pass a flat (1D) Numpy array with the same length as the input samples (1:1 mapping between weights and samples), or in the case of temporal data, you can pass a 2D array with shape `(samples, sequence_length)`, to apply a different weight to every timestep of every sample. In this case you should make sure to specify `sample_weight_mode="temporal"` in `compile()`. This argument is not supported when `x` is a dataset or a dataset iterator. steps: Integer or `None`. Total number of steps (batches of samples) before declaring the evaluation round finished. Ignored with the default value of `None`. Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: in case of invalid arguments. """ # Backwards compatibility. if batch_size is None and steps is None: batch_size = 32 # Validate and standardize user data. 
x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, batch_size=batch_size, check_steps=True, steps_name='steps', steps=steps) if context.executing_eagerly(): return training_eager.test_loop( self, inputs=x, targets=y, sample_weights=sample_weights, batch_size=batch_size, verbose=verbose, steps=steps) elif self._distribution_strategy: return training_distributed.test_loop( self, inputs=x, targets=y, verbose=verbose, steps=steps) else: return training_arrays.test_loop( self, inputs=x, targets=y, sample_weights=sample_weights, batch_size=batch_size, verbose=verbose, steps=steps) def predict(self, x, batch_size=None, verbose=0, steps=None): """Generates output predictions for the input samples. Computation is done in batches. Arguments: x: Input samples. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A `tf.data` dataset or a dataset iterator. batch_size: Integer or `None`. Number of samples per gradient update. If unspecified, `batch_size` will default to 32. Do not specify the `batch_size` is your data is in the form of symbolic tensors, dataset, or dataset iterators (since they generate batches). verbose: Verbosity mode, 0 or 1. steps: Total number of steps (batches of samples) before declaring the prediction round finished. Ignored with the default value of `None`. Returns: Numpy array(s) of predictions. Raises: ValueError: In case of mismatch between the provided input data and the model's expectations, or in case a stateful model receives a number of samples that is not a multiple of the batch size. """ # Backwards compatibility. if batch_size is None and steps is None: batch_size = 32 # Validate and standardize user data. 
x, _, _ = self._standardize_user_data( x, check_steps=True, steps_name='steps', steps=steps) if context.executing_eagerly(): return training_eager.predict_loop( self, x, batch_size=batch_size, verbose=verbose, steps=steps) elif self._distribution_strategy: return training_distributed.predict_loop( self, x, verbose=verbose, steps=steps) else: return training_arrays.predict_loop( self, x, batch_size=batch_size, verbose=verbose, steps=steps) def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None): """Runs a single gradient update on a single batch of data. Arguments: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset or a dataset iterator. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset or a dataset iterator, `y` should not be specified (since targets will be obtained from the iterator). sample_weight: Optional array of the same length as x, containing weights to apply to the model's loss for each sample. In the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). This argument is not supported when `x` is a dataset or a dataset iterator. class_weight: Optional dictionary mapping class indices (integers) to a weight (float) to apply to the model's loss for the samples from this class during training. This can be useful to tell the model to "pay more attention" to samples from an under-represented class. 
Returns: Scalar training loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: In case of invalid user-provided arguments. """ if self._distribution_strategy: raise NotImplementedError('`train_on_batch` is not supported for models ' 'compiled with DistributionStrategy.') # Validate and standardize user data. x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight) if context.executing_eagerly(): outputs = training_eager.train_on_batch( self, x, y, sample_weights=sample_weights) else: if self.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = x + y + sample_weights + [1] else: ins = x + y + sample_weights self._make_train_function() outputs = self.train_function(ins) if len(outputs) == 1: return outputs[0] return outputs def test_on_batch(self, x, y=None, sample_weight=None): """Test the model on a single batch of samples. Arguments: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A dict mapping input names to the corresponding array/tensors, if the model has named inputs. - A `tf.data` dataset or a dataset iterator. y: Target data. Like the input data `x`, it could be either Numpy array(s) or TensorFlow tensor(s). It should be consistent with `x` (you cannot have Numpy inputs and tensor targets, or inversely). If `x` is a dataset or a dataset iterator, `y` should not be specified (since targets will be obtained from the iterator). sample_weight: Optional array of the same length as x, containing weights to apply to the model's loss for each sample. 
In the case of temporal data, you can pass a 2D array with shape (samples, sequence_length), to apply a different weight to every timestep of every sample. In this case you should make sure to specify sample_weight_mode="temporal" in compile(). This argument is not supported when `x` is a dataset or a dataset iterator. Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: In case of invalid user-provided arguments. """ if self._distribution_strategy: raise NotImplementedError('`test_on_batch` is not supported for models ' 'compiled with DistributionStrategy.') # Validate and standardize user data. x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight) if context.executing_eagerly(): outputs = training_eager.test_on_batch( self, x, y, sample_weights=sample_weights) else: if self.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = x + y + sample_weights + [0] else: ins = x + y + sample_weights self._make_test_function() outputs = self.test_function(ins) if len(outputs) == 1: return outputs[0] return outputs def predict_on_batch(self, x): """Returns predictions for a single batch of samples. Arguments: x: Input data. It could be: - A Numpy array (or array-like), or a list of arrays (in case the model has multiple inputs). - A TensorFlow tensor, or a list of tensors (in case the model has multiple inputs). - A `tf.data` dataset or a dataset iterator. Returns: Numpy array(s) of predictions. Raises: ValueError: In case of mismatch between given number of inputs and expectations of the model. """ if self._distribution_strategy: raise NotImplementedError('`predict_on_batch` is not supported for ' 'models compiled with DistributionStrategy.') # Validate and standardize user data. 
inputs, _, _ = self._standardize_user_data(x) if context.executing_eagerly(): if (isinstance(x, iterator_ops.EagerIterator) or (isinstance(x, dataset_ops.Dataset) and context.executing_eagerly())): inputs = training_utils.cast_if_floating_dtype(inputs) else: inputs = [ ops.convert_to_tensor(val, dtype=K.floatx()) for val in inputs ] return self(inputs) # pylint: disable=not-callable if not context.executing_eagerly(): if self.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = inputs + [0] else: ins = inputs self._make_predict_function() outputs = self.predict_function(ins) if len(outputs) == 1: return outputs[0] return outputs def fit_generator(self, generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0): """Fits the model on data yielded batch-by-batch by a Python generator. The generator is run in parallel to the model, for efficiency. For instance, this allows you to do real-time data augmentation on images on CPU in parallel to training your model on GPU. The use of `keras.utils.Sequence` guarantees the ordering and guarantees the single use of every input per epoch when using `use_multiprocessing=True`. Arguments: generator: A generator or an instance of `Sequence` (`keras.utils.Sequence`) object in order to avoid duplicate data when using multiprocessing. The output of the generator must be either - a tuple `(inputs, targets)` - a tuple `(inputs, targets, sample_weights)`. This tuple (a single output of the generator) makes a single batch. Therefore, all arrays in this tuple must have the same length (equal to the size of this batch). Different batches may have different sizes. For example, the last batch of the epoch is commonly smaller than the others, if the size of the dataset is not divisible by the batch size. 
The generator is expected to loop over its data indefinitely. An epoch finishes when `steps_per_epoch` batches have been seen by the model. steps_per_epoch: Total number of steps (batches of samples) to yield from `generator` before declaring one epoch finished and starting the next epoch. It should typically be equal to the number of samples of your dataset divided by the batch size. Optional for `Sequence`: if unspecified, will use the `len(generator)` as a number of steps. epochs: Integer, total number of iterations on the data. verbose: Verbosity mode, 0, 1, or 2. callbacks: List of callbacks to be called during training. validation_data: This can be either - a generator for the validation data - a tuple (inputs, targets) - a tuple (inputs, targets, sample_weights). validation_steps: Only relevant if `validation_data` is a generator. Total number of steps (batches of samples) to yield from `generator` before stopping. Optional for `Sequence`: if unspecified, will use the `len(validation_data)` as a number of steps. class_weight: Dictionary mapping class indices to a weight for the class. max_queue_size: Integer. Maximum size for the generator queue. If unspecified, `max_queue_size` will default to 10. workers: Integer. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. shuffle: Boolean. Whether to shuffle the order of the batches at the beginning of each epoch. Only used with instances of `Sequence` (`keras.utils.Sequence`). Has no effect when `steps_per_epoch` is not `None`. 
initial_epoch: Epoch at which to start training (useful for resuming a previous training run) Returns: A `History` object. Example: ```python def generate_arrays_from_file(path): while 1: f = open(path) for line in f: # create numpy arrays of input data # and labels, from each line in the file x1, x2, y = process_line(line) yield ({'input_1': x1, 'input_2': x2}, {'output': y}) f.close() model.fit_generator(generate_arrays_from_file('/my_file.txt'), steps_per_epoch=10000, epochs=10) ``` Raises: ValueError: In case the generator yields data in an invalid format. """ if self._distribution_strategy: raise NotImplementedError('`fit_generator` is not supported for ' 'models compiled with DistributionStrategy.') if not self.built and not self._is_graph_network: raise NotImplementedError( '`fit_generator` is not yet enabled for unbuilt Model subclasses') return training_generator.fit_generator( self, generator, steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps, class_weight=class_weight, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, shuffle=shuffle, initial_epoch=initial_epoch) def evaluate_generator(self, generator, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0): """Evaluates the model on a data generator. The generator should return the same kind of data as accepted by `test_on_batch`. Arguments: generator: Generator yielding tuples (inputs, targets) or (inputs, targets, sample_weights) or an instance of Sequence (keras.utils.Sequence) object in order to avoid duplicate data when using multiprocessing. steps: Total number of steps (batches of samples) to yield from `generator` before stopping. Optional for `Sequence`: if unspecified, will use the `len(generator)` as a number of steps. max_queue_size: maximum size for the generator queue workers: Integer. 
Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. verbose: Verbosity mode, 0 or 1. Returns: Scalar test loss (if the model has a single output and no metrics) or list of scalars (if the model has multiple outputs and/or metrics). The attribute `model.metrics_names` will give you the display labels for the scalar outputs. Raises: ValueError: in case of invalid arguments. Raises: ValueError: In case the generator yields data in an invalid format. """ if self._distribution_strategy: raise NotImplementedError('`evaluate_generator` is not supported for ' 'models compiled with DistributionStrategy.') if not self.built and not self._is_graph_network: raise NotImplementedError( '`evaluate_generator` is not yet enabled for ' 'unbuilt Model subclasses') return training_generator.evaluate_generator( self, generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose) def predict_generator(self, generator, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0): """Generates predictions for the input samples from a data generator. The generator should return the same kind of data as accepted by `predict_on_batch`. Arguments: generator: Generator yielding batches of input samples or an instance of Sequence (keras.utils.Sequence) object in order to avoid duplicate data when using multiprocessing. steps: Total number of steps (batches of samples) to yield from `generator` before stopping. 
Optional for `Sequence`: if unspecified, will use the `len(generator)` as a number of steps. max_queue_size: Maximum size for the generator queue. workers: Integer. Maximum number of processes to spin up when using process-based threading. If unspecified, `workers` will default to 1. If 0, will execute the generator on the main thread. use_multiprocessing: Boolean. If `True`, use process-based threading. If unspecified, `use_multiprocessing` will default to `False`. Note that because this implementation relies on multiprocessing, you should not pass non-picklable arguments to the generator as they can't be passed easily to children processes. verbose: verbosity mode, 0 or 1. Returns: Numpy array(s) of predictions. Raises: ValueError: In case the generator yields data in an invalid format. """ if self._distribution_strategy: raise NotImplementedError('`predict_generator` is not supported for ' 'models compiled with DistributionStrategy.') if not self.built and not self._is_graph_network: raise NotImplementedError( '`predict_generator` is not yet enabled for unbuilt Model subclasses') return training_generator.predict_generator( self, generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose) def _get_callback_model(self): """Returns the Callback Model for this Model.""" if hasattr(self, '_replicated_model') and self._replicated_model: # When using training_distributed, we set the callback model # to an instance of the `DistributedModel` that we create in # the `compile` call. The `DistributedModel` is initialized # with the first replicated model. We need to set the callback # model to a DistributedModel to allow us to override saving # and loading weights when we checkpoint the model during training. 
return self._replicated_model if hasattr(self, 'callback_model') and self.callback_model: return self.callback_model return self class DistributedCallbackModel(Model): """Model that is used for callbacks with DistributionStrategy.""" def __init__(self, model): super(DistributedCallbackModel, self).__init__() # TODO(anjalisridhar): Right now the only attributes set are the layer and # weights. We may need to set additional attributes as needed since we have # not called compile on this model. def set_original_model(self, orig_model): self._original_model = orig_model def save_weights(self, filepath, overwrite=True, save_format=None): self._replicated_model.save_weights(filepath, overwrite=overwrite, save_format=save_format) def save(self, filepath, overwrite=True, include_optimizer=True): # save weights from the distributed model to the original model distributed_model_weights = self.get_weights() self._original_model.set_weights(distributed_model_weights) # TODO(anjalisridhar): Do we need to save the original model here? # Saving the first replicated model works as well. self._original_model.save(filepath, overwrite=True, include_optimizer=False) def load_weights(self, filepath, by_name=False): self._original_model.load_weights(filepath, by_name=False) # Copy the weights from the original model to each of the replicated models. orig_model_weights = self._original_model.get_weights() distributed_training_utils.set_weights( self._original_model._distribution_strategy, self, # pylint: disable=protected-access orig_model_weights) def __getattr__(self, item): # Whitelisted atttributes of the model that can be accessed by the user # during a callback. if item not in ['_setattr_tracking']: logging.warning('You are accessing attribute ' + item + 'of the' 'DistributedCallbackModel that may not have been set' 'correctly.')
42.943993
99
0.640727
from __future__ import absolute_import from __future__ import division from __future__ import print_function import weakref import numpy as np from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.eager import context from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.keras import backend as K from tensorflow.python.keras import losses from tensorflow.python.keras import metrics as metrics_module from tensorflow.python.keras import optimizers from tensorflow.python.keras.engine import base_layer from tensorflow.python.keras.engine import distributed_training_utils from tensorflow.python.keras.engine import training_arrays from tensorflow.python.keras.engine import training_distributed from tensorflow.python.keras.engine import training_eager from tensorflow.python.keras.engine import training_generator from tensorflow.python.keras.engine import training_utils from tensorflow.python.keras.engine.network import Network from tensorflow.python.keras.utils.generic_utils import slice_arrays from tensorflow.python.ops import math_ops from tensorflow.python.ops import weights_broadcast_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import optimizer as tf_optimizer_module from tensorflow.python.training.checkpointable import base as checkpointable from tensorflow.python.util.tf_export import tf_export @tf_export('keras.models.Model', 'keras.Model') class Model(Network): def __init__(self, *args, **kwargs): super(Model, self).__init__(*args, **kwargs) self._iterator_get_next = weakref.WeakKeyDictionary() self._dataset_iterator_cache = weakref.WeakKeyDictionary() self._distribution_strategy = None def _set_sample_weight_attributes(self, sample_weight_mode, skip_target_weighing_indices): sample_weights, sample_weight_modes = 
training_utils.prepare_sample_weights( self.output_names, sample_weight_mode, skip_target_weighing_indices) self.sample_weights = sample_weights self.sample_weight_modes = sample_weight_modes self._feed_sample_weight_modes = [ sample_weight_modes[i] for i in range(len(self.outputs)) if i not in skip_target_weighing_indices ] self._feed_sample_weights = [ sample_weights[i] for i in range(len(sample_weights)) if i not in skip_target_weighing_indices ] def _get_metric_name(self, metric, output_index, weighted=False): metric_name_prefix = 'weighted_' if weighted else '' if metric in ('accuracy', 'acc', 'crossentropy', 'ce'): if metric in ('accuracy', 'acc'): suffix = 'acc' elif metric in ('crossentropy', 'ce'): suffix = 'ce' else: metric_fn = metrics_module.get(metric) if hasattr(metric_fn, 'name'): suffix = metric_fn.name else: suffix = metric_fn.__name__ metric_name = metric_name_prefix + suffix if len(self.output_names) > 1: metric_name = '%s_%s' % (self.output_names[output_index], metric_name) j = 1 base_metric_name = metric_name while metric_name in self.metrics_names: metric_name = '%s_%d' % (base_metric_name, j) j += 1 return metric_name def _handle_per_output_metrics(self, metrics, y_true, y_pred, output_index, output_shape, loss_fn, mask, weights=None): metric_results = [] for metric in metrics: metric_fn = training_utils.get_metric_function( metric, output_shape=output_shape, loss_fn=loss_fn) metric_name = self._get_metric_name( metric, output_index, weighted=weights is not None) with K.name_scope(metric_name): if y_true is not None and y_pred is not None: if isinstance(metric_fn, metrics_module.Metric): if mask is not None: mask = math_ops.cast(mask, y_pred.dtype) if weights is None: weights = mask else: mask, _, weights = metrics_module.squeeze_or_expand_dimensions( mask, None, weights) try: weights = weights_broadcast_ops.broadcast_weights( weights, mask) except ValueError: pass weights *= mask metric_result = metric_fn(y_true, y_pred, weights) else: 
weighted_metric_fn = training_utils.weighted_masked_objective( metric_fn) metric_result = weighted_metric_fn( y_true, y_pred, weights=weights, mask=mask) if not context.executing_eagerly(): self.metrics_tensors.append(metric_result) metric_results.append(metric_result) self.metrics_names.append(metric_name) if isinstance(metric_fn, base_layer.Layer) and metric_fn.stateful: self.stateful_metric_names.append(metric_name) self.stateful_metric_functions.append(metric_fn) if not context.executing_eagerly(): self.metrics_updates += metric_fn.updates return metric_results def _handle_metrics(self, outputs, skip_target_indices=None, targets=None, sample_weights=None, masks=None): skip_target_indices = skip_target_indices or [] metric_results = [] with K.name_scope('metrics'): for i in range(len(outputs)): if i in skip_target_indices: continue output = outputs[i] if outputs else None target = targets[i] if targets else None output_shape = None if output is None else output.get_shape().as_list() output_mask = masks[i] if masks else None metric_results.extend( self._handle_per_output_metrics( self.nested_metrics[i], target, output, i, output_shape, self.loss_functions[i], output_mask)) metric_results.extend( self._handle_per_output_metrics( self.nested_weighted_metrics[i], target, output, i, output_shape, self.loss_functions[i], output_mask, weights=sample_weights[i])) return metric_results @checkpointable.no_automatic_dependency_tracking def compile(self, optimizer, loss=None, metrics=None, loss_weights=None, sample_weight_mode=None, weighted_metrics=None, target_tensors=None, distribute=None, **kwargs): if distribute and not isinstance( optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)): raise NotImplementedError('Only TF native optimizers are supported with ' 'DistributionStrategy.') if distribute and context.executing_eagerly(): raise NotImplementedError('DistributionStrategy is not supported in ' 'Eager mode.') if distribute and sample_weight_mode: raise 
NotImplementedError('sample_weight_mode is not supported with ' 'DistributionStrategy.') if distribute and weighted_metrics: raise NotImplementedError('weighted_metrics is not supported with ' 'DistributionStrategy.') if distribute and target_tensors: raise ValueError('target_tensors is not supported with ' 'DistributionStrategy.') loss = loss or {} if context.executing_eagerly() and not isinstance( optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)): raise ValueError('Only TF native optimizers are supported in Eager mode.') self.optimizer = optimizers.get(optimizer) # to add a checkpoint dependency on the optimizer if it's checkpointable. if isinstance(self.optimizer, checkpointable.CheckpointableBase): self._track_checkpointable( self.optimizer, name='optimizer', overwrite=True) self.loss = loss self.metrics = metrics or [] self.loss_weights = loss_weights self.sample_weight_mode = sample_weight_mode self.weighted_metrics = weighted_metrics if context.executing_eagerly() and target_tensors is not None: raise ValueError('target_tensors is not supported in Eager mode.') self.target_tensors = target_tensors self._distribution_strategy = distribute if self._distribution_strategy is not None: self._grouped_model = self._compile_distributed_model( self._distribution_strategy) with self._distribution_strategy.scope(): first_replicated_model = self._distribution_strategy.unwrap( self._grouped_model)[0] if first_replicated_model.stateful_metric_names: raise NotImplementedError('Stateful metrics are not supported with ' 'DistributionStrategy.') # We initialize the callback model with the first replicated model. self._replicated_model = DistributedCallbackModel(first_replicated_model) self._replicated_model.set_original_model(self) if not self.built: # Model is not compilable because it does not know its number of inputs # and outputs, nor their shapes and names. We will compile after the first # time the model gets called on training data. 
return self._is_compiled = True # Prepare loss functions. if isinstance(loss, dict): for name in loss: if name not in self.output_names: raise ValueError( 'Unknown entry in loss ' 'dictionary: "' + name + '". ' 'Only expected the following keys: ' + str(self.output_names)) loss_functions = [] for name in self.output_names: if name not in loss: logging.warning( 'Output "' + name + '" missing from loss dictionary. We assume ' 'this was done on purpose. The fit and evaluate APIs will not be ' 'expecting any data to be passed to "' + name + '".') loss_functions.append(losses.get(loss.get(name))) elif isinstance(loss, list): if len(loss) != len(self.outputs): raise ValueError('When passing a list as loss, ' 'it should have one entry per model outputs. ' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed loss=' + str(loss)) loss_functions = [losses.get(l) for l in loss] else: loss_function = losses.get(loss) loss_functions = [loss_function for _ in range(len(self.outputs))] self.loss_functions = loss_functions weighted_losses = [training_utils.weighted_masked_objective(fn) for fn in loss_functions] skip_target_indices = [] skip_target_weighing_indices = [] self._feed_outputs = [] self._feed_output_names = [] self._feed_output_shapes = [] self._feed_loss_fns = [] for i in range(len(weighted_losses)): if weighted_losses[i] is None: skip_target_indices.append(i) skip_target_weighing_indices.append(i) # Prepare output masks. if not context.executing_eagerly(): masks = [getattr(x, '_keras_mask', None) for x in self.outputs] if not isinstance(masks, list): masks = [masks] # Prepare loss weights. if loss_weights is None: loss_weights_list = [1. for _ in range(len(self.outputs))] elif isinstance(loss_weights, dict): for name in loss_weights: if name not in self.output_names: raise ValueError( 'Unknown entry in loss_weights ' 'dictionary: "' + name + '". 
' 'Only expected the following keys: ' + str(self.output_names)) loss_weights_list = [] for name in self.output_names: loss_weights_list.append(loss_weights.get(name, 1.)) elif isinstance(loss_weights, list): if len(loss_weights) != len(self.outputs): raise ValueError( 'When passing a list as loss_weights, ' 'it should have one entry per model output. ' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed loss_weights=' + str(loss_weights)) loss_weights_list = loss_weights else: raise TypeError('Could not interpret loss_weights argument: ' + str(loss_weights) + ' - expected a list of dicts.') self.loss_weights_list = loss_weights_list # Initialize model metric attributes. self.metrics_names = ['loss'] self.metrics_tensors = [] self.metrics_updates = [] self.stateful_metric_names = [] self.stateful_metric_functions = [] # Nested metrics is a list of list of metrics. # One list per output of the model. self.nested_metrics = training_utils.collect_metrics( metrics, self.output_names) self.nested_weighted_metrics = training_utils.collect_metrics( weighted_metrics, self.output_names) # Initialization for Eager mode execution. if context.executing_eagerly(): # Prepare sample weights. self._set_sample_weight_attributes(sample_weight_mode, skip_target_weighing_indices) if target_tensors is not None: raise ValueError('target_tensors are not currently supported in Eager ' 'mode.') self.total_loss = None for i in range(len(self.outputs)): if len(self.outputs) > 1: self.metrics_names.append(self.output_names[i] + '_loss') # Set metric attributes on model. self._handle_metrics( self.outputs, skip_target_indices=skip_target_indices, sample_weights=self.sample_weights) self.targets = [] for i in range(len(self.outputs)): self._feed_output_names.append(self.output_names[i]) self._collected_trainable_weights = self.trainable_weights return # Prepare targets of model. 
self.targets = [] self._feed_targets = [] if target_tensors not in (None, []): if isinstance(target_tensors, list): if len(target_tensors) != len(self.outputs): raise ValueError( 'When passing a list as `target_tensors`, ' 'it should have one entry per model output. ' 'The model has ' + str(len(self.outputs)) + ' outputs, but you passed target_tensors=' + str(target_tensors)) elif isinstance(target_tensors, dict): for name in target_tensors: if name not in self.output_names: raise ValueError( 'Unknown entry in `target_tensors` ' 'dictionary: "' + name + '". ' 'Only expected the following keys: ' + str(self.output_names)) tmp_target_tensors = [] for name in self.output_names: tmp_target_tensors.append(target_tensors.get(name, None)) target_tensors = tmp_target_tensors else: raise TypeError('Expected `target_tensors` to be ' 'a list or dict, but got:', target_tensors) for i in range(len(self.outputs)): if i in skip_target_indices: self.targets.append(None) else: shape = K.int_shape(self.outputs[i]) name = self.output_names[i] if target_tensors not in (None, []): target = target_tensors[i] else: target = None if target is None or K.is_placeholder(target): if target is None: target = K.placeholder( ndim=len(shape), name=name + '_target', sparse=K.is_sparse(self.outputs[i]), dtype=K.dtype(self.outputs[i])) self._feed_targets.append(target) self._feed_outputs.append(self.outputs[i]) self._feed_output_names.append(name) self._feed_output_shapes.append(shape) self._feed_loss_fns.append(self.loss_functions[i]) else: skip_target_weighing_indices.append(i) self.targets.append(target) # Prepare sample weights. self._set_sample_weight_attributes(sample_weight_mode, skip_target_weighing_indices) # Compute total loss. 
total_loss = None with K.name_scope('loss'): for i in range(len(self.outputs)): if i in skip_target_indices: continue y_true = self.targets[i] y_pred = self.outputs[i] weighted_loss = weighted_losses[i] sample_weight = self.sample_weights[i] mask = masks[i] loss_weight = loss_weights_list[i] with K.name_scope(self.output_names[i] + '_loss'): output_loss = weighted_loss(y_true, y_pred, sample_weight, mask) if len(self.outputs) > 1: self.metrics_tensors.append(output_loss) self.metrics_names.append(self.output_names[i] + '_loss') if total_loss is None: total_loss = loss_weight * output_loss else: total_loss += loss_weight * output_loss if total_loss is None: if not self.losses: raise ValueError('The model cannot be compiled ' 'because it has no loss to optimize.') else: total_loss = 0. # Add regularization penalties # and other layer-specific losses. for loss_tensor in self.losses: total_loss += loss_tensor # Invoke metric functions for all the outputs. self._handle_metrics( self.outputs, masks=masks, targets=self.targets, skip_target_indices=skip_target_indices, sample_weights=self.sample_weights) # Prepare gradient updates and state updates. self.total_loss = total_loss # Functions for train, test and predict will # be compiled lazily when required. # This saves time when the user is not using all functions. self._function_kwargs = kwargs self.train_function = None self.test_function = None self.predict_function = None # Collected trainable weights, sorted in topological order. trainable_weights = self.trainable_weights self._collected_trainable_weights = trainable_weights def _compile_distributed_model(self, distribution_strategy): # TODO(anjalisridhar): Can we move the clone_and_build_model to outside the # model? def _clone_model_per_tower(model): new_model = training_distributed.clone_and_build_model(model) return new_model with distribution_strategy.scope(): # Create a copy of this model on each of the devices. 
grouped_models = distribution_strategy.call_for_each_tower( _clone_model_per_tower, self) return grouped_models def _check_trainable_weights_consistency(self): if not hasattr(self, '_collected_trainable_weights'): return if len(self.trainable_weights) != len(self._collected_trainable_weights): logging.warning( UserWarning( 'Discrepancy between trainable weights and collected trainable' ' weights, did you set `model.trainable` without calling' ' `model.compile` after ?')) def _make_train_function(self): if not hasattr(self, 'train_function'): raise RuntimeError('You must compile your model before using it.') self._check_trainable_weights_consistency() if self.train_function is None: inputs = (self._feed_inputs + self._feed_targets + self._feed_sample_weights) if self.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs += [K.learning_phase()] with K.name_scope('training'): with K.name_scope(self.optimizer.__class__.__name__): # Training updates updates = self.optimizer.get_updates( params=self._collected_trainable_weights, loss=self.total_loss) # Unconditional updates updates += self.get_updates_for(None) # Conditional updates relevant to this model updates += self.get_updates_for(self.inputs) # Stateful metrics updates updates += self.metrics_updates # Gets loss and metrics. Updates weights at each call. self.train_function = K.function( inputs, [self.total_loss] + self.metrics_tensors, updates=updates, name='train_function', **self._function_kwargs) def _make_test_function(self): if not hasattr(self, 'test_function'): raise RuntimeError('You must compile your model before using it.') if self.test_function is None: inputs = (self._feed_inputs + self._feed_targets + self._feed_sample_weights) if self.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs += [K.learning_phase()] # Return loss and metrics, no gradient updates. # Does update the network states. 
self.test_function = K.function( inputs, [self.total_loss] + self.metrics_tensors, updates=self.state_updates + self.metrics_updates, name='test_function', **self._function_kwargs) def _make_predict_function(self): if not hasattr(self, 'predict_function'): self.predict_function = None if self.predict_function is None: if self.uses_learning_phase and not isinstance(K.learning_phase(), int): inputs = self._feed_inputs + [K.learning_phase()] else: inputs = self._feed_inputs # Gets network outputs. Does not update weights. # Does update the network states. kwargs = getattr(self, '_function_kwargs', {}) self.predict_function = K.function( inputs, self.outputs, updates=self.state_updates, name='predict_function', **kwargs) def _get_iterator_get_next_tensors(self, iterator): get_next_op = self._iterator_get_next.get(iterator, None) if get_next_op is None: get_next_op = iterator.get_next() self._iterator_get_next[iterator] = get_next_op return get_next_op def _distribution_standardize_user_data(self, x, y=None, sample_weight=None, class_weight=None, batch_size=None, check_steps=False, steps_name='steps', steps=None, validation_split=0): if sample_weight is not None and sample_weight.all(): raise NotImplementedError('sample_weight is currently not supported when ' 'using DistributionStrategy.') if class_weight: raise NotImplementedError('class_weight is currently not supported when ' 'using DistributionStrategy.') # TODO(anjalisridhar): Can we use the iterator and getnext op cache? # We require users to pass Datasets since we distribute the dataset across # multiple devices. if not isinstance(x, dataset_ops.Dataset): raise ValueError('When using DistributionStrategy you must specify a ' 'Dataset object instead of a %s.' % type(x)) # TODO(anjalisridhar): We want distribute_dataset() to accept a Dataset or a # function which returns a Dataset. Currently distribute_dataset() only # accepts a function that returns a Dataset. 
Once we add support for being # able to clone a Dataset on multiple workers we can remove this lambda. result = self._distribution_strategy.distribute_dataset(lambda: x) iterator = result.make_initializable_iterator() K.get_session().run(iterator.initializer) # Validates `steps` argument based on x's type. if check_steps: if steps is None: raise ValueError('When using a Dataset instance as input to a model, ' 'you should specify the `{steps_name}` argument.' .format(steps_name=steps_name)) training_utils.validate_iterator_input(x, y, sample_weight, validation_split) next_element = iterator.get_next() if not isinstance(next_element, (list, tuple)) or len(next_element) != 2: raise ValueError('Please provide data as a list or tuple of 2 elements ' ' - input and target pair. Received %s' % next_element) x, y = next_element with self._distribution_strategy.scope(): x_values, y_values = distributed_training_utils.\ validate_distributed_dataset_inputs(self._distribution_strategy, x, y) _, _, sample_weights = self._standardize_weights(x_values, y_values, sample_weight, class_weight, batch_size) return x, y, sample_weights def _standardize_user_data(self, x, y=None, sample_weight=None, class_weight=None, batch_size=None, check_steps=False, steps_name='steps', steps=None, validation_split=0): if self._distribution_strategy: return self._distribution_standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, batch_size=batch_size, check_steps=check_steps, steps_name=steps_name, steps=steps, validation_split=validation_split) if isinstance(x, dataset_ops.Dataset): if context.executing_eagerly(): x = x.make_one_shot_iterator() else: if x in self._dataset_iterator_cache: x = self._dataset_iterator_cache[x] else: iterator = x.make_initializable_iterator() self._dataset_iterator_cache[x] = iterator x = iterator K.get_session().run(x.initializer) if check_steps: training_utils.check_steps_argument(x, steps, steps_name) is_x_eager_iterator = isinstance(x, 
iterator_ops.EagerIterator) is_x_iterator = isinstance(x, iterator_ops.Iterator) # Validate user inputs when data is given as a dataset or dataset iterator. if is_x_iterator or is_x_eager_iterator: training_utils.validate_iterator_input(x, y, sample_weight, validation_split) # For eager iterators, when we have to process multiple batches of samples, # we will standardize the data when we actually loop over iterator and get # the batches. For now, we just return the iterator as is. if is_x_eager_iterator and steps is not None: return x, y, sample_weight # If input data is a dataset iterator in graph mode or if it is an eager # iterator and only one batch of samples is required, we fetch the data # tensors from the iterator and then standardize them. if is_x_iterator or is_x_eager_iterator: try: if is_x_iterator: next_element = self._get_iterator_get_next_tensors(x) else: next_element = x.get_next() except errors.OutOfRangeError: raise RuntimeError('Your dataset iterator ran out of data; ' 'Make sure that your dataset can generate ' 'required number of samples.') if not isinstance(next_element, (list, tuple)) or len(next_element) != 2: raise ValueError('Please provide data as a list or tuple of 2 elements ' ' - input and target pair. Received %s' % next_element) x, y = next_element x, y, sample_weights = self._standardize_weights(x, y, sample_weight, class_weight, batch_size) return x, y, sample_weights def _standardize_weights(self, x, y, sample_weight=None, class_weight=None, batch_size=None,): # First, we build/compile the model on the fly if necessary. all_inputs = [] is_build_called = False is_compile_called = False if not self.inputs: # We need to use `x` to set the model inputs. # We type-check that `x` and `y` are either single arrays # or lists of arrays. if isinstance(x, (list, tuple)): if not all(isinstance(v, np.ndarray) or tensor_util.is_tensor(v) for v in x): raise ValueError('Please provide as model inputs either a single ' 'array or a list of arrays. 
You passed: x=' + str(x)) all_inputs += list(x) elif isinstance(x, dict): raise ValueError('Please do not pass a dictionary as model inputs.') else: if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x): raise ValueError('Please provide as model inputs either a single ' 'array or a list of arrays. You passed: x=' + str(x)) all_inputs.append(x) # Build the model using the retrieved inputs (value or symbolic). # If values, then in symbolic-mode placeholders will be created # to match the value shapes. if not self.inputs: is_build_called = True self._set_inputs(x) if y is not None: if not self.optimizer: raise RuntimeError('You must compile a model before ' 'training/testing. ' 'Use `model.compile(optimizer, loss)`.') if not self._is_compiled: # On-the-fly compilation of the model. # We need to use `y` to set the model targets. if isinstance(y, (list, tuple)): if not all(isinstance(v, np.ndarray) or tensor_util.is_tensor(v) for v in y): raise ValueError('Please provide as model targets either a single ' 'array or a list of arrays. ' 'You passed: y=' + str(y)) all_inputs += list(y) elif isinstance(y, dict): raise ValueError('Please do not pass a dictionary as model targets.') else: if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y): raise ValueError('Please provide as model targets either a single ' 'array or a list of arrays. ' 'You passed: y=' + str(y)) all_inputs.append(y) # Typecheck that all inputs are *either* value *or* symbolic. # TODO(fchollet): this check could be removed in Eager mode? if any(tensor_util.is_tensor(v) for v in all_inputs): if not all(tensor_util.is_tensor(v) for v in all_inputs): raise ValueError('Do not pass inputs that mix Numpy arrays and ' 'TensorFlow tensors. ' 'You passed: x=' + str(x) + '; y=' + str(y)) if context.executing_eagerly(): target_tensors = None else: # Handle target tensors if any passed. 
if not isinstance(y, (list, tuple)): y = [y] target_tensors = [v for v in y if tensor_util.is_tensor(v)] is_compile_called = True self.compile(optimizer=self.optimizer, loss=self.loss, metrics=self.metrics, loss_weights=self.loss_weights, target_tensors=target_tensors) # In graph mode, if we had just set inputs and targets as symbolic tensors # by invoking build and compile on the model respectively, we do not have to # feed anything to the model. Model already has input and target data as # part of the graph. # Note: in this case, `any` and `all` are equivalent since we disallow # mixed symbolic/value inputs. if (not context.executing_eagerly() and is_build_called and is_compile_called and any(tensor_util.is_tensor(v) for v in all_inputs)): return [], [], [] # What follows is input validation and standardization to list format, # in the case where all inputs are value arrays. if context.executing_eagerly(): # In eager mode, do not do shape validation # since the network has no input nodes (placeholders) to be fed. feed_input_names = self.input_names feed_input_shapes = None elif not self._is_graph_network: # Case: symbolic-mode subclassed network. Do not do shape validation. feed_input_names = self._feed_input_names feed_input_shapes = None else: # Case: symbolic-mode graph network. # In this case, we run extensive shape validation checks. feed_input_names = self._feed_input_names feed_input_shapes = self._feed_input_shapes # Standardize the inputs. x = training_utils.standardize_input_data( x, feed_input_names, feed_input_shapes, check_batch_axis=False, # Don't enforce the batch size. 
exception_prefix='input') if y is not None: if not self._is_graph_network: feed_output_names = self._feed_output_names feed_output_shapes = None feed_sample_weight_modes = [None for _ in self.outputs] else: feed_output_names = self._feed_output_names feed_sample_weight_modes = self._feed_sample_weight_modes feed_output_shapes = [] for output_shape, loss_fn in zip(self._feed_output_shapes, self._feed_loss_fns): if loss_fn is losses.sparse_categorical_crossentropy: if K.image_data_format() == 'channels_first': feed_output_shapes.append( (output_shape[0], 1) + output_shape[2:]) else: feed_output_shapes.append(output_shape[:-1] + (1,)) elif (not hasattr(loss_fn, '__name__') or getattr(losses, loss_fn.__name__, None) is None): feed_output_shapes.append(None) else: feed_output_shapes.append(output_shape) y = training_utils.standardize_input_data( y, feed_output_names, feed_output_shapes, check_batch_axis=False, exception_prefix='target') # Generate sample-wise weight values given the `sample_weight` and # `class_weight` arguments. sample_weights = training_utils.standardize_sample_weights( sample_weight, feed_output_names) class_weights = training_utils.standardize_class_weights( class_weight, feed_output_names) sample_weights = [ training_utils.standardize_weights(ref, sw, cw, mode) for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights, feed_sample_weight_modes) ] # Check that all arrays have the same length. if not self._distribution_strategy: training_utils.check_array_lengths(x, y, sample_weights) if self._is_graph_network and not context.executing_eagerly(): # Additional checks to avoid users mistakenly using improper loss fns. training_utils.check_loss_and_target_compatibility( y, self._feed_loss_fns, feed_output_shapes) else: y = [] sample_weights = [] if self.stateful and batch_size: # Check that for stateful networks, number of samples is a multiple # of the static batch size. 
if x[0].shape[0] % batch_size != 0: raise ValueError('In a stateful network, ' 'you should only pass inputs with ' 'a number of samples that can be ' 'divided by the batch size. Found: ' + str(x[0].shape[0]) + ' samples') return x, y, sample_weights @checkpointable.no_automatic_dependency_tracking def _set_inputs(self, inputs, training=None): call_convention = getattr( self, '_call_convention', base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT) if call_convention not in ( base_layer.CallConvention.EXPLICIT_INPUTS_ARGUMENT, base_layer.CallConvention.SINGLE_POSITIONAL_ARGUMENT): raise NotImplementedError( 'Subclassed Models without "inputs" (or single positional arguments) ' 'in their call() signatures do not yet support shape inference. File ' 'a feature request if this limitation bothers you.') if self.__class__.__name__ == 'Sequential': if tensor_util.is_tensor(inputs): input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:]) self.build(input_shape=input_shape) else: input_shape = (None,) + inputs.shape[1:] self.build(input_shape=input_shape) if context.executing_eagerly(): self._eager_set_inputs(inputs) else: self._symbolic_set_inputs(inputs, training=training) @checkpointable.no_automatic_dependency_tracking def _eager_set_inputs(self, inputs): assert context.executing_eagerly() if self.inputs: raise ValueError('Model inputs are already set.') # On-the-fly setting of model inputs/outputs as DeferredTensors, # to keep track of number of inputs and outputs and their ndim. 
if isinstance(inputs, (list, tuple)): if tensor_util.is_tensor(inputs[0]): dummy_output_values = self.call( training_utils.cast_if_floating_dtype(inputs)) else: dummy_output_values = self.call( [ops.convert_to_tensor(v, dtype=K.floatx()) for v in inputs]) dummy_input_values = list(inputs) else: if tensor_util.is_tensor(inputs): dummy_output_values = self.call( training_utils.cast_if_floating_dtype(inputs)) else: dummy_output_values = self.call( ops.convert_to_tensor(inputs, dtype=K.floatx())) dummy_input_values = [inputs] if isinstance(dummy_output_values, (list, tuple)): dummy_output_values = list(dummy_output_values) else: dummy_output_values = [dummy_output_values] self.outputs = [ base_layer.DeferredTensor(shape=(None for _ in v.shape), dtype=v.dtype) for v in dummy_output_values] self.inputs = [ base_layer.DeferredTensor(shape=(None for _ in v.shape), dtype=v.dtype) for v in dummy_input_values] self.input_names = [ 'input_%d' % (i + 1) for i in range(len(dummy_input_values))] self.output_names = [ 'output_%d' % (i + 1) for i in range(len(dummy_output_values))] self.built = True @checkpointable.no_automatic_dependency_tracking def _symbolic_set_inputs(self, inputs, outputs=None, training=None): assert not context.executing_eagerly() if self.inputs: raise ValueError('Model inputs are already set.') # On-the-fly setting of symbolic model inputs (either by using the tensor # provided, or by creating a placeholder if Numpy data was provided). self.inputs = [] self.input_names = [] self._feed_inputs = [] self._feed_input_names = [] self._feed_input_shapes = [] if isinstance(inputs, (list, tuple)): inputs = list(inputs) else: inputs = [inputs] for i, v in enumerate(inputs): name = 'input_%d' % (i + 1) self.input_names.append(name) if isinstance(v, list): v = np.asarray(v) if v.ndim == 1: v = np.expand_dims(v, 1) if isinstance(v, (np.ndarray)): # We fix the placeholder shape except the batch size. 
# This is suboptimal, but it is the best we can do with the info # we have. The user should call `model._set_inputs(placeholders)` # to specify custom placeholders if the need arises. shape = (None,) + v.shape[1:] placeholder = K.placeholder(shape=shape, name=name) self.inputs.append(placeholder) self._feed_inputs.append(placeholder) self._feed_input_names.append(name) self._feed_input_shapes.append(shape) else: # Assumed tensor - TODO(fchollet) additional type check? self.inputs.append(v) if K.is_placeholder(v): self._feed_inputs.append(v) self._feed_input_names.append(name) self._feed_input_shapes.append(K.int_shape(v)) if outputs is None: # Obtain symbolic outputs by calling the model. if len(self.inputs) == 1: if self._expects_training_arg: outputs = self.call(self.inputs[0], training=training) else: outputs = self.call(self.inputs[0]) else: if self._expects_training_arg: outputs = self.call(self.inputs, training=training) else: outputs = self.call(self.inputs) if isinstance(outputs, (list, tuple)): outputs = list(outputs) else: outputs = [outputs] self.outputs = outputs self.output_names = [ 'output_%d' % (i + 1) for i in range(len(self.outputs))] self.built = True def fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None, validation_split=0., validation_data=None, shuffle=True, class_weight=None, sample_weight=None, initial_epoch=0, steps_per_epoch=None, validation_steps=None, **kwargs): # TODO(fchollet): this method may be creating reference cycles, which would # lead to accumulating garbage in memory when called in a loop. Investigate. # Backwards compatibility if batch_size is None and steps_per_epoch is None: batch_size = 32 # Legacy support if 'nb_epoch' in kwargs: logging.warning( 'The `nb_epoch` argument in `fit` ' 'has been renamed `epochs`.') epochs = kwargs.pop('nb_epoch') if kwargs: raise TypeError('Unrecognized keyword arguments: ' + str(kwargs)) # Validate and standardize user data. 
if self._distribution_strategy: distributed_training_utils.validate_callbacks(callbacks) x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight, batch_size=batch_size, check_steps=True, steps_name='steps_per_epoch', steps=steps_per_epoch, validation_split=validation_split) # Prepare validation data. if validation_data: if (isinstance(validation_data, iterator_ops.Iterator) or isinstance(validation_data, iterator_ops.EagerIterator) or isinstance(validation_data, dataset_ops.Dataset)): val_x = validation_data val_y = None val_sample_weight = None elif len(validation_data) == 2: val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence val_sample_weight = None elif len(validation_data) == 3: val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence else: raise ValueError( 'When passing a `validation_data` argument, ' 'it must contain either 2 items (x_val, y_val), ' 'or 3 items (x_val, y_val, val_sample_weights), ' 'or alternatively it could be a dataset or a ' 'dataset or a dataset iterator. ' 'However we received `validation_data=%s`' % validation_data) # Validate and standardize validation data. val_x, val_y, val_sample_weights = self._standardize_user_data( val_x, val_y, sample_weight=val_sample_weight, batch_size=batch_size, steps=validation_steps) elif validation_split and 0. < validation_split < 1.: if training_utils.has_symbolic_tensors(x): raise ValueError('If your data is in the form of symbolic tensors, ' 'you cannot use `validation_split`.') if hasattr(x[0], 'shape'): split_at = int(x[0].shape[0] * (1. - validation_split)) else: split_at = int(len(x[0]) * (1. 
- validation_split)) x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at)) y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at)) sample_weights, val_sample_weights = (slice_arrays( sample_weights, 0, split_at), slice_arrays(sample_weights, split_at)) elif validation_steps: val_x = [] val_y = [] val_sample_weights = [] else: val_x = None val_y = None val_sample_weights = None if context.executing_eagerly(): return training_eager.fit_loop( self, inputs=x, targets=y, sample_weights=sample_weights, class_weight=class_weight, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_x, val_targets=val_y, val_sample_weights=val_sample_weights, shuffle=shuffle, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps) elif self._distribution_strategy: return training_distributed.fit_loop( self, x, y, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_x, val_targets=val_y, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps) else: return training_arrays.fit_loop( self, x, y, sample_weights=sample_weights, batch_size=batch_size, epochs=epochs, verbose=verbose, callbacks=callbacks, val_inputs=val_x, val_targets=val_y, val_sample_weights=val_sample_weights, shuffle=shuffle, initial_epoch=initial_epoch, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps) def evaluate(self, x=None, y=None, batch_size=None, verbose=1, sample_weight=None, steps=None): # Backwards compatibility. if batch_size is None and steps is None: batch_size = 32 # Validate and standardize user data. 
x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, batch_size=batch_size, check_steps=True, steps_name='steps', steps=steps) if context.executing_eagerly(): return training_eager.test_loop( self, inputs=x, targets=y, sample_weights=sample_weights, batch_size=batch_size, verbose=verbose, steps=steps) elif self._distribution_strategy: return training_distributed.test_loop( self, inputs=x, targets=y, verbose=verbose, steps=steps) else: return training_arrays.test_loop( self, inputs=x, targets=y, sample_weights=sample_weights, batch_size=batch_size, verbose=verbose, steps=steps) def predict(self, x, batch_size=None, verbose=0, steps=None): # Backwards compatibility. if batch_size is None and steps is None: batch_size = 32 # Validate and standardize user data. x, _, _ = self._standardize_user_data( x, check_steps=True, steps_name='steps', steps=steps) if context.executing_eagerly(): return training_eager.predict_loop( self, x, batch_size=batch_size, verbose=verbose, steps=steps) elif self._distribution_strategy: return training_distributed.predict_loop( self, x, verbose=verbose, steps=steps) else: return training_arrays.predict_loop( self, x, batch_size=batch_size, verbose=verbose, steps=steps) def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None): if self._distribution_strategy: raise NotImplementedError('`train_on_batch` is not supported for models ' 'compiled with DistributionStrategy.') # Validate and standardize user data. 
x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight, class_weight=class_weight) if context.executing_eagerly(): outputs = training_eager.train_on_batch( self, x, y, sample_weights=sample_weights) else: if self.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = x + y + sample_weights + [1] else: ins = x + y + sample_weights self._make_train_function() outputs = self.train_function(ins) if len(outputs) == 1: return outputs[0] return outputs def test_on_batch(self, x, y=None, sample_weight=None): if self._distribution_strategy: raise NotImplementedError('`test_on_batch` is not supported for models ' 'compiled with DistributionStrategy.') # Validate and standardize user data. x, y, sample_weights = self._standardize_user_data( x, y, sample_weight=sample_weight) if context.executing_eagerly(): outputs = training_eager.test_on_batch( self, x, y, sample_weights=sample_weights) else: if self.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = x + y + sample_weights + [0] else: ins = x + y + sample_weights self._make_test_function() outputs = self.test_function(ins) if len(outputs) == 1: return outputs[0] return outputs def predict_on_batch(self, x): if self._distribution_strategy: raise NotImplementedError('`predict_on_batch` is not supported for ' 'models compiled with DistributionStrategy.') # Validate and standardize user data. 
inputs, _, _ = self._standardize_user_data(x) if context.executing_eagerly(): if (isinstance(x, iterator_ops.EagerIterator) or (isinstance(x, dataset_ops.Dataset) and context.executing_eagerly())): inputs = training_utils.cast_if_floating_dtype(inputs) else: inputs = [ ops.convert_to_tensor(val, dtype=K.floatx()) for val in inputs ] return self(inputs) # pylint: disable=not-callable if not context.executing_eagerly(): if self.uses_learning_phase and not isinstance(K.learning_phase(), int): ins = inputs + [0] else: ins = inputs self._make_predict_function() outputs = self.predict_function(ins) if len(outputs) == 1: return outputs[0] return outputs def fit_generator(self, generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0): if self._distribution_strategy: raise NotImplementedError('`fit_generator` is not supported for ' 'models compiled with DistributionStrategy.') if not self.built and not self._is_graph_network: raise NotImplementedError( '`fit_generator` is not yet enabled for unbuilt Model subclasses') return training_generator.fit_generator( self, generator, steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=verbose, callbacks=callbacks, validation_data=validation_data, validation_steps=validation_steps, class_weight=class_weight, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, shuffle=shuffle, initial_epoch=initial_epoch) def evaluate_generator(self, generator, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0): if self._distribution_strategy: raise NotImplementedError('`evaluate_generator` is not supported for ' 'models compiled with DistributionStrategy.') if not self.built and not self._is_graph_network: raise NotImplementedError( '`evaluate_generator` is not yet enabled for ' 'unbuilt Model subclasses') return 
training_generator.evaluate_generator( self, generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose) def predict_generator(self, generator, steps=None, max_queue_size=10, workers=1, use_multiprocessing=False, verbose=0): if self._distribution_strategy: raise NotImplementedError('`predict_generator` is not supported for ' 'models compiled with DistributionStrategy.') if not self.built and not self._is_graph_network: raise NotImplementedError( '`predict_generator` is not yet enabled for unbuilt Model subclasses') return training_generator.predict_generator( self, generator, steps=steps, max_queue_size=max_queue_size, workers=workers, use_multiprocessing=use_multiprocessing, verbose=verbose) def _get_callback_model(self): if hasattr(self, '_replicated_model') and self._replicated_model: # When using training_distributed, we set the callback model # to an instance of the `DistributedModel` that we create in # the `compile` call. The `DistributedModel` is initialized # with the first replicated model. We need to set the callback # model to a DistributedModel to allow us to override saving # and loading weights when we checkpoint the model during training. return self._replicated_model if hasattr(self, 'callback_model') and self.callback_model: return self.callback_model return self class DistributedCallbackModel(Model): def __init__(self, model): super(DistributedCallbackModel, self).__init__() # TODO(anjalisridhar): Right now the only attributes set are the layer and # weights. We may need to set additional attributes as needed since we have # not called compile on this model. 
def set_original_model(self, orig_model): self._original_model = orig_model def save_weights(self, filepath, overwrite=True, save_format=None): self._replicated_model.save_weights(filepath, overwrite=overwrite, save_format=save_format) def save(self, filepath, overwrite=True, include_optimizer=True): # save weights from the distributed model to the original model distributed_model_weights = self.get_weights() self._original_model.set_weights(distributed_model_weights) # TODO(anjalisridhar): Do we need to save the original model here? # Saving the first replicated model works as well. self._original_model.save(filepath, overwrite=True, include_optimizer=False) def load_weights(self, filepath, by_name=False): self._original_model.load_weights(filepath, by_name=False) # Copy the weights from the original model to each of the replicated models. orig_model_weights = self._original_model.get_weights() distributed_training_utils.set_weights( self._original_model._distribution_strategy, self, # pylint: disable=protected-access orig_model_weights) def __getattr__(self, item): # Whitelisted atttributes of the model that can be accessed by the user # during a callback. if item not in ['_setattr_tracking']: logging.warning('You are accessing attribute ' + item + 'of the' 'DistributedCallbackModel that may not have been set' 'correctly.')
true
true
f713890a52d71352dbedc3981e74a4277fb9c534
3,523
py
Python
pip_services3_components/auth/MemoryCredentialStore.py
pip-services3-python/pip-services3-components-python
d8868f9db9760fb91e5ff7a815de8ce98fdb4b2a
[ "MIT" ]
null
null
null
pip_services3_components/auth/MemoryCredentialStore.py
pip-services3-python/pip-services3-components-python
d8868f9db9760fb91e5ff7a815de8ce98fdb4b2a
[ "MIT" ]
null
null
null
pip_services3_components/auth/MemoryCredentialStore.py
pip-services3-python/pip-services3-components-python
d8868f9db9760fb91e5ff7a815de8ce98fdb4b2a
[ "MIT" ]
1
2020-03-11T21:46:42.000Z
2020-03-11T21:46:42.000Z
# -*- coding: utf-8 -*- """ pip_services3_components.auth.MemoryCredentialStore ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Memory credential store implementation :copyright: Conceptual Vision Consulting LLC 2018-2019, see AUTHORS for more details. :license: MIT, see LICENSE for more details. """ from typing import Optional from pip_services3_commons.config.ConfigParams import ConfigParams from pip_services3_commons.config.IReconfigurable import IReconfigurable from pip_services3_commons.data.StringValueMap import StringValueMap from .CredentialParams import CredentialParams from .ICredentialStore import ICredentialStore class MemoryCredentialStore(ICredentialStore, IReconfigurable): """ Credential store that keeps credentials in memory. ### Configuration parameters ### - [credential key 1]: - ... credential parameters for key 1 - [credential key 2]: - ... credential parameters for key N - ... Example: .. code-block:: python config = ConfigParams.from_tuples("key1.user", "jdoe", "key1.pass", "pass123", "key2.user", "bsmith", "key2.pass", "mypass") credentialStore = MemoryCredentialStore() credentialStore.read_credentials(config) credentialStore.lookup("123", "key1") """ def __init__(self, config: ConfigParams = None): """ Creates a new instance of the credential store. :param config: (optional) configuration with credential parameters. """ self.__items: StringValueMap = StringValueMap() if not (config is None): self.configure(config) def configure(self, config: ConfigParams): """ Configures component by passing configuration parameters. :param config: configuration parameters to be set. """ self.read_credentials(config) def read_credentials(self, config: ConfigParams): """ Reads credentials from configuration parameters. 
Each section represents an individual CredentialParams :param config: configuration parameters to be read """ self.__items.clear() for key in config.get_keys(): value = config.get_as_nullable_string(key) self.__items.append(CredentialParams.from_tuples([key, value])) def store(self, correlation_id: Optional[str], key: str, credential: CredentialParams): """ Stores credential parameters into the store. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the credential parameters. :param credential: a credential parameters to be stored. """ if not (credential is None): self.__items.put(key, credential) else: self.__items.remove(key) def lookup(self, correlation_id: Optional[str], key: str) -> CredentialParams: """ Lookups credential parameters by its key. :param correlation_id: (optional) transaction id to trace execution through call chain. :param key: a key to uniquely identify the credential. :return: found credential parameters or None if nothing was found """ return self.__items.get_as_object(key)
34.539216
95
0.629009
from typing import Optional from pip_services3_commons.config.ConfigParams import ConfigParams from pip_services3_commons.config.IReconfigurable import IReconfigurable from pip_services3_commons.data.StringValueMap import StringValueMap from .CredentialParams import CredentialParams from .ICredentialStore import ICredentialStore class MemoryCredentialStore(ICredentialStore, IReconfigurable): def __init__(self, config: ConfigParams = None): self.__items: StringValueMap = StringValueMap() if not (config is None): self.configure(config) def configure(self, config: ConfigParams): self.read_credentials(config) def read_credentials(self, config: ConfigParams): self.__items.clear() for key in config.get_keys(): value = config.get_as_nullable_string(key) self.__items.append(CredentialParams.from_tuples([key, value])) def store(self, correlation_id: Optional[str], key: str, credential: CredentialParams): if not (credential is None): self.__items.put(key, credential) else: self.__items.remove(key) def lookup(self, correlation_id: Optional[str], key: str) -> CredentialParams: return self.__items.get_as_object(key)
true
true
f713896eb30831c63d845d2e4ba3486057cefff3
2,556
py
Python
dags/dcm_dag.py
nsutton00/starthinker
e597d679a95ca85a21af9cf4df3ff935ca34abf8
[ "Apache-2.0" ]
null
null
null
dags/dcm_dag.py
nsutton00/starthinker
e597d679a95ca85a21af9cf4df3ff935ca34abf8
[ "Apache-2.0" ]
null
null
null
dags/dcm_dag.py
nsutton00/starthinker
e597d679a95ca85a21af9cf4df3ff935ca34abf8
[ "Apache-2.0" ]
null
null
null
########################################################################### # # Copyright 2019 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ########################################################################### ''' -------------------------------------------------------------- Before running this Airflow module... Install StarThinker in cloud composer from open source: pip install git+https://github.com/google/starthinker Or push local code to the cloud composer plugins directory: source install/deploy.sh 4) Composer Menu l) Install All -------------------------------------------------------------- CM Report Create a CM report from a JSON definition. Add a an account as [account_id]@[profile_id] Fetch the report JSON definition. Arguably could be better. The account is automatically added to the report definition. ''' from starthinker_airflow.factory import DAG_Factory # Add the following credentials to your Airflow configuration. USER_CONN_ID = "starthinker_user" # The connection to use for user authentication. GCP_CONN_ID = "starthinker_service" # The connection to use for service authentication. 
INPUTS = { 'account': '', 'body': '{}', 'delete': False, } TASKS = [ { 'dcm': { 'auth': 'user', 'report': { 'account': { 'field': { 'name': 'account', 'kind': 'string', 'order': 1, 'default': '' } }, 'body': { 'field': { 'name': 'body', 'kind': 'json', 'order': 2, 'default': '{}' } } }, 'delete': { 'field': { 'name': 'delete', 'kind': 'boolean', 'order': 3, 'default': False } } } } ] DAG_FACTORY = DAG_Factory('dcm', { 'tasks':TASKS }, INPUTS) DAG_FACTORY.apply_credentails(USER_CONN_ID, GCP_CONN_ID) DAG = DAG_FACTORY.execute() if __name__ == "__main__": DAG_FACTORY.print_commandline()
26.081633
87
0.552426
true
true
f7138aafc10681d11133092224c7fea9e8041dbc
12,211
py
Python
tests/validation/cattlevalidationtest/core/test_github.py
wlan0/validation-tests
606822dfb26f61ba5ce6d4a7358f72a88773c762
[ "Apache-2.0" ]
null
null
null
tests/validation/cattlevalidationtest/core/test_github.py
wlan0/validation-tests
606822dfb26f61ba5ce6d4a7358f72a88773c762
[ "Apache-2.0" ]
null
null
null
tests/validation/cattlevalidationtest/core/test_github.py
wlan0/validation-tests
606822dfb26f61ba5ce6d4a7358f72a88773c762
[ "Apache-2.0" ]
null
null
null
from common_fixtures import * # NOQA from selenium import webdriver from selenium.webdriver.phantomjs.service import Service as PhantomJSService from requests.auth import AuthBase # test the github auth workflow USER_SCOPE = 'github_user' TEAM_SCOPE = 'github_team' ORG_SCOPE = 'github_org' class NewService(PhantomJSService): def __init__(self, *args, **kwargs): super(NewService, self).__init__(*args, **kwargs) webdriver.phantomjs.webdriver.Service = NewService if_github = pytest.mark.skipif(not os.environ.get('API_AUTH_GITHUB' '_CLIENT_SECRET'), reason='API_AUTH_GITHUB' '_CLIENT_SECRET is not set') BASE_URL = cattle_url() + '/v1/' URL = BASE_URL + 'schemas' @pytest.fixture(scope='session') def config(): needed_vars = [ 'API_AUTH_GITHUB_TEST_USER', 'API_AUTH_GITHUB_TEST_PASS', 'API_AUTH_GITHUB_CLIENT_ID', 'API_AUTH_GITHUB_CLIENT_SECRET', 'API_AUTH_RANCHER_TEST_PASS', ] for a in needed_vars: if os.getenv(a, None) is None: raise Exception('Please set ' + a + ' in the environment') config = {} config['username'] = os.getenv('API_AUTH_GITHUB_TEST_USER', None) config['password'] = os.getenv('API_AUTH_GITHUB_TEST_PASS', None) config['phantomjs_port'] = int(os.getenv('PHANTOMJS_WEBDRIVER_PORT', 4444)) config['phantomjs_bin'] = os.getenv('PHANTOMJS_BIN', '/usr/local/bin/phantomjs') assert config['phantomjs_bin'] is not None config['client_id'] = os.getenv('API_AUTH_GITHUB_CLIENT_ID', None) config['client_secret'] = os.getenv('API_AUTH_GITHUB_CLIENT_SECRET', None) config['users'] = {} config['users']['1'] = { 'password': os.getenv('API_AUTH_RANCHER_TEST_PASS', None), 'username': os.getenv('API_AUTH_RANCHER_TEST_USER_1', 'ranchertest01') } config['users']['2'] = { 'password': os.getenv('API_AUTH_RANCHER_TEST_PASS_2', None), 'username': os.getenv('API_AUTH_RANCHER_TEST_USER_2', 'ranchertest02') } return config @pytest.fixture(scope='module') def github_request_code(config, cattle_url, admin_client, request, user=None): def fin(): admin_client.create_githubconfig(enabled=False, 
accessMode='restricted') request.addfinalizer(fin) username = config['username'] password = config['password'] enabled = False if user is not None: username = user['username'] password = user['password'] enabled = True driver = webdriver.PhantomJS(config['phantomjs_bin'], port=config['phantomjs_port']) max_wait = 60 driver.set_page_load_timeout(max_wait) driver.set_script_timeout(max_wait) driver.implicitly_wait(10) # undo monkey patching webdriver.phantomjs.webdriver.Service = PhantomJSService driver.set_window_size(1120, 550) admin_client.create_githubconfig(enabled=enabled, accessMode='unrestricted', clientId=config['client_id'], clientSecret=config['client_secret']) urlx = "https://github.com/login/oauth/authorize?response_type=code&client_id=" +\ config['client_id'] + "&scope=read:org" driver.get(urlx) driver.find_element_by_id('login_field').send_keys(username) driver.find_element_by_id('password').send_keys(password) driver.find_element_by_name('commit').submit() try: driver.find_element_by_class_name('btn-primary').click() except: pass driver.get('https://github.com') cookie_dict = dict(driver.get_cookie('_gh_sess')) cookie_dict = {'_gh_sess': cookie_dict['value']} cookie_dict['user_session'] = driver.get_cookie('user_session')['value'] r = requests.get(urlx, cookies=cookie_dict, allow_redirects=False) redirect_url = r.headers['location'] code = redirect_url.rsplit('=')[1] driver.quit() return code @pytest.fixture(scope='module') def github_request_token(github_request_code): code = github_request_code c = requests.post(BASE_URL + 'token', {'code': code}) return c.json()['jwt'] @pytest.fixture(scope='module') def github_client(request, cattle_url, github_request_token, admin_client): github_client = from_env(url=cattle_url) github_client.delete_by_id = delete_by_id assert github_client.valid() jwt = github_request_token github_client._auth = GithubAuth(jwt) return github_client def delete_by_id(self, type, id): url = self.schema.types[type].links.collection 
if url.endswith('/'): url = url + id else: url = '/'.join([url, id]) return self._delete(url) def _create_member(name='rancherio', role='member', type=ORG_SCOPE): return { 'role': role, 'externalId': name, 'externalIdType': type } def diff_members(members, got_members): assert len(members) == len(got_members) members_a = set([]) members_b = set([]) for member in members: members_a.add(member['externalId'] + ' ' + member['externalIdType'] + ' ' + member['role']) for member in got_members: members_b.add(member['externalId'] + ' ' + member['externalIdType'] + ' ' + member['role']) assert members_a == members_b def get_plain_members(members): plain_members = [] for member in members.data: plain_members.append({ 'role': member.role, 'externalId': member.externalId, 'externalIdType': member.externalIdType }) return plain_members class GithubAuth(AuthBase): def __init__(self, jwt, prj_id=None): # setup any auth-related data here self.jwt = jwt self.prj_id = prj_id def __call__(self, r): # modify and return the request r.headers['Authorization'] = 'Bearer ' + self.jwt if self.prj_id is not None: r.headers['X-API-Project-Id'] = self.prj_id return r def switch_on_auth(admin_client, request, config): admin_client.create_githubconfig(enabled=True, accessMode='restricted', clientId=config['client_id'], clientSecret=config['client_secret']) def fin(): admin_client.create_githubconfig(enabled=False, accessMode='restricted', allowedUsers=[], allowedOrganizations=[], clientId='', clientSecret='') request.addfinalizer(fin) @if_github def test_github_auth_config_unauth_user(request, admin_client, config): switch_on_auth(admin_client, request, config) # do not set any auth headers no_auth = requests.get(URL) # test that auth is switched on assert no_auth.status_code == 401 @if_github def test_github_auth_config_invalid_user(request, admin_client, config): switch_on_auth(admin_client, request, config) # set invalid auth headers bad_auth = requests.get(URL, headers={'Authorization': 
'Bearer some_random_string'}) # test that user does not have access assert bad_auth.status_code == 401 @if_github def test_github_auth_config_valid_user(github_request_token, admin_client, request, config): switch_on_auth(admin_client, request, config) jwt = github_request_token # set valid auth headers schemas = requests.get(URL, headers={'Authorization': 'Bearer ' + jwt}) # test that user has access assert schemas.status_code == 200 @if_github def test_github_auth_config_api_whitelist_users(admin_client, request, github_client, config): switch_on_auth(admin_client, request, config) github_client.create_githubconfig(allowedUsers=[ config['users']['1']['username'], config['users']['2']['username'] ], clientId=config['client_id'], clientSecret=config['client_secret'] ) # test that these users were whitelisted r = github_client.list_githubconfig() users = r[0]['allowedUsers'] assert len(users) == 2 assert config['users']['1']['username'] in users assert config['users']['2']['username'] in users assert 'ranchertest02' in users @if_github def test_github_auth_config_api_whitelist_orgs(admin_client, request, config, github_client): switch_on_auth(admin_client, request, config) # set whitelisted org github_client.create_githubconfig(allowedOrganizations=['rancherio']) # test that these org was whitelisted r = github_client.list_githubconfig() orgs = r[0]['allowedOrganizations'] assert len(orgs) == 1 assert 'rancherio' in orgs @if_github def test_github_add_whitelisted_user(admin_client, config, github_client, request): switch_on_auth(admin_client, request, config) github_client.create_githubconfig(allowedUsers=[ config['users']['1']['username'] ]) # test that these users were whitelisted r = github_client.list_githubconfig() users = r[0]['allowedUsers'] assert config['users']['1']['username'] in users new_token = github_request_code(config, cattle_url, admin_client, request, user=config['users']['1']) new_token = github_request_token(new_token) assert new_token is not 
None @if_github def test_github_projects(cattle_url, config, request, admin_client, github_client): user_client = from_env(url=cattle_url) switch_on_auth(admin_client, request, config) github_client.create_githubconfig(allowedUsers=[ config['users']['1']['username'] ]) # test that the users is whitelisted r = github_client.list_githubconfig() users = r[0]['allowedUsers'] assert config['users']['1']['username'] in users new_token = github_request_code(config, cattle_url, admin_client, request, user=config['users']['1']) new_token = github_request_token(new_token) user_client._auth = GithubAuth(new_token) members = [_create_member( name=config['users']['1']['username'], type=USER_SCOPE, role='owner' ), _create_member()] project = user_client.create_project(members=members) assert len(project.projectMembers()) == 2 diff_members(get_plain_members(project.projectMembers()), members) project = user_client.wait_success(project) project = user_client.wait_success(project.deactivate()) project = user_client.wait_success(project.remove()) project = user_client.wait_success(project.purge()) project = user_client.by_id('project', project.id) assert project.state == 'purged' @if_github def test_github_id_name(config, cattle_url, request, admin_client, github_client): user_client = from_env(url=cattle_url) switch_on_auth(admin_client, request, config) github_client.create_githubconfig(allowedUsers=[ config['users']['1']['username'] ]) new_token = github_request_code(config, cattle_url, admin_client, request, user=config['users']['1']) new_token = github_request_token(new_token) user_client._auth = GithubAuth(new_token) sent_members = [_create_member( name=config['users']['1']['username'], type=USER_SCOPE, role='owner' ), _create_member() ] project = user_client.create_project(members=sent_members) members = get_plain_members(project.projectMembers()) assert len(members) == 2 diff_members(members, sent_members)
34.013928
86
0.635001
from common_fixtures import * from selenium import webdriver from selenium.webdriver.phantomjs.service import Service as PhantomJSService from requests.auth import AuthBase USER_SCOPE = 'github_user' TEAM_SCOPE = 'github_team' ORG_SCOPE = 'github_org' class NewService(PhantomJSService): def __init__(self, *args, **kwargs): super(NewService, self).__init__(*args, **kwargs) webdriver.phantomjs.webdriver.Service = NewService if_github = pytest.mark.skipif(not os.environ.get('API_AUTH_GITHUB' '_CLIENT_SECRET'), reason='API_AUTH_GITHUB' '_CLIENT_SECRET is not set') BASE_URL = cattle_url() + '/v1/' URL = BASE_URL + 'schemas' @pytest.fixture(scope='session') def config(): needed_vars = [ 'API_AUTH_GITHUB_TEST_USER', 'API_AUTH_GITHUB_TEST_PASS', 'API_AUTH_GITHUB_CLIENT_ID', 'API_AUTH_GITHUB_CLIENT_SECRET', 'API_AUTH_RANCHER_TEST_PASS', ] for a in needed_vars: if os.getenv(a, None) is None: raise Exception('Please set ' + a + ' in the environment') config = {} config['username'] = os.getenv('API_AUTH_GITHUB_TEST_USER', None) config['password'] = os.getenv('API_AUTH_GITHUB_TEST_PASS', None) config['phantomjs_port'] = int(os.getenv('PHANTOMJS_WEBDRIVER_PORT', 4444)) config['phantomjs_bin'] = os.getenv('PHANTOMJS_BIN', '/usr/local/bin/phantomjs') assert config['phantomjs_bin'] is not None config['client_id'] = os.getenv('API_AUTH_GITHUB_CLIENT_ID', None) config['client_secret'] = os.getenv('API_AUTH_GITHUB_CLIENT_SECRET', None) config['users'] = {} config['users']['1'] = { 'password': os.getenv('API_AUTH_RANCHER_TEST_PASS', None), 'username': os.getenv('API_AUTH_RANCHER_TEST_USER_1', 'ranchertest01') } config['users']['2'] = { 'password': os.getenv('API_AUTH_RANCHER_TEST_PASS_2', None), 'username': os.getenv('API_AUTH_RANCHER_TEST_USER_2', 'ranchertest02') } return config @pytest.fixture(scope='module') def github_request_code(config, cattle_url, admin_client, request, user=None): def fin(): admin_client.create_githubconfig(enabled=False, accessMode='restricted') 
request.addfinalizer(fin) username = config['username'] password = config['password'] enabled = False if user is not None: username = user['username'] password = user['password'] enabled = True driver = webdriver.PhantomJS(config['phantomjs_bin'], port=config['phantomjs_port']) max_wait = 60 driver.set_page_load_timeout(max_wait) driver.set_script_timeout(max_wait) driver.implicitly_wait(10) webdriver.phantomjs.webdriver.Service = PhantomJSService driver.set_window_size(1120, 550) admin_client.create_githubconfig(enabled=enabled, accessMode='unrestricted', clientId=config['client_id'], clientSecret=config['client_secret']) urlx = "https://github.com/login/oauth/authorize?response_type=code&client_id=" +\ config['client_id'] + "&scope=read:org" driver.get(urlx) driver.find_element_by_id('login_field').send_keys(username) driver.find_element_by_id('password').send_keys(password) driver.find_element_by_name('commit').submit() try: driver.find_element_by_class_name('btn-primary').click() except: pass driver.get('https://github.com') cookie_dict = dict(driver.get_cookie('_gh_sess')) cookie_dict = {'_gh_sess': cookie_dict['value']} cookie_dict['user_session'] = driver.get_cookie('user_session')['value'] r = requests.get(urlx, cookies=cookie_dict, allow_redirects=False) redirect_url = r.headers['location'] code = redirect_url.rsplit('=')[1] driver.quit() return code @pytest.fixture(scope='module') def github_request_token(github_request_code): code = github_request_code c = requests.post(BASE_URL + 'token', {'code': code}) return c.json()['jwt'] @pytest.fixture(scope='module') def github_client(request, cattle_url, github_request_token, admin_client): github_client = from_env(url=cattle_url) github_client.delete_by_id = delete_by_id assert github_client.valid() jwt = github_request_token github_client._auth = GithubAuth(jwt) return github_client def delete_by_id(self, type, id): url = self.schema.types[type].links.collection if url.endswith('/'): url = url + id else: url 
= '/'.join([url, id]) return self._delete(url) def _create_member(name='rancherio', role='member', type=ORG_SCOPE): return { 'role': role, 'externalId': name, 'externalIdType': type } def diff_members(members, got_members): assert len(members) == len(got_members) members_a = set([]) members_b = set([]) for member in members: members_a.add(member['externalId'] + ' ' + member['externalIdType'] + ' ' + member['role']) for member in got_members: members_b.add(member['externalId'] + ' ' + member['externalIdType'] + ' ' + member['role']) assert members_a == members_b def get_plain_members(members): plain_members = [] for member in members.data: plain_members.append({ 'role': member.role, 'externalId': member.externalId, 'externalIdType': member.externalIdType }) return plain_members class GithubAuth(AuthBase): def __init__(self, jwt, prj_id=None): self.jwt = jwt self.prj_id = prj_id def __call__(self, r): r.headers['Authorization'] = 'Bearer ' + self.jwt if self.prj_id is not None: r.headers['X-API-Project-Id'] = self.prj_id return r def switch_on_auth(admin_client, request, config): admin_client.create_githubconfig(enabled=True, accessMode='restricted', clientId=config['client_id'], clientSecret=config['client_secret']) def fin(): admin_client.create_githubconfig(enabled=False, accessMode='restricted', allowedUsers=[], allowedOrganizations=[], clientId='', clientSecret='') request.addfinalizer(fin) @if_github def test_github_auth_config_unauth_user(request, admin_client, config): switch_on_auth(admin_client, request, config) no_auth = requests.get(URL) assert no_auth.status_code == 401 @if_github def test_github_auth_config_invalid_user(request, admin_client, config): switch_on_auth(admin_client, request, config) bad_auth = requests.get(URL, headers={'Authorization': 'Bearer some_random_string'}) assert bad_auth.status_code == 401 @if_github def test_github_auth_config_valid_user(github_request_token, admin_client, request, config): switch_on_auth(admin_client, request, 
config) jwt = github_request_token schemas = requests.get(URL, headers={'Authorization': 'Bearer ' + jwt}) assert schemas.status_code == 200 @if_github def test_github_auth_config_api_whitelist_users(admin_client, request, github_client, config): switch_on_auth(admin_client, request, config) github_client.create_githubconfig(allowedUsers=[ config['users']['1']['username'], config['users']['2']['username'] ], clientId=config['client_id'], clientSecret=config['client_secret'] ) r = github_client.list_githubconfig() users = r[0]['allowedUsers'] assert len(users) == 2 assert config['users']['1']['username'] in users assert config['users']['2']['username'] in users assert 'ranchertest02' in users @if_github def test_github_auth_config_api_whitelist_orgs(admin_client, request, config, github_client): switch_on_auth(admin_client, request, config) github_client.create_githubconfig(allowedOrganizations=['rancherio']) r = github_client.list_githubconfig() orgs = r[0]['allowedOrganizations'] assert len(orgs) == 1 assert 'rancherio' in orgs @if_github def test_github_add_whitelisted_user(admin_client, config, github_client, request): switch_on_auth(admin_client, request, config) github_client.create_githubconfig(allowedUsers=[ config['users']['1']['username'] ]) r = github_client.list_githubconfig() users = r[0]['allowedUsers'] assert config['users']['1']['username'] in users new_token = github_request_code(config, cattle_url, admin_client, request, user=config['users']['1']) new_token = github_request_token(new_token) assert new_token is not None @if_github def test_github_projects(cattle_url, config, request, admin_client, github_client): user_client = from_env(url=cattle_url) switch_on_auth(admin_client, request, config) github_client.create_githubconfig(allowedUsers=[ config['users']['1']['username'] ]) r = github_client.list_githubconfig() users = r[0]['allowedUsers'] assert config['users']['1']['username'] in users new_token = github_request_code(config, cattle_url, 
admin_client, request, user=config['users']['1']) new_token = github_request_token(new_token) user_client._auth = GithubAuth(new_token) members = [_create_member( name=config['users']['1']['username'], type=USER_SCOPE, role='owner' ), _create_member()] project = user_client.create_project(members=members) assert len(project.projectMembers()) == 2 diff_members(get_plain_members(project.projectMembers()), members) project = user_client.wait_success(project) project = user_client.wait_success(project.deactivate()) project = user_client.wait_success(project.remove()) project = user_client.wait_success(project.purge()) project = user_client.by_id('project', project.id) assert project.state == 'purged' @if_github def test_github_id_name(config, cattle_url, request, admin_client, github_client): user_client = from_env(url=cattle_url) switch_on_auth(admin_client, request, config) github_client.create_githubconfig(allowedUsers=[ config['users']['1']['username'] ]) new_token = github_request_code(config, cattle_url, admin_client, request, user=config['users']['1']) new_token = github_request_token(new_token) user_client._auth = GithubAuth(new_token) sent_members = [_create_member( name=config['users']['1']['username'], type=USER_SCOPE, role='owner' ), _create_member() ] project = user_client.create_project(members=sent_members) members = get_plain_members(project.projectMembers()) assert len(members) == 2 diff_members(members, sent_members)
true
true
f7138c64854f49ece2c496eff5f4166bc32c8367
1,271
py
Python
proxypool/processors/server.py
staugur/ProxyPool
e90002d287a541818c4fc811d0a627ae8b4b21a8
[ "MIT" ]
2
2022-01-20T01:11:18.000Z
2022-01-26T08:15:13.000Z
proxypool/processors/server.py
staugur/ProxyPool
e90002d287a541818c4fc811d0a627ae8b4b21a8
[ "MIT" ]
1
2021-12-31T05:02:20.000Z
2021-12-31T05:02:20.000Z
proxypool/processors/server.py
staugur/ProxyPool
e90002d287a541818c4fc811d0a627ae8b4b21a8
[ "MIT" ]
null
null
null
from flask import Flask, g from proxypool.storages.redis import RedisClient from proxypool.setting import API_HOST, API_PORT, API_THREADED, IS_DEV __all__ = ['app'] app = Flask(__name__) if IS_DEV: app.debug = True def get_conn(): """ get redis client object :return: """ if not hasattr(g, 'redis'): g.redis = RedisClient() return g.redis @app.route('/') def index(): """ get home page, you can define your own templates :return: """ return '<h2>Welcome to Proxy Pool System</h2>' @app.route('/random') def get_proxy(): """ get a random proxy :return: get a random proxy """ conn = get_conn() return conn.random().string() @app.route('/all') def get_proxy_all(): """ get a random proxy :return: get a random proxy """ conn = get_conn() proxies = conn.all() proxies_string = '' if proxies: for proxy in proxies: proxies_string += str(proxy) + '\n' return proxies_string @app.route('/count') def get_count(): """ get the count of proxies :return: count, int """ conn = get_conn() return str(conn.count()) if __name__ == '__main__': app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)
18.157143
70
0.608969
from flask import Flask, g from proxypool.storages.redis import RedisClient from proxypool.setting import API_HOST, API_PORT, API_THREADED, IS_DEV __all__ = ['app'] app = Flask(__name__) if IS_DEV: app.debug = True def get_conn(): if not hasattr(g, 'redis'): g.redis = RedisClient() return g.redis @app.route('/') def index(): return '<h2>Welcome to Proxy Pool System</h2>' @app.route('/random') def get_proxy(): conn = get_conn() return conn.random().string() @app.route('/all') def get_proxy_all(): conn = get_conn() proxies = conn.all() proxies_string = '' if proxies: for proxy in proxies: proxies_string += str(proxy) + '\n' return proxies_string @app.route('/count') def get_count(): conn = get_conn() return str(conn.count()) if __name__ == '__main__': app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)
true
true
f7138d56d61ad0e8b49d63539499d3a2e9af36e4
6,810
py
Python
flomaster/flomaster.py
HaykTarkhanyan/flomasterplot
28cc7e53a7b947cded45828efd8777c922b32649
[ "MIT" ]
1
2022-01-26T08:48:02.000Z
2022-01-26T08:48:02.000Z
flomaster/flomaster.py
HaykTarkhanyan/flomasterplot
28cc7e53a7b947cded45828efd8777c922b32649
[ "MIT" ]
3
2022-02-07T09:28:49.000Z
2022-02-07T09:30:32.000Z
flomaster/flomaster.py
HaykTarkhanyan/flomasterplot
28cc7e53a7b947cded45828efd8777c922b32649
[ "MIT" ]
null
null
null
from helpers import * from plots import * from col_type_detector import * # from configs import * import warnings def generate_flomaster_plot(df, x="None", y=[], group_by=None, plot_type=None, x_axis=None, y_axis=None, title=None): """ Function generates interactive plot for given dataframe and columns Args: df (pd.DataFrame) x (str): name of the column to use as x_axis y (str or list): either one column or list of columns to plot as y axis group_by (str): column by which to group data (default is None) plot_type (str): possible values vary depending on input data, the list is` ONE_NUMERIC = ['Histogram', 'Distplot'] ONE_CATEOGIRCAL = ['Donut', 'Pie', 'Histogram'] ONE_TEXT = ['Wordcloud'] TWO_NUMERIC = ["Scatter", "Scatter plot with margins", "2D density plot", "Distplot", "Histogram", "Basic Stats"] TWO_NUMERIC_SORTED = ['Connected Scatter', "Area plot", "Line plot"] ONE_CATEOGIRCAL_ONE_NUMERICAL = ['Box', "Violin", "Basic Stats"] TWO_CATEGORICAL = ['Cross tab', "Stacked bar"] ONE_DATETIME_ONE_NUMERIC = ['Connected Scatter'] x_axis (str): defaults to x columns name y_axis (str): defaults to y, if y is a list then to the first element of y title (str): defaults to f"{x_axis} vs {y_axis}" Note: Some illogical results might occur in case of column_type_detector classifies some columns incorrectly, also note that this package is in a very early stage of development Raises: ValueError: if plot_type is not from allowed list Returns: plotly figure object """ if type(y) == str: y = [y] data_types = get_column_types(df, num_unique_categories=2) if x_axis is None: x_axis = x if y != [] and y_axis is None: y_axis = y[0] if title is None: title = f"{x_axis} vs {y_axis}" x_dtype = get_data_type_for_given_feature(data_types, x) y_dtype = get_data_type_for_given_feature(data_types, y[0]) # print(x) # print(y) # print(x_dtype) # print(y_dtype) # one feature if x != "None" and y[0] == 'None': if x_dtype == 'numeric': # 1 possible_graphs = ONE_NUMERIC if (plot_type is not None) and 
(plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = one_numeric(df, x, group_by, plot_type) add_labels_to_fig(fig, x_axis, y_axis, title) return fig if x_dtype == 'categorical': # 2 possible_graphs = ONE_CATEOGIRCAL if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = one_categoric(df, x, group_by, plot_type) add_labels_to_fig(fig, x_axis, y_axis, title) return fig if x_dtype == 'texts': # 3 possible_graphs = ONE_TEXT if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = one_textual(df, x) return fig # two features if x != "None" and y[0] != 'None': # two numeric if x_dtype == "numeric" and y_dtype == 'numeric': # 4 global TWO_NUMERIC if df[x].to_list() == sorted(df[x].to_list()): TWO_NUMERIC += TWO_NUMERIC_SORTED possible_graphs = TWO_NUMERIC if len(df)>2000 and plot_type in ["Histogram", "Scatter"]: warnings.warn('**Data has too many rows, we suggest plotting \ with one of the following: "Scatter plot with margins", "2D density plot", "Distplot"**') if len(df)<2000 and plot_type not in ["Histogram", "Scatter", "Basic Stats"]: warnings.warn('**Data has few rows, we suggest plotting \ with one of the following: "Histogram", "Scatter"**') if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = two_numeric(df, x, y[0], group_by, plot_type) if plot_type in ["Basic Stats",'Histogram']: if y_axis == y[0]: y_axis = '' if x_axis == x: x_axis = '' add_labels_to_fig(fig, x_axis, y_axis, title) return fig #one numeric one categoric # 5 if x_dtype == "categorical" and y_dtype == 'numeric': possible_graphs = ONE_CATEOGIRCAL_ONE_NUMERICAL if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") 
else: fig = one_numeric_one_categorical(df, x, y, group_by, plot_type) add_labels_to_fig(fig, x_axis, y_axis, title) return fig # two categoricals if x_dtype == "categorical" and y_dtype == 'categorical': possible_graphs = TWO_CATEGORICAL if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: if plot_type == 'Cross tab': fig = two_categorical(df, x, y[0], plot_type) elif plot_type == 'Stacked bar': fig = two_categorical(df, x, y[0], plot_type) add_labels_to_fig(fig, x_axis, y_axis, title) return fig # one datetime one numeric if x_dtype == "datetime" and y_dtype == 'numeric': global ONE_DATETIME_ONE_NUMERIC if check_list_in_list(list(df.columns), ['Date', "Open", "High", "Low", "Close"]): ONE_DATETIME_ONE_NUMERIC += ["Stock price"] possible_graphs = ONE_DATETIME_ONE_NUMERIC if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = one_datetime_one_numeric(df, x, y, group_by,plot_type) add_labels_to_fig(fig, x_axis, y_axis, title) return fig return "Something went wrong, contact team Flomaster"
41.52439
125
0.567988
from helpers import * from plots import * from col_type_detector import * import warnings def generate_flomaster_plot(df, x="None", y=[], group_by=None, plot_type=None, x_axis=None, y_axis=None, title=None): if type(y) == str: y = [y] data_types = get_column_types(df, num_unique_categories=2) if x_axis is None: x_axis = x if y != [] and y_axis is None: y_axis = y[0] if title is None: title = f"{x_axis} vs {y_axis}" x_dtype = get_data_type_for_given_feature(data_types, x) y_dtype = get_data_type_for_given_feature(data_types, y[0]) if x != "None" and y[0] == 'None': if x_dtype == 'numeric': possible_graphs = ONE_NUMERIC if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = one_numeric(df, x, group_by, plot_type) add_labels_to_fig(fig, x_axis, y_axis, title) return fig if x_dtype == 'categorical': possible_graphs = ONE_CATEOGIRCAL if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = one_categoric(df, x, group_by, plot_type) add_labels_to_fig(fig, x_axis, y_axis, title) return fig if x_dtype == 'texts': possible_graphs = ONE_TEXT if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = one_textual(df, x) return fig if x != "None" and y[0] != 'None': if x_dtype == "numeric" and y_dtype == 'numeric': global TWO_NUMERIC if df[x].to_list() == sorted(df[x].to_list()): TWO_NUMERIC += TWO_NUMERIC_SORTED possible_graphs = TWO_NUMERIC if len(df)>2000 and plot_type in ["Histogram", "Scatter"]: warnings.warn('**Data has too many rows, we suggest plotting \ with one of the following: "Scatter plot with margins", "2D density plot", "Distplot"**') if len(df)<2000 and plot_type not in ["Histogram", "Scatter", "Basic Stats"]: warnings.warn('**Data has few rows, we suggest plotting \ with one of the following: "Histogram", 
"Scatter"**') if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = two_numeric(df, x, y[0], group_by, plot_type) if plot_type in ["Basic Stats",'Histogram']: if y_axis == y[0]: y_axis = '' if x_axis == x: x_axis = '' add_labels_to_fig(fig, x_axis, y_axis, title) return fig if x_dtype == "categorical" and y_dtype == 'numeric': possible_graphs = ONE_CATEOGIRCAL_ONE_NUMERICAL if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = one_numeric_one_categorical(df, x, y, group_by, plot_type) add_labels_to_fig(fig, x_axis, y_axis, title) return fig if x_dtype == "categorical" and y_dtype == 'categorical': possible_graphs = TWO_CATEGORICAL if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: if plot_type == 'Cross tab': fig = two_categorical(df, x, y[0], plot_type) elif plot_type == 'Stacked bar': fig = two_categorical(df, x, y[0], plot_type) add_labels_to_fig(fig, x_axis, y_axis, title) return fig if x_dtype == "datetime" and y_dtype == 'numeric': global ONE_DATETIME_ONE_NUMERIC if check_list_in_list(list(df.columns), ['Date', "Open", "High", "Low", "Close"]): ONE_DATETIME_ONE_NUMERIC += ["Stock price"] possible_graphs = ONE_DATETIME_ONE_NUMERIC if (plot_type is not None) and (plot_type not in possible_graphs): raise ValueError(f"Please select one from {possible_graphs}") else: fig = one_datetime_one_numeric(df, x, y, group_by,plot_type) add_labels_to_fig(fig, x_axis, y_axis, title) return fig return "Something went wrong, contact team Flomaster"
true
true
f7138db8354b61ab6765a0e6828a80d6ecfe9b37
207
py
Python
l_06_lambda_functions/lists/ex_06_revers_list_in_place.py
VasAtanasov/SoftUni-Python-Fundamentals
471d0537dd6e5c8b61ede92b7673c0d67e2964fd
[ "MIT" ]
1
2019-06-05T11:16:08.000Z
2019-06-05T11:16:08.000Z
l_06_lambda_functions/lists/ex_06_revers_list_in_place.py
VasAtanasov/SoftUni-Python-Fundamentals
471d0537dd6e5c8b61ede92b7673c0d67e2964fd
[ "MIT" ]
null
null
null
l_06_lambda_functions/lists/ex_06_revers_list_in_place.py
VasAtanasov/SoftUni-Python-Fundamentals
471d0537dd6e5c8b61ede92b7673c0d67e2964fd
[ "MIT" ]
null
null
null
numbers = [int(num) for num in input().split(' ')] for i in range(len(numbers) // 2): temp = numbers[i] numbers[i] = numbers[- 1 - i] numbers[- 1 - i] = temp print(" ".join(map(str, numbers)))
23
50
0.565217
numbers = [int(num) for num in input().split(' ')] for i in range(len(numbers) // 2): temp = numbers[i] numbers[i] = numbers[- 1 - i] numbers[- 1 - i] = temp print(" ".join(map(str, numbers)))
true
true
f7138f66a9017b9b3ef79b6f7ee60acad455cd18
8,620
py
Python
angr/analyses/xrefs.py
Kyle-Kyle/angr
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
[ "BSD-2-Clause" ]
1
2020-11-02T00:37:29.000Z
2020-11-02T00:37:29.000Z
angr/analyses/xrefs.py
Kyle-Kyle/angr
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
[ "BSD-2-Clause" ]
null
null
null
angr/analyses/xrefs.py
Kyle-Kyle/angr
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
[ "BSD-2-Clause" ]
3
2019-10-17T07:47:36.000Z
2022-01-24T23:38:13.000Z
from collections import defaultdict import pyvex from ..knowledge_plugins.xrefs import XRef, XRefType from ..engines.light import SimEngineLight, SimEngineLightVEXMixin from .propagator.vex_vars import VEXTmp from .propagator.values import Top from . import register_analysis from .analysis import Analysis from .forward_analysis import FunctionGraphVisitor, SingleNodeGraphVisitor, ForwardAnalysis class SimEngineXRefsVEX( SimEngineLightVEXMixin, SimEngineLight, ): def __init__(self, xref_manager, project=None, replacements=None): super().__init__() self.project = project self.xref_manager = xref_manager self.replacements = replacements if replacements is not None else { } def add_xref(self, xref_type, from_loc, to_loc): self.xref_manager.add_xref(XRef(ins_addr=from_loc.ins_addr, block_addr=from_loc.block_addr, stmt_idx=from_loc.stmt_idx, dst=to_loc, xref_type=xref_type) ) # # Statement handlers # def _handle_WrTmp(self, stmt): # Don't execute the tmp write since it has been done during constant propagation self._expr(stmt.data) if type(stmt.data) is pyvex.IRExpr.Load: self._handle_data_offset_refs(stmt.tmp) def _handle_Put(self, stmt): # if there is a Load, get it executed self._expr(stmt.data) def _handle_Store(self, stmt): if isinstance(stmt.addr, pyvex.IRExpr.RdTmp): addr_tmp = VEXTmp(stmt.addr.tmp) blockloc = self._codeloc(block_only=True) if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top): addr = self.replacements[blockloc][addr_tmp] if isinstance(addr, int): self.add_xref(XRefType.Write, self._codeloc(), addr) elif isinstance(stmt.addr, pyvex.IRExpr.Const): addr = stmt.addr.con.value self.add_xref(XRefType.Write, self._codeloc(), addr) def _handle_StoreG(self, stmt): blockloc = self._codeloc(block_only=True) if type(stmt.addr) is pyvex.IRExpr.RdTmp: addr_tmp = VEXTmp(stmt.addr.tmp) if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top): addr = 
self.replacements[blockloc][addr_tmp] if isinstance(addr, int): self.add_xref(XRefType.Write, self._codeloc(), addr) def _handle_LoadG(self, stmt): # What are we reading? blockloc = self._codeloc(block_only=True) if type(stmt.addr) is pyvex.IRExpr.RdTmp: addr_tmp = VEXTmp(stmt.addr.tmp) if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top): addr = self.replacements[blockloc][addr_tmp] if isinstance(addr, int): self.add_xref(XRefType.Read, self._codeloc(), addr) self._handle_data_offset_refs(stmt.dst) def _handle_LLSC(self, stmt: pyvex.IRStmt.LLSC): blockloc = self._codeloc(block_only=True) if isinstance(stmt.addr, pyvex.IRExpr.RdTmp): addr_tmp = VEXTmp(stmt.addr.tmp) if addr_tmp in self.replacements[blockloc]: addr = self.replacements[blockloc][addr_tmp] if isinstance(addr, int): if stmt.storedata is None: # load-link xref_type = XRefType.Read else: xref_type = XRefType.Write self.add_xref(xref_type, self._codeloc(), addr) def _handle_data_offset_refs(self, data_tmp): # is this thing a pointer? # If so, produce the ida-style "Offset" XRefs. blockloc = self._codeloc(block_only=True) tmp = VEXTmp(data_tmp) if tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][tmp], Top): data = self.replacements[blockloc][tmp] # Is this thing not an integer? If so, get out of here # e.g., you can't find_object_containing on an SPOffset if not isinstance(data, int): return if data is not None and self.project.loader.find_object_containing(data) is not None: # HACK: Avoid spamming Xrefs if the binary is loaded at 0 # e.g., firmware! 
# (magic value chosen due to length of CM EVT) if data > 0x200: self.add_xref(XRefType.Offset, self._codeloc(), data) # # Expression handlers # def _handle_Get(self, expr): return None def _handle_Load(self, expr): blockloc = self._codeloc(block_only=True) if type(expr.addr) is pyvex.IRExpr.RdTmp: addr_tmp = VEXTmp(expr.addr.tmp) if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top): addr = self.replacements[blockloc][addr_tmp] if isinstance(addr, int): self.add_xref(XRefType.Read, self._codeloc(), addr) elif type(expr.addr) is pyvex.IRExpr.Const: addr = expr.addr.con.value self.add_xref(XRefType.Read, self._codeloc(), addr) def _handle_CCall(self, expr): return None def _handle_function(self, func): # pylint: disable=unused-argument,no-self-use return None # TODO: Maybe add an execute-type XRef? class XRefsAnalysis(ForwardAnalysis, Analysis): # pylint:disable=abstract-method """ XRefsAnalysis recovers in-depth x-refs (cross-references) in disassembly code. 
Here is an example:: .text: 000023C8 LDR R2, =time_now 000023CA LDR R3, [R2] 000023CC ADDS R3, #1 000023CE STR R3, [R2] 000023D0 BX LR .bss: 1FFF36F4 time_now % 4 You will have the following x-refs for time_now:: 23c8 - offset 23ca - read access 23ce - write access """ def __init__(self, func=None, func_graph=None, block=None, max_iterations=1, replacements=None): if func is not None: if block is not None: raise ValueError('You cannot specify both "func" and "block".') # traversing a function graph_visitor = FunctionGraphVisitor(func, func_graph) if replacements is None: prop = self.project.analyses.Propagator(func=func, func_graph=func_graph) replacements = prop.replacements elif block is not None: # traversing a block graph_visitor = SingleNodeGraphVisitor(block) if replacements is None: prop = self.project.analyses.Propagator(block=block) replacements = prop.replacements else: raise ValueError('Unsupported analysis target.') ForwardAnalysis.__init__(self, order_jobs=True, allow_merging=True, allow_widening=False, graph_visitor=graph_visitor) self._function = func self._max_iterations = max_iterations self._replacements = replacements self._node_iterations = defaultdict(int) self._engine_vex = SimEngineXRefsVEX(self.kb.xrefs, project=self.project, replacements=replacements) self._engine_ail = None self._analyze() # # Main analysis routines # def _pre_analysis(self): pass def _pre_job_handling(self, job): pass def _initial_abstract_state(self, node): return None def _merge_states(self, node, *states): return None def _run_on_node(self, node, state): block = self.project.factory.block(node.addr, node.size, opt_level=1, cross_insn_opt=False) if block.size == 0: # VEX couldn't decode it return False, None block_key = node.addr engine = self._engine_vex engine.process(None, block=block, fail_fast=self._fail_fast) self._node_iterations[block_key] += 1 if self._node_iterations[block_key] < self._max_iterations: return True, None else: return False, None def 
_intra_analysis(self): pass def _post_analysis(self): pass register_analysis(XRefsAnalysis, "XRefs")
37.316017
118
0.616821
from collections import defaultdict import pyvex from ..knowledge_plugins.xrefs import XRef, XRefType from ..engines.light import SimEngineLight, SimEngineLightVEXMixin from .propagator.vex_vars import VEXTmp from .propagator.values import Top from . import register_analysis from .analysis import Analysis from .forward_analysis import FunctionGraphVisitor, SingleNodeGraphVisitor, ForwardAnalysis class SimEngineXRefsVEX( SimEngineLightVEXMixin, SimEngineLight, ): def __init__(self, xref_manager, project=None, replacements=None): super().__init__() self.project = project self.xref_manager = xref_manager self.replacements = replacements if replacements is not None else { } def add_xref(self, xref_type, from_loc, to_loc): self.xref_manager.add_xref(XRef(ins_addr=from_loc.ins_addr, block_addr=from_loc.block_addr, stmt_idx=from_loc.stmt_idx, dst=to_loc, xref_type=xref_type) ) def _handle_WrTmp(self, stmt): self._expr(stmt.data) if type(stmt.data) is pyvex.IRExpr.Load: self._handle_data_offset_refs(stmt.tmp) def _handle_Put(self, stmt): # if there is a Load, get it executed self._expr(stmt.data) def _handle_Store(self, stmt): if isinstance(stmt.addr, pyvex.IRExpr.RdTmp): addr_tmp = VEXTmp(stmt.addr.tmp) blockloc = self._codeloc(block_only=True) if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top): addr = self.replacements[blockloc][addr_tmp] if isinstance(addr, int): self.add_xref(XRefType.Write, self._codeloc(), addr) elif isinstance(stmt.addr, pyvex.IRExpr.Const): addr = stmt.addr.con.value self.add_xref(XRefType.Write, self._codeloc(), addr) def _handle_StoreG(self, stmt): blockloc = self._codeloc(block_only=True) if type(stmt.addr) is pyvex.IRExpr.RdTmp: addr_tmp = VEXTmp(stmt.addr.tmp) if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top): addr = self.replacements[blockloc][addr_tmp] if isinstance(addr, int): self.add_xref(XRefType.Write, self._codeloc(), addr) 
def _handle_LoadG(self, stmt): # What are we reading? blockloc = self._codeloc(block_only=True) if type(stmt.addr) is pyvex.IRExpr.RdTmp: addr_tmp = VEXTmp(stmt.addr.tmp) if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top): addr = self.replacements[blockloc][addr_tmp] if isinstance(addr, int): self.add_xref(XRefType.Read, self._codeloc(), addr) self._handle_data_offset_refs(stmt.dst) def _handle_LLSC(self, stmt: pyvex.IRStmt.LLSC): blockloc = self._codeloc(block_only=True) if isinstance(stmt.addr, pyvex.IRExpr.RdTmp): addr_tmp = VEXTmp(stmt.addr.tmp) if addr_tmp in self.replacements[blockloc]: addr = self.replacements[blockloc][addr_tmp] if isinstance(addr, int): if stmt.storedata is None: # load-link xref_type = XRefType.Read else: xref_type = XRefType.Write self.add_xref(xref_type, self._codeloc(), addr) def _handle_data_offset_refs(self, data_tmp): # is this thing a pointer? # If so, produce the ida-style "Offset" XRefs. blockloc = self._codeloc(block_only=True) tmp = VEXTmp(data_tmp) if tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][tmp], Top): data = self.replacements[blockloc][tmp] # Is this thing not an integer? 
If so, get out of here # e.g., you can't find_object_containing on an SPOffset if not isinstance(data, int): return if data is not None and self.project.loader.find_object_containing(data) is not None: if data > 0x200: self.add_xref(XRefType.Offset, self._codeloc(), data) def _handle_Get(self, expr): return None def _handle_Load(self, expr): blockloc = self._codeloc(block_only=True) if type(expr.addr) is pyvex.IRExpr.RdTmp: addr_tmp = VEXTmp(expr.addr.tmp) if addr_tmp in self.replacements[blockloc] and not isinstance(self.replacements[blockloc][addr_tmp], Top): addr = self.replacements[blockloc][addr_tmp] if isinstance(addr, int): self.add_xref(XRefType.Read, self._codeloc(), addr) elif type(expr.addr) is pyvex.IRExpr.Const: addr = expr.addr.con.value self.add_xref(XRefType.Read, self._codeloc(), addr) def _handle_CCall(self, expr): return None def _handle_function(self, func): return None class XRefsAnalysis(ForwardAnalysis, Analysis): def __init__(self, func=None, func_graph=None, block=None, max_iterations=1, replacements=None): if func is not None: if block is not None: raise ValueError('You cannot specify both "func" and "block".') graph_visitor = FunctionGraphVisitor(func, func_graph) if replacements is None: prop = self.project.analyses.Propagator(func=func, func_graph=func_graph) replacements = prop.replacements elif block is not None: graph_visitor = SingleNodeGraphVisitor(block) if replacements is None: prop = self.project.analyses.Propagator(block=block) replacements = prop.replacements else: raise ValueError('Unsupported analysis target.') ForwardAnalysis.__init__(self, order_jobs=True, allow_merging=True, allow_widening=False, graph_visitor=graph_visitor) self._function = func self._max_iterations = max_iterations self._replacements = replacements self._node_iterations = defaultdict(int) self._engine_vex = SimEngineXRefsVEX(self.kb.xrefs, project=self.project, replacements=replacements) self._engine_ail = None self._analyze() def _pre_analysis(self): 
pass def _pre_job_handling(self, job): pass def _initial_abstract_state(self, node): return None def _merge_states(self, node, *states): return None def _run_on_node(self, node, state): block = self.project.factory.block(node.addr, node.size, opt_level=1, cross_insn_opt=False) if block.size == 0: return False, None block_key = node.addr engine = self._engine_vex engine.process(None, block=block, fail_fast=self._fail_fast) self._node_iterations[block_key] += 1 if self._node_iterations[block_key] < self._max_iterations: return True, None else: return False, None def _intra_analysis(self): pass def _post_analysis(self): pass register_analysis(XRefsAnalysis, "XRefs")
true
true
f71391843e2e54a205556f3567aac75b3bdccfae
639
py
Python
howfairis/mixins/RepositoryMixin.py
benvanwerkhoven/howfairis
e7128cee164154950a14b613f12c284a5fca872b
[ "Apache-2.0" ]
null
null
null
howfairis/mixins/RepositoryMixin.py
benvanwerkhoven/howfairis
e7128cee164154950a14b613f12c284a5fca872b
[ "Apache-2.0" ]
null
null
null
howfairis/mixins/RepositoryMixin.py
benvanwerkhoven/howfairis
e7128cee164154950a14b613f12c284a5fca872b
[ "Apache-2.0" ]
null
null
null
import requests class RepositoryMixin: def has_open_repository(self): url = "https://api.github.com/repos/{0}/{1}".format(self.owner, self.repo) try: response = requests.get(url) # If the response was successful, no Exception will be raised response.raise_for_status() except requests.HTTPError: self.print_state(check_name="has_open_repository", state=False) return False except Exception as err: print(f"Other error occurred: {err}") self.print_state(check_name="has_open_repository", state=True) return True
31.95
82
0.635368
import requests class RepositoryMixin: def has_open_repository(self): url = "https://api.github.com/repos/{0}/{1}".format(self.owner, self.repo) try: response = requests.get(url) response.raise_for_status() except requests.HTTPError: self.print_state(check_name="has_open_repository", state=False) return False except Exception as err: print(f"Other error occurred: {err}") self.print_state(check_name="has_open_repository", state=True) return True
true
true
f71392d14012ff483e50a88f5af4140a2f1e471c
29,691
py
Python
tb_rest_client/api_client.py
moravcik94/python_tb_rest_client
985361890cdf4ccce93d2b24905ad9003c8dfcaa
[ "Apache-2.0" ]
1
2021-07-19T10:09:04.000Z
2021-07-19T10:09:04.000Z
tb_rest_client/api_client.py
moravcik94/python_tb_rest_client
985361890cdf4ccce93d2b24905ad9003c8dfcaa
[ "Apache-2.0" ]
null
null
null
tb_rest_client/api_client.py
moravcik94/python_tb_rest_client
985361890cdf4ccce93d2b24905ad9003c8dfcaa
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 # Copyright 2020. ThingsBoard # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import absolute_import import datetime import json import mimetypes from multiprocessing.pool import ThreadPool import os import re import tempfile # python 2 and python 3 compatibility library import six from six.moves.urllib.parse import quote from tb_rest_client.configuration import Configuration import tb_rest_client.models.models_ce import tb_rest_client.models.models_pe from tb_rest_client import rest class ApiClient(object): """ :param configuration: .Configuration object for this client :param header_name: a header to pass when making calls to the API. :param header_value: a header value to pass when making calls to the API. :param cookie: a cookie to include in the header when making calls to the API """ PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types NATIVE_TYPES_MAPPING = { 'int': int, 'long': int if six.PY3 else long, # noqa: F821 'float': float, 'str': str, 'bool': bool, 'date': datetime.date, 'datetime': datetime.datetime, 'object': object, } def __init__(self, configuration=None, header_name=None, header_value=None, cookie=None): if configuration is None: configuration = Configuration() self.configuration = configuration # Use the pool property to lazily initialize the ThreadPool. 
self._pool = None self.rest_client = rest.RESTClientObject(configuration) self.default_headers = {} if header_name is not None: self.default_headers[header_name] = header_value self.cookie = cookie # Set default User-Agent. self.user_agent = 'Swagger-Codegen/1.0.0/python' def __del__(self): if self._pool is not None: self._pool.close() self._pool.join() @property def pool(self): if self._pool is None: self._pool = ThreadPool() return self._pool @property def user_agent(self): """User agent for this API client""" return self.default_headers['User-Agent'] @user_agent.setter def user_agent(self, value): self.default_headers['User-Agent'] = value def set_default_header(self, header_name, header_value): self.default_headers[header_name] = header_value def __call_api( self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None): config = self.configuration # header parameters header_params = header_params or {} header_params.update(self.default_headers) if self.cookie: header_params['Cookie'] = self.cookie if header_params: header_params = self.sanitize_for_serialization(header_params) header_params = dict(self.parameters_to_tuples(header_params, collection_formats)) # path parameters if path_params: path_params = self.sanitize_for_serialization(path_params) path_params = self.parameters_to_tuples(path_params, collection_formats) for k, v in path_params: if isinstance(v, dict) and v.get("entityType") is not None and v.get('id') is not None: v = v["id"] # specified safe chars, encode everything if v is not None: resource_path = resource_path.replace( '{%s}' % k, quote(str(v) if v is not None else "", safe=config.safe_chars_for_path_param) ).replace( '{?%s}' % k, quote(str("?"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param) ).replace( '{?%s,' % 
k, quote(str("?"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param) ).replace( ',%s' % k, quote(str("&"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param) ).replace( ',?%s' % k, quote(str("&"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param) ).replace( ',%s}' % k, quote("}" + str(v) if v is not None else "", safe=config.safe_chars_for_path_param) ) # resource_path = resource_path.replace( # '{%s}' % k, # quote(str(k+"="+v), safe=config.safe_chars_for_path_param) # ).replace( # '{?%s}' % k, # quote(str("?"+k+"="+v), safe=config.safe_chars_for_path_param) # ).replace( # '{?%s,' % k, # quote(str("?"+k+"="+v) + "{", safe=config.safe_chars_for_path_param) # ).replace( # ',%s,' % k, # quote("}" + str("&"+k+"="+v) + "{", safe=config.safe_chars_for_path_param) # ).replace( # ',?%s,' % k, # quote("}" + str("&"+k+"="+v) + "{", safe=config.safe_chars_for_path_param) # ).replace( # ',%s}' % k, # quote("}" + str(v), safe=config.safe_chars_for_path_param) # ) # query parameters if query_params: query_params = self.sanitize_for_serialization(query_params) query_params = [param for param in query_params if (isinstance(param, tuple) and len(param) > 1 and param[1] is not None) or not isinstance(param, tuple)] query_params = self.parameters_to_tuples(query_params, collection_formats) # post parameters if post_params or files: post_params = self.prepare_post_parameters(post_params, files) post_params = [param for param in post_params if (isinstance(param, tuple) and len(param) > 1 and param[1] is not None) or not isinstance(param, tuple)] post_params = self.sanitize_for_serialization(post_params) post_params = self.parameters_to_tuples(post_params, collection_formats) # auth setting self.update_params_for_auth(header_params, query_params, auth_settings) # body if body: body = self.sanitize_for_serialization(body) # request url clean_path = self.sanitize_path(resource_path) url = self.configuration.host + clean_path 
# perform request and return response response_data = self.request( method, url, query_params=query_params, headers=header_params, post_params=post_params, body=body, _preload_content=_preload_content, _request_timeout=_request_timeout) self.last_response = response_data return_data = response_data if _preload_content: # deserialize response data if response_type: return_data = self.deserialize(response_data, response_type) else: return_data = None if _return_http_data_only: return (return_data) else: return (return_data, response_data.status, response_data.getheaders()) def sanitize_path(self, url): pattern = r'(\{[\?a-zA-Z,]{1,}\})' matching = re.search(pattern, url) if matching is not None and len(matching.groups()) > 0: for match in matching.groups(): clean_url = url.replace(match, "") else: clean_url = url return clean_url def sanitize_for_serialization(self, obj): """Builds a JSON POST object. If obj is None, return None. If obj is str, int, long, float, bool, return directly. If obj is datetime.datetime, datetime.date convert to string in iso8601 format. If obj is list, sanitize each element in the list. If obj is dict, return the dict. If obj is swagger model, return the properties dict. :param obj: The data to serialize. :return: The serialized form of data. """ if obj is None: return None elif isinstance(obj, self.PRIMITIVE_TYPES): return obj elif isinstance(obj, list): return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] elif isinstance(obj, tuple): return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() if isinstance(obj, dict): obj_dict = obj else: # Convert model obj to dict except # attributes `swagger_types`, `attribute_map` # and attributes which value is not None. # Convert attribute name to json key in # model definition for request. 
obj_dict = {obj.attribute_map[attr]: getattr(obj, attr) for attr, _ in six.iteritems(obj.swagger_types) if getattr(obj, attr) is not None} return {key: self.sanitize_for_serialization(val) for key, val in six.iteritems(obj_dict)} def deserialize(self, response, response_type): """Deserializes response into an object. :param response: RESTResponse object to be deserialized. :param response_type: class literal for deserialized object, or string of class name. :return: deserialized object. """ # handle file downloading # save response body into a tmp file and return the instance if response_type == "file": return self.__deserialize_file(response) # fetch data from response object try: data = json.loads(response.data) except ValueError: data = response.data return self.__deserialize(data, response_type) def __deserialize(self, data, klass): """Deserializes dict, list, str into an object.. :param data: dict, list or str. :param klass: class literal, or string of class name. :return: object. """ if data is None: return None if klass == "DeferredResultResponseEntity": return self.__deserialize(data, type(data)) # # elif type(klass) == str: # # convert str to class elif klass in self.NATIVE_TYPES_MAPPING: klass = self.NATIVE_TYPES_MAPPING[klass] elif klass == list: return_data = [self.__deserialize(sub_data, type(sub_data)) for sub_data in data] return return_data elif klass == dict: return_data = {k: self.__deserialize(v, type(v)) for k, v in six.iteritems(data)} return return_data elif type(klass) == str: if klass.startswith('list['): sub_kls = re.match(r'list\[(.*)\]', klass).group(1) return [self.__deserialize(sub_data, sub_kls) for sub_data in data] if klass.startswith('dict('): sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2) return {k: self.__deserialize(v, sub_kls) for k, v in six.iteritems(data)} # convert str to class if klass in self.NATIVE_TYPES_MAPPING: klass = self.NATIVE_TYPES_MAPPING[klass] try: found_class = 
getattr(tb_rest_client.models.models_pe, klass) # if sorted(list(found_class.attribute_map.values())) == sorted(list(data.keys())): if all(attr in list(found_class.attribute_map.values()) for attr in list(data.keys())): klass = found_class else: found_class = getattr(tb_rest_client.models.models_ce, klass) # if sorted(list(found_class.attribute_map.values())) == sorted(list(data.keys())): if all(attr in list(found_class.attribute_map.values()) for attr in list(data.keys())): klass = found_class except AttributeError: found_class = getattr(tb_rest_client.models.models_ce, klass) if all(attr in list(found_class.attribute_map.values()) for attr in list(data.keys())): # if sorted(list(found_class.attribute_map.values())) == sorted(list(data.keys())): klass = found_class # else: # return self.__deserialize(data, type(data)) return self.__deserialize_data(data, klass) def __deserialize_data(self, data, klass): try: if klass in self.PRIMITIVE_TYPES: return self.__deserialize_primitive(data, klass) elif klass == object: return self.__deserialize_object(data) elif klass == datetime.date: return self.__deserialize_date(data) elif klass == datetime.datetime: return self.__deserialize_datatime(data) else: return self.__deserialize_model(data, klass) except Exception as e: return e def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, async_req=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None): """Makes the HTTP request (synchronous) and returns deserialized data. To make an async request, set the async_req parameter. :param resource_path: Path to method endpoint. :param method: Method to call. :param path_params: Path parameters in the url. :param query_params: Query parameters in the url. :param header_params: Header parameters to be placed in the request header. :param body: Request body. 
:param post_params dict: Request post form parameters, for `application/x-www-form-urlencoded`, `multipart/form-data`. :param auth_settings list: Auth Settings names for the request. :param response: Response data type. :param files dict: key -> filename, value -> filepath, for `multipart/form-data`. :param async_req bool: execute request asynchronously :param _return_http_data_only: response data without head status code and headers :param collection_formats: dict of collection formats for path, query, header, and post parameters. :param _preload_content: if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. :param _request_timeout: timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. :return: If async_req parameter is True, the request will be called asynchronously. The method will return the request thread. If parameter async_req is False or missing, then the method will return the response directly. 
""" if not async_req: return self.__call_api(resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout) else: thread = self.pool.apply_async(self.__call_api, (resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout)) return thread def request(self, method, url, query_params=None, headers=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): """Makes the HTTP request using RESTClient.""" if method == "GET": return self.rest_client.GET(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif method == "HEAD": return self.rest_client.HEAD(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif method == "OPTIONS": return self.rest_client.OPTIONS(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "POST": return self.rest_client.POST(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "PUT": return self.rest_client.PUT(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "PATCH": return self.rest_client.PATCH(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "DELETE": return self.rest_client.DELETE(url, query_params=query_params, headers=headers, 
_preload_content=_preload_content, _request_timeout=_request_timeout, body=body) else: raise ValueError( "http method must be `GET`, `HEAD`, `OPTIONS`," " `POST`, `PATCH`, `PUT` or `DELETE`." ) def parameters_to_tuples(self, params, collection_formats): """Get parameters as list of tuples, formatting collections. :param params: Parameters as dict or list of two-tuples :param dict collection_formats: Parameter collection formats :return: Parameters as list of tuples, collections formatted """ new_params = [] if collection_formats is None: collection_formats = {} for k, v in six.iteritems(params) if isinstance(params, dict) else params: # noqa: E501 if k in collection_formats: collection_format = collection_formats[k] if collection_format == 'multi': new_params.extend((k, value) for value in v) else: if collection_format == 'ssv': delimiter = ' ' elif collection_format == 'tsv': delimiter = '\t' elif collection_format == 'pipes': delimiter = '|' else: # csv is the default delimiter = ',' new_params.append( (k, delimiter.join(str(value) for value in v))) else: new_params.append((k, v)) return new_params def prepare_post_parameters(self, post_params=None, files=None): """Builds form parameters. :param post_params: Normal form parameters. :param files: File parameters. :return: Form parameters with files. """ params = [] if post_params: params = post_params if files: for k, v in six.iteritems(files): if not v: continue file_names = v if type(v) is list else [v] for n in file_names: with open(n, 'rb') as f: filename = os.path.basename(f.name) filedata = f.read() mimetype = (mimetypes.guess_type(filename)[0] or 'application/octet-stream') params.append( tuple([k, tuple([filename, filedata, mimetype])])) return params def select_header_accept(self, accepts): """Returns `Accept` based on an array of accepts provided. :param accepts: List of headers. :return: Accept (e.g. application/json). 
""" if not accepts: return accepts = [x.lower() for x in accepts] if 'application/json' in accepts: return 'application/json' else: return ', '.join(accepts) def select_header_content_type(self, content_types): """Returns `Content-Type` based on an array of content_types provided. :param content_types: List of content-types. :return: Content-Type (e.g. application/json). """ if not content_types: return 'application/json' content_types = [x.lower() for x in content_types] if 'application/json' in content_types or '*/*' in content_types: return 'application/json' else: return content_types[0] def update_params_for_auth(self, headers, querys, auth_settings): """Updates header and query params based on authentication setting. :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_settings: Authentication setting identifiers list. """ if not auth_settings: return for auth in auth_settings: auth_setting = self.configuration.auth_settings().get(auth) if auth_setting: if not auth_setting['value']: continue elif auth_setting['in'] == 'header': headers[auth_setting['key']] = auth_setting['value'] elif auth_setting['in'] == 'query': querys.append((auth_setting['key'], auth_setting['value'])) else: raise ValueError( 'Authentication token must be in `query` or `header`' ) def __deserialize_file(self, response): """Deserializes body to file Saves response body into a file in a temporary folder, using the filename from the `Content-Disposition` header if provided. :param response: RESTResponse. :return: file path. 
""" fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path) os.close(fd) os.remove(path) content_disposition = response.getheader("Content-Disposition") if content_disposition: filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).group(1) path = os.path.join(os.path.dirname(path), filename) with open(path, "wb") as f: f.write(response.data) return path def __deserialize_primitive(self, data, klass): """Deserializes string to primitive type. :param data: str. :param klass: class literal. :return: int, long, float, str, bool. """ try: return klass(data) except UnicodeEncodeError: return six.text_type(data) except TypeError: return data def __deserialize_object(self, value): """Return a original value. :return: object. """ return value def __deserialize_date(self, string): """Deserializes string to date. :param string: str. :return: date. """ try: from dateutil.parser import parse return parse(string).date() except ImportError: return string except ValueError: raise rest.ApiException( status=0, reason="Failed to parse `{0}` as date object".format(string) ) def __deserialize_datatime(self, string): """Deserializes string to datetime. The string should be in iso8601 datetime format. :param string: str. :return: datetime. """ try: from dateutil.parser import parse return parse(string) except ImportError: return string except ValueError: raise rest.ApiException( status=0, reason=( "Failed to parse `{0}` as datetime object" .format(string) ) ) def __hasattr(self, object, name): return name in object.__class__.__dict__ def __deserialize_model(self, data, klass): """Deserializes list or dict to model. :param data: dict, list. :param klass: class literal. :return: model object. 
""" if (not klass.swagger_types and not self.__hasattr(klass, 'get_real_child_model')): return data kwargs = {} if klass.swagger_types is not None: for attr, attr_type in six.iteritems(klass.swagger_types): if (data is not None and klass.attribute_map[attr] in data and isinstance(data, (list, dict))): value = data[klass.attribute_map[attr]] kwargs[attr] = self.__deserialize(value, attr_type) instance = klass(**kwargs) if (isinstance(instance, dict) and klass.swagger_types is not None and isinstance(data, dict)): for key, value in data.items(): if key not in klass.swagger_types: instance[key] = value if self.__hasattr(instance, 'get_real_child_model'): klass_name = instance.get_real_child_model(data) if klass_name: instance = self.__deserialize(data, klass_name) return instance
41.180305
166
0.541814
from __future__ import absolute_import import datetime import json import mimetypes from multiprocessing.pool import ThreadPool import os import re import tempfile import six from six.moves.urllib.parse import quote from tb_rest_client.configuration import Configuration import tb_rest_client.models.models_ce import tb_rest_client.models.models_pe from tb_rest_client import rest class ApiClient(object): PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types NATIVE_TYPES_MAPPING = { 'int': int, 'long': int if six.PY3 else long, 'float': float, 'str': str, 'bool': bool, 'date': datetime.date, 'datetime': datetime.datetime, 'object': object, } def __init__(self, configuration=None, header_name=None, header_value=None, cookie=None): if configuration is None: configuration = Configuration() self.configuration = configuration self._pool = None self.rest_client = rest.RESTClientObject(configuration) self.default_headers = {} if header_name is not None: self.default_headers[header_name] = header_value self.cookie = cookie self.user_agent = 'Swagger-Codegen/1.0.0/python' def __del__(self): if self._pool is not None: self._pool.close() self._pool.join() @property def pool(self): if self._pool is None: self._pool = ThreadPool() return self._pool @property def user_agent(self): return self.default_headers['User-Agent'] @user_agent.setter def user_agent(self, value): self.default_headers['User-Agent'] = value def set_default_header(self, header_name, header_value): self.default_headers[header_name] = header_value def __call_api( self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None): config = self.configuration header_params = header_params or {} header_params.update(self.default_headers) if self.cookie: header_params['Cookie'] = self.cookie if 
header_params: header_params = self.sanitize_for_serialization(header_params) header_params = dict(self.parameters_to_tuples(header_params, collection_formats)) if path_params: path_params = self.sanitize_for_serialization(path_params) path_params = self.parameters_to_tuples(path_params, collection_formats) for k, v in path_params: if isinstance(v, dict) and v.get("entityType") is not None and v.get('id') is not None: v = v["id"] if v is not None: resource_path = resource_path.replace( '{%s}' % k, quote(str(v) if v is not None else "", safe=config.safe_chars_for_path_param) ).replace( '{?%s}' % k, quote(str("?"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param) ).replace( '{?%s,' % k, quote(str("?"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param) ).replace( ',%s' % k, quote(str("&"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param) ).replace( ',?%s' % k, quote(str("&"+k+"="+v) if v is not None else "", safe=config.safe_chars_for_path_param) ).replace( ',%s}' % k, quote("}" + str(v) if v is not None else "", safe=config.safe_chars_for_path_param) ) if query_params: query_params = self.sanitize_for_serialization(query_params) query_params = [param for param in query_params if (isinstance(param, tuple) and len(param) > 1 and param[1] is not None) or not isinstance(param, tuple)] query_params = self.parameters_to_tuples(query_params, collection_formats) if post_params or files: post_params = self.prepare_post_parameters(post_params, files) post_params = [param for param in post_params if (isinstance(param, tuple) and len(param) > 1 and param[1] is not None) or not isinstance(param, tuple)] post_params = self.sanitize_for_serialization(post_params) post_params = self.parameters_to_tuples(post_params, collection_formats) self.update_params_for_auth(header_params, query_params, auth_settings) if body: body = self.sanitize_for_serialization(body) clean_path = self.sanitize_path(resource_path) url = 
self.configuration.host + clean_path response_data = self.request( method, url, query_params=query_params, headers=header_params, post_params=post_params, body=body, _preload_content=_preload_content, _request_timeout=_request_timeout) self.last_response = response_data return_data = response_data if _preload_content: if response_type: return_data = self.deserialize(response_data, response_type) else: return_data = None if _return_http_data_only: return (return_data) else: return (return_data, response_data.status, response_data.getheaders()) def sanitize_path(self, url): pattern = r'(\{[\?a-zA-Z,]{1,}\})' matching = re.search(pattern, url) if matching is not None and len(matching.groups()) > 0: for match in matching.groups(): clean_url = url.replace(match, "") else: clean_url = url return clean_url def sanitize_for_serialization(self, obj): if obj is None: return None elif isinstance(obj, self.PRIMITIVE_TYPES): return obj elif isinstance(obj, list): return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] elif isinstance(obj, tuple): return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() if isinstance(obj, dict): obj_dict = obj else: obj_dict = {obj.attribute_map[attr]: getattr(obj, attr) for attr, _ in six.iteritems(obj.swagger_types) if getattr(obj, attr) is not None} return {key: self.sanitize_for_serialization(val) for key, val in six.iteritems(obj_dict)} def deserialize(self, response, response_type): if response_type == "file": return self.__deserialize_file(response) try: data = json.loads(response.data) except ValueError: data = response.data return self.__deserialize(data, response_type) def __deserialize(self, data, klass): if data is None: return None if klass == "DeferredResultResponseEntity": return self.__deserialize(data, type(data)) self.NATIVE_TYPES_MAPPING: klass = self.NATIVE_TYPES_MAPPING[klass] elif klass == list: return_data = 
[self.__deserialize(sub_data, type(sub_data)) for sub_data in data] return return_data elif klass == dict: return_data = {k: self.__deserialize(v, type(v)) for k, v in six.iteritems(data)} return return_data elif type(klass) == str: if klass.startswith('list['): sub_kls = re.match(r'list\[(.*)\]', klass).group(1) return [self.__deserialize(sub_data, sub_kls) for sub_data in data] if klass.startswith('dict('): sub_kls = re.match(r'dict\(([^,]*), (.*)\)', klass).group(2) return {k: self.__deserialize(v, sub_kls) for k, v in six.iteritems(data)} if klass in self.NATIVE_TYPES_MAPPING: klass = self.NATIVE_TYPES_MAPPING[klass] try: found_class = getattr(tb_rest_client.models.models_pe, klass) if all(attr in list(found_class.attribute_map.values()) for attr in list(data.keys())): klass = found_class else: found_class = getattr(tb_rest_client.models.models_ce, klass) if all(attr in list(found_class.attribute_map.values()) for attr in list(data.keys())): klass = found_class except AttributeError: found_class = getattr(tb_rest_client.models.models_ce, klass) if all(attr in list(found_class.attribute_map.values()) for attr in list(data.keys())): klass = found_class return self.__deserialize_data(data, klass) def __deserialize_data(self, data, klass): try: if klass in self.PRIMITIVE_TYPES: return self.__deserialize_primitive(data, klass) elif klass == object: return self.__deserialize_object(data) elif klass == datetime.date: return self.__deserialize_date(data) elif klass == datetime.datetime: return self.__deserialize_datatime(data) else: return self.__deserialize_model(data, klass) except Exception as e: return e def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None, post_params=None, files=None, response_type=None, auth_settings=None, async_req=None, _return_http_data_only=None, collection_formats=None, _preload_content=True, _request_timeout=None): if not async_req: return self.__call_api(resource_path, method, 
path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout) else: thread = self.pool.apply_async(self.__call_api, (resource_path, method, path_params, query_params, header_params, body, post_params, files, response_type, auth_settings, _return_http_data_only, collection_formats, _preload_content, _request_timeout)) return thread def request(self, method, url, query_params=None, headers=None, post_params=None, body=None, _preload_content=True, _request_timeout=None): if method == "GET": return self.rest_client.GET(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif method == "HEAD": return self.rest_client.HEAD(url, query_params=query_params, _preload_content=_preload_content, _request_timeout=_request_timeout, headers=headers) elif method == "OPTIONS": return self.rest_client.OPTIONS(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "POST": return self.rest_client.POST(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "PUT": return self.rest_client.PUT(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "PATCH": return self.rest_client.PATCH(url, query_params=query_params, headers=headers, post_params=post_params, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) elif method == "DELETE": return self.rest_client.DELETE(url, query_params=query_params, headers=headers, _preload_content=_preload_content, _request_timeout=_request_timeout, body=body) else: raise ValueError( "http method must be `GET`, 
`HEAD`, `OPTIONS`," " `POST`, `PATCH`, `PUT` or `DELETE`." ) def parameters_to_tuples(self, params, collection_formats): new_params = [] if collection_formats is None: collection_formats = {} for k, v in six.iteritems(params) if isinstance(params, dict) else params: if k in collection_formats: collection_format = collection_formats[k] if collection_format == 'multi': new_params.extend((k, value) for value in v) else: if collection_format == 'ssv': delimiter = ' ' elif collection_format == 'tsv': delimiter = '\t' elif collection_format == 'pipes': delimiter = '|' else: delimiter = ',' new_params.append( (k, delimiter.join(str(value) for value in v))) else: new_params.append((k, v)) return new_params def prepare_post_parameters(self, post_params=None, files=None): params = [] if post_params: params = post_params if files: for k, v in six.iteritems(files): if not v: continue file_names = v if type(v) is list else [v] for n in file_names: with open(n, 'rb') as f: filename = os.path.basename(f.name) filedata = f.read() mimetype = (mimetypes.guess_type(filename)[0] or 'application/octet-stream') params.append( tuple([k, tuple([filename, filedata, mimetype])])) return params def select_header_accept(self, accepts): if not accepts: return accepts = [x.lower() for x in accepts] if 'application/json' in accepts: return 'application/json' else: return ', '.join(accepts) def select_header_content_type(self, content_types): if not content_types: return 'application/json' content_types = [x.lower() for x in content_types] if 'application/json' in content_types or '*/*' in content_types: return 'application/json' else: return content_types[0] def update_params_for_auth(self, headers, querys, auth_settings): if not auth_settings: return for auth in auth_settings: auth_setting = self.configuration.auth_settings().get(auth) if auth_setting: if not auth_setting['value']: continue elif auth_setting['in'] == 'header': headers[auth_setting['key']] = auth_setting['value'] elif 
auth_setting['in'] == 'query': querys.append((auth_setting['key'], auth_setting['value'])) else: raise ValueError( 'Authentication token must be in `query` or `header`' ) def __deserialize_file(self, response): fd, path = tempfile.mkstemp(dir=self.configuration.temp_folder_path) os.close(fd) os.remove(path) content_disposition = response.getheader("Content-Disposition") if content_disposition: filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?', content_disposition).group(1) path = os.path.join(os.path.dirname(path), filename) with open(path, "wb") as f: f.write(response.data) return path def __deserialize_primitive(self, data, klass): try: return klass(data) except UnicodeEncodeError: return six.text_type(data) except TypeError: return data def __deserialize_object(self, value): return value def __deserialize_date(self, string): try: from dateutil.parser import parse return parse(string).date() except ImportError: return string except ValueError: raise rest.ApiException( status=0, reason="Failed to parse `{0}` as date object".format(string) ) def __deserialize_datatime(self, string): try: from dateutil.parser import parse return parse(string) except ImportError: return string except ValueError: raise rest.ApiException( status=0, reason=( "Failed to parse `{0}` as datetime object" .format(string) ) ) def __hasattr(self, object, name): return name in object.__class__.__dict__ def __deserialize_model(self, data, klass): if (not klass.swagger_types and not self.__hasattr(klass, 'get_real_child_model')): return data kwargs = {} if klass.swagger_types is not None: for attr, attr_type in six.iteritems(klass.swagger_types): if (data is not None and klass.attribute_map[attr] in data and isinstance(data, (list, dict))): value = data[klass.attribute_map[attr]] kwargs[attr] = self.__deserialize(value, attr_type) instance = klass(**kwargs) if (isinstance(instance, dict) and klass.swagger_types is not None and isinstance(data, dict)): for key, value in data.items(): if key 
not in klass.swagger_types: instance[key] = value if self.__hasattr(instance, 'get_real_child_model'): klass_name = instance.get_real_child_model(data) if klass_name: instance = self.__deserialize(data, klass_name) return instance
true
true
f71392e2fbf017a0e527288ef371bbfe610ae9e9
2,749
py
Python
python/craftassist/dialogue_objects/put_memory_handler.py
kepolol/craftassist
f60a7edd0b4ea72b774cca45ba468d2e275445c2
[ "MIT" ]
null
null
null
python/craftassist/dialogue_objects/put_memory_handler.py
kepolol/craftassist
f60a7edd0b4ea72b774cca45ba468d2e275445c2
[ "MIT" ]
null
null
null
python/craftassist/dialogue_objects/put_memory_handler.py
kepolol/craftassist
f60a7edd0b4ea72b774cca45ba468d2e275445c2
[ "MIT" ]
1
2020-07-16T02:16:24.000Z
2020-07-16T02:16:24.000Z
""" Copyright (c) Facebook, Inc. and its affiliates. """ import logging from typing import Dict, Tuple, Any, Optional from .dialogue_object import DialogueObject from memory_nodes import ObjectNode, RewardNode from .interpreter_helper import interpret_reference_object, ErrorWithResponse class PutMemoryHandler(DialogueObject): def __init__(self, speaker_name: str, action_dict: Dict, **kwargs): super().__init__(**kwargs) self.provisional: Dict = {} self.speaker_name = speaker_name self.action_dict = action_dict def step(self) -> Tuple[Optional[str], Any]: r = self._step() self.finished = True return r def _step(self) -> Tuple[Optional[str], Any]: assert self.action_dict["dialogue_type"] == "PUT_MEMORY" memory_type = self.action_dict["upsert"]["memory_data"]["memory_type"] if memory_type == "REWARD": return self.handle_reward() elif memory_type == "TRIPLE": return self.handle_triple() else: raise NotImplementedError def handle_reward(self) -> Tuple[Optional[str], Any]: reward_value = self.action_dict["upsert"]["memory_data"]["reward_value"] assert reward_value in ("POSITIVE", "NEGATIVE"), self.action_dict RewardNode.create(self.memory, reward_value) if reward_value == "POSITIVE": return "Thank you!", None else: return "I'll try to do better in the future.", None def handle_triple(self) -> Tuple[Optional[str], Any]: ref_obj_d = self.action_dict["filters"]["reference_object"] r = interpret_reference_object(self, self.speaker_name, ref_obj_d) if len(r) == 0: raise ErrorWithResponse("I don't know what you're referring to") mem = r[0] name = "it" triples = self.memory.get_triples(subj=mem.memid, pred="has_tag") if len(triples) > 0: name = triples[0][2].strip("_") memory_data = self.action_dict["upsert"]["memory_data"] schematic_memid = ( self.memory.convert_block_object_to_schematic(mem.memid).memid if isinstance(mem, ObjectNode) else None ) for k, v in memory_data.items(): if k.startswith("has_"): logging.info("Tagging {} {} {}".format(mem.memid, k, v)) 
self.memory.add_triple(mem.memid, k, v) if schematic_memid: self.memory.add_triple(schematic_memid, k, v) point_at_target = mem.get_point_at_target() self.agent.send_chat("OK I'm tagging this %r as %r " % (name, v)) self.agent.point_at(list(point_at_target)) return "Done!", None
37.148649
80
0.623863
import logging from typing import Dict, Tuple, Any, Optional from .dialogue_object import DialogueObject from memory_nodes import ObjectNode, RewardNode from .interpreter_helper import interpret_reference_object, ErrorWithResponse class PutMemoryHandler(DialogueObject): def __init__(self, speaker_name: str, action_dict: Dict, **kwargs): super().__init__(**kwargs) self.provisional: Dict = {} self.speaker_name = speaker_name self.action_dict = action_dict def step(self) -> Tuple[Optional[str], Any]: r = self._step() self.finished = True return r def _step(self) -> Tuple[Optional[str], Any]: assert self.action_dict["dialogue_type"] == "PUT_MEMORY" memory_type = self.action_dict["upsert"]["memory_data"]["memory_type"] if memory_type == "REWARD": return self.handle_reward() elif memory_type == "TRIPLE": return self.handle_triple() else: raise NotImplementedError def handle_reward(self) -> Tuple[Optional[str], Any]: reward_value = self.action_dict["upsert"]["memory_data"]["reward_value"] assert reward_value in ("POSITIVE", "NEGATIVE"), self.action_dict RewardNode.create(self.memory, reward_value) if reward_value == "POSITIVE": return "Thank you!", None else: return "I'll try to do better in the future.", None def handle_triple(self) -> Tuple[Optional[str], Any]: ref_obj_d = self.action_dict["filters"]["reference_object"] r = interpret_reference_object(self, self.speaker_name, ref_obj_d) if len(r) == 0: raise ErrorWithResponse("I don't know what you're referring to") mem = r[0] name = "it" triples = self.memory.get_triples(subj=mem.memid, pred="has_tag") if len(triples) > 0: name = triples[0][2].strip("_") memory_data = self.action_dict["upsert"]["memory_data"] schematic_memid = ( self.memory.convert_block_object_to_schematic(mem.memid).memid if isinstance(mem, ObjectNode) else None ) for k, v in memory_data.items(): if k.startswith("has_"): logging.info("Tagging {} {} {}".format(mem.memid, k, v)) self.memory.add_triple(mem.memid, k, v) if schematic_memid: 
self.memory.add_triple(schematic_memid, k, v) point_at_target = mem.get_point_at_target() self.agent.send_chat("OK I'm tagging this %r as %r " % (name, v)) self.agent.point_at(list(point_at_target)) return "Done!", None
true
true
f71393eb95ae2bfda6e5bcd40e868594b6708b0c
30,920
py
Python
test/functional/test_framework/p2p.py
ngi-nix/namecoin-core-1
c1625db14b15bf93a362916b268b0abf6fabf7b1
[ "MIT" ]
2
2021-06-04T10:39:59.000Z
2021-06-16T00:03:57.000Z
test/functional/test_framework/p2p.py
Penny-Admixture/namecoin-core
c750aba537f6d10a3a565814f53f4999577142ab
[ "MIT" ]
1
2021-09-06T21:01:56.000Z
2021-09-15T21:05:19.000Z
test/functional/test_framework/p2p.py
Penny-Admixture/namecoin-core
c750aba537f6d10a3a565814f53f4999577142ab
[ "MIT" ]
1
2021-08-10T14:03:36.000Z
2021-08-10T14:03:36.000Z
#!/usr/bin/env python3 # Copyright (c) 2010 ArtForz -- public domain half-a-node # Copyright (c) 2012 Jeff Garzik # Copyright (c) 2010-2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test objects for interacting with a bitcoind node over the p2p protocol. The P2PInterface objects interact with the bitcoind nodes under test using the node's p2p interface. They can be used to send messages to the node, and callbacks can be registered that execute when messages are received from the node. Messages are sent to/received from the node on an asyncio event loop. State held inside the objects must be guarded by the p2p_lock to avoid data races between the main testing thread and the event loop. P2PConnection: A low-level connection object to a node's P2P interface P2PInterface: A high-level interface object for communicating to a node over P2P P2PDataStore: A p2p interface class that keeps a store of transactions and blocks and can respond correctly to getdata and getheaders messages P2PTxInvStore: A p2p interface class that inherits from P2PDataStore, and keeps a count of how many times each txid has been announced.""" import asyncio from collections import defaultdict from io import BytesIO import logging import struct import sys import threading from test_framework.messages import ( CBlockHeader, MAX_HEADERS_RESULTS, msg_addr, msg_addrv2, msg_block, MSG_BLOCK, msg_blocktxn, msg_cfcheckpt, msg_cfheaders, msg_cfilter, msg_cmpctblock, msg_feefilter, msg_filteradd, msg_filterclear, msg_filterload, msg_getaddr, msg_getblocks, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_mempool, msg_merkleblock, msg_notfound, msg_ping, msg_pong, msg_sendaddrv2, msg_sendcmpct, msg_sendheaders, msg_tx, MSG_TX, MSG_TYPE_MASK, msg_verack, msg_version, MSG_WTX, msg_wtxidrelay, NODE_NETWORK, NODE_WITNESS, sha256, ) from test_framework.util import ( 
MAX_NODES, p2p_port, wait_until_helper, ) logger = logging.getLogger("TestFramework.p2p") # The minimum P2P version that this test framework supports MIN_P2P_VERSION_SUPPORTED = 60001 # The P2P version that this test framework implements and sends in its `version` message # Version 110016 supports wtxid relay P2P_VERSION = 110016 # The services that this test framework offers in its `version` message P2P_SERVICES = NODE_NETWORK | NODE_WITNESS # The P2P user agent string that this test framework sends in its `version` message P2P_SUBVERSION = "/python-p2p-tester:0.0.3/" # Value for relay that this test framework sends in its `version` message P2P_VERSION_RELAY = 1 MESSAGEMAP = { b"addr": msg_addr, b"addrv2": msg_addrv2, b"block": msg_block, b"blocktxn": msg_blocktxn, b"cfcheckpt": msg_cfcheckpt, b"cfheaders": msg_cfheaders, b"cfilter": msg_cfilter, b"cmpctblock": msg_cmpctblock, b"feefilter": msg_feefilter, b"filteradd": msg_filteradd, b"filterclear": msg_filterclear, b"filterload": msg_filterload, b"getaddr": msg_getaddr, b"getblocks": msg_getblocks, b"getblocktxn": msg_getblocktxn, b"getdata": msg_getdata, b"getheaders": msg_getheaders, b"headers": msg_headers, b"inv": msg_inv, b"mempool": msg_mempool, b"merkleblock": msg_merkleblock, b"notfound": msg_notfound, b"ping": msg_ping, b"pong": msg_pong, b"sendaddrv2": msg_sendaddrv2, b"sendcmpct": msg_sendcmpct, b"sendheaders": msg_sendheaders, b"tx": msg_tx, b"verack": msg_verack, b"version": msg_version, b"wtxidrelay": msg_wtxidrelay, } MAGIC_BYTES = { "mainnet": b"\xf9\xbe\xb4\xd9", # mainnet "testnet3": b"\x0b\x11\x09\x07", # testnet3 "regtest": b"\xfa\xbf\xb5\xda", # regtest "signet": b"\x0a\x03\xcf\x40", # signet } class P2PConnection(asyncio.Protocol): """A low-level connection object to a node's P2P interface. 
This class is responsible for: - opening and closing the TCP connection to the node - reading bytes from and writing bytes to the socket - deserializing and serializing the P2P message header - logging messages as they are sent and received This class contains no logic for handing the P2P message payloads. It must be sub-classed and the on_message() callback overridden.""" def __init__(self): # The underlying transport of the connection. # Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe self._transport = None @property def is_connected(self): return self._transport is not None def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor): assert not self.is_connected self.timeout_factor = timeout_factor self.dstaddr = dstaddr self.dstport = dstport # The initial message to send after the connection was made: self.on_connection_send_msg = None self.recvbuf = b"" self.magic_bytes = MAGIC_BYTES[net] def peer_connect(self, dstaddr, dstport, *, net, timeout_factor): self.peer_connect_helper(dstaddr, dstport, net, timeout_factor) loop = NetworkThread.network_event_loop logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport)) coroutine = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport) return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine) def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor): self.peer_connect_helper('0', 0, net, timeout_factor) logger.debug('Listening for Bitcoin Node with id: {}'.format(connect_id)) return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id) def peer_disconnect(self): # Connection could have already been closed by other end. 
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort()) # Connection and disconnection methods def connection_made(self, transport): """asyncio callback when a connection is opened.""" assert not self._transport logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport)) self._transport = transport if self.on_connection_send_msg: self.send_message(self.on_connection_send_msg) self.on_connection_send_msg = None # Never used again self.on_open() def connection_lost(self, exc): """asyncio callback when a connection is closed.""" if exc: logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc)) else: logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport)) self._transport = None self.recvbuf = b"" self.on_close() # Socket read methods def data_received(self, t): """asyncio callback when data is read from the socket.""" if len(t) > 0: self.recvbuf += t self._on_data() def _on_data(self): """Try to read P2P messages from the recv buffer. This method reads data from the buffer in a loop. 
It deserializes, parses and verifies the P2P header, then passes the P2P payload to the on_message callback for processing.""" try: while True: if len(self.recvbuf) < 4: return if self.recvbuf[:4] != self.magic_bytes: raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf))) if len(self.recvbuf) < 4 + 12 + 4 + 4: return msgtype = self.recvbuf[4:4+12].split(b"\x00", 1)[0] msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] checksum = self.recvbuf[4+12+4:4+12+4+4] if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen: return msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen] th = sha256(msg) h = sha256(th) if checksum != h[:4]: raise ValueError("got bad checksum " + repr(self.recvbuf)) self.recvbuf = self.recvbuf[4+12+4+4+msglen:] if msgtype not in MESSAGEMAP: raise ValueError("Received unknown msgtype from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, msgtype, repr(msg))) f = BytesIO(msg) t = MESSAGEMAP[msgtype]() t.deserialize(f) self._log_message("receive", t) self.on_message(t) except Exception as e: logger.exception('Error reading message:', repr(e)) raise def on_message(self, message): """Callback for processing a P2P payload. Must be overridden by derived class.""" raise NotImplementedError # Socket write methods def send_message(self, message): """Send a P2P message over the socket. 
This method takes a P2P payload, builds the P2P header and adds the message to the send buffer to be sent over the socket.""" tmsg = self.build_message(message) self._log_message("send", message) return self.send_raw_message(tmsg) def send_raw_message(self, raw_message_bytes): if not self.is_connected: raise IOError('Not connected') def maybe_write(): if not self._transport: return if self._transport.is_closing(): return self._transport.write(raw_message_bytes) NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write) # Class utility methods def build_message(self, message): """Build a serialized P2P message""" msgtype = message.msgtype data = message.serialize() tmsg = self.magic_bytes tmsg += msgtype tmsg += b"\x00" * (12 - len(msgtype)) tmsg += struct.pack("<I", len(data)) th = sha256(data) h = sha256(th) tmsg += h[:4] tmsg += data return tmsg def _log_message(self, direction, msg): """Logs a message being sent or received over the connection.""" if direction == "send": log_message = "Send message to " elif direction == "receive": log_message = "Received message from " log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500]) if len(log_message) > 500: log_message += "... (msg truncated)" logger.debug(log_message) class P2PInterface(P2PConnection): """A high-level P2P interface class for communicating with a Bitcoin node. This class provides high-level callbacks for processing P2P message payloads, as well as convenience methods for interacting with the node over P2P. Individual testcases should subclass this and override the on_* methods if they want to alter message handling behaviour.""" def __init__(self, support_addrv2=False, wtxidrelay=True): super().__init__() # Track number of messages of each type received. # Should be read-only in a test. self.message_count = defaultdict(int) # Track the most recent message of each type. # To wait for a message to be received, pop that message from # this and use self.wait_until. 
self.last_message = {} # A count of the number of ping messages we've sent to the node self.ping_counter = 1 # The network services received from the peer self.nServices = 0 self.support_addrv2 = support_addrv2 # If the peer supports wtxid-relay self.wtxidrelay = wtxidrelay def peer_connect_send_version(self, services): # Send a version msg vt = msg_version() vt.nVersion = P2P_VERSION vt.strSubVer = P2P_SUBVERSION vt.relay = P2P_VERSION_RELAY vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 self.on_connection_send_msg = vt # Will be sent in connection_made callback def peer_connect(self, *args, services=P2P_SERVICES, send_version=True, **kwargs): create_conn = super().peer_connect(*args, **kwargs) if send_version: self.peer_connect_send_version(services) return create_conn def peer_accept_connection(self, *args, services=NODE_NETWORK | NODE_WITNESS, **kwargs): create_conn = super().peer_accept_connection(*args, **kwargs) self.peer_connect_send_version(services) return create_conn # Message receiving methods def on_message(self, message): """Receive message and dispatch message to appropriate callback. We keep a count of how many of each message type has been received and the most recent message of each type.""" with p2p_lock: try: msgtype = message.msgtype.decode('ascii') self.message_count[msgtype] += 1 self.last_message[msgtype] = message getattr(self, 'on_' + msgtype)(message) except: print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0])) raise # Callback methods. Can be overridden by subclasses in individual test # cases to provide custom message handling behaviour. 
def on_open(self): pass def on_close(self): pass def on_addr(self, message): pass def on_addrv2(self, message): pass def on_block(self, message): pass def on_blocktxn(self, message): pass def on_cfcheckpt(self, message): pass def on_cfheaders(self, message): pass def on_cfilter(self, message): pass def on_cmpctblock(self, message): pass def on_feefilter(self, message): pass def on_filteradd(self, message): pass def on_filterclear(self, message): pass def on_filterload(self, message): pass def on_getaddr(self, message): pass def on_getblocks(self, message): pass def on_getblocktxn(self, message): pass def on_getdata(self, message): pass def on_getheaders(self, message): pass def on_headers(self, message): pass def on_mempool(self, message): pass def on_merkleblock(self, message): pass def on_notfound(self, message): pass def on_pong(self, message): pass def on_sendaddrv2(self, message): pass def on_sendcmpct(self, message): pass def on_sendheaders(self, message): pass def on_tx(self, message): pass def on_wtxidrelay(self, message): pass def on_inv(self, message): want = msg_getdata() for i in message.inv: if i.type != 0: want.inv.append(i) if len(want.inv): self.send_message(want) def on_ping(self, message): self.send_message(msg_pong(message.nonce)) def on_verack(self, message): pass def on_version(self, message): assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, "Version {} received. 
Test framework only supports versions greater than {}".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED) if message.nVersion >= 110016 and self.wtxidrelay: self.send_message(msg_wtxidrelay()) if self.support_addrv2: self.send_message(msg_sendaddrv2()) self.send_message(msg_verack()) self.nServices = message.nServices # Connection helper methods def wait_until(self, test_function_in, *, timeout=60, check_connected=True): def test_function(): if check_connected: assert self.is_connected return test_function_in() wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor) def wait_for_connect(self, timeout=60): test_function = lambda: self.is_connected wait_until_helper(test_function, timeout=timeout, lock=p2p_lock) def wait_for_disconnect(self, timeout=60): test_function = lambda: not self.is_connected self.wait_until(test_function, timeout=timeout, check_connected=False) # Message receiving helper methods def wait_for_tx(self, txid, timeout=60): def test_function(): if not self.last_message.get('tx'): return False return self.last_message['tx'].tx.rehash() == txid self.wait_until(test_function, timeout=timeout) def wait_for_block(self, blockhash, timeout=60): def test_function(): return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash self.wait_until(test_function, timeout=timeout) def wait_for_header(self, blockhash, timeout=60): def test_function(): last_headers = self.last_message.get('headers') if not last_headers: return False return last_headers.headers[0].rehash() == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) def wait_for_merkleblock(self, blockhash, timeout=60): def test_function(): last_filtered_block = self.last_message.get('merkleblock') if not last_filtered_block: return False return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) def wait_for_getdata(self, hash_list, 
timeout=60): """Waits for a getdata message. The object hashes in the inventory vector must match the provided hash_list.""" def test_function(): last_data = self.last_message.get("getdata") if not last_data: return False return [x.hash for x in last_data.inv] == hash_list self.wait_until(test_function, timeout=timeout) def wait_for_getheaders(self, timeout=60): """Waits for a getheaders message. Receiving any getheaders message will satisfy the predicate. the last_message["getheaders"] value must be explicitly cleared before calling this method, or this will return immediately with success. TODO: change this method to take a hash value and only return true if the correct block header has been requested.""" def test_function(): return self.last_message.get("getheaders") self.wait_until(test_function, timeout=timeout) def wait_for_inv(self, expected_inv, timeout=60): """Waits for an INV message and checks that the first inv object in the message was as expected.""" if len(expected_inv) > 1: raise NotImplementedError("wait_for_inv() will only verify the first inv object") def test_function(): return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash self.wait_until(test_function, timeout=timeout) def wait_for_verack(self, timeout=60): def test_function(): return "verack" in self.last_message self.wait_until(test_function, timeout=timeout) # Message sending helper functions def send_and_ping(self, message, timeout=60): self.send_message(message) self.sync_with_ping(timeout=timeout) def sync_send_with_ping(self, timeout=60): """Ensure SendMessages is called on this connection""" # Calling sync_with_ping twice requires that the node calls # `ProcessMessage` twice, and thus ensures `SendMessages` must have # been called at least once self.sync_with_ping() self.sync_with_ping() def sync_with_ping(self, timeout=60): """Ensure ProcessMessages is called on this 
connection""" self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter self.wait_until(test_function, timeout=timeout) self.ping_counter += 1 # One lock for synchronizing all data access between the network event loop (see # NetworkThread below) and the thread running the test logic. For simplicity, # P2PConnection acquires this lock whenever delivering a message to a P2PInterface. # This lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the P2PInterface or P2PConnection. p2p_lock = threading.Lock() class NetworkThread(threading.Thread): network_event_loop = None def __init__(self): super().__init__(name="NetworkThread") # There is only one event loop and no more than one thread must be created assert not self.network_event_loop NetworkThread.listeners = {} NetworkThread.protos = {} NetworkThread.network_event_loop = asyncio.new_event_loop() def run(self): """Start the network thread.""" self.network_event_loop.run_forever() def close(self, timeout=10): """Close the connections and network event loop.""" self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout) self.network_event_loop.close() self.join(timeout) # Safe to remove event loop. NetworkThread.network_event_loop = None @classmethod def listen(cls, p2p, callback, port=None, addr=None, idx=1): """ Ensure a listening server is running on the given port, and run the protocol specified by `p2p` on the next connection to it. 
Once ready for connections, call `callback`.""" if port is None: assert 0 < idx <= MAX_NODES port = p2p_port(MAX_NODES - idx) if addr is None: addr = '127.0.0.1' coroutine = cls.create_listen_server(addr, port, callback, p2p) cls.network_event_loop.call_soon_threadsafe(cls.network_event_loop.create_task, coroutine) @classmethod async def create_listen_server(cls, addr, port, callback, proto): def peer_protocol(): """Returns a function that does the protocol handling for a new connection. To allow different connections to have different behaviors, the protocol function is first put in the cls.protos dict. When the connection is made, the function removes the protocol function from that dict, and returns it so the event loop can start executing it.""" response = cls.protos.get((addr, port)) cls.protos[(addr, port)] = None return response if (addr, port) not in cls.listeners: # When creating a listener on a given (addr, port) we only need to # do it once. If we want different behaviors for different # connections, we can accomplish this by providing different # `proto` functions listener = await cls.network_event_loop.create_server(peer_protocol, addr, port) logger.debug("Listening server on %s:%d should be started" % (addr, port)) cls.listeners[(addr, port)] = listener cls.protos[(addr, port)] = proto callback(addr, port) class P2PDataStore(P2PInterface): """A P2P data store class. Keeps a block and transaction store and responds correctly to getdata and getheaders requests.""" def __init__(self): super().__init__() # store of blocks. key is block hash, value is a CBlock object self.block_store = {} self.last_block_hash = '' # store of txs. 
key is txid, value is a CTransaction object self.tx_store = {} self.getdata_requests = [] def on_getdata(self, message): """Check for the tx/block in our stores and if found, reply with an inv message.""" for inv in message.inv: self.getdata_requests.append(inv.hash) if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys(): self.send_message(msg_tx(self.tx_store[inv.hash])) elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys(): self.send_message(msg_block(self.block_store[inv.hash])) else: logger.debug('getdata message type {} received.'.format(hex(inv.type))) def on_getheaders(self, message): """Search back through our block store for the locator, and reply with a headers message if found.""" locator, hash_stop = message.locator, message.hashstop # Assume that the most recent block added is the tip if not self.block_store: return headers_list = [self.block_store[self.last_block_hash]] while headers_list[-1].sha256 not in locator.vHave: # Walk back through the block store, adding headers to headers_list # as we go. prev_block_hash = headers_list[-1].hashPrevBlock if prev_block_hash in self.block_store: prev_block_header = CBlockHeader(self.block_store[prev_block_hash]) headers_list.append(prev_block_header) if prev_block_header.sha256 == hash_stop: # if this is the hashstop header, stop here break else: logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash))) break # Truncate the list if there are too many headers headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1] response = msg_headers(headers_list) if response is not None: self.send_message(response) def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60): """Send blocks to test node and test whether the tip advances. 
- add all blocks to our block_store - send a headers message for the final block - the on_getheaders handler will ensure that any getheaders are responded to - if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will ensure that any getdata messages are responded to. Otherwise send the full block unsolicited. - if success is True: assert that the node's tip advances to the most recent block - if success is False: assert that the node's tip doesn't advance - if reject_reason is set: assert that the correct reject message is logged""" with p2p_lock: for block in blocks: self.block_store[block.sha256] = block self.last_block_hash = block.sha256 reject_reason = [reject_reason] if reject_reason else [] with node.assert_debug_log(expected_msgs=reject_reason): if force_send: for b in blocks: self.send_message(msg_block(block=b)) else: self.send_message(msg_headers([CBlockHeader(block) for block in blocks])) self.wait_until( lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, check_connected=success, ) if expect_disconnect: self.wait_for_disconnect(timeout=timeout) else: self.sync_with_ping(timeout=timeout) if success: self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout) else: assert node.getbestblockhash() != blocks[-1].hash def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None): """Send txs to test node and test whether they're accepted to the mempool. 
- add all txs to our tx_store - send tx messages for all txs - if success is True/False: assert that the txs are/are not accepted to the mempool - if expect_disconnect is True: Skip the sync with ping - if reject_reason is set: assert that the correct reject message is logged.""" with p2p_lock: for tx in txs: self.tx_store[tx.sha256] = tx reject_reason = [reject_reason] if reject_reason else [] with node.assert_debug_log(expected_msgs=reject_reason): for tx in txs: self.send_message(msg_tx(tx)) if expect_disconnect: self.wait_for_disconnect() else: self.sync_with_ping() raw_mempool = node.getrawmempool() if success: # Check that all txs are now in the mempool for tx in txs: assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash) else: # Check that none of the txs are now in the mempool for tx in txs: assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash) class P2PTxInvStore(P2PInterface): """A P2PInterface which stores a count of how many times each txid has been announced.""" def __init__(self): super().__init__() self.tx_invs_received = defaultdict(int) def on_inv(self, message): super().on_inv(message) # Send getdata in response. # Store how many times invs have been received for each tx. for i in message.inv: if (i.type == MSG_TX) or (i.type == MSG_WTX): # save txid self.tx_invs_received[i.hash] += 1 def get_invs(self): with p2p_lock: return list(self.tx_invs_received.keys()) def wait_for_broadcast(self, txns, timeout=60): """Waits for the txns (list of txids) to complete initial broadcast. The mempool should mark unbroadcast=False for these transactions. """ # Wait until invs have been received (and getdatas sent) for each txid. self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout) # Flush messages and wait for the getdatas to be processed self.sync_with_ping()
39.08976
190
0.649968
import asyncio from collections import defaultdict from io import BytesIO import logging import struct import sys import threading from test_framework.messages import ( CBlockHeader, MAX_HEADERS_RESULTS, msg_addr, msg_addrv2, msg_block, MSG_BLOCK, msg_blocktxn, msg_cfcheckpt, msg_cfheaders, msg_cfilter, msg_cmpctblock, msg_feefilter, msg_filteradd, msg_filterclear, msg_filterload, msg_getaddr, msg_getblocks, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_mempool, msg_merkleblock, msg_notfound, msg_ping, msg_pong, msg_sendaddrv2, msg_sendcmpct, msg_sendheaders, msg_tx, MSG_TX, MSG_TYPE_MASK, msg_verack, msg_version, MSG_WTX, msg_wtxidrelay, NODE_NETWORK, NODE_WITNESS, sha256, ) from test_framework.util import ( MAX_NODES, p2p_port, wait_until_helper, ) logger = logging.getLogger("TestFramework.p2p") MIN_P2P_VERSION_SUPPORTED = 60001 P2P_VERSION = 110016 P2P_SERVICES = NODE_NETWORK | NODE_WITNESS P2P_SUBVERSION = "/python-p2p-tester:0.0.3/" P2P_VERSION_RELAY = 1 MESSAGEMAP = { b"addr": msg_addr, b"addrv2": msg_addrv2, b"block": msg_block, b"blocktxn": msg_blocktxn, b"cfcheckpt": msg_cfcheckpt, b"cfheaders": msg_cfheaders, b"cfilter": msg_cfilter, b"cmpctblock": msg_cmpctblock, b"feefilter": msg_feefilter, b"filteradd": msg_filteradd, b"filterclear": msg_filterclear, b"filterload": msg_filterload, b"getaddr": msg_getaddr, b"getblocks": msg_getblocks, b"getblocktxn": msg_getblocktxn, b"getdata": msg_getdata, b"getheaders": msg_getheaders, b"headers": msg_headers, b"inv": msg_inv, b"mempool": msg_mempool, b"merkleblock": msg_merkleblock, b"notfound": msg_notfound, b"ping": msg_ping, b"pong": msg_pong, b"sendaddrv2": msg_sendaddrv2, b"sendcmpct": msg_sendcmpct, b"sendheaders": msg_sendheaders, b"tx": msg_tx, b"verack": msg_verack, b"version": msg_version, b"wtxidrelay": msg_wtxidrelay, } MAGIC_BYTES = { "mainnet": b"\xf9\xbe\xb4\xd9", "testnet3": b"\x0b\x11\x09\x07", "regtest": b"\xfa\xbf\xb5\xda", "signet": b"\x0a\x03\xcf\x40", } class 
P2PConnection(asyncio.Protocol): def __init__(self): self._transport = None @property def is_connected(self): return self._transport is not None def peer_connect_helper(self, dstaddr, dstport, net, timeout_factor): assert not self.is_connected self.timeout_factor = timeout_factor self.dstaddr = dstaddr self.dstport = dstport self.on_connection_send_msg = None self.recvbuf = b"" self.magic_bytes = MAGIC_BYTES[net] def peer_connect(self, dstaddr, dstport, *, net, timeout_factor): self.peer_connect_helper(dstaddr, dstport, net, timeout_factor) loop = NetworkThread.network_event_loop logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport)) coroutine = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport) return lambda: loop.call_soon_threadsafe(loop.create_task, coroutine) def peer_accept_connection(self, connect_id, connect_cb=lambda: None, *, net, timeout_factor): self.peer_connect_helper('0', 0, net, timeout_factor) logger.debug('Listening for Bitcoin Node with id: {}'.format(connect_id)) return lambda: NetworkThread.listen(self, connect_cb, idx=connect_id) def peer_disconnect(self): NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort()) def connection_made(self, transport): assert not self._transport logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport)) self._transport = transport if self.on_connection_send_msg: self.send_message(self.on_connection_send_msg) self.on_connection_send_msg = None self.on_open() def connection_lost(self, exc): if exc: logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc)) else: logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport)) self._transport = None self.recvbuf = b"" self.on_close() def data_received(self, t): if len(t) > 0: self.recvbuf += t self._on_data() def _on_data(self): try: while True: if len(self.recvbuf) < 4: return if self.recvbuf[:4] != 
self.magic_bytes: raise ValueError("magic bytes mismatch: {} != {}".format(repr(self.magic_bytes), repr(self.recvbuf))) if len(self.recvbuf) < 4 + 12 + 4 + 4: return msgtype = self.recvbuf[4:4+12].split(b"\x00", 1)[0] msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0] checksum = self.recvbuf[4+12+4:4+12+4+4] if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen: return msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen] th = sha256(msg) h = sha256(th) if checksum != h[:4]: raise ValueError("got bad checksum " + repr(self.recvbuf)) self.recvbuf = self.recvbuf[4+12+4+4+msglen:] if msgtype not in MESSAGEMAP: raise ValueError("Received unknown msgtype from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, msgtype, repr(msg))) f = BytesIO(msg) t = MESSAGEMAP[msgtype]() t.deserialize(f) self._log_message("receive", t) self.on_message(t) except Exception as e: logger.exception('Error reading message:', repr(e)) raise def on_message(self, message): raise NotImplementedError def send_message(self, message): tmsg = self.build_message(message) self._log_message("send", message) return self.send_raw_message(tmsg) def send_raw_message(self, raw_message_bytes): if not self.is_connected: raise IOError('Not connected') def maybe_write(): if not self._transport: return if self._transport.is_closing(): return self._transport.write(raw_message_bytes) NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write) def build_message(self, message): msgtype = message.msgtype data = message.serialize() tmsg = self.magic_bytes tmsg += msgtype tmsg += b"\x00" * (12 - len(msgtype)) tmsg += struct.pack("<I", len(data)) th = sha256(data) h = sha256(th) tmsg += h[:4] tmsg += data return tmsg def _log_message(self, direction, msg): if direction == "send": log_message = "Send message to " elif direction == "receive": log_message = "Received message from " log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500]) if len(log_message) > 500: log_message += "... 
(msg truncated)" logger.debug(log_message) class P2PInterface(P2PConnection): def __init__(self, support_addrv2=False, wtxidrelay=True): super().__init__() self.message_count = defaultdict(int) self.last_message = {} self.ping_counter = 1 # The network services received from the peer self.nServices = 0 self.support_addrv2 = support_addrv2 # If the peer supports wtxid-relay self.wtxidrelay = wtxidrelay def peer_connect_send_version(self, services): # Send a version msg vt = msg_version() vt.nVersion = P2P_VERSION vt.strSubVer = P2P_SUBVERSION vt.relay = P2P_VERSION_RELAY vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport vt.addrFrom.ip = "0.0.0.0" vt.addrFrom.port = 0 self.on_connection_send_msg = vt # Will be sent in connection_made callback def peer_connect(self, *args, services=P2P_SERVICES, send_version=True, **kwargs): create_conn = super().peer_connect(*args, **kwargs) if send_version: self.peer_connect_send_version(services) return create_conn def peer_accept_connection(self, *args, services=NODE_NETWORK | NODE_WITNESS, **kwargs): create_conn = super().peer_accept_connection(*args, **kwargs) self.peer_connect_send_version(services) return create_conn # Message receiving methods def on_message(self, message): with p2p_lock: try: msgtype = message.msgtype.decode('ascii') self.message_count[msgtype] += 1 self.last_message[msgtype] = message getattr(self, 'on_' + msgtype)(message) except: print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0])) raise # Callback methods. Can be overridden by subclasses in individual test # cases to provide custom message handling behaviour. 
def on_open(self): pass def on_close(self): pass def on_addr(self, message): pass def on_addrv2(self, message): pass def on_block(self, message): pass def on_blocktxn(self, message): pass def on_cfcheckpt(self, message): pass def on_cfheaders(self, message): pass def on_cfilter(self, message): pass def on_cmpctblock(self, message): pass def on_feefilter(self, message): pass def on_filteradd(self, message): pass def on_filterclear(self, message): pass def on_filterload(self, message): pass def on_getaddr(self, message): pass def on_getblocks(self, message): pass def on_getblocktxn(self, message): pass def on_getdata(self, message): pass def on_getheaders(self, message): pass def on_headers(self, message): pass def on_mempool(self, message): pass def on_merkleblock(self, message): pass def on_notfound(self, message): pass def on_pong(self, message): pass def on_sendaddrv2(self, message): pass def on_sendcmpct(self, message): pass def on_sendheaders(self, message): pass def on_tx(self, message): pass def on_wtxidrelay(self, message): pass def on_inv(self, message): want = msg_getdata() for i in message.inv: if i.type != 0: want.inv.append(i) if len(want.inv): self.send_message(want) def on_ping(self, message): self.send_message(msg_pong(message.nonce)) def on_verack(self, message): pass def on_version(self, message): assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, "Version {} received. 
Test framework only supports versions greater than {}".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED) if message.nVersion >= 110016 and self.wtxidrelay: self.send_message(msg_wtxidrelay()) if self.support_addrv2: self.send_message(msg_sendaddrv2()) self.send_message(msg_verack()) self.nServices = message.nServices # Connection helper methods def wait_until(self, test_function_in, *, timeout=60, check_connected=True): def test_function(): if check_connected: assert self.is_connected return test_function_in() wait_until_helper(test_function, timeout=timeout, lock=p2p_lock, timeout_factor=self.timeout_factor) def wait_for_connect(self, timeout=60): test_function = lambda: self.is_connected wait_until_helper(test_function, timeout=timeout, lock=p2p_lock) def wait_for_disconnect(self, timeout=60): test_function = lambda: not self.is_connected self.wait_until(test_function, timeout=timeout, check_connected=False) # Message receiving helper methods def wait_for_tx(self, txid, timeout=60): def test_function(): if not self.last_message.get('tx'): return False return self.last_message['tx'].tx.rehash() == txid self.wait_until(test_function, timeout=timeout) def wait_for_block(self, blockhash, timeout=60): def test_function(): return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash self.wait_until(test_function, timeout=timeout) def wait_for_header(self, blockhash, timeout=60): def test_function(): last_headers = self.last_message.get('headers') if not last_headers: return False return last_headers.headers[0].rehash() == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) def wait_for_merkleblock(self, blockhash, timeout=60): def test_function(): last_filtered_block = self.last_message.get('merkleblock') if not last_filtered_block: return False return last_filtered_block.merkleblock.header.rehash() == int(blockhash, 16) self.wait_until(test_function, timeout=timeout) def wait_for_getdata(self, hash_list, 
timeout=60): def test_function(): last_data = self.last_message.get("getdata") if not last_data: return False return [x.hash for x in last_data.inv] == hash_list self.wait_until(test_function, timeout=timeout) def wait_for_getheaders(self, timeout=60): def test_function(): return self.last_message.get("getheaders") self.wait_until(test_function, timeout=timeout) def wait_for_inv(self, expected_inv, timeout=60): if len(expected_inv) > 1: raise NotImplementedError("wait_for_inv() will only verify the first inv object") def test_function(): return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash self.wait_until(test_function, timeout=timeout) def wait_for_verack(self, timeout=60): def test_function(): return "verack" in self.last_message self.wait_until(test_function, timeout=timeout) # Message sending helper functions def send_and_ping(self, message, timeout=60): self.send_message(message) self.sync_with_ping(timeout=timeout) def sync_send_with_ping(self, timeout=60): # Calling sync_with_ping twice requires that the node calls # `ProcessMessage` twice, and thus ensures `SendMessages` must have # been called at least once self.sync_with_ping() self.sync_with_ping() def sync_with_ping(self, timeout=60): self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter self.wait_until(test_function, timeout=timeout) self.ping_counter += 1 # One lock for synchronizing all data access between the network event loop (see # NetworkThread below) and the thread running the test logic. For simplicity, # P2PConnection acquires this lock whenever delivering a message to a P2PInterface. # This lock should be acquired in the thread running the test logic to synchronize # access to any data shared with the P2PInterface or P2PConnection. 
p2p_lock = threading.Lock() class NetworkThread(threading.Thread): network_event_loop = None def __init__(self): super().__init__(name="NetworkThread") # There is only one event loop and no more than one thread must be created assert not self.network_event_loop NetworkThread.listeners = {} NetworkThread.protos = {} NetworkThread.network_event_loop = asyncio.new_event_loop() def run(self): self.network_event_loop.run_forever() def close(self, timeout=10): self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) wait_until_helper(lambda: not self.network_event_loop.is_running(), timeout=timeout) self.network_event_loop.close() self.join(timeout) # Safe to remove event loop. NetworkThread.network_event_loop = None @classmethod def listen(cls, p2p, callback, port=None, addr=None, idx=1): if port is None: assert 0 < idx <= MAX_NODES port = p2p_port(MAX_NODES - idx) if addr is None: addr = '127.0.0.1' coroutine = cls.create_listen_server(addr, port, callback, p2p) cls.network_event_loop.call_soon_threadsafe(cls.network_event_loop.create_task, coroutine) @classmethod async def create_listen_server(cls, addr, port, callback, proto): def peer_protocol(): response = cls.protos.get((addr, port)) cls.protos[(addr, port)] = None return response if (addr, port) not in cls.listeners: # When creating a listener on a given (addr, port) we only need to # do it once. If we want different behaviors for different # connections, we can accomplish this by providing different # `proto` functions listener = await cls.network_event_loop.create_server(peer_protocol, addr, port) logger.debug("Listening server on %s:%d should be started" % (addr, port)) cls.listeners[(addr, port)] = listener cls.protos[(addr, port)] = proto callback(addr, port) class P2PDataStore(P2PInterface): def __init__(self): super().__init__() # store of blocks. key is block hash, value is a CBlock object self.block_store = {} self.last_block_hash = '' # store of txs. 
key is txid, value is a CTransaction object self.tx_store = {} self.getdata_requests = [] def on_getdata(self, message): for inv in message.inv: self.getdata_requests.append(inv.hash) if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys(): self.send_message(msg_tx(self.tx_store[inv.hash])) elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys(): self.send_message(msg_block(self.block_store[inv.hash])) else: logger.debug('getdata message type {} received.'.format(hex(inv.type))) def on_getheaders(self, message): locator, hash_stop = message.locator, message.hashstop # Assume that the most recent block added is the tip if not self.block_store: return headers_list = [self.block_store[self.last_block_hash]] while headers_list[-1].sha256 not in locator.vHave: # Walk back through the block store, adding headers to headers_list # as we go. prev_block_hash = headers_list[-1].hashPrevBlock if prev_block_hash in self.block_store: prev_block_header = CBlockHeader(self.block_store[prev_block_hash]) headers_list.append(prev_block_header) if prev_block_header.sha256 == hash_stop: # if this is the hashstop header, stop here break else: logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash))) break # Truncate the list if there are too many headers headers_list = headers_list[:-MAX_HEADERS_RESULTS - 1:-1] response = msg_headers(headers_list) if response is not None: self.send_message(response) def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60): with p2p_lock: for block in blocks: self.block_store[block.sha256] = block self.last_block_hash = block.sha256 reject_reason = [reject_reason] if reject_reason else [] with node.assert_debug_log(expected_msgs=reject_reason): if force_send: for b in blocks: self.send_message(msg_block(block=b)) else: self.send_message(msg_headers([CBlockHeader(block) for block in blocks])) 
self.wait_until( lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, check_connected=success, ) if expect_disconnect: self.wait_for_disconnect(timeout=timeout) else: self.sync_with_ping(timeout=timeout) if success: self.wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout) else: assert node.getbestblockhash() != blocks[-1].hash def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None): with p2p_lock: for tx in txs: self.tx_store[tx.sha256] = tx reject_reason = [reject_reason] if reject_reason else [] with node.assert_debug_log(expected_msgs=reject_reason): for tx in txs: self.send_message(msg_tx(tx)) if expect_disconnect: self.wait_for_disconnect() else: self.sync_with_ping() raw_mempool = node.getrawmempool() if success: # Check that all txs are now in the mempool for tx in txs: assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash) else: # Check that none of the txs are now in the mempool for tx in txs: assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash) class P2PTxInvStore(P2PInterface): def __init__(self): super().__init__() self.tx_invs_received = defaultdict(int) def on_inv(self, message): super().on_inv(message) # Send getdata in response. # Store how many times invs have been received for each tx. for i in message.inv: if (i.type == MSG_TX) or (i.type == MSG_WTX): # save txid self.tx_invs_received[i.hash] += 1 def get_invs(self): with p2p_lock: return list(self.tx_invs_received.keys()) def wait_for_broadcast(self, txns, timeout=60): # Wait until invs have been received (and getdatas sent) for each txid. self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout) # Flush messages and wait for the getdatas to be processed self.sync_with_ping()
true
true
f713940d48a4cf34f076e6a66255232416836474
16,010
py
Python
game_agent.py
hussaial/Isolation-search-agent
754b57905c5a309bbfbec61cfba3478682e3fcd1
[ "MIT" ]
null
null
null
game_agent.py
hussaial/Isolation-search-agent
754b57905c5a309bbfbec61cfba3478682e3fcd1
[ "MIT" ]
null
null
null
game_agent.py
hussaial/Isolation-search-agent
754b57905c5a309bbfbec61cfba3478682e3fcd1
[ "MIT" ]
null
null
null
"""This file contains all the classes you must complete for this project. You can use the test cases in agent_test.py to help during development, and augment the test suite with your own test cases to further test your code. You must test your agent's strength against a set of agents with known relative strength using tournament.py and include the results in your report. """ import random import sys import math class Timeout(Exception): """Subclass base exception for code clarity.""" pass def custom_score(game, player): """Calculate the heuristic value of a game state from the point of view of the given player. Note: this function should be called from within a Player instance as `self.score()` -- you should not need to call this function directly. Parameters ---------- game : `isolation.Board` An instance of `isolation.Board` encoding the current state of the game (e.g., player locations and blocked cells). player : object A player instance in the current game (i.e., an object corresponding to one of the player objects `game.__player_1__` or `game.__player_2__`.) Returns ------- float The heuristic value of the current game state to the specified player. """ # TODO: finish this function! 
if game.is_loser(player): return float("-inf") if game.is_winner(player): return float("inf") ''' identify the corners of the board and avoid these topright_corner = (game.height-1,game.width-1) bottomright-corner = (0,game.width-1) bottomleft_corner = (0,0) topleft_corner = (game.height-1,0) ''' # heuristic function a variation of #my-moves-#opp_moves #go down four levels further #print('height :',game.height) #print('width: ',game.width) own_score = 0.0 opp_score = 0.0 own_moves = game.get_legal_moves(player) opp_moves = game.get_legal_moves(game.get_opponent(player)) percent_board_unoccupied = (len(game.get_blank_spaces())/(game.height*game.width))*100 #own_move_coverage = (len(game.get_legal_moves(player))/len(game.get_blank_spaces()))*100 #opp_move_coverage = (len(game.get_opponent(player))/len(game.get_blank_spaces()))*100 #common_moves = list(set(own_moves).intersection(opp_moves)) #union_moves = list(set(own_moves).union(opp_moves)) #opp_diff_moves = len(opp_moves)-len(common_moves) #own_diff_moves = len(own_moves)-len(common_moves) #corners= [(game.height-1,game.width-1),(0,game.width-1),(0,0),(game.height-1,0)] walls = [ [(0,i) for i in range(game.width)],[(i,0) for i in range(game.height)],[(game.height-1,i) for i in range(game.width)],[(i,game.width-1) for i in range(game.height)] ] #============================================================================== # for move in game.get_legal_moves(player): # if move in corners: # own_score -= 10 # else: # own_score += 1 # return own_score #============================================================================== centers = [(i,j) for i in range(math.floor(game.width/2)-1,math.floor(game.width/2)+1) for j in range(math.floor(game.height/2)-1,math.floor(game.height/2)+1)] #print(center) #============================================================================== # for move in own_moves: # if move in centers and percent_board_unoccupied<25: # own_score += 30 # elif move in centers and 
percent_board_unoccupied>=25 and percent_board_unoccupied<50: # own_score +=20 # elif move in centers and percent_board_unoccupied>=50 and percent_board_unoccupied<75: # own_score +=10 # elif move in centers and percent_board_unoccupied>=75: # own_score +=5 # return own_score #============================================================================== #============================================================================== # for move in opp_moves: # if move in centers and percent_board_unoccupied<25: # opp_score += 30 # elif move in centers and percent_board_unoccupied>=25 and percent_board_unoccupied<50: # opp_score +=20 # elif move in centers and percent_board_unoccupied>=50 and percent_board_unoccupied<75: # opp_score +=10 # elif move in centers and percent_board_unoccupied>=75: # opp_score +=5 # return opp_score #============================================================================== for move in own_moves: for wall in walls: if move in wall and percent_board_unoccupied<25: own_score -=5 #30 elif move in wall and (percent_board_unoccupied>=25 and percent_board_unoccupied<50): own_score -=10 #20 elif move in wall and (percent_board_unoccupied>=50 and percent_board_unoccupied<75): own_score -=20 #10 elif move in wall and percent_board_unoccupied>=75: own_score -=30 #5 elif move in centers: own_score +=20 else: own_score += 1 return own_score for move in opp_moves: for wall in walls: if move in wall and percent_board_unoccupied<25: opp_score -=30 elif move in wall and (percent_board_unoccupied>=25 and percent_board_unoccupied<50): opp_score -=20 elif move in wall and (percent_board_unoccupied>=50 and percent_board_unoccupied<75): opp_score -=10 elif move in wall and percent_board_unoccupied>=75: opp_score -=5 #91.43 elif move in centers: opp_score +=20 else: opp_score += 1 return opp_score return float((own_score)-(2*opp_score)) #float((own_score)-(2*len(opp_moves))) class CustomPlayer: """Game-playing agent that chooses a move using your 
evaluation function and a depth-limited minimax algorithm with alpha-beta pruning. You must finish and test this player to make sure it properly uses minimax and alpha-beta to return a good move before the search time limit expires. Parameters ---------- search_depth : int (optional) A strictly positive integer (i.e., 1, 2, 3,...) for the number of layers in the game tree to explore for fixed-depth search. (i.e., a depth of one (1) would only explore the immediate sucessors of the current state.) score_fn : callable (optional) A function to use for heuristic evaluation of game states. iterative : boolean (optional) Flag indicating whether to perform fixed-depth search (False) or iterative deepening search (True). method : {'minimax', 'alphabeta'} (optional) The name of the search method to use in get_move(). timeout : float (optional) Time remaining (in milliseconds) when search is aborted. Should be a positive value large enough to allow the function to return before the timer expires. """ def __init__(self, search_depth=3, score_fn=custom_score, iterative=True, method='minimax', timeout=10.): self.search_depth = search_depth self.iterative = iterative self.score = score_fn self.method = method self.time_left = None self.TIMER_THRESHOLD = timeout def get_move(self, game, legal_moves, time_left): """Search for the best move from the available legal moves and return a result before the time limit expires. This function must perform iterative deepening if self.iterative=True, and it must use the search method (minimax or alphabeta) corresponding to the self.method value. ********************************************************************** NOTE: If time_left < 0 when this function returns, the agent will forfeit the game due to timeout. You must return _before_ the timer reaches 0. 
********************************************************************** Parameters ---------- game : `isolation.Board` An instance of `isolation.Board` encoding the current state of the game (e.g., player locations and blocked cells). legal_moves : list<(int, int)> A list containing legal moves. Moves are encoded as tuples of pairs of ints defining the next (row, col) for the agent to occupy. time_left : callable A function that returns the number of milliseconds left in the current turn. Returning with any less than 0 ms remaining forfeits the game. Returns ------- (int, int) Board coordinates corresponding to a legal move; may return (-1, -1) if there are no available legal moves. """ self.time_left = time_left # TODO: finish this function! # Perform any required initializations, including selecting an initial # move from the game board (i.e., an opening book), or returning # immediately if there are no legal moves try: # The search method call (alpha beta or minimax) should happen in # here in order to avoid timeout. The try/except block will # automatically catch the exception raised by the search method # when the timer gets close to expiring # Perform Iterative Deepening with depth d if not legal_moves: return (-1, -1) best_move = legal_moves[0] #have to start somewhere #most_depth = 0 if self.iterative: for d in range(0,sys.maxsize): #IDS goes from 0 to inf if self.method == 'minimax': _,best_move = self.minimax(game, d) else: _,best_move = self.alphabeta(game, d) #most_depth=d #print('board:', game.to_string()) return best_move except Timeout: # Handle any actions required at timeout, if necessary # In case of timeout return the last timeout return best_move # Return the best move from the last completed search iteration #raise NotImplementedError def minimax(self, game, depth, maximizing_player=True): """Implement the minimax search algorithm as described in the lectures. 
Parameters ---------- game : isolation.Board An instance of the Isolation game `Board` class representing the current game state depth : int Depth is an integer representing the maximum number of plies to search in the game tree before aborting maximizing_player : bool Flag indicating whether the current search depth corresponds to a maximizing layer (True) or a minimizing layer (False) Returns ------- float The score for the current search branch tuple(int, int) The best move for the current branch; (-1, -1) for no legal moves Notes ----- (1) You MUST use the `self.score()` method for board evaluation to pass the project unit tests; you cannot call any other evaluation function directly. """ if self.time_left() < self.TIMER_THRESHOLD: raise Timeout() # TODO: finish this function! if depth == 0: return self.score(game,self), game.get_player_location(self) else: if maximizing_player: best_score = float('-inf') for m in game.get_legal_moves(): score, move = self.minimax(game.forecast_move(m), depth - 1, False) if score > best_score: best_score = score best_move = m #print('score and move:',best_score,best_move) return best_score, best_move else: #minimizing player best_score = float('inf') for m in game.get_legal_moves(game.get_opponent(self)): score, move = self.minimax(game.forecast_move(m), depth - 1, True) if score < best_score: best_score = score best_move = m #print('score and move:',best_score,best_move) return best_score, best_move def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf"), maximizing_player=True): """Implement minimax search with alpha-beta pruning as described in the lectures. 
Parameters ---------- game : isolation.Board An instance of the Isolation game `Board` class representing the current game state depth : int Depth is an integer representing the maximum number of plies to search in the game tree before aborting alpha : float Alpha limits the lower bound of search on minimizing layers beta : float Beta limits the upper bound of search on maximizing layers maximizing_player : bool Flag indicating whether the current search depth corresponds to a maximizing layer (True) or a minimizing layer (False) Returns ------- float The score for the current search branch tuple(int, int) The best move for the current branch; (-1, -1) for no legal moves Notes ----- (1) You MUST use the `self.score()` method for board evaluation to pass the project unit tests; you cannot call any other evaluation function directly. """ if self.time_left() < self.TIMER_THRESHOLD: raise Timeout() # TODO: finish this function! #legal_moves=game.get_legal_moves() best_move=(3,3) #occupy the middle square if depth == 0: return self.score(game,self), game.get_player_location(self) else: if maximizing_player: best_score = float('-inf') for m in game.get_legal_moves(): score, move = self.alphabeta(game.forecast_move(m), depth - 1, alpha, beta, False) if score > best_score: best_score = score best_move = m alpha = max(alpha,best_score) if beta <= alpha: break #prune #print('score and move:',best_score,best_move) return best_score, best_move else: #minimizing player best_score = float('inf') for m in game.get_legal_moves(game.get_opponent(self)): score, move = self.alphabeta(game.forecast_move(m), depth - 1, alpha, beta, True) if score < best_score: best_score = score best_move = m beta = min(beta, best_score) if beta <= alpha: break #prune #print('score and move:',best_score,best_move) return best_score, best_move
38.393285
176
0.563835
import random import sys import math class Timeout(Exception): pass def custom_score(game, player): if game.is_loser(player): return float("-inf") if game.is_winner(player): return float("inf") = 0.0 opp_score = 0.0 own_moves = game.get_legal_moves(player) opp_moves = game.get_legal_moves(game.get_opponent(player)) percent_board_unoccupied = (len(game.get_blank_spaces())/(game.height*game.width))*100 walls = [ [(0,i) for i in range(game.width)],[(i,0) for i in range(game.height)],[(game.height-1,i) for i in range(game.width)],[(i,game.width-1) for i in range(game.height)] ] centers = [(i,j) for i in range(math.floor(game.width/2)-1,math.floor(game.width/2)+1) for j in range(math.floor(game.height/2)-1,math.floor(game.height/2)+1)] for move in own_moves: for wall in walls: if move in wall and percent_board_unoccupied<25: own_score -=5 elif move in wall and (percent_board_unoccupied>=25 and percent_board_unoccupied<50): own_score -=10 elif move in wall and (percent_board_unoccupied>=50 and percent_board_unoccupied<75): own_score -=20 elif move in wall and percent_board_unoccupied>=75: own_score -=30 elif move in centers: own_score +=20 else: own_score += 1 return own_score for move in opp_moves: for wall in walls: if move in wall and percent_board_unoccupied<25: opp_score -=30 elif move in wall and (percent_board_unoccupied>=25 and percent_board_unoccupied<50): opp_score -=20 elif move in wall and (percent_board_unoccupied>=50 and percent_board_unoccupied<75): opp_score -=10 elif move in wall and percent_board_unoccupied>=75: opp_score -=5 elif move in centers: opp_score +=20 else: opp_score += 1 return opp_score return float((own_score)-(2*opp_score)) class CustomPlayer: def __init__(self, search_depth=3, score_fn=custom_score, iterative=True, method='minimax', timeout=10.): self.search_depth = search_depth self.iterative = iterative self.score = score_fn self.method = method self.time_left = None self.TIMER_THRESHOLD = timeout def get_move(self, game, legal_moves, 
time_left): self.time_left = time_left try: if not legal_moves: return (-1, -1) best_move = legal_moves[0] if self.iterative: for d in range(0,sys.maxsize): if self.method == 'minimax': _,best_move = self.minimax(game, d) else: _,best_move = self.alphabeta(game, d) return best_move except Timeout: return best_move def minimax(self, game, depth, maximizing_player=True): if self.time_left() < self.TIMER_THRESHOLD: raise Timeout() if depth == 0: return self.score(game,self), game.get_player_location(self) else: if maximizing_player: best_score = float('-inf') for m in game.get_legal_moves(): score, move = self.minimax(game.forecast_move(m), depth - 1, False) if score > best_score: best_score = score best_move = m return best_score, best_move else: best_score = float('inf') for m in game.get_legal_moves(game.get_opponent(self)): score, move = self.minimax(game.forecast_move(m), depth - 1, True) if score < best_score: best_score = score best_move = m return best_score, best_move def alphabeta(self, game, depth, alpha=float("-inf"), beta=float("inf"), maximizing_player=True): if self.time_left() < self.TIMER_THRESHOLD: raise Timeout() best_move=(3,3) if depth == 0: return self.score(game,self), game.get_player_location(self) else: if maximizing_player: best_score = float('-inf') for m in game.get_legal_moves(): score, move = self.alphabeta(game.forecast_move(m), depth - 1, alpha, beta, False) if score > best_score: best_score = score best_move = m alpha = max(alpha,best_score) if beta <= alpha: break return best_score, best_move else: best_score = float('inf') for m in game.get_legal_moves(game.get_opponent(self)): score, move = self.alphabeta(game.forecast_move(m), depth - 1, alpha, beta, True) if score < best_score: best_score = score best_move = m beta = min(beta, best_score) if beta <= alpha: break return best_score, best_move
true
true
f713965066ae4da4f72c16ee7b79fbf81894ed84
2,734
py
Python
scheduler_task/scheduler_task.py
IanVzs/WindWhisper
742bc3e08d3edbd3ad9112c3149c0ad291b5e762
[ "MIT" ]
2
2020-11-18T11:01:24.000Z
2020-11-19T16:10:59.000Z
scheduler_task/scheduler_task.py
IanVzs/WindWhisper
742bc3e08d3edbd3ad9112c3149c0ad291b5e762
[ "MIT" ]
2
2020-09-10T06:20:28.000Z
2020-11-18T10:45:19.000Z
scheduler_task/scheduler_task.py
IanVzs/WindWhisper
742bc3e08d3edbd3ad9112c3149c0ad291b5e762
[ "MIT" ]
null
null
null
""" requests: sqlalchemy apscheduler 定时任务 sqlalchemy 文档: https://apscheduler.readthedocs.io/en/stable/index.html """ import time import json try: from pytz import utc, timezone china_tz = timezone('Asia/Shanghai') from apscheduler.schedulers.background import BackgroundScheduler # from apscheduler.jobstores.mongodb import MongoDBJobStore from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR except: print("需安装下述包") print("pip3 install sqlalchemy", "pip3 install apscheduler") raise "Stop!" import os import sys upath = os.path.dirname(os.path.abspath(__file__)) path = upath.split("/")[:-1] path = '/'.join(path) sys.path.append(path) from loggers import scheLog import weather class AllScheduler(): def __init__(self): pass def listener(self, event): """任务执行状态监听""" if event.exception: log_job = { "code": event.code, "jobid": event.job_id, "jobstore": event.jobstore, } scheLog.error(f'The job {event.job_id} crashed :( | {log_job}') else: scheLog.info(f'The job {event.job_id} worked :)') def run(self): jobstores = { # 'mongo': MongoDBJobStore(), # 'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite') "memory": MemoryJobStore(), } executors = {'default': ThreadPoolExecutor(5), 'processpool': ProcessPoolExecutor(2)} job_defaults = {'coalesce': False, 'max_instances': 3} scheduler = BackgroundScheduler( jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=china_tz) scheduler.add_listener(self.listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR) #scheduler.add_job(weather.weather_alarm, 'interval', seconds=10*60, id='sign_push_report') scheduler.add_job(weather.weather_alarm, 'interval', seconds=2, id='sign_weather_alarm') scheduler.start() return scheduler # scheLog.info(f"scheduler.get_jobs: {scheduler.get_jobs()}") # 
scheduler.remove_job('sign_push_report') # scheduler.shutdown(wait=True) if __name__ == "__main__": jobs = AllScheduler().run() time.sleep(3) jobs.remove_job('sign_weather_alarm') jobs.shutdown(wait=True) while jobs: try: time.sleep(3) except: jobs.remove_job('sign_weather_alarm') jobs.shutdown(wait=True) print("Stop.")
32.939759
99
0.656547
import time import json try: from pytz import utc, timezone china_tz = timezone('Asia/Shanghai') from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.jobstores.memory import MemoryJobStore from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR except: print("需安装下述包") print("pip3 install sqlalchemy", "pip3 install apscheduler") raise "Stop!" import os import sys upath = os.path.dirname(os.path.abspath(__file__)) path = upath.split("/")[:-1] path = '/'.join(path) sys.path.append(path) from loggers import scheLog import weather class AllScheduler(): def __init__(self): pass def listener(self, event): if event.exception: log_job = { "code": event.code, "jobid": event.job_id, "jobstore": event.jobstore, } scheLog.error(f'The job {event.job_id} crashed :( | {log_job}') else: scheLog.info(f'The job {event.job_id} worked :)') def run(self): jobstores = { "memory": MemoryJobStore(), } executors = {'default': ThreadPoolExecutor(5), 'processpool': ProcessPoolExecutor(2)} job_defaults = {'coalesce': False, 'max_instances': 3} scheduler = BackgroundScheduler( jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=china_tz) scheduler.add_listener(self.listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR) scheduler.add_job(weather.weather_alarm, 'interval', seconds=2, id='sign_weather_alarm') scheduler.start() return scheduler if __name__ == "__main__": jobs = AllScheduler().run() time.sleep(3) jobs.remove_job('sign_weather_alarm') jobs.shutdown(wait=True) while jobs: try: time.sleep(3) except: jobs.remove_job('sign_weather_alarm') jobs.shutdown(wait=True) print("Stop.")
true
true
f713967d33b8d32a374b78f34d77db9b0b616d88
29,988
py
Python
pynot/phot.py
jkrogager/PyNOT
2514a443079e50c12a13ebbd89a48f91a8d20626
[ "MIT" ]
2
2021-06-09T11:54:52.000Z
2021-07-29T07:47:05.000Z
pynot/phot.py
jkrogager/PyNOT
2514a443079e50c12a13ebbd89a48f91a8d20626
[ "MIT" ]
8
2021-06-21T09:44:57.000Z
2022-03-30T11:13:32.000Z
pynot/phot.py
jkrogager/PyNOT
2514a443079e50c12a13ebbd89a48f91a8d20626
[ "MIT" ]
1
2021-04-01T07:42:24.000Z
2021-04-01T07:42:24.000Z
""" Functions for Imaging Pipeline """ import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Ellipse from astropy.io import fits from astropy.modeling import models, fitting from astropy.table import Table from scipy.optimize import curve_fit import os from astropy.coordinates import SkyCoord import astropy.units as u from astroquery.sdss import SDSS import astroalign as aa import sep from pynot import alfosc from pynot.functions import get_version_number, mad from pynot.data.organizer import get_filter __version__ = get_version_number() def source_detection(fname, zeropoint=0., threshold=5.0, aperture=10.0, kwargs_bg={}, kwargs_ext={}): """ Run source detection in the input image using the python package SEP, based on the SExtractor algorithm. Parameters ---------- fname : str Filename of the FITS image to be analyzed. The image must have at least two extensions: the first should be the image in counts, and one should be named ERR holding the associated error image zeropoint : float [default=0.] Magnitude zero-point for the given photometric filter used for the observations. By defualt instrument magnitudes will be returned if no zero-point is given. threshold : float [default=5.0] Detection threshold in 'sigmas'. aperture : float [default=10.] Circular aperture radius in pixels. kwargs_bg : dict Parameters to pass to background subtraction (sep.Background()). See defition in `default_options_img.yml` kwargs_ext : dict Parameters to pass to source extraction (sep.extract()). See defition in `default_options_img.yml` Returns ------- table_fname : str The autogenerated filename of the source catalog. The format is: file-base of the input filename + '_phot.fits'. Ex.: fname='alfosc_rband.fits' -> table_fname='alfosc_rband_phot.fits' segmap_fname : str The autogenerated filename of the segmentation map. This image holds the regions associated to each source in the source catalog. 
The format is: file-base of the input filename + '_sep.fits' output_msg : str Log of messages from the function call. """ msg = list() # get GAIN from header data = fits.getdata(fname) error_image = fits.getdata(fname, 'ERR') hdr = fits.getheader(fname) msg.append(" - Loaded input image: %s" % fname) if 'EXPTIME' in hdr: exptime = hdr['EXPTIME'] msg.append(" - Loaded exposure time from image header: %.1f" % exptime) else: exptime = 1. msg.append("[WARNING] - No exposure time found in image header! Assuming image in counts.") data = data * 1. error_image = error_image * 1. if 'threshold' in kwargs_ext: threshold = kwargs_ext.pop('threshold') if 'aperture' in kwargs_ext: aperture = kwargs_ext.pop('aperture') bkg = sep.Background(data, **kwargs_bg) data_sub = data - bkg msg.append(" - Subtracted sky background") msg.append(" - Background RMS: %.2e" % bkg.globalrms) data_sub = data_sub.byteswap().newbyteorder() error_image = error_image.byteswap().newbyteorder() if data_sub.dtype.byteorder != '<': data_sub = data_sub.byteswap().newbyteorder() error_image = error_image.byteswap().newbyteorder() extract_output = sep.extract(data_sub, threshold, err=bkg.globalrms, **kwargs_ext) if len(extract_output) == 2: objects, segmap = extract_output else: objects = extract_output segmap = None N_obj = len(objects) msg.append(" - Detected %i objects" % N_obj) # Calculate fixed aperture magnitudes: aper_results = sep.sum_circle(data_sub, objects['x'], objects['y'], aperture, err=error_image) aper_flux, aper_fluxerr, aper_flag = aper_results msg.append(" - Calculating fluxes within circular aperture of: %i pixels" % aperture) # Calculate Kron radius: x = objects['x'] y = objects['y'] a = objects['a'] b = objects['b'] theta = objects['theta'] kronrad, krflag = sep.kron_radius(data_sub, x, y, a, b, theta, 6.0) kronrad[kronrad < 1.] = 1. 
# Sum fluxes in ellipse apertures: flux, fluxerr, flag = sep.sum_ellipse(data_sub, x, y, a, b, theta, 2.5*kronrad, subpix=1) msg.append(" - Calculating Kron radii and fluxes within elliptical apertures") # combine flags: flag |= krflag # If the Kron radius is less than r_min (aperture), use aperture fluxes: r_min = aperture use_circle = kronrad * np.sqrt(b * a) < r_min flux[use_circle] = aper_flux[use_circle] fluxerr[use_circle] = aper_fluxerr[use_circle] flag[use_circle] = aper_flag[use_circle] msg.append(" - Targets with Kron radii below R_min (%.2f) are ignored" % r_min) msg.append(" - Circular aperture fluxes used instead where R_kron < R_min") if np.sum(use_circle) == 1: msg.append(" - %i source identified with R_kron < R_min" % np.sum(use_circle)) else: msg.append(" - %i sources identified with R_kron < R_min" % np.sum(use_circle)) # Save output table: base, ext = os.path.splitext(fname) table_fname = base + '_phot.fits' object_table = Table(objects) object_table['flux_auto'] = flux object_table['flux_err_auto'] = fluxerr object_table['flux_aper'] = aper_flux object_table['flux_err_aper'] = aper_fluxerr object_table['R_kron'] = kronrad flux[flux <= 0] = 1. 
object_table['mag_auto'] = zeropoint - 2.5*np.log10(flux) object_table.write(table_fname, format='fits', overwrite=True) msg.append(" [OUTPUT] - Saved extraction table: %s" % table_fname) # Save segmentation map: if segmap is not None: segmap_fname = base + '_seg.fits' seg_hdr = fits.Header() seg_hdr['AUTHOR'] = 'PyNOT version %s' % __version__ seg_hdr['IMAGE'] = fname seg_hdr['FILTER'] = get_filter(hdr) seg_hdr.add_comment("Segmentation map from SEP (SExtractor)") fits.writeto(segmap_fname, segmap, header=seg_hdr, overwrite=True) msg.append(" [OUTPUT] - Saved source segmentation map: %s" % segmap_fname) else: segmap_fname = '' # Plot source identifications: fig_fname = base + '_sources.pdf' plot_objects(fig_fname, data_sub, objects, threshold=threshold) msg.append(" [OUTPUT] - Saved source identification overview: %s" % fig_fname) msg.append("") output_msg = "\n".join(msg) return table_fname, segmap_fname, output_msg def plot_objects(fig_fname, data, objects, threshold=5.): """ Create a plot of the image and the detected sources from SEP. Parameters ---------- fig_fname : str Filename of the resulting figure data : np.array, shape (N, M) Numpy array of the image data, must be a 2D array. objects : astropy.table.Table or List[dict] List of dictionaries or astropy table holding the object information: x, y : x, y positions a, b : aperture minor and major axes in pixels theta : aperture orientation in radians threshold : float [default=5.] Constract threshold for the image. The color-scale is normalized based on the image statistics (median and MAD). The min and max values are -1*MAD and +`threshold`*MAD around the median value of the image counts, where MAD is the median absolute deviation. 
Returns ------- None """ # plot background-subtracted image fig, ax = plt.subplots() m, s = np.median(data), 1.5*mad(data) ax.imshow(data, interpolation='nearest', cmap='gray_r', vmin=m-1*s, vmax=m+threshold*s, origin='lower') # plot an ellipse for each object for item in objects: e = Ellipse(xy=(item['x'], item['y']), width=10*item['a'], height=10*item['b'], angle=item['theta'] * 180. / np.pi) e.set_facecolor('none') e.set_edgecolor('red') e.set_linewidth(0.8) ax.add_artist(e) fig.tight_layout() fig.savefig(fig_fname) def load_fits_image(fname): """Load a FITS image with an associated error extension and an optional data quality MASK.""" with fits.open(fname) as hdu_list: image = hdu_list[0].data hdr = hdu_list[0].header if 'ERR' in hdu_list: error = hdu_list['ERR'].data else: raise TypeError("No error image detected") if 'MASK' in hdu_list: mask = hdu_list['MASK'].data else: mask = np.zeros_like(image, dtype=bool) return image, error, mask, hdr def measure_seeing(img, centers, size=20, max_obj=10): """ Measure the average seeing in an image by fitting a 2D Gaussian to pre-defined point sources. Parameters ---------- img : np.array, shape(N, M) Numpy array of the image to analyze. centers : list[number, number] List of positions of point sources (x, y) in pixels size : int [default=20] Image cutout size. The Gaussian PSF is fitted in a box of size 2*size by 2*size pixels. max_obj : int [default=10] Maximum number of sources to include in the fitting. Returns ------- fwhm : float The average seeing FWHM in pixels. ratio : float The average axis ratio (ellipticity) of the Gaussian PSF. msg : str Output message of the function call. If no warnings occurred, this is an emptry string. 
""" X = np.arange(img.shape[1]) Y = np.arange(img.shape[0]) sigmas = list() ratios = list() good_x = (centers[:, 0] > size) & (centers[:, 0] < X.max()-size) good_y = (centers[:, 1] > size) & (centers[:, 1] < Y.max()-size) if np.sum(good_x & good_y) < 2: msg = "[WARNING] - Not enough sources to measure seeing." return (-1, -1, msg) max_obj = min(max_obj, np.sum(good_x & good_y)) idx = np.random.choice(np.arange(len(centers))[good_x & good_y], max_obj, replace=False) for x_cen, y_cen in centers[idx]: x1, x2 = int(x_cen)-size, int(x_cen)+size y1, y2 = int(y_cen)-size, int(y_cen)+size cutout = img[y1:y2, x1:x2] x, y = np.meshgrid(X[x1:x2], Y[y1:y2]) A = img[int(y_cen), int(x_cen)] p_init = models.Gaussian2D(amplitude=A, x_mean=x_cen, y_mean=y_cen, x_stddev=5, y_stddev=5, theta=0) try: fitter = fitting.LevMarLSQFitter() except TypeError: continue p_opt = fitter(p_init, x, y, cutout-np.median(cutout)) sigma_x = p_opt.x_stddev sigma_y = p_opt.y_stddev sig = np.sqrt(sigma_x**2 + sigma_y**2) ba = min(sigma_x, sigma_y) / max(sigma_x, sigma_y) sigmas.append(sig) ratios.append(ba) if len(sigmas) < 2: msg = "[WARNING] - Not enough sources to measure seeing." return (-1, -1, msg) fwhm = np.median(sigmas) * 2.35 ratio = np.median(ratios) msg = "" return (fwhm, ratio, msg) def save_file_log(log_name, image_log, target_hdr): with open(log_name, 'w') as out: out.write("# PyNOT Combination Log of Target: %s\n" % target_hdr['OBJECT']) out.write("# Filter: %s\n" % get_filter(target_hdr)) out.write("# Col 1: Filename\n") out.write("# Col 2: FWHM / pixels (seeing)\n") out.write("# Col 3: PSF axis ratio (minor/major)\n") out.write("# Col 4: Exp. 
Time / seconds\n") out.write("# " + 40*"-" + "\n") for line in image_log: out.write(" %s %.1f %5.2f %6.1f\n" % tuple(line)) def image_combine(corrected_images, output='', log_name='', fringe_image='', method='weighted', max_control_points=50, detection_sigma=5, min_area=9): """ Register and combine a list of FITS images using affine transformation. Parameters ---------- corrected_images : List[str] List of input filenames of `corrected` images, i.e., bias, flat corrected and trimmed for filter/aperture vignetting. output : str [default=''] Output filename of the combined image. If not given, it is generated from the OBJECT keyword of the FITS header. log_name : str [default=''] Filename of the combination log. This table holds the average seeing FWHM, PSF ellipticity, and exposure time for each image in the input list. fringe_image : str [default=''] Filename of the fringe image (FITS format) from `pynot.create_fringe_image`. If given, this image will be subtracted from each input image before combination. method : str [default='weighted'] Method for image combination: mean, median or weighted. By default an inverse-variance weighting is used. max_control_points : int [default=50] Maximum number of control point-sources to find the transformation. A lower number will converge faster but may result in a less robust image registration. detection_sigma : float [default=5.] Detection threshold for control points in units of standard deviations of the sky background. min_area : int [default=9] Minimum number of connected pixels to be considered a source Returns ------- output_msg : str Log of messages from the function call. """ msg = list() if fringe_image != '': norm_sky = fits.getdata(fringe_image) msg.append(" - Loaded normalized fringe image: %s" % fringe_image) else: norm_sky = 1. 
target_fname = corrected_images[0] target, target_err, target_mask, target_hdr = load_fits_image(target_fname) target = target - norm_sky*np.median(target) exptime = target_hdr['EXPTIME'] target /= exptime target_err /= exptime target_hdr['BUNIT'] = 'count / s' msg.append(" - Aligning all images to reference: %s" % target_fname) msg.append(" - Registering input images:") shifted_images = [target] shifted_vars = [target_err**2] target = target.byteswap().newbyteorder() if target.dtype.byteorder != '<': target = target.byteswap().newbyteorder() final_exptime = exptime image_log = list() if len(corrected_images) > 1: for fname in corrected_images[1:]: msg.append(" - Input image: %s" % fname) source, source_err, source_mask, hdr_i = load_fits_image(fname) source = source - norm_sky*np.median(source) source /= hdr_i['EXPTIME'] source_err /= hdr_i['EXPTIME'] final_exptime += hdr_i['EXPTIME'] try: transf, (coords) = aa.find_transform(source, target, max_control_points=max_control_points, detection_sigma=detection_sigma, min_area=min_area) except: msg.append(" [ERROR] - Failed to find image transformation!") msg.append(" - Skipping image") continue source = source.byteswap().newbyteorder() source_err = source_err.byteswap().newbyteorder() source_mask = source_mask.byteswap().newbyteorder() if source.dtype.byteorder != '<': source = source.byteswap().newbyteorder() if source_err.dtype.byteorder != '<': source_err = source_err.byteswap().newbyteorder() if source_mask.dtype.byteorder != '<': source_mask = source_mask.byteswap().newbyteorder() registered_image, _ = aa.apply_transform(transf, source, target, fill_value=0) registered_error, _ = aa.apply_transform(transf, source_err, target, fill_value=0) registered_mask, _ = aa.apply_transform(transf, source_mask, target, fill_value=0) target_mask += 1 * (registered_mask > 0) registered_error[registered_error == 0] = np.mean(registered_error)*10 shifted_images.append(registered_image) shifted_vars.append(registered_error**2) 
source_list, target_list = coords if len(image_log) == 0: fwhm, ratio, seeing_msg = measure_seeing(target, target_list) image_log.append([os.path.basename(target_fname), fwhm, ratio, exptime]) if seeing_msg: msg.append(seeing_msg) fwhm, ratio, seeing_msg = measure_seeing(source, source_list) if seeing_msg: msg.append(seeing_msg) image_log.append([os.path.basename(fname), fwhm, ratio, hdr_i['EXPTIME']]) if log_name == '': filter_name = alfosc.filter_translate[get_filter(target_hdr)] log_name = 'filelist_%s_%s.txt' % (target_hdr['OBJECT'], filter_name) save_file_log(log_name, image_log, target_hdr) msg.append(" [OUTPUT] - Saved file log and image stats: %s" % log_name) if method == 'median': final_image = np.nanmedian(shifted_images, axis=0) final_error = np.sqrt(np.nanmean(shifted_vars, axis=0)) target_hdr['COMBINE'] = "Median" elif method == 'mean': final_image = np.nanmean(shifted_images, axis=0) final_error = np.sqrt(np.nanmean(shifted_vars, axis=0)) target_hdr['COMBINE'] = "Mean" else: w = 1./np.array(shifted_vars) shifted_images = np.array(shifted_images) final_image = np.nansum(w*shifted_images, axis=0) / np.sum(w, axis=0) final_error = np.sqrt(1. 
/ np.nansum(w, axis=0)) target_hdr['COMBINE'] = "Inverse Variance Weighted" final_mask = 1 * (target_mask > 0) else: final_image = target final_error = target_err final_mask = target_mask target_hdr['COMBINE'] = "None" target_hdr['NCOMBINE'] = len(shifted_images) target_hdr['EXPTIME'] = final_exptime / len(shifted_images) # Fix NaN values from negative pixel values: err_NaN = np.isnan(final_error) final_error[err_NaN] = np.nanmean(final_error)*100 msg.append(" - Correcting NaNs in noise image: %i pixel(s)" % np.sum(err_NaN)) target_hdr['DATAMIN'] = np.nanmin(final_image) target_hdr['DATAMAX'] = np.nanmax(final_image) target_hdr['EXTNAME'] = 'DATA' target_hdr['AUTHOR'] = 'PyNOT version %s' % __version__ mask_hdr = fits.Header() mask_hdr.add_comment("0 = Good Pixels") mask_hdr.add_comment("1 = Cosmic Ray Hits") if output == '': output = "combined_%s.fits" % target_hdr['OBJECT'] sci_ext = fits.PrimaryHDU(final_image, header=target_hdr) err_ext = fits.ImageHDU(final_error, header=target_hdr, name='ERR') mask_ext = fits.ImageHDU(final_mask, header=mask_hdr, name='MASK') output_HDU = fits.HDUList([sci_ext, err_ext, mask_ext]) output_HDU.writeto(output, overwrite=True) msg.append(" - Successfully combined the images") msg.append(" [OUTPUT] - Saving output: %s" % output) msg.append("") output_msg = "\n".join(msg) return output_msg def plot_image2D(fname, image, vmin=-2, vmax=2): fig = plt.figure() ax = fig.add_subplot(111) med = np.median(image) s = mad(image) im = ax.imshow(image, origin='lower', vmin=med+vmin*s, vmax=med+vmax*s) fig.colorbar(im) fig.tight_layout() fig.savefig(fname) def create_fringe_image(input_filenames, output='', fig_fname='', threshold=3.0): """ Create a normalized average fringe image for a list of images taken with the same filter. Parameters ---------- input_filenames : str List of FITS filenames of images taken in the same photometric band. output : str [default=''] Output filename of the fringe image. 
fig_fname : str [default=''] Output filename of the diagnostic figure showing the normalized fringe image. threshold : float [default=3.] Threshold for source rejection in the image stacking in units of the standard deviation of the sky background (estimated via median absolute deviation). Returns ------- output_msg : str Log of messages from the function call. """ msg = list() hdr = fits.getheader(input_filenames[0]) img_list = [fits.getdata(fname) for fname in input_filenames] exptimes = [fits.getheader(fname)['EXPTIME'] for fname in input_filenames] msg.append(" - Loaded input images") mask = [np.fabs(im-np.median(im)) < threshold*mad(im) for im in img_list] msg.append(" - Created image mask using threshold: %.2f" % threshold) N = np.sum(mask, 0) skysum = np.sum([im*m/t for im, m, t in zip(img_list, mask, exptimes)], axis=0) skysum[N == 0] = np.median(skysum) N[N == 0] = 1 sky = skysum / N norm_sky = sky / np.median(sky) msg.append(" - Created normalized fringe image") if fig_fname: plot_image2D(fig_fname, norm_sky, vmin=-2, vmax=2) msg.append(" [OUTPUT] - Saving figure: %s" % fig_fname) if output == '': output = "fringe_%s.fits" % hdr['OBJECT'] hdr['OBJECT'] = 'Fringe Image' hdr['EXTNAME'] = 'MODEL' hdr.add_comment('Average Fringe image, median normalized') fits.writeto(output, norm_sky, header=hdr, overwrite=True) msg.append(" [OUTPUT] - Saving output: %s" % output) msg.append("") output_msg = "\n".join(msg) return output_msg def match_phot_catalogs(sep, phot, match_radius=1.): """ Match a source catalog from SEP to a photometric catalog `phot`. Both catalogs must include columns 'ra' and 'dec'. Parameters ---------- match_radius : float [default=1.0] Matching radius in arcseconds Returns ------- matched_sep : astropy.table.Table An astropy table of sources in the SEP source catalog that have matches in the reference `phot` catalog. 
matched_phot : astropy.table.Table An astropy table of sources in the reference `phot` catalog that have matches in the SEP source catalog. """ matched_sep = list() matched_phot = list() refs = np.array([phot['ra'], phot['dec']]).T for row in sep: xy = np.array([row['ra'], row['dec']]) dist = np.sqrt(np.sum((refs - xy)**2, axis=1)) index = np.argmin(dist) if np.min(dist) < match_radius/3600.: matched_phot.append(np.array(phot[index])) matched_sep.append(np.array(row)) matched_sep = np.array(matched_sep) matched_phot = np.array(matched_phot) return Table(matched_sep), Table(matched_phot) def get_sdss_catalog(ra, dec, radius=4.): """Download the SDSS photometry using astroquery for a circular region of radius in deg.""" catalog_fname = 'sdss_phot_%.2f%+.2f.csv' % (ra, dec) fields = ['ra', 'dec', 'psfMag_u', 'psfMag_g', 'psfMag_r', 'psfMag_i', 'psfMag_z', 'psfMagErr_u', 'psfMagErr_g', 'psfMagErr_r', 'psfMagErr_i', 'psfMagErr_z'] field_center = SkyCoord(ra, dec, frame='icrs', unit='deg') sdss_result = SDSS.query_region(field_center, radius*u.arcmin, photoobj_fields=fields) if sdss_result is not None: sdss_result.write(catalog_fname, format='ascii.csv', overwrite=True) return sdss_result ext_coeffs = {'u': 0.517, 'g': 0.165, 'r': 0.0754, 'i': 0.0257, 'z': 0.0114} def flux_calibration_sdss(img_fname, sep_fname, fig_fname='', q_lim=0.8, kappa=3, match_radius=1.): """ Self-calibration of magnitude zero point using SDSS photometry as reference Parameters ---------- img_fname : string Filename of WCS calibrated image (_wcs.fits) sep_fname : string Filename of the source extraction table (_phot.fits) fig_fname : string Filename of the diagnostic figure. Autogenerated by default. q_lim : float [default=0.8] Reject elliptical sources with axis ratio < `q_lim`. Axis ratio is defined as minor/major. kappa : float [default=3] Threshold for projected distance filtering. 
Sources are rejected if the distance differs more then `kappa` times the median absolute deviation from the median of all distances. match_radius : float [default=1] Matching radius between SDSS sources and image sources Returns ------- output_msg : string Log of messages from the function call. """ # -- Get SDSS catalog msg = list() hdr = fits.getheader(img_fname) msg.append(" - Loaded image: %s" % img_fname) radius = np.sqrt(hdr['CD1_1']**2 + hdr['CD1_2']**2)*60 * hdr['NAXIS1'] / np.sqrt(2) msg.append(" - Downloading SDSS photometric catalog...") try: sdss_cat = get_sdss_catalog(hdr['CRVAL1'], hdr['CRVAL2'], radius) except: msg.append(" [ERROR] - Could not connect to SDSS server. Check your internet connection.") msg.append("") return "\n".join(msg) def line(x, zp): return zp + x if sdss_cat is None: msg.append(" [ERROR] - No data found in SDSS. No zero point calculated") msg.append("") return "\n".join(msg) airmass = hdr['AIRMASS'] filter = alfosc.filter_translate[alfosc.get_filter(hdr)] if 'SDSS' in filter: band = filter.split('_')[0] else: msg.append(" [ERROR] - The image was not taken with an SDSS filter. 
No zero point calculated") msg.append("") return "\n".join(msg) # For r-band: (measured from La Palma extinction curve) mag_key = 'psfMag_%s' % band mag_err_key = 'psfMagErr_%s' % band good = (sdss_cat[mag_key] > 0) & (sdss_cat[mag_key] < 30) sdss_cat = sdss_cat[good] # Load SEP filename: try: sep_cat = Table.read(sep_fname) sep_hdr = fits.getheader(sep_fname) msg.append(" - Loaded SEP source table: %s" % sep_fname) except (FileNotFoundError, OSError): msg.append(" [ERROR] - Could not load SEP source table: %s" % sep_fname) msg.append("") return "\n".join(msg) if 'MAG_ZP' in sep_hdr: msg.append("[WARNING] - The source table has already been flux calibrated by PyNOT") msg.append(" - Terminating task...") msg.append("") return "\n".join(msg) axis_ratio = sep_cat['b']/sep_cat['a'] # Select only 'round' sources: sep_points = sep_cat[axis_ratio > q_lim] # Match catalogs: match_sep, match_sdss = match_phot_catalogs(sep_points, sdss_cat) msg.append(" - Cross matched source catalog") mag = match_sdss[mag_key] mag_err = match_sdss[mag_err_key] m_inst = match_sep['mag_auto'] k = ext_coeffs[band] # Get first estimate using the median: zp0, _ = curve_fit(line, m_inst+k*airmass, mag, p0=[27], sigma=mag_err) # Filter outliers: cut = np.abs(zp0 + m_inst + k*airmass - mag) < kappa*mad(zp0 + m_inst + k*airmass - mag) cut &= (mag < 20.1) & (mag > 15) # Get weighted average zero point: w = 1./mag_err[cut]**2 zp = np.sum((mag[cut] - m_inst[cut] - k*airmass) * w) / np.sum(w) msg.append(" - Calculating zero point in SDSS %s band using %i sources" % (band, len(w))) # Zero point dispersion: zp_err = np.std(mag[cut] - zp - m_inst[cut] - k*airmass) msg.append(" - Zero Point = %.3f ± %.3f mag" % (zp, zp_err)) sep_cat['mag_auto'] += zp sep_cat.write(sep_fname, overwrite=True) with fits.open(sep_fname, 'update') as sep_file: sep_file[0].header.add_comment("Self-calibration of mag. 
zero point using SDSS") sep_file[0].header['MAG_ZP'] = (np.round(zp, 3), "Magnitude zero point (AB mag)") sep_file[0].header['ZP_ERR'] = (np.round(zp_err, 3), "Uncertainty on magnitude zero point (AB mag)") msg.append(" [OUTPUT] - Updating magnitudes in source table: %s" % sep_fname) # -- Plot the zero point for visual aid: base, _ = os.path.splitext(os.path.basename(img_fname)) dirname = os.path.dirname(img_fname) if fig_fname == '': fig_fname = 'zero_point_' + base + '.pdf' fig_fname = os.path.join(dirname, fig_fname) fig = plt.figure() ax = fig.add_subplot(111) ax.errorbar(m_inst, mag, 3*mag_err, ls='', marker='.', color='k', alpha=0.8) ax.plot(m_inst[cut], mag[cut], ls='', marker='o', color='b', alpha=0.7) ax.plot(np.sort(m_inst), zp + np.sort(m_inst) + k*airmass, ls='--', color='crimson', label='ZP = %.2f ± %.2f' % (zp, zp_err)) ax.set_ylim(np.min(mag)-0.2, np.max(mag)+0.5) ax.set_xlabel("Instrument Magnitude") ax.set_ylabel("Reference SDSS Magnitude (r-band)") ax.legend() ax.tick_params(which='both', top=False, right=False) fig.tight_layout() fig.savefig(fig_fname) msg.append(" [OUTPUT] - Saving diagnostic figure: %s" % fig_fname) # -- Update header in FITS image: with fits.open(img_fname) as hdu_list: hdu_list['DATA'].header.add_comment("Self-calibration of mag. zero point using SDSS") hdu_list['DATA'].header['MAG_ZP'] = (np.round(zp, 3), "Magnitude zero point (AB mag)") hdu_list['DATA'].header['ZP_ERR'] = (np.round(zp_err, 3), "Uncertainty on magnitude zero point (AB mag)") hdu_list.writeto(img_fname, overwrite=True) msg.append(" [OUTPUT] - Updating header of input image: %s" % img_fname) msg.append(" - MAG_ZP = %10.3f / %s" % (zp, "Magnitude zero point (AB mag)")) msg.append(" - ZP_ERR = %10.3f / %s" % (zp_err, "Uncertainty on magnitude zero point (AB mag)")) msg.append("") return "\n".join(msg)
39.097784
150
0.627318
import numpy as np import matplotlib.pyplot as plt from matplotlib.patches import Ellipse from astropy.io import fits from astropy.modeling import models, fitting from astropy.table import Table from scipy.optimize import curve_fit import os from astropy.coordinates import SkyCoord import astropy.units as u from astroquery.sdss import SDSS import astroalign as aa import sep from pynot import alfosc from pynot.functions import get_version_number, mad from pynot.data.organizer import get_filter __version__ = get_version_number() def source_detection(fname, zeropoint=0., threshold=5.0, aperture=10.0, kwargs_bg={}, kwargs_ext={}): msg = list() data = fits.getdata(fname) error_image = fits.getdata(fname, 'ERR') hdr = fits.getheader(fname) msg.append(" - Loaded input image: %s" % fname) if 'EXPTIME' in hdr: exptime = hdr['EXPTIME'] msg.append(" - Loaded exposure time from image header: %.1f" % exptime) else: exptime = 1. msg.append("[WARNING] - No exposure time found in image header! Assuming image in counts.") data = data * 1. error_image = error_image * 1. 
if 'threshold' in kwargs_ext: threshold = kwargs_ext.pop('threshold') if 'aperture' in kwargs_ext: aperture = kwargs_ext.pop('aperture') bkg = sep.Background(data, **kwargs_bg) data_sub = data - bkg msg.append(" - Subtracted sky background") msg.append(" - Background RMS: %.2e" % bkg.globalrms) data_sub = data_sub.byteswap().newbyteorder() error_image = error_image.byteswap().newbyteorder() if data_sub.dtype.byteorder != '<': data_sub = data_sub.byteswap().newbyteorder() error_image = error_image.byteswap().newbyteorder() extract_output = sep.extract(data_sub, threshold, err=bkg.globalrms, **kwargs_ext) if len(extract_output) == 2: objects, segmap = extract_output else: objects = extract_output segmap = None N_obj = len(objects) msg.append(" - Detected %i objects" % N_obj) aper_results = sep.sum_circle(data_sub, objects['x'], objects['y'], aperture, err=error_image) aper_flux, aper_fluxerr, aper_flag = aper_results msg.append(" - Calculating fluxes within circular aperture of: %i pixels" % aperture) x = objects['x'] y = objects['y'] a = objects['a'] b = objects['b'] theta = objects['theta'] kronrad, krflag = sep.kron_radius(data_sub, x, y, a, b, theta, 6.0) kronrad[kronrad < 1.] = 1. 
flux, fluxerr, flag = sep.sum_ellipse(data_sub, x, y, a, b, theta, 2.5*kronrad, subpix=1) msg.append(" - Calculating Kron radii and fluxes within elliptical apertures") flag |= krflag r_min = aperture use_circle = kronrad * np.sqrt(b * a) < r_min flux[use_circle] = aper_flux[use_circle] fluxerr[use_circle] = aper_fluxerr[use_circle] flag[use_circle] = aper_flag[use_circle] msg.append(" - Targets with Kron radii below R_min (%.2f) are ignored" % r_min) msg.append(" - Circular aperture fluxes used instead where R_kron < R_min") if np.sum(use_circle) == 1: msg.append(" - %i source identified with R_kron < R_min" % np.sum(use_circle)) else: msg.append(" - %i sources identified with R_kron < R_min" % np.sum(use_circle)) base, ext = os.path.splitext(fname) table_fname = base + '_phot.fits' object_table = Table(objects) object_table['flux_auto'] = flux object_table['flux_err_auto'] = fluxerr object_table['flux_aper'] = aper_flux object_table['flux_err_aper'] = aper_fluxerr object_table['R_kron'] = kronrad flux[flux <= 0] = 1. 
object_table['mag_auto'] = zeropoint - 2.5*np.log10(flux) object_table.write(table_fname, format='fits', overwrite=True) msg.append(" [OUTPUT] - Saved extraction table: %s" % table_fname) if segmap is not None: segmap_fname = base + '_seg.fits' seg_hdr = fits.Header() seg_hdr['AUTHOR'] = 'PyNOT version %s' % __version__ seg_hdr['IMAGE'] = fname seg_hdr['FILTER'] = get_filter(hdr) seg_hdr.add_comment("Segmentation map from SEP (SExtractor)") fits.writeto(segmap_fname, segmap, header=seg_hdr, overwrite=True) msg.append(" [OUTPUT] - Saved source segmentation map: %s" % segmap_fname) else: segmap_fname = '' fig_fname = base + '_sources.pdf' plot_objects(fig_fname, data_sub, objects, threshold=threshold) msg.append(" [OUTPUT] - Saved source identification overview: %s" % fig_fname) msg.append("") output_msg = "\n".join(msg) return table_fname, segmap_fname, output_msg def plot_objects(fig_fname, data, objects, threshold=5.): fig, ax = plt.subplots() m, s = np.median(data), 1.5*mad(data) ax.imshow(data, interpolation='nearest', cmap='gray_r', vmin=m-1*s, vmax=m+threshold*s, origin='lower') for item in objects: e = Ellipse(xy=(item['x'], item['y']), width=10*item['a'], height=10*item['b'], angle=item['theta'] * 180. 
/ np.pi) e.set_facecolor('none') e.set_edgecolor('red') e.set_linewidth(0.8) ax.add_artist(e) fig.tight_layout() fig.savefig(fig_fname) def load_fits_image(fname): with fits.open(fname) as hdu_list: image = hdu_list[0].data hdr = hdu_list[0].header if 'ERR' in hdu_list: error = hdu_list['ERR'].data else: raise TypeError("No error image detected") if 'MASK' in hdu_list: mask = hdu_list['MASK'].data else: mask = np.zeros_like(image, dtype=bool) return image, error, mask, hdr def measure_seeing(img, centers, size=20, max_obj=10): X = np.arange(img.shape[1]) Y = np.arange(img.shape[0]) sigmas = list() ratios = list() good_x = (centers[:, 0] > size) & (centers[:, 0] < X.max()-size) good_y = (centers[:, 1] > size) & (centers[:, 1] < Y.max()-size) if np.sum(good_x & good_y) < 2: msg = "[WARNING] - Not enough sources to measure seeing." return (-1, -1, msg) max_obj = min(max_obj, np.sum(good_x & good_y)) idx = np.random.choice(np.arange(len(centers))[good_x & good_y], max_obj, replace=False) for x_cen, y_cen in centers[idx]: x1, x2 = int(x_cen)-size, int(x_cen)+size y1, y2 = int(y_cen)-size, int(y_cen)+size cutout = img[y1:y2, x1:x2] x, y = np.meshgrid(X[x1:x2], Y[y1:y2]) A = img[int(y_cen), int(x_cen)] p_init = models.Gaussian2D(amplitude=A, x_mean=x_cen, y_mean=y_cen, x_stddev=5, y_stddev=5, theta=0) try: fitter = fitting.LevMarLSQFitter() except TypeError: continue p_opt = fitter(p_init, x, y, cutout-np.median(cutout)) sigma_x = p_opt.x_stddev sigma_y = p_opt.y_stddev sig = np.sqrt(sigma_x**2 + sigma_y**2) ba = min(sigma_x, sigma_y) / max(sigma_x, sigma_y) sigmas.append(sig) ratios.append(ba) if len(sigmas) < 2: msg = "[WARNING] - Not enough sources to measure seeing." 
return (-1, -1, msg) fwhm = np.median(sigmas) * 2.35 ratio = np.median(ratios) msg = "" return (fwhm, ratio, msg) def save_file_log(log_name, image_log, target_hdr): with open(log_name, 'w') as out: out.write("# PyNOT Combination Log of Target: %s\n" % target_hdr['OBJECT']) out.write("# Filter: %s\n" % get_filter(target_hdr)) out.write("# Col 1: Filename\n") out.write("# Col 2: FWHM / pixels (seeing)\n") out.write("# Col 3: PSF axis ratio (minor/major)\n") out.write("# Col 4: Exp. Time / seconds\n") out.write("# " + 40*"-" + "\n") for line in image_log: out.write(" %s %.1f %5.2f %6.1f\n" % tuple(line)) def image_combine(corrected_images, output='', log_name='', fringe_image='', method='weighted', max_control_points=50, detection_sigma=5, min_area=9): msg = list() if fringe_image != '': norm_sky = fits.getdata(fringe_image) msg.append(" - Loaded normalized fringe image: %s" % fringe_image) else: norm_sky = 1. target_fname = corrected_images[0] target, target_err, target_mask, target_hdr = load_fits_image(target_fname) target = target - norm_sky*np.median(target) exptime = target_hdr['EXPTIME'] target /= exptime target_err /= exptime target_hdr['BUNIT'] = 'count / s' msg.append(" - Aligning all images to reference: %s" % target_fname) msg.append(" - Registering input images:") shifted_images = [target] shifted_vars = [target_err**2] target = target.byteswap().newbyteorder() if target.dtype.byteorder != '<': target = target.byteswap().newbyteorder() final_exptime = exptime image_log = list() if len(corrected_images) > 1: for fname in corrected_images[1:]: msg.append(" - Input image: %s" % fname) source, source_err, source_mask, hdr_i = load_fits_image(fname) source = source - norm_sky*np.median(source) source /= hdr_i['EXPTIME'] source_err /= hdr_i['EXPTIME'] final_exptime += hdr_i['EXPTIME'] try: transf, (coords) = aa.find_transform(source, target, max_control_points=max_control_points, detection_sigma=detection_sigma, min_area=min_area) except: msg.append(" [ERROR] 
- Failed to find image transformation!") msg.append(" - Skipping image") continue source = source.byteswap().newbyteorder() source_err = source_err.byteswap().newbyteorder() source_mask = source_mask.byteswap().newbyteorder() if source.dtype.byteorder != '<': source = source.byteswap().newbyteorder() if source_err.dtype.byteorder != '<': source_err = source_err.byteswap().newbyteorder() if source_mask.dtype.byteorder != '<': source_mask = source_mask.byteswap().newbyteorder() registered_image, _ = aa.apply_transform(transf, source, target, fill_value=0) registered_error, _ = aa.apply_transform(transf, source_err, target, fill_value=0) registered_mask, _ = aa.apply_transform(transf, source_mask, target, fill_value=0) target_mask += 1 * (registered_mask > 0) registered_error[registered_error == 0] = np.mean(registered_error)*10 shifted_images.append(registered_image) shifted_vars.append(registered_error**2) source_list, target_list = coords if len(image_log) == 0: fwhm, ratio, seeing_msg = measure_seeing(target, target_list) image_log.append([os.path.basename(target_fname), fwhm, ratio, exptime]) if seeing_msg: msg.append(seeing_msg) fwhm, ratio, seeing_msg = measure_seeing(source, source_list) if seeing_msg: msg.append(seeing_msg) image_log.append([os.path.basename(fname), fwhm, ratio, hdr_i['EXPTIME']]) if log_name == '': filter_name = alfosc.filter_translate[get_filter(target_hdr)] log_name = 'filelist_%s_%s.txt' % (target_hdr['OBJECT'], filter_name) save_file_log(log_name, image_log, target_hdr) msg.append(" [OUTPUT] - Saved file log and image stats: %s" % log_name) if method == 'median': final_image = np.nanmedian(shifted_images, axis=0) final_error = np.sqrt(np.nanmean(shifted_vars, axis=0)) target_hdr['COMBINE'] = "Median" elif method == 'mean': final_image = np.nanmean(shifted_images, axis=0) final_error = np.sqrt(np.nanmean(shifted_vars, axis=0)) target_hdr['COMBINE'] = "Mean" else: w = 1./np.array(shifted_vars) shifted_images = np.array(shifted_images) 
final_image = np.nansum(w*shifted_images, axis=0) / np.sum(w, axis=0) final_error = np.sqrt(1. / np.nansum(w, axis=0)) target_hdr['COMBINE'] = "Inverse Variance Weighted" final_mask = 1 * (target_mask > 0) else: final_image = target final_error = target_err final_mask = target_mask target_hdr['COMBINE'] = "None" target_hdr['NCOMBINE'] = len(shifted_images) target_hdr['EXPTIME'] = final_exptime / len(shifted_images) err_NaN = np.isnan(final_error) final_error[err_NaN] = np.nanmean(final_error)*100 msg.append(" - Correcting NaNs in noise image: %i pixel(s)" % np.sum(err_NaN)) target_hdr['DATAMIN'] = np.nanmin(final_image) target_hdr['DATAMAX'] = np.nanmax(final_image) target_hdr['EXTNAME'] = 'DATA' target_hdr['AUTHOR'] = 'PyNOT version %s' % __version__ mask_hdr = fits.Header() mask_hdr.add_comment("0 = Good Pixels") mask_hdr.add_comment("1 = Cosmic Ray Hits") if output == '': output = "combined_%s.fits" % target_hdr['OBJECT'] sci_ext = fits.PrimaryHDU(final_image, header=target_hdr) err_ext = fits.ImageHDU(final_error, header=target_hdr, name='ERR') mask_ext = fits.ImageHDU(final_mask, header=mask_hdr, name='MASK') output_HDU = fits.HDUList([sci_ext, err_ext, mask_ext]) output_HDU.writeto(output, overwrite=True) msg.append(" - Successfully combined the images") msg.append(" [OUTPUT] - Saving output: %s" % output) msg.append("") output_msg = "\n".join(msg) return output_msg def plot_image2D(fname, image, vmin=-2, vmax=2): fig = plt.figure() ax = fig.add_subplot(111) med = np.median(image) s = mad(image) im = ax.imshow(image, origin='lower', vmin=med+vmin*s, vmax=med+vmax*s) fig.colorbar(im) fig.tight_layout() fig.savefig(fname) def create_fringe_image(input_filenames, output='', fig_fname='', threshold=3.0): msg = list() hdr = fits.getheader(input_filenames[0]) img_list = [fits.getdata(fname) for fname in input_filenames] exptimes = [fits.getheader(fname)['EXPTIME'] for fname in input_filenames] msg.append(" - Loaded input images") mask = [np.fabs(im-np.median(im)) < 
threshold*mad(im) for im in img_list] msg.append(" - Created image mask using threshold: %.2f" % threshold) N = np.sum(mask, 0) skysum = np.sum([im*m/t for im, m, t in zip(img_list, mask, exptimes)], axis=0) skysum[N == 0] = np.median(skysum) N[N == 0] = 1 sky = skysum / N norm_sky = sky / np.median(sky) msg.append(" - Created normalized fringe image") if fig_fname: plot_image2D(fig_fname, norm_sky, vmin=-2, vmax=2) msg.append(" [OUTPUT] - Saving figure: %s" % fig_fname) if output == '': output = "fringe_%s.fits" % hdr['OBJECT'] hdr['OBJECT'] = 'Fringe Image' hdr['EXTNAME'] = 'MODEL' hdr.add_comment('Average Fringe image, median normalized') fits.writeto(output, norm_sky, header=hdr, overwrite=True) msg.append(" [OUTPUT] - Saving output: %s" % output) msg.append("") output_msg = "\n".join(msg) return output_msg def match_phot_catalogs(sep, phot, match_radius=1.): matched_sep = list() matched_phot = list() refs = np.array([phot['ra'], phot['dec']]).T for row in sep: xy = np.array([row['ra'], row['dec']]) dist = np.sqrt(np.sum((refs - xy)**2, axis=1)) index = np.argmin(dist) if np.min(dist) < match_radius/3600.: matched_phot.append(np.array(phot[index])) matched_sep.append(np.array(row)) matched_sep = np.array(matched_sep) matched_phot = np.array(matched_phot) return Table(matched_sep), Table(matched_phot) def get_sdss_catalog(ra, dec, radius=4.): catalog_fname = 'sdss_phot_%.2f%+.2f.csv' % (ra, dec) fields = ['ra', 'dec', 'psfMag_u', 'psfMag_g', 'psfMag_r', 'psfMag_i', 'psfMag_z', 'psfMagErr_u', 'psfMagErr_g', 'psfMagErr_r', 'psfMagErr_i', 'psfMagErr_z'] field_center = SkyCoord(ra, dec, frame='icrs', unit='deg') sdss_result = SDSS.query_region(field_center, radius*u.arcmin, photoobj_fields=fields) if sdss_result is not None: sdss_result.write(catalog_fname, format='ascii.csv', overwrite=True) return sdss_result ext_coeffs = {'u': 0.517, 'g': 0.165, 'r': 0.0754, 'i': 0.0257, 'z': 0.0114} def flux_calibration_sdss(img_fname, sep_fname, fig_fname='', q_lim=0.8, 
kappa=3, match_radius=1.): msg = list() hdr = fits.getheader(img_fname) msg.append(" - Loaded image: %s" % img_fname) radius = np.sqrt(hdr['CD1_1']**2 + hdr['CD1_2']**2)*60 * hdr['NAXIS1'] / np.sqrt(2) msg.append(" - Downloading SDSS photometric catalog...") try: sdss_cat = get_sdss_catalog(hdr['CRVAL1'], hdr['CRVAL2'], radius) except: msg.append(" [ERROR] - Could not connect to SDSS server. Check your internet connection.") msg.append("") return "\n".join(msg) def line(x, zp): return zp + x if sdss_cat is None: msg.append(" [ERROR] - No data found in SDSS. No zero point calculated") msg.append("") return "\n".join(msg) airmass = hdr['AIRMASS'] filter = alfosc.filter_translate[alfosc.get_filter(hdr)] if 'SDSS' in filter: band = filter.split('_')[0] else: msg.append(" [ERROR] - The image was not taken with an SDSS filter. No zero point calculated") msg.append("") return "\n".join(msg) mag_key = 'psfMag_%s' % band mag_err_key = 'psfMagErr_%s' % band good = (sdss_cat[mag_key] > 0) & (sdss_cat[mag_key] < 30) sdss_cat = sdss_cat[good] try: sep_cat = Table.read(sep_fname) sep_hdr = fits.getheader(sep_fname) msg.append(" - Loaded SEP source table: %s" % sep_fname) except (FileNotFoundError, OSError): msg.append(" [ERROR] - Could not load SEP source table: %s" % sep_fname) msg.append("") return "\n".join(msg) if 'MAG_ZP' in sep_hdr: msg.append("[WARNING] - The source table has already been flux calibrated by PyNOT") msg.append(" - Terminating task...") msg.append("") return "\n".join(msg) axis_ratio = sep_cat['b']/sep_cat['a'] sep_points = sep_cat[axis_ratio > q_lim] match_sep, match_sdss = match_phot_catalogs(sep_points, sdss_cat) msg.append(" - Cross matched source catalog") mag = match_sdss[mag_key] mag_err = match_sdss[mag_err_key] m_inst = match_sep['mag_auto'] k = ext_coeffs[band] zp0, _ = curve_fit(line, m_inst+k*airmass, mag, p0=[27], sigma=mag_err) cut = np.abs(zp0 + m_inst + k*airmass - mag) < kappa*mad(zp0 + m_inst + k*airmass - mag) cut &= (mag < 20.1) & (mag > 
15) w = 1./mag_err[cut]**2 zp = np.sum((mag[cut] - m_inst[cut] - k*airmass) * w) / np.sum(w) msg.append(" - Calculating zero point in SDSS %s band using %i sources" % (band, len(w))) zp_err = np.std(mag[cut] - zp - m_inst[cut] - k*airmass) msg.append(" - Zero Point = %.3f ± %.3f mag" % (zp, zp_err)) sep_cat['mag_auto'] += zp sep_cat.write(sep_fname, overwrite=True) with fits.open(sep_fname, 'update') as sep_file: sep_file[0].header.add_comment("Self-calibration of mag. zero point using SDSS") sep_file[0].header['MAG_ZP'] = (np.round(zp, 3), "Magnitude zero point (AB mag)") sep_file[0].header['ZP_ERR'] = (np.round(zp_err, 3), "Uncertainty on magnitude zero point (AB mag)") msg.append(" [OUTPUT] - Updating magnitudes in source table: %s" % sep_fname) base, _ = os.path.splitext(os.path.basename(img_fname)) dirname = os.path.dirname(img_fname) if fig_fname == '': fig_fname = 'zero_point_' + base + '.pdf' fig_fname = os.path.join(dirname, fig_fname) fig = plt.figure() ax = fig.add_subplot(111) ax.errorbar(m_inst, mag, 3*mag_err, ls='', marker='.', color='k', alpha=0.8) ax.plot(m_inst[cut], mag[cut], ls='', marker='o', color='b', alpha=0.7) ax.plot(np.sort(m_inst), zp + np.sort(m_inst) + k*airmass, ls='--', color='crimson', label='ZP = %.2f ± %.2f' % (zp, zp_err)) ax.set_ylim(np.min(mag)-0.2, np.max(mag)+0.5) ax.set_xlabel("Instrument Magnitude") ax.set_ylabel("Reference SDSS Magnitude (r-band)") ax.legend() ax.tick_params(which='both', top=False, right=False) fig.tight_layout() fig.savefig(fig_fname) msg.append(" [OUTPUT] - Saving diagnostic figure: %s" % fig_fname) with fits.open(img_fname) as hdu_list: hdu_list['DATA'].header.add_comment("Self-calibration of mag. 
zero point using SDSS") hdu_list['DATA'].header['MAG_ZP'] = (np.round(zp, 3), "Magnitude zero point (AB mag)") hdu_list['DATA'].header['ZP_ERR'] = (np.round(zp_err, 3), "Uncertainty on magnitude zero point (AB mag)") hdu_list.writeto(img_fname, overwrite=True) msg.append(" [OUTPUT] - Updating header of input image: %s" % img_fname) msg.append(" - MAG_ZP = %10.3f / %s" % (zp, "Magnitude zero point (AB mag)")) msg.append(" - ZP_ERR = %10.3f / %s" % (zp_err, "Uncertainty on magnitude zero point (AB mag)")) msg.append("") return "\n".join(msg)
true
true
f71396a904140ae652cf61c26217b8c4aeb3d744
1,418
py
Python
fiases/snapshot.py
u4097/elasticsearch-fias
d03e16492af2f39a7cc59723aa2af8d04998ecfd
[ "MIT" ]
3
2020-02-14T06:20:14.000Z
2022-01-10T12:40:13.000Z
fiases/snapshot.py
u4097/elasticsearch-fias
d03e16492af2f39a7cc59723aa2af8d04998ecfd
[ "MIT" ]
null
null
null
fiases/snapshot.py
u4097/elasticsearch-fias
d03e16492af2f39a7cc59723aa2af8d04998ecfd
[ "MIT" ]
1
2020-02-14T06:28:29.000Z
2020-02-14T06:28:29.000Z
from elasticsearch.client import SnapshotClient from fiases.fias_data import ES import fiases.fias_data sn = SnapshotClient(ES) def register(location="/usr/share/elasticsearch/snapshots"): sn_body = { "type": "fs", "settings": { "compress": "true", "location": location } } sn.create_repository(repository="fias", body=sn_body) def restore(): ES.indices.delete(index=address.INDEX, ignore=[400, 404]) ES.indices.delete(index=houses.INDEX, ignore=[400, 404]) sn.restore(repository="fias", snapshot="fias_full", body={ "indices": [address.INDEX, houses.INDEX] }) def restoreIfNotExist(): if not ES.indices.exists(address.INDEX): sn.restore(repository="fias", snapshot="fias_full", body={ "indices": [address.INDEX, houses.INDEX] }) else: pass def createFullSnapshot(): try: sn.delete(repository="fias", snapshot="fias_full") except(Exception): pass sn_body = { "indices": [address.INDEX, houses.INDEX], "ignore_unavailable": "true", "include_global_state": "false", "metadata": { "taken_by": "fias", "taken_because": "backup before update" } } sn.create(repository="fias", snapshot="fias_full", body=sn_body)
25.321429
68
0.580395
from elasticsearch.client import SnapshotClient from fiases.fias_data import ES import fiases.fias_data sn = SnapshotClient(ES) def register(location="/usr/share/elasticsearch/snapshots"): sn_body = { "type": "fs", "settings": { "compress": "true", "location": location } } sn.create_repository(repository="fias", body=sn_body) def restore(): ES.indices.delete(index=address.INDEX, ignore=[400, 404]) ES.indices.delete(index=houses.INDEX, ignore=[400, 404]) sn.restore(repository="fias", snapshot="fias_full", body={ "indices": [address.INDEX, houses.INDEX] }) def restoreIfNotExist(): if not ES.indices.exists(address.INDEX): sn.restore(repository="fias", snapshot="fias_full", body={ "indices": [address.INDEX, houses.INDEX] }) else: pass def createFullSnapshot(): try: sn.delete(repository="fias", snapshot="fias_full") except(Exception): pass sn_body = { "indices": [address.INDEX, houses.INDEX], "ignore_unavailable": "true", "include_global_state": "false", "metadata": { "taken_by": "fias", "taken_because": "backup before update" } } sn.create(repository="fias", snapshot="fias_full", body=sn_body)
true
true
f713982085f31293684f7aebfef7c22becf7b805
1,743
py
Python
app.py
cop1fab/Tasky
1be5436d770d78d8eb29b21a4a523e0c5b5d36ef
[ "MIT" ]
1
2019-09-29T19:27:08.000Z
2019-09-29T19:27:08.000Z
app.py
cop1fab/Tasky
1be5436d770d78d8eb29b21a4a523e0c5b5d36ef
[ "MIT" ]
null
null
null
app.py
cop1fab/Tasky
1be5436d770d78d8eb29b21a4a523e0c5b5d36ef
[ "MIT" ]
null
null
null
from flask import Flask, render_template, url_for, request, redirect from flask_sqlalchemy import SQLAlchemy from datetime import datetime app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db' db = SQLAlchemy(app) class Todo(db.Model): id = db.Column(db.Integer, primary_key=True) content = db.Column(db.String(200), nullable=False) date_created = db.Column(db.DateTime, default=datetime.utcnow) def __repr__(self): return '<Task %r>' % self.id @app.route('/', methods=['POST', 'GET']) def index(): if request.method == 'POST': task_content = request.form['content'] new_task = Todo(content=task_content) try: db.session.add(new_task) db.session.commit() return redirect('/') except: return 'There was an issue adding your task' else: tasks = Todo.query.order_by(Todo.date_created).all() return render_template('index.html', tasks=tasks) @app.route('/delete/<int:id>') def delete(id): task_to_delete = Todo.query.get_or_404(id) try: db.session.delete(task_to_delete) db.session.commit() return redirect('/') except: return 'There was a problem deleting that task' @app.route('/update/<int:id>', methods=['GET', 'POST']) def update(id): task = Todo.query.get_or_404(id) if request.method == 'POST': task.content = request.form['content'] try: db.session.commit() return redirect('/') except: return 'There was an issue updating your task' else: return render_template('update.html', task=task) if __name__ == "__main__": app.run(debug=True)
25.26087
68
0.626506
from flask import Flask, render_template, url_for, request, redirect from flask_sqlalchemy import SQLAlchemy from datetime import datetime app = Flask(__name__) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db' db = SQLAlchemy(app) class Todo(db.Model): id = db.Column(db.Integer, primary_key=True) content = db.Column(db.String(200), nullable=False) date_created = db.Column(db.DateTime, default=datetime.utcnow) def __repr__(self): return '<Task %r>' % self.id @app.route('/', methods=['POST', 'GET']) def index(): if request.method == 'POST': task_content = request.form['content'] new_task = Todo(content=task_content) try: db.session.add(new_task) db.session.commit() return redirect('/') except: return 'There was an issue adding your task' else: tasks = Todo.query.order_by(Todo.date_created).all() return render_template('index.html', tasks=tasks) @app.route('/delete/<int:id>') def delete(id): task_to_delete = Todo.query.get_or_404(id) try: db.session.delete(task_to_delete) db.session.commit() return redirect('/') except: return 'There was a problem deleting that task' @app.route('/update/<int:id>', methods=['GET', 'POST']) def update(id): task = Todo.query.get_or_404(id) if request.method == 'POST': task.content = request.form['content'] try: db.session.commit() return redirect('/') except: return 'There was an issue updating your task' else: return render_template('update.html', task=task) if __name__ == "__main__": app.run(debug=True)
true
true
f71399781df5c8c63aa2982c55106c73ced70eaa
7,174
py
Python
lib/modules/powershell/management/powercat.py
terrorizer1980/Empire
9259e5106986847d2bb770c4289c0c0f1adf2344
[ "BSD-3-Clause" ]
2
2021-01-15T21:16:02.000Z
2021-05-16T21:02:36.000Z
lib/modules/powershell/management/powercat.py
terrorizer1980/Empire
9259e5106986847d2bb770c4289c0c0f1adf2344
[ "BSD-3-Clause" ]
1
2020-11-04T08:15:12.000Z
2020-11-04T08:15:12.000Z
lib/modules/powershell/management/powercat.py
InfinitelyFreedom/Empire
3a922f60d92658fb716efb3be5a1c15074114766
[ "BSD-3-Clause" ]
1
2022-01-04T17:36:40.000Z
2022-01-04T17:36:40.000Z
from __future__ import print_function from builtins import object from builtins import str from lib.common import helpers class Module(object): def __init__(self, mainMenu, params=[]): # metadata info about the module, not modified during runtime self.info = { # name for the module that will appear in module menus 'Name': 'PowerCat', # list of one or more authors for the module 'Author': ['besimorhino'], 'Software': '', 'Techniques': ['T1036'], # more verbose multi-line description of the module 'Description': ( 'powercat is a powershell function. First you need to load the function before you can execute it.' 'You can put one of the below commands into your powershell profile so powercat is automatically' 'loaded when powershell starts..'), # True if the module needs to run in the background 'Background': True, # File extension to save the file as 'OutputExtension': None, # True if the module needs admin rights to run 'NeedsAdmin': False, # True if the method doesn't touch disk/is reasonably opsec safe 'OpsecSafe': True, 'Language': 'powershell', 'MinLanguageVersion': '2', # list of any references/other comments 'Comments': [ 'https://github.com/besimorhino/powercat' ] } # any options needed by the module, settable during runtime self.options = { # format: # value_name : {description, required, default_value} 'Agent': { 'Description': 'Agent to run module on.', 'Required': True, 'Value': '' }, 'l': { 'Description': 'Switch. Listen for a connection', 'Required': False, 'Value': '' }, 'c': { 'Description': 'Connect to a listener', 'Required': False, 'Value': '' }, 'p': { 'Description': 'The port to connect to, or listen on.', 'Required': False, 'Value': '' }, 'e': { 'Description': 'Execute. (GAPING_SECURITY_HOLE) ', 'Required': False, 'Value': '' }, 'ep': { 'Description': 'Switch. Execute Powershell.', 'Required': False, 'Value': '' }, 'r': { 'Description': 'Switch. Relay. Format: -r tcp:10.1.1.1:443', 'Required': False, 'Value': '' }, 'u': { 'Description': 'Switch. 
Transfer data over UDP.', 'Required': False, 'Value': '' }, 'dns': { 'Description': 'Transfer data over dns (dnscat2).', 'Required': False, 'Value': '' }, 'dnsft': { 'Description': 'DNS Failure Threshold. ', 'Required': False, 'Value': '' }, 't': { 'Description': 'Timeout option. Default: 60 ', 'Required': False, 'Value': '' }, 'i': { 'Description': 'Input: Filepath (string), byte array, or string.', 'Required': False, 'Value': '' }, 'o': { 'Description': 'Console Output Type: "Host", "Bytes", or "String" ', 'Required': False, 'Value': '' }, 'of': { 'Description': 'Output File Path. ', 'Required': False, 'Value': '' }, 'd': { 'Description': 'Switch. Disconnect after connecting.', 'Required': False, 'Value': '' }, 'rep': { 'Description': 'Switch. Repeater. Restart after disconnecting.', 'Required': False, 'Value': '' }, 'g': { 'Description': 'Switch. Generate Payload', 'Required': False, 'Value': '' }, 'ge': { 'Description': 'Switch. Generate Encoded Payload', 'Required': False, 'Value': '' } } # save off a copy of the mainMenu object to access external functionality # like listeners/agent handlers/etc. self.mainMenu = mainMenu # During instantiation, any settable option parameters # are passed as an object set to the module and the # options dictionary is automatically set. This is mostly # in case options are passed on the command line if params: for param in params: # parameter format is [Name, Value] option, value = param if option in self.options: self.options[option]['Value'] = value def generate(self, obfuscate=False, obfuscationCommand=""): # the PowerShell script itself, with the command to invoke # for execution appended to the end. Scripts should output # everything to the pipeline for proper parsing. # # the script should be stripped of comments, with a link to any # original reference script included in the comments. 
script = """ """ # if you're reading in a large, external script that might be updates, # use the pattern below # read in the common module source code moduleSource = self.mainMenu.installPath + "/data/module_source/management/powercat.ps1" try: f = open(moduleSource, 'r') except: print((helpers.color("[!] Could not read module source path at: " + str(moduleSource)))) return "" moduleCode = f.read() f.close() script = moduleCode scriptEnd = "powercat" # add any arguments to the end execution of the script for option, values in self.options.items(): if option.lower() != "agent": if values['Value'] and values['Value'] != '': if values['Value'].lower() == "true": # if we're just adding a switch scriptEnd += " -" + str(option) else: scriptEnd += " -" + str(option) + " " + str(values['Value']) if obfuscate: scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand) script += scriptEnd script = helpers.keyword_obfuscation(script) return script
34.657005
115
0.476861
from __future__ import print_function from builtins import object from builtins import str from lib.common import helpers class Module(object): def __init__(self, mainMenu, params=[]): self.info = { 'Name': 'PowerCat', 'Author': ['besimorhino'], 'Software': '', 'Techniques': ['T1036'], 'Description': ( 'powercat is a powershell function. First you need to load the function before you can execute it.' 'You can put one of the below commands into your powershell profile so powercat is automatically' 'loaded when powershell starts..'), 'Background': True, 'OutputExtension': None, 'NeedsAdmin': False, 'OpsecSafe': True, 'Language': 'powershell', 'MinLanguageVersion': '2', # list of any references/other comments 'Comments': [ 'https://github.com/besimorhino/powercat' ] } # any options needed by the module, settable during runtime self.options = { # format: # value_name : {description, required, default_value} 'Agent': { 'Description': 'Agent to run module on.', 'Required': True, 'Value': '' }, 'l': { 'Description': 'Switch. Listen for a connection', 'Required': False, 'Value': '' }, 'c': { 'Description': 'Connect to a listener', 'Required': False, 'Value': '' }, 'p': { 'Description': 'The port to connect to, or listen on.', 'Required': False, 'Value': '' }, 'e': { 'Description': 'Execute. (GAPING_SECURITY_HOLE) ', 'Required': False, 'Value': '' }, 'ep': { 'Description': 'Switch. Execute Powershell.', 'Required': False, 'Value': '' }, 'r': { 'Description': 'Switch. Relay. Format: -r tcp:10.1.1.1:443', 'Required': False, 'Value': '' }, 'u': { 'Description': 'Switch. Transfer data over UDP.', 'Required': False, 'Value': '' }, 'dns': { 'Description': 'Transfer data over dns (dnscat2).', 'Required': False, 'Value': '' }, 'dnsft': { 'Description': 'DNS Failure Threshold. ', 'Required': False, 'Value': '' }, 't': { 'Description': 'Timeout option. 
Default: 60 ', 'Required': False, 'Value': '' }, 'i': { 'Description': 'Input: Filepath (string), byte array, or string.', 'Required': False, 'Value': '' }, 'o': { 'Description': 'Console Output Type: "Host", "Bytes", or "String" ', 'Required': False, 'Value': '' }, 'of': { 'Description': 'Output File Path. ', 'Required': False, 'Value': '' }, 'd': { 'Description': 'Switch. Disconnect after connecting.', 'Required': False, 'Value': '' }, 'rep': { 'Description': 'Switch. Repeater. Restart after disconnecting.', 'Required': False, 'Value': '' }, 'g': { 'Description': 'Switch. Generate Payload', 'Required': False, 'Value': '' }, 'ge': { 'Description': 'Switch. Generate Encoded Payload', 'Required': False, 'Value': '' } } # save off a copy of the mainMenu object to access external functionality # like listeners/agent handlers/etc. self.mainMenu = mainMenu # During instantiation, any settable option parameters # are passed as an object set to the module and the # options dictionary is automatically set. This is mostly # in case options are passed on the command line if params: for param in params: # parameter format is [Name, Value] option, value = param if option in self.options: self.options[option]['Value'] = value def generate(self, obfuscate=False, obfuscationCommand=""): # the PowerShell script itself, with the command to invoke # for execution appended to the end. Scripts should output # everything to the pipeline for proper parsing. # # the script should be stripped of comments, with a link to any # original reference script included in the comments. script = """ """ # if you're reading in a large, external script that might be updates, moduleSource = self.mainMenu.installPath + "/data/module_source/management/powercat.ps1" try: f = open(moduleSource, 'r') except: print((helpers.color("[!] 
Could not read module source path at: " + str(moduleSource)))) return "" moduleCode = f.read() f.close() script = moduleCode scriptEnd = "powercat" for option, values in self.options.items(): if option.lower() != "agent": if values['Value'] and values['Value'] != '': if values['Value'].lower() == "true": scriptEnd += " -" + str(option) else: scriptEnd += " -" + str(option) + " " + str(values['Value']) if obfuscate: scriptEnd = helpers.obfuscate(self.mainMenu.installPath, psScript=scriptEnd, obfuscationCommand=obfuscationCommand) script += scriptEnd script = helpers.keyword_obfuscation(script) return script
true
true
f7139ab3f0fb49d1cac5d3f59d00c8d5d31e3407
1,459
py
Python
main.py
lerignoux/docker-gcloud-control
6bbe1c7f0b9712c3791637d8d83251441ed27bf8
[ "Apache-2.0" ]
null
null
null
main.py
lerignoux/docker-gcloud-control
6bbe1c7f0b9712c3791637d8d83251441ed27bf8
[ "Apache-2.0" ]
null
null
null
main.py
lerignoux/docker-gcloud-control
6bbe1c7f0b9712c3791637d8d83251441ed27bf8
[ "Apache-2.0" ]
null
null
null
import argparse import logging import os from time import sleep from googleapiclient import discovery log = logging.getLogger(__name__) parser = argparse.ArgumentParser(description="Control a google cloud instance.") parser.add_argument('--debug', '-d', dest='debug', action='store_true', help="Debug mode") parser.add_argument('--project', '-p', dest='project', help="instance project id") parser.add_argument('--zone', '-z', dest='zone', help="instance zone") parser.add_argument('--instance', '-i', dest='instance', help="instance name") def restart_instance(project, zone, instance): compute = discovery.build('compute', 'v1') compute.instances().stop(project=project, zone=zone, instance=instance).execute() stopped = False while not stopped: sleep(10) list = compute.instances().list(project=project, zone=zone).execute()['items'] data = next((x for x in list if x['name'] == instance), None) if data['status'] == "TERMINATED": stopped = True return compute.instances().start(project=project, zone=zone, instance=instance).execute() if __name__ == "__main__": args = parser.parse_args() level = logging.INFO if args.debug: level = logging.DEBUG logging.basicConfig(format='%(asctime)s %(message)s', level=level) restart_instance(args.project or os.environ['gc_project'], args.zone or os.environ['gc_zone'], args.instance or os.environ['gc_instance'])
36.475
142
0.69294
import argparse import logging import os from time import sleep from googleapiclient import discovery log = logging.getLogger(__name__) parser = argparse.ArgumentParser(description="Control a google cloud instance.") parser.add_argument('--debug', '-d', dest='debug', action='store_true', help="Debug mode") parser.add_argument('--project', '-p', dest='project', help="instance project id") parser.add_argument('--zone', '-z', dest='zone', help="instance zone") parser.add_argument('--instance', '-i', dest='instance', help="instance name") def restart_instance(project, zone, instance): compute = discovery.build('compute', 'v1') compute.instances().stop(project=project, zone=zone, instance=instance).execute() stopped = False while not stopped: sleep(10) list = compute.instances().list(project=project, zone=zone).execute()['items'] data = next((x for x in list if x['name'] == instance), None) if data['status'] == "TERMINATED": stopped = True return compute.instances().start(project=project, zone=zone, instance=instance).execute() if __name__ == "__main__": args = parser.parse_args() level = logging.INFO if args.debug: level = logging.DEBUG logging.basicConfig(format='%(asctime)s %(message)s', level=level) restart_instance(args.project or os.environ['gc_project'], args.zone or os.environ['gc_zone'], args.instance or os.environ['gc_instance'])
true
true
f7139ce7db9740ed4872b368a0cddb95a859e9c8
450
py
Python
code/stable/nopcm/src/python/Constants.py
Daisy-C/Drasil
20a877e463965dbb034a84a7094cdd73bdc0eb98
[ "BSD-2-Clause" ]
114
2017-12-16T04:51:37.000Z
2021-12-20T16:27:51.000Z
code/stable/nopcm/src/python/Constants.py
Daisy-C/Drasil
20a877e463965dbb034a84a7094cdd73bdc0eb98
[ "BSD-2-Clause" ]
1,762
2017-12-02T14:39:11.000Z
2022-03-29T16:28:57.000Z
code/stable/nopcm/src/python/Constants.py
Daisy-C/Drasil
20a877e463965dbb034a84a7094cdd73bdc0eb98
[ "BSD-2-Clause" ]
31
2018-11-25T22:16:12.000Z
2021-12-01T20:15:38.000Z
## \file Constants.py # \author Thulasi Jegatheesan # \brief Provides the structure for holding constant values ## \brief Structure for holding the constant values class Constants: pi = 3.14159265 L_min = 0.1 L_max = 50.0 rho_W_min = 950.0 rho_W_max = 1000.0 A_C_max = 100000.0 C_W_min = 4170.0 C_W_max = 4210.0 h_C_min = 10.0 h_C_max = 10000.0 t_final_max = 86400.0 AR_min = 1.0e-2 AR_max = 100.0
23.684211
59
0.653333
0.0 rho_W_min = 950.0 rho_W_max = 1000.0 A_C_max = 100000.0 C_W_min = 4170.0 C_W_max = 4210.0 h_C_min = 10.0 h_C_max = 10000.0 t_final_max = 86400.0 AR_min = 1.0e-2 AR_max = 100.0
true
true
f7139d0d1c6f8bab13a8e63017a7ecb263d70f2d
828
py
Python
hckclone/users/urls.py
hckcksrl/hckclone
3e2585b3a3ca63798d46ade02255a56e795837bf
[ "MIT" ]
null
null
null
hckclone/users/urls.py
hckcksrl/hckclone
3e2585b3a3ca63798d46ade02255a56e795837bf
[ "MIT" ]
null
null
null
hckclone/users/urls.py
hckcksrl/hckclone
3e2585b3a3ca63798d46ade02255a56e795837bf
[ "MIT" ]
null
null
null
from django.urls import path from . import views app_name = "users" urlpatterns = [ path( "<str:username>", view = views.UserProfile.as_view(), name = "user_profile" ), path( "<str:username>/following", view = views.UserFollowing.as_view(), name = "user_following" ), path( "<str:username>/followers", view = views.UserFollowers.as_view(), name = "user_followers" ), path( "search/", view = views.SearchUser.as_view(), name = 'search_user' ), path( "follow/<str:username>", view = views.FollowCount.as_view(), name = 'follow_count' ), path( "change/<str:username>", view = views.ChangePassword.as_view(), name = 'change_password' ) ]
21.789474
46
0.541063
from django.urls import path from . import views app_name = "users" urlpatterns = [ path( "<str:username>", view = views.UserProfile.as_view(), name = "user_profile" ), path( "<str:username>/following", view = views.UserFollowing.as_view(), name = "user_following" ), path( "<str:username>/followers", view = views.UserFollowers.as_view(), name = "user_followers" ), path( "search/", view = views.SearchUser.as_view(), name = 'search_user' ), path( "follow/<str:username>", view = views.FollowCount.as_view(), name = 'follow_count' ), path( "change/<str:username>", view = views.ChangePassword.as_view(), name = 'change_password' ) ]
true
true
f7139da2c15dad77fce6e96abeded28353137ed9
18,589
py
Python
odoo-13.0/venv/lib/python3.8/site-packages/zeep/wsdl/messages/soap.py
VaibhavBhujade/Blockchain-ERP-interoperability
b5190a037fb6615386f7cbad024d51b0abd4ba03
[ "MIT" ]
null
null
null
odoo-13.0/venv/lib/python3.8/site-packages/zeep/wsdl/messages/soap.py
VaibhavBhujade/Blockchain-ERP-interoperability
b5190a037fb6615386f7cbad024d51b0abd4ba03
[ "MIT" ]
null
null
null
odoo-13.0/venv/lib/python3.8/site-packages/zeep/wsdl/messages/soap.py
VaibhavBhujade/Blockchain-ERP-interoperability
b5190a037fb6615386f7cbad024d51b0abd4ba03
[ "MIT" ]
null
null
null
""" zeep.wsdl.messages.soap ~~~~~~~~~~~~~~~~~~~~~~~ """ import copy from collections import OrderedDict from lxml import etree from lxml.builder import ElementMaker from zeep import exceptions, xsd from zeep.utils import as_qname from zeep.xsd.context import XmlParserContext from zeep.wsdl.messages.base import ConcreteMessage, SerializedMessage from zeep.wsdl.messages.multiref import process_multiref __all__ = [ 'DocumentMessage', 'RpcMessage', ] class SoapMessage(ConcreteMessage): """Base class for the SOAP Document and RPC messages :param wsdl: The main wsdl document :type wsdl: zeep.wsdl.Document :param name: :param operation: The operation to which this message belongs :type operation: zeep.wsdl.bindings.soap.SoapOperation :param type: 'input' or 'output' :type type: str :param nsmap: The namespace mapping :type nsmap: dict """ def __init__(self, wsdl, name, operation, type, nsmap): super(SoapMessage, self).__init__(wsdl, name, operation) self.nsmap = nsmap self.abstract = None # Set during resolve() self.type = type self._is_body_wrapped = False self.body = None self.header = None self.envelope = None def serialize(self, *args, **kwargs): """Create a SerializedMessage for this message""" nsmap = { 'soap-env': self.nsmap['soap-env'] } nsmap.update(self.wsdl.types._prefix_map_custom) soap = ElementMaker(namespace=self.nsmap['soap-env'], nsmap=nsmap) # Create the soap:envelope envelope = soap.Envelope() # Create the soap:header element headers_value = kwargs.pop('_soapheaders', None) header = self._serialize_header(headers_value, nsmap) if header is not None: envelope.append(header) # Create the soap:body element. The _is_body_wrapped attribute signals # that the self.body element is of type soap:body, so we don't have to # create it in that case. Otherwise we create a Element soap:body and # render the content into this. 
if self.body: body_value = self.body(*args, **kwargs) if self._is_body_wrapped: self.body.render(envelope, body_value) else: body = soap.Body() envelope.append(body) self.body.render(body, body_value) else: body = soap.Body() envelope.append(body) # XXX: This is only used in Soap 1.1 so should be moved to the the # Soap11Binding._set_http_headers(). But let's keep it like this for # now. headers = { 'SOAPAction': '"%s"' % self.operation.soapaction } return SerializedMessage( path=None, headers=headers, content=envelope) def deserialize(self, envelope): """Deserialize the SOAP:Envelope and return a CompoundValue with the result. """ if not self.envelope: return None body = envelope.find('soap-env:Body', namespaces=self.nsmap) body_result = self._deserialize_body(body) header = envelope.find('soap-env:Header', namespaces=self.nsmap) headers_result = self._deserialize_headers(header) kwargs = body_result kwargs.update(headers_result) result = self.envelope(**kwargs) # If the message if self.header.type._element: return result result = result.body if result is None or len(result) == 0: return None elif len(result) > 1: return result # Check if we can remove the wrapping object to make the return value # easier to use. 
result = next(iter(result.__values__.values())) if isinstance(result, xsd.CompoundValue): children = result._xsd_type.elements attributes = result._xsd_type.attributes if len(children) == 1 and len(attributes) == 0: item_name, item_element = children[0] retval = getattr(result, item_name) return retval return result def signature(self, as_output=False): if not self.envelope: return None if as_output: if isinstance(self.envelope.type, xsd.ComplexType): try: if len(self.envelope.type.elements) == 1: return self.envelope.type.elements[0][1].type.signature( schema=self.wsdl.types, standalone=False) except AttributeError: return None return self.envelope.type.signature(schema=self.wsdl.types, standalone=False) if self.body: parts = [self.body.type.signature(schema=self.wsdl.types, standalone=False)] else: parts = [] if self.header.type._element: parts.append('_soapheaders={%s}' % self.header.type.signature( schema=self.wsdl.types, standalone=False)) return ', '.join(part for part in parts if part) @classmethod def parse(cls, definitions, xmlelement, operation, type, nsmap): """Parse a wsdl:binding/wsdl:operation/wsdl:operation for the SOAP implementation. Each wsdl:operation can contain three child nodes: - input - output - fault Definition for input/output:: <input> <soap:body parts="nmtokens"? use="literal|encoded" encodingStyle="uri-list"? namespace="uri"?> <soap:header message="qname" part="nmtoken" use="literal|encoded" encodingStyle="uri-list"? namespace="uri"?>* <soap:headerfault message="qname" part="nmtoken" use="literal|encoded" encodingStyle="uri-list"? namespace="uri"?/>* </soap:header> </input> And the definition for fault:: <soap:fault name="nmtoken" use="literal|encoded" encodingStyle="uri-list"? 
namespace="uri"?> """ name = xmlelement.get('name') obj = cls(definitions.wsdl, name, operation, nsmap=nsmap, type=type) body_data = None header_data = None # After some profiling it turns out that .find() and .findall() in this # case are twice as fast as the xpath method body = xmlelement.find('soap:body', namespaces=operation.binding.nsmap) if body is not None: body_data = cls._parse_body(body) # Parse soap:header (multiple) elements = xmlelement.findall( 'soap:header', namespaces=operation.binding.nsmap) header_data = cls._parse_header( elements, definitions.target_namespace, operation) obj._resolve_info = { 'body': body_data, 'header': header_data } return obj @classmethod def _parse_body(cls, xmlelement): """Parse soap:body and return a dict with data to resolve it. <soap:body parts="nmtokens"? use="literal|encoded"? encodingStyle="uri-list"? namespace="uri"?> """ return { 'part': xmlelement.get('part'), 'use': xmlelement.get('use', 'literal'), 'encodingStyle': xmlelement.get('encodingStyle'), 'namespace': xmlelement.get('namespace'), } @classmethod def _parse_header(cls, xmlelements, tns, operation): """Parse the soap:header and optionally included soap:headerfault elements <soap:header message="qname" part="nmtoken" use="literal|encoded" encodingStyle="uri-list"? namespace="uri"? />* The header can optionally contain one ore more soap:headerfault elements which can contain the same attributes as the soap:header:: <soap:headerfault message="qname" part="nmtoken" use="literal|encoded" encodingStyle="uri-list"? 
namespace="uri"?/>* """ result = [] for xmlelement in xmlelements: data = cls._parse_header_element(xmlelement, tns) # Add optional soap:headerfault elements data['faults'] = [] fault_elements = xmlelement.findall( 'soap:headerfault', namespaces=operation.binding.nsmap) for fault_element in fault_elements: fault_data = cls._parse_header_element(fault_element, tns) data['faults'].append(fault_data) result.append(data) return result @classmethod def _parse_header_element(cls, xmlelement, tns): attributes = xmlelement.attrib message_qname = as_qname( attributes['message'], xmlelement.nsmap, tns) try: return { 'message': message_qname, 'part': attributes['part'], 'use': attributes['use'], 'encodingStyle': attributes.get('encodingStyle'), 'namespace': attributes.get('namespace'), } except KeyError: raise exceptions.WsdlSyntaxError("Invalid soap:header(fault)") def resolve(self, definitions, abstract_message): """Resolve the data in the self._resolve_info dict (set via parse()) This creates three xsd.Element objects: - self.header - self.body - self.envelope (combination of headers and body) XXX headerfaults are not implemented yet. """ info = self._resolve_info del self._resolve_info # If this message has no parts then we have nothing to do. This might # happen for output messages which don't return anything. if (abstract_message is None or not abstract_message.parts) and self.type != 'input': return self.abstract = abstract_message parts = OrderedDict(self.abstract.parts) self.header = self._resolve_header(info['header'], definitions, parts) self.body = self._resolve_body(info['body'], definitions, parts) self.envelope = self._create_envelope_element() def _create_envelope_element(self): """Create combined `envelope` complexType which contains both the elements from the body and the headers. 
""" all_elements = xsd.Sequence([]) if self.header.type._element: all_elements.append( xsd.Element('{%s}header' % self.nsmap['soap-env'], self.header.type)) all_elements.append( xsd.Element( '{%s}body' % self.nsmap['soap-env'], self.body.type if self.body else None)) return xsd.Element('{%s}envelope' % self.nsmap['soap-env'], xsd.ComplexType(all_elements)) def _serialize_header(self, headers_value, nsmap): if not headers_value: return headers_value = copy.deepcopy(headers_value) soap = ElementMaker(namespace=self.nsmap['soap-env'], nsmap=nsmap) header = soap.Header() if isinstance(headers_value, list): for header_value in headers_value: if hasattr(header_value, '_xsd_elm'): header_value._xsd_elm.render(header, header_value) elif hasattr(header_value, '_xsd_type'): header_value._xsd_type.render(header, header_value) elif isinstance(header_value, etree._Element): header.append(header_value) else: raise ValueError("Invalid value given to _soapheaders") elif isinstance(headers_value, dict): if not self.header: raise ValueError( "_soapheaders only accepts a dictionary if the wsdl " "defines the headers.") # Only render headers for which we have a value headers_value = self.header(**headers_value) for name, elm in self.header.type.elements: if name in headers_value and headers_value[name] is not None: elm.render(header, headers_value[name], ['header', name]) else: raise ValueError("Invalid value given to _soapheaders") return header def _deserialize_headers(self, xmlelement): """Deserialize the values in the SOAP:Header element""" if not self.header or xmlelement is None: return {} context = XmlParserContext(settings=self.wsdl.settings) result = self.header.parse(xmlelement, self.wsdl.types, context=context) if result is not None: return {'header': result} return {} def _resolve_header(self, info, definitions, parts): name = etree.QName(self.nsmap['soap-env'], 'Header') container = xsd.All(consume_other=True) if not info: return xsd.Element(name, 
xsd.ComplexType(container)) for item in info: message_name = item['message'].text part_name = item['part'] message = definitions.get('messages', message_name) if message == self.abstract and part_name in parts: del parts[part_name] part = message.parts[part_name] if part.element: element = part.element.clone() element.attr_name = part_name else: element = xsd.Element(part_name, part.type) container.append(element) return xsd.Element(name, xsd.ComplexType(container)) class DocumentMessage(SoapMessage): """In the document message there are no additional wrappers, and the message parts appear directly under the SOAP Body element. .. inheritance-diagram:: zeep.wsdl.messages.soap.DocumentMessage :parts: 1 :param wsdl: The main wsdl document :type wsdl: zeep.wsdl.Document :param name: :param operation: The operation to which this message belongs :type operation: zeep.wsdl.bindings.soap.SoapOperation :param type: 'input' or 'output' :type type: str :param nsmap: The namespace mapping :type nsmap: dict """ def __init__(self, *args, **kwargs): super(DocumentMessage, self).__init__(*args, **kwargs) def _deserialize_body(self, xmlelement): if not self._is_body_wrapped: # TODO: For now we assume that the body only has one child since # only one part is specified in the wsdl. This should be handled # way better xmlelement = list(xmlelement)[0] context = XmlParserContext(settings=self.wsdl.settings) result = self.body.parse(xmlelement, self.wsdl.types, context=context) return {'body': result} def _resolve_body(self, info, definitions, parts): name = etree.QName(self.nsmap['soap-env'], 'Body') if not info or not parts: return None # If the part name is omitted then all parts are available under # the soap:body tag. Otherwise only the part with the given name. 
if info['part']: part_name = info['part'] sub_elements = [parts[part_name].element] else: sub_elements = [] for part_name, part in parts.items(): element = part.element.clone() element.attr_name = part_name or element.name sub_elements.append(element) if len(sub_elements) > 1: self._is_body_wrapped = True return xsd.Element(name, xsd.ComplexType(xsd.All(sub_elements))) else: self._is_body_wrapped = False return sub_elements[0] class RpcMessage(SoapMessage): """In RPC messages each part is a parameter or a return value and appears inside a wrapper element within the body. The wrapper element is named identically to the operation name and its namespace is the value of the namespace attribute. Each message part (parameter) appears under the wrapper, represented by an accessor named identically to the corresponding parameter of the call. Parts are arranged in the same order as the parameters of the call. .. inheritance-diagram:: zeep.wsdl.messages.soap.DocumentMessage :parts: 1 :param wsdl: The main wsdl document :type wsdl: zeep.wsdl.Document :param name: :param operation: The operation to which this message belongs :type operation: zeep.wsdl.bindings.soap.SoapOperation :param type: 'input' or 'output' :type type: str :param nsmap: The namespace mapping :type nsmap: dict """ def _resolve_body(self, info, definitions, parts): """Return an XSD element for the SOAP:Body. Each part is a parameter or a return value and appears inside a wrapper element within the body named identically to the operation name and its namespace is the value of the namespace attribute. """ if not info: return None namespace = info['namespace'] if self.type == 'input': tag_name = etree.QName(namespace, self.operation.name) else: tag_name = etree.QName(namespace, self.abstract.name.localname) # Create the xsd element to create/parse the response. 
Each part # is a sub element of the root node (which uses the operation name) elements = [] for name, msg in parts.items(): if msg.element: elements.append(msg.element) else: elements.append(xsd.Element(name, msg.type)) return xsd.Element(tag_name, xsd.ComplexType(xsd.Sequence(elements))) def _deserialize_body(self, body_element): """The name of the wrapper element is not defined. The WS-I defines that it should be the operation name with the 'Response' string as suffix. But lets just do it really stupid for now and use the first element. """ process_multiref(body_element) response_element = list(body_element)[0] if self.body: context = XmlParserContext(self.wsdl.settings) result = self.body.parse( response_element, self.wsdl.types, context=context) return {'body': result} return {'body': None}
35.748077
98
0.607026
import copy from collections import OrderedDict from lxml import etree from lxml.builder import ElementMaker from zeep import exceptions, xsd from zeep.utils import as_qname from zeep.xsd.context import XmlParserContext from zeep.wsdl.messages.base import ConcreteMessage, SerializedMessage from zeep.wsdl.messages.multiref import process_multiref __all__ = [ 'DocumentMessage', 'RpcMessage', ] class SoapMessage(ConcreteMessage): def __init__(self, wsdl, name, operation, type, nsmap): super(SoapMessage, self).__init__(wsdl, name, operation) self.nsmap = nsmap self.abstract = None self.type = type self._is_body_wrapped = False self.body = None self.header = None self.envelope = None def serialize(self, *args, **kwargs): nsmap = { 'soap-env': self.nsmap['soap-env'] } nsmap.update(self.wsdl.types._prefix_map_custom) soap = ElementMaker(namespace=self.nsmap['soap-env'], nsmap=nsmap) envelope = soap.Envelope() headers_value = kwargs.pop('_soapheaders', None) header = self._serialize_header(headers_value, nsmap) if header is not None: envelope.append(header) # create it in that case. Otherwise we create a Element soap:body and # render the content into this. if self.body: body_value = self.body(*args, **kwargs) if self._is_body_wrapped: self.body.render(envelope, body_value) else: body = soap.Body() envelope.append(body) self.body.render(body, body_value) else: body = soap.Body() envelope.append(body) # XXX: This is only used in Soap 1.1 so should be moved to the the # Soap11Binding._set_http_headers(). 
But let's keep it like this for headers = { 'SOAPAction': '"%s"' % self.operation.soapaction } return SerializedMessage( path=None, headers=headers, content=envelope) def deserialize(self, envelope): if not self.envelope: return None body = envelope.find('soap-env:Body', namespaces=self.nsmap) body_result = self._deserialize_body(body) header = envelope.find('soap-env:Header', namespaces=self.nsmap) headers_result = self._deserialize_headers(header) kwargs = body_result kwargs.update(headers_result) result = self.envelope(**kwargs) if self.header.type._element: return result result = result.body if result is None or len(result) == 0: return None elif len(result) > 1: return result result = next(iter(result.__values__.values())) if isinstance(result, xsd.CompoundValue): children = result._xsd_type.elements attributes = result._xsd_type.attributes if len(children) == 1 and len(attributes) == 0: item_name, item_element = children[0] retval = getattr(result, item_name) return retval return result def signature(self, as_output=False): if not self.envelope: return None if as_output: if isinstance(self.envelope.type, xsd.ComplexType): try: if len(self.envelope.type.elements) == 1: return self.envelope.type.elements[0][1].type.signature( schema=self.wsdl.types, standalone=False) except AttributeError: return None return self.envelope.type.signature(schema=self.wsdl.types, standalone=False) if self.body: parts = [self.body.type.signature(schema=self.wsdl.types, standalone=False)] else: parts = [] if self.header.type._element: parts.append('_soapheaders={%s}' % self.header.type.signature( schema=self.wsdl.types, standalone=False)) return ', '.join(part for part in parts if part) @classmethod def parse(cls, definitions, xmlelement, operation, type, nsmap): name = xmlelement.get('name') obj = cls(definitions.wsdl, name, operation, nsmap=nsmap, type=type) body_data = None header_data = None body = xmlelement.find('soap:body', namespaces=operation.binding.nsmap) if body is not 
None: body_data = cls._parse_body(body) elements = xmlelement.findall( 'soap:header', namespaces=operation.binding.nsmap) header_data = cls._parse_header( elements, definitions.target_namespace, operation) obj._resolve_info = { 'body': body_data, 'header': header_data } return obj @classmethod def _parse_body(cls, xmlelement): return { 'part': xmlelement.get('part'), 'use': xmlelement.get('use', 'literal'), 'encodingStyle': xmlelement.get('encodingStyle'), 'namespace': xmlelement.get('namespace'), } @classmethod def _parse_header(cls, xmlelements, tns, operation): result = [] for xmlelement in xmlelements: data = cls._parse_header_element(xmlelement, tns) data['faults'] = [] fault_elements = xmlelement.findall( 'soap:headerfault', namespaces=operation.binding.nsmap) for fault_element in fault_elements: fault_data = cls._parse_header_element(fault_element, tns) data['faults'].append(fault_data) result.append(data) return result @classmethod def _parse_header_element(cls, xmlelement, tns): attributes = xmlelement.attrib message_qname = as_qname( attributes['message'], xmlelement.nsmap, tns) try: return { 'message': message_qname, 'part': attributes['part'], 'use': attributes['use'], 'encodingStyle': attributes.get('encodingStyle'), 'namespace': attributes.get('namespace'), } except KeyError: raise exceptions.WsdlSyntaxError("Invalid soap:header(fault)") def resolve(self, definitions, abstract_message): info = self._resolve_info del self._resolve_info if (abstract_message is None or not abstract_message.parts) and self.type != 'input': return self.abstract = abstract_message parts = OrderedDict(self.abstract.parts) self.header = self._resolve_header(info['header'], definitions, parts) self.body = self._resolve_body(info['body'], definitions, parts) self.envelope = self._create_envelope_element() def _create_envelope_element(self): all_elements = xsd.Sequence([]) if self.header.type._element: all_elements.append( xsd.Element('{%s}header' % self.nsmap['soap-env'], 
self.header.type)) all_elements.append( xsd.Element( '{%s}body' % self.nsmap['soap-env'], self.body.type if self.body else None)) return xsd.Element('{%s}envelope' % self.nsmap['soap-env'], xsd.ComplexType(all_elements)) def _serialize_header(self, headers_value, nsmap): if not headers_value: return headers_value = copy.deepcopy(headers_value) soap = ElementMaker(namespace=self.nsmap['soap-env'], nsmap=nsmap) header = soap.Header() if isinstance(headers_value, list): for header_value in headers_value: if hasattr(header_value, '_xsd_elm'): header_value._xsd_elm.render(header, header_value) elif hasattr(header_value, '_xsd_type'): header_value._xsd_type.render(header, header_value) elif isinstance(header_value, etree._Element): header.append(header_value) else: raise ValueError("Invalid value given to _soapheaders") elif isinstance(headers_value, dict): if not self.header: raise ValueError( "_soapheaders only accepts a dictionary if the wsdl " "defines the headers.") # Only render headers for which we have a value headers_value = self.header(**headers_value) for name, elm in self.header.type.elements: if name in headers_value and headers_value[name] is not None: elm.render(header, headers_value[name], ['header', name]) else: raise ValueError("Invalid value given to _soapheaders") return header def _deserialize_headers(self, xmlelement): if not self.header or xmlelement is None: return {} context = XmlParserContext(settings=self.wsdl.settings) result = self.header.parse(xmlelement, self.wsdl.types, context=context) if result is not None: return {'header': result} return {} def _resolve_header(self, info, definitions, parts): name = etree.QName(self.nsmap['soap-env'], 'Header') container = xsd.All(consume_other=True) if not info: return xsd.Element(name, xsd.ComplexType(container)) for item in info: message_name = item['message'].text part_name = item['part'] message = definitions.get('messages', message_name) if message == self.abstract and part_name in parts: del 
parts[part_name] part = message.parts[part_name] if part.element: element = part.element.clone() element.attr_name = part_name else: element = xsd.Element(part_name, part.type) container.append(element) return xsd.Element(name, xsd.ComplexType(container)) class DocumentMessage(SoapMessage): def __init__(self, *args, **kwargs): super(DocumentMessage, self).__init__(*args, **kwargs) def _deserialize_body(self, xmlelement): if not self._is_body_wrapped: # TODO: For now we assume that the body only has one child since # only one part is specified in the wsdl. This should be handled # way better xmlelement = list(xmlelement)[0] context = XmlParserContext(settings=self.wsdl.settings) result = self.body.parse(xmlelement, self.wsdl.types, context=context) return {'body': result} def _resolve_body(self, info, definitions, parts): name = etree.QName(self.nsmap['soap-env'], 'Body') if not info or not parts: return None # If the part name is omitted then all parts are available under # the soap:body tag. Otherwise only the part with the given name. if info['part']: part_name = info['part'] sub_elements = [parts[part_name].element] else: sub_elements = [] for part_name, part in parts.items(): element = part.element.clone() element.attr_name = part_name or element.name sub_elements.append(element) if len(sub_elements) > 1: self._is_body_wrapped = True return xsd.Element(name, xsd.ComplexType(xsd.All(sub_elements))) else: self._is_body_wrapped = False return sub_elements[0] class RpcMessage(SoapMessage): def _resolve_body(self, info, definitions, parts): if not info: return None namespace = info['namespace'] if self.type == 'input': tag_name = etree.QName(namespace, self.operation.name) else: tag_name = etree.QName(namespace, self.abstract.name.localname) # Create the xsd element to create/parse the response. 
Each part # is a sub element of the root node (which uses the operation name) elements = [] for name, msg in parts.items(): if msg.element: elements.append(msg.element) else: elements.append(xsd.Element(name, msg.type)) return xsd.Element(tag_name, xsd.ComplexType(xsd.Sequence(elements))) def _deserialize_body(self, body_element): process_multiref(body_element) response_element = list(body_element)[0] if self.body: context = XmlParserContext(self.wsdl.settings) result = self.body.parse( response_element, self.wsdl.types, context=context) return {'body': result} return {'body': None}
true
true
f713a0592cc26640121c0ba7b80b466b8ece1a2d
59,710
py
Python
test/metric_learn_test.py
RobinVogel/metric-learn
5f4def7b9f6b877d24b7662f0f1ef54c3dc4d5eb
[ "MIT" ]
1
2020-05-22T19:04:24.000Z
2020-05-22T19:04:24.000Z
test/metric_learn_test.py
q-vision/metric-learn
a30471424d35b0ef47582751fa6acea7b3a3bce5
[ "MIT" ]
null
null
null
test/metric_learn_test.py
q-vision/metric-learn
a30471424d35b0ef47582751fa6acea7b3a3bce5
[ "MIT" ]
null
null
null
import unittest import re import pytest import numpy as np import scipy from scipy.optimize import check_grad, approx_fprime from six.moves import xrange from sklearn.metrics import pairwise_distances, euclidean_distances from sklearn.datasets import (load_iris, make_classification, make_regression, make_spd_matrix) from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_allclose) from sklearn.utils.testing import assert_warns_message from sklearn.exceptions import ConvergenceWarning, ChangedBehaviorWarning from sklearn.utils.validation import check_X_y try: from inverse_covariance import quic assert(quic) except ImportError: HAS_SKGGM = False else: HAS_SKGGM = True from metric_learn import (LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised, SDML, RCA, ITML, LSML) # Import this specially for testing. from metric_learn.constraints import wrap_pairs from metric_learn.lmnn import _sum_outer_products def class_separation(X, labels): unique_labels, label_inds = np.unique(labels, return_inverse=True) ratio = 0 for li in xrange(len(unique_labels)): Xc = X[label_inds == li] Xnc = X[label_inds != li] ratio += pairwise_distances(Xc).mean() / pairwise_distances(Xc, Xnc).mean() return ratio / len(unique_labels) class MetricTestCase(unittest.TestCase): @classmethod def setUpClass(self): # runs once per test class iris_data = load_iris() self.iris_points = iris_data['data'] self.iris_labels = iris_data['target'] np.random.seed(1234) class TestCovariance(MetricTestCase): def test_iris(self): cov = Covariance() cov.fit(self.iris_points) csep = class_separation(cov.transform(self.iris_points), self.iris_labels) # deterministic result self.assertAlmostEqual(csep, 0.72981476) def test_singular_returns_pseudo_inverse(self): """Checks that if the input covariance matrix is singular, we return the pseudo inverse""" X, y = load_iris(return_X_y=True) # We add a virtual column that is a linear 
combination of the other # columns so that the covariance matrix will be singular X = np.concatenate([X, X[:, :2].dot([[2], [3]])], axis=1) cov_matrix = np.cov(X, rowvar=False) covariance = Covariance() covariance.fit(X) pseudo_inverse = covariance.get_mahalanobis_matrix() # here is the definition of a pseudo inverse according to wikipedia: assert_allclose(cov_matrix.dot(pseudo_inverse).dot(cov_matrix), cov_matrix) assert_allclose(pseudo_inverse.dot(cov_matrix).dot(pseudo_inverse), pseudo_inverse) class TestLSML(MetricTestCase): def test_iris(self): lsml = LSML_Supervised(num_constraints=200) lsml.fit(self.iris_points, self.iris_labels) csep = class_separation(lsml.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.8) # it's pretty terrible def test_deprecation_num_labeled(self): # test that a deprecation message is thrown if num_labeled is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lsml_supervised = LSML_Supervised(num_labeled=np.inf) msg = ('"num_labeled" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0') assert_warns_message(DeprecationWarning, msg, lsml_supervised.fit, X, y) def test_changed_behaviour_warning(self): # test that a ChangedBehavior warning is thrown about the init, if the # default parameters are used. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lsml_supervised = LSML_Supervised() msg = ("Warning, no prior was set (`prior=None`). As of version 0.5.0, " "the default prior will now be set to " "'identity', instead of 'covariance'. If you still want to use " "the inverse of the covariance matrix as a prior, " "set prior='covariance'. 
This warning will disappear in " "v0.6.0, and `prior` parameter's default value will be set to " "'identity'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: lsml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.], [-5., 3.], [5., 0.]], [[0., 50.], [0., -60], [-10., 0.], [10., 0.]]]) lsml = LSML() with pytest.warns(ChangedBehaviorWarning) as raised_warning: lsml.fit(pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_deprecation_random_state(self): # test that a deprecation message is thrown if random_state is set at # fit time # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lsml_supervised = LSML_Supervised() msg = ('"random_state" parameter in the `fit` function is ' 'deprecated. Set `random_state` at initialization ' 'instead (when instantiating a new `LSML_Supervised` ' 'object).') with pytest.warns(DeprecationWarning) as raised_warning: lsml_supervised.fit(X, y, random_state=np.random) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning_random_state(self): # test that a ChangedBehavior warning is thrown if the random_state is # not set in fit. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lsml_supervised = LSML_Supervised() msg = ('As of v0.5.0, `LSML_Supervised` now uses the ' '`random_state` given at initialization to sample ' 'constraints, not the default `np.random` from the `fit` ' 'method, since this argument is now deprecated. 
' 'This warning will disappear in v0.6.0.') with pytest.warns(ChangedBehaviorWarning) as raised_warning: lsml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) class TestITML(MetricTestCase): def test_iris(self): itml = ITML_Supervised(num_constraints=200) itml.fit(self.iris_points, self.iris_labels) csep = class_separation(itml.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.2) def test_deprecation_num_labeled(self): # test that a deprecation message is thrown if num_labeled is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised(num_labeled=np.inf) msg = ('"num_labeled" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0') assert_warns_message(DeprecationWarning, msg, itml_supervised.fit, X, y) def test_deprecation_bounds(self): # test that a deprecation message is thrown if bounds is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised(bounds=None) msg = ('"bounds" parameter from initialization is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. Use the "bounds" parameter of this ' 'fit method instead.') assert_warns_message(DeprecationWarning, msg, itml_supervised.fit, X, y) def test_deprecation_A0(self): # test that a deprecation message is thrown if A0 is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised(A0=np.ones_like(X)) msg = ('"A0" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' 'removed in 0.6.0. 
Use "prior" instead.') with pytest.warns(DeprecationWarning) as raised_warning: itml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] itml = ITML(A0=np.ones_like(X)) with pytest.warns(DeprecationWarning) as raised_warning: itml.fit(pairs, y_pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_deprecation_random_state(self): # test that a deprecation message is thrown if random_state is set at # fit time # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised() msg = ('"random_state" parameter in the `fit` function is ' 'deprecated. Set `random_state` at initialization ' 'instead (when instantiating a new `ITML_Supervised` ' 'object).') with pytest.warns(DeprecationWarning) as raised_warning: itml_supervised.fit(X, y, random_state=np.random) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning_random_state(self): # test that a ChangedBehavior warning is thrown if the random_state is # not set in fit. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised() msg = ('As of v0.5.0, `ITML_Supervised` now uses the ' '`random_state` given at initialization to sample ' 'constraints, not the default `np.random` from the `fit` ' 'method, since this argument is now deprecated. 
' 'This warning will disappear in v0.6.0.') with pytest.warns(ChangedBehaviorWarning) as raised_warning: itml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.parametrize('bounds', [None, (20., 100.), [20., 100.], np.array([20., 100.]), np.array([[20., 100.]]), np.array([[20], [100]])]) def test_bounds_parameters_valid(bounds): """Asserts that we can provide any array-like of two elements as bounds, and that the attribute bound_ is a numpy array""" pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] itml = ITML() itml.fit(pairs, y_pairs, bounds=bounds) X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised() itml_supervised.fit(X, y, bounds=bounds) @pytest.mark.parametrize('bounds', ['weird', ['weird1', 'weird2'], np.array([1, 2, 3])]) def test_bounds_parameters_invalid(bounds): """Assert that if a non array-like is put for bounds, or an array-like of length different than 2, an error is returned""" pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] itml = ITML() with pytest.raises(Exception): itml.fit(pairs, y_pairs, bounds=bounds) X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised() with pytest.raises(Exception): itml_supervised.fit(X, y, bounds=bounds) class TestLMNN(MetricTestCase): def test_iris(self): lmnn = LMNN(k=5, learn_rate=1e-6, verbose=False) lmnn.fit(self.iris_points, self.iris_labels) csep = class_separation(lmnn.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.25) def test_loss_grad_lbfgs(self): """Test gradient of loss function Assert that the gradient is almost equal to its finite differences approximation. 
""" rng = np.random.RandomState(42) X, y = make_classification(random_state=rng) L = rng.randn(rng.randint(1, X.shape[1] + 1), X.shape[1]) lmnn = LMNN() k = lmnn.k reg = lmnn.regularization X, y = lmnn._prepare_inputs(X, y, dtype=float, ensure_min_samples=2) num_pts, n_components = X.shape unique_labels, label_inds = np.unique(y, return_inverse=True) lmnn.labels_ = np.arange(len(unique_labels)) lmnn.components_ = np.eye(n_components) target_neighbors = lmnn._select_targets(X, label_inds) # sum outer products dfG = _sum_outer_products(X, target_neighbors.flatten(), np.repeat(np.arange(X.shape[0]), k)) # initialize L def loss_grad(flat_L): return lmnn._loss_grad(X, flat_L.reshape(-1, X.shape[1]), dfG, k, reg, target_neighbors, label_inds) def fun(x): return loss_grad(x)[1] def grad(x): return loss_grad(x)[0].ravel() # compute relative error epsilon = np.sqrt(np.finfo(float).eps) rel_diff = (check_grad(fun, grad, L.ravel()) / np.linalg.norm(approx_fprime(L.ravel(), fun, epsilon))) np.testing.assert_almost_equal(rel_diff, 0., decimal=5) def test_changed_behaviour_warning(self): # test that a ChangedBehavior warning is thrown about the init, if the # default parameters are used. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lmnn = LMNN(k=2) msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, " "the default init will now be set to 'auto', instead of the " "previous identity matrix. If you still want to use the identity " "matrix as before, set init='identity'. This warning " "will disappear in v0.6.0, and `init` parameter's default value " "will be set to 'auto'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: lmnn.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_deprecation_use_pca(self): # test that a DeprecationWarning is thrown about use_pca, if the # default parameters are used. 
# TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lmnn = LMNN(k=2, use_pca=True) msg = ('"use_pca" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0.') assert_warns_message(DeprecationWarning, msg, lmnn.fit, X, y) def test_loss_func(capsys): """Test the loss function (and its gradient) on a simple example, by comparing the results with the actual implementation of metric-learn, with a very simple (but nonperformant) implementation""" # toy dataset to use X, y = make_classification(n_samples=10, n_classes=2, n_features=6, n_redundant=0, shuffle=True, scale=[1, 1, 20, 20, 20, 20], random_state=42) def hinge(a): if a > 0: return a, 1 else: return 0, 0 def loss_fn(L, X, y, target_neighbors, reg): L = L.reshape(-1, X.shape[1]) Lx = np.dot(X, L.T) loss = 0 total_active = 0 grad = np.zeros_like(L) for i in range(X.shape[0]): for j in target_neighbors[i]: loss += (1 - reg) * np.sum((Lx[i] - Lx[j]) ** 2) grad += (1 - reg) * np.outer(Lx[i] - Lx[j], X[i] - X[j]) for l in range(X.shape[0]): if y[i] != y[l]: hin, active = hinge(1 + np.sum((Lx[i] - Lx[j])**2) - np.sum((Lx[i] - Lx[l])**2)) total_active += active if active: loss += reg * hin grad += (reg * (np.outer(Lx[i] - Lx[j], X[i] - X[j]) - np.outer(Lx[i] - Lx[l], X[i] - X[l]))) grad = 2 * grad return grad, loss, total_active # we check that the gradient we have computed in the non-performant implem # is indeed the true gradient on a toy example: def _select_targets(X, y, k): target_neighbors = np.empty((X.shape[0], k), dtype=int) for label in np.unique(y): inds, = np.nonzero(y == label) dd = euclidean_distances(X[inds], squared=True) np.fill_diagonal(dd, np.inf) nn = np.argsort(dd)[..., :k] target_neighbors[inds] = inds[nn] return target_neighbors target_neighbors = _select_targets(X, y, 2) regularization = 0.5 n_features = X.shape[1] x0 = np.random.randn(1, n_features) def loss(x0): return loss_fn(x0.reshape(-1, X.shape[1]), X, 
y, target_neighbors, regularization)[1] def grad(x0): return loss_fn(x0.reshape(-1, X.shape[1]), X, y, target_neighbors, regularization)[0].ravel() scipy.optimize.check_grad(loss, grad, x0.ravel()) class LMNN_with_callback(LMNN): """ We will use a callback to get the gradient (see later) """ def __init__(self, callback, *args, **kwargs): self.callback = callback super(LMNN_with_callback, self).__init__(*args, **kwargs) def _loss_grad(self, *args, **kwargs): grad, objective, total_active = ( super(LMNN_with_callback, self)._loss_grad(*args, **kwargs)) self.callback.append(grad) return grad, objective, total_active class LMNN_nonperformant(LMNN_with_callback): def fit(self, X, y): self.y = y return super(LMNN_nonperformant, self).fit(X, y) def _loss_grad(self, X, L, dfG, k, reg, target_neighbors, label_inds): grad, loss, total_active = loss_fn(L.ravel(), X, self.y, target_neighbors, self.regularization) self.callback.append(grad) return grad, loss, total_active mem1, mem2 = [], [] lmnn_perf = LMNN_with_callback(verbose=True, random_state=42, init='identity', max_iter=30, callback=mem1) lmnn_nonperf = LMNN_nonperformant(verbose=True, random_state=42, init='identity', max_iter=30, callback=mem2) objectives, obj_diffs, learn_rate, total_active = (dict(), dict(), dict(), dict()) for algo, name in zip([lmnn_perf, lmnn_nonperf], ['perf', 'nonperf']): algo.fit(X, y) out, _ = capsys.readouterr() lines = re.split("\n+", out) # we get every variable that is printed from the algorithm in verbose num = r'(-?\d+.?\d*(e[+|-]\d+)?)' strings = [re.search(r"\d+ (?:{}) (?:{}) (?:(\d+)) (?:{})" .format(num, num, num), s) for s in lines] objectives[name] = [float(match.group(1)) for match in strings if match is not None] obj_diffs[name] = [float(match.group(3)) for match in strings if match is not None] total_active[name] = [float(match.group(5)) for match in strings if match is not None] learn_rate[name] = [float(match.group(6)) for match in strings if match is not None] assert 
len(strings) >= 10 # we ensure that we actually did more than 10 # iterations assert total_active[name][0] >= 2 # we ensure that we have some active # constraints (that's the case we want to test) # we remove the last element because it can be equal to the penultimate # if the last gradient update is null for i in range(len(mem1)): np.testing.assert_allclose(lmnn_perf.callback[i], lmnn_nonperf.callback[i], err_msg='Gradient different at position ' '{}'.format(i)) np.testing.assert_allclose(objectives['perf'], objectives['nonperf']) np.testing.assert_allclose(obj_diffs['perf'], obj_diffs['nonperf']) np.testing.assert_allclose(total_active['perf'], total_active['nonperf']) np.testing.assert_allclose(learn_rate['perf'], learn_rate['nonperf']) @pytest.mark.parametrize('X, y, loss', [(np.array([[0], [1], [2], [3]]), [1, 1, 0, 0], 3.0), (np.array([[0], [1], [2], [3]]), [1, 0, 0, 1], 26.)]) def test_toy_ex_lmnn(X, y, loss): """Test that the loss give the right result on a toy example""" L = np.array([[1]]) lmnn = LMNN(k=1, regularization=0.5) k = lmnn.k reg = lmnn.regularization X, y = lmnn._prepare_inputs(X, y, dtype=float, ensure_min_samples=2) num_pts, n_components = X.shape unique_labels, label_inds = np.unique(y, return_inverse=True) lmnn.labels_ = np.arange(len(unique_labels)) lmnn.components_ = np.eye(n_components) target_neighbors = lmnn._select_targets(X, label_inds) # sum outer products dfG = _sum_outer_products(X, target_neighbors.flatten(), np.repeat(np.arange(X.shape[0]), k)) # storage a1 = [None] * k a2 = [None] * k for nn_idx in xrange(k): a1[nn_idx] = np.array([]) a2[nn_idx] = np.array([]) # assert that the loss equals the one computed by hand assert lmnn._loss_grad(X, L.reshape(-1, X.shape[1]), dfG, k, reg, target_neighbors, label_inds)[1] == loss def test_convergence_simple_example(capsys): # LMNN should converge on this simple example, which it did not with # this issue: https://github.com/scikit-learn-contrib/metric-learn/issues/88 X, y = 
make_classification(random_state=0) lmnn = LMNN(verbose=True) lmnn.fit(X, y) out, _ = capsys.readouterr() assert "LMNN converged with objective" in out def test_no_twice_same_objective(capsys): # test that the objective function never has twice the same value # see https://github.com/scikit-learn-contrib/metric-learn/issues/88 X, y = make_classification(random_state=0) lmnn = LMNN(verbose=True) lmnn.fit(X, y) out, _ = capsys.readouterr() lines = re.split("\n+", out) # we get only objectives from each line: # the regexp matches a float that follows an integer (the iteration # number), and which is followed by a (signed) float (delta obj). It # matches for instance: # 3 **1113.7665747189938** -3.182774197440267 46431.0200999999999998e-06 objectives = [re.search(r"\d* (?:(\d*.\d*))[ | -]\d*.\d*", s) for s in lines] objectives = [match.group(1) for match in objectives if match is not None] # we remove the last element because it can be equal to the penultimate # if the last gradient update is null assert len(objectives[:-1]) == len(set(objectives[:-1])) class TestSDML(MetricTestCase): @pytest.mark.skipif(HAS_SKGGM, reason="The warning can be thrown only if skggm is " "not installed.") def test_sdml_supervised_raises_warning_msg_not_installed_skggm(self): """Tests that the right warning message is raised if someone tries to use SDML_Supervised but has not installed skggm, and that the algorithm fails to converge""" # TODO: remove if we don't need skggm anymore # load_iris: dataset where we know scikit-learn's graphical lasso fails # with a Floating Point error X, y = load_iris(return_X_y=True) sdml_supervised = SDML_Supervised(balance_param=0.5, use_cov=True, sparsity_param=0.01) msg = ("There was a problem in SDML when using scikit-learn's graphical " "lasso solver. skggm's graphical lasso can sometimes converge on " "non SPD cases where scikit-learn's graphical lasso fails to " "converge. 
Try to install skggm and rerun the algorithm (see " "the README.md for the right version of skggm). The following " "error message was thrown:") with pytest.raises(RuntimeError) as raised_error: sdml_supervised.fit(X, y) assert str(raised_error.value).startswith(msg) @pytest.mark.skipif(HAS_SKGGM, reason="The warning can be thrown only if skggm is " "not installed.") def test_sdml_raises_warning_msg_not_installed_skggm(self): """Tests that the right warning message is raised if someone tries to use SDML but has not installed skggm, and that the algorithm fails to converge""" # TODO: remove if we don't need skggm anymore # case on which we know that scikit-learn's graphical lasso fails # because it will return a non SPD matrix pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(prior='identity', balance_param=100, verbose=True) msg = ("There was a problem in SDML when using scikit-learn's graphical " "lasso solver. skggm's graphical lasso can sometimes converge on " "non SPD cases where scikit-learn's graphical lasso fails to " "converge. 
Try to install skggm and rerun the algorithm (see " "the README.md for the right version of skggm).") with pytest.raises(RuntimeError) as raised_error: sdml.fit(pairs, y_pairs) assert msg == str(raised_error.value) @pytest.mark.skipif(not HAS_SKGGM, reason="The warning can be thrown only if skggm is " "installed.") def test_sdml_raises_warning_msg_installed_skggm(self): """Tests that the right warning message is raised if someone tries to use SDML and has installed skggm, and that the algorithm fails to converge""" # TODO: remove if we don't need skggm anymore # case on which we know that skggm's graphical lasso fails # because it will return non finite values pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(prior='identity', balance_param=100, verbose=True) msg = ("There was a problem in SDML when using skggm's graphical " "lasso solver.") with pytest.raises(RuntimeError) as raised_error: sdml.fit(pairs, y_pairs) assert msg == str(raised_error.value) @pytest.mark.skipif(not HAS_SKGGM, reason="The warning can be thrown only if skggm is " "installed.") def test_sdml_supervised_raises_warning_msg_installed_skggm(self): """Tests that the right warning message is raised if someone tries to use SDML_Supervised but has not installed skggm, and that the algorithm fails to converge""" # TODO: remove if we don't need skggm anymore # case on which we know that skggm's graphical lasso fails # because it will return non finite values rng = np.random.RandomState(42) # This example will create a diagonal em_cov with a negative coeff ( # pathological case) X = np.array([[-10., 0.], [10., 0.], [5., 0.], [3., 0.]]) y = [0, 0, 1, 1] sdml_supervised = SDML_Supervised(balance_param=0.5, prior='identity', sparsity_param=0.01, random_state=rng) msg = ("There was a problem in SDML when using skggm's graphical " "lasso solver.") with pytest.raises(RuntimeError) as raised_error: sdml_supervised.fit(X, y) assert msg == str(raised_error.value) 
@pytest.mark.skipif(not HAS_SKGGM, reason="It's only in the case where skggm is installed" "that no warning should be thrown.") def test_raises_no_warning_installed_skggm(self): # otherwise we should be able to instantiate and fit SDML and it # should raise no error and no ConvergenceWarning pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]]) y_pairs = [1, -1] X, y = make_classification(random_state=42) with pytest.warns(None) as records: sdml = SDML(prior='covariance') sdml.fit(pairs, y_pairs) for record in records: assert record.category is not ConvergenceWarning with pytest.warns(None) as records: sdml_supervised = SDML_Supervised(prior='identity', balance_param=1e-5) sdml_supervised.fit(X, y) for record in records: assert record.category is not ConvergenceWarning def test_iris(self): # Note: this is a flaky test, which fails for certain seeds. # TODO: un-flake it! rs = np.random.RandomState(5555) sdml = SDML_Supervised(num_constraints=1500, prior='identity', balance_param=5e-5) sdml.fit(self.iris_points, self.iris_labels, random_state=rs) csep = class_separation(sdml.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.22) def test_deprecation_num_labeled(self): # test that a deprecation message is thrown if num_labeled is set at # initialization # TODO: remove in v.0.6 X, y = make_classification(random_state=42) sdml_supervised = SDML_Supervised(num_labeled=np.inf, prior='identity', balance_param=5e-5) msg = ('"num_labeled" parameter is not used.' 
' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0') assert_warns_message(DeprecationWarning, msg, sdml_supervised.fit, X, y) def test_sdml_raises_warning_non_psd(self): """Tests that SDML raises a warning on a toy example where we know the pseudo-covariance matrix is not PSD""" pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y = [1, -1] sdml = SDML(prior='covariance', sparsity_param=0.01, balance_param=0.5) msg = ("Warning, the input matrix of graphical lasso is not " "positive semi-definite (PSD). The algorithm may diverge, " "and lead to degenerate solutions. " "To prevent that, try to decrease the balance parameter " "`balance_param` and/or to set prior='identity'.") with pytest.warns(ConvergenceWarning) as raised_warning: try: sdml.fit(pairs, y) except Exception: pass # we assert that this warning is in one of the warning raised by the # estimator assert msg in list(map(lambda w: str(w.message), raised_warning)) def test_sdml_converges_if_psd(self): """Tests that sdml converges on a simple problem where we know the pseudo-covariance matrix is PSD""" pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]]) y = [1, -1] sdml = SDML(prior='covariance', sparsity_param=0.01, balance_param=0.5) sdml.fit(pairs, y) assert np.isfinite(sdml.get_mahalanobis_matrix()).all() @pytest.mark.skipif(not HAS_SKGGM, reason="sklearn's graphical_lasso can sometimes not " "work on some non SPD problems. 
We test that " "is works only if skggm is installed.") def test_sdml_works_on_non_spd_pb_with_skggm(self): """Test that SDML works on a certain non SPD problem on which we know it should work, but scikit-learn's graphical_lasso does not work""" X, y = load_iris(return_X_y=True) sdml = SDML_Supervised(balance_param=0.5, sparsity_param=0.01, prior='covariance', random_state=np.random.RandomState(42)) sdml.fit(X, y) def test_deprecation_use_cov(self): # test that a deprecation message is thrown if use_cov is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) sdml_supervised = SDML_Supervised(use_cov=np.ones_like(X), balance_param=1e-5) msg = ('"use_cov" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' 'removed in 0.6.0. Use "prior" instead.') with pytest.warns(DeprecationWarning) as raised_warning: sdml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(use_cov=np.ones_like(X), balance_param=1e-5) with pytest.warns(DeprecationWarning) as raised_warning: sdml.fit(pairs, y_pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning(self): # test that a ChangedBehavior warning is thrown about the init, if the # default parameters are used (except for the balance_param that we need # to set for the algorithm to not diverge) # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) sdml_supervised = SDML_Supervised(balance_param=1e-5) msg = ("Warning, no prior was set (`prior=None`). As of version 0.5.0, " "the default prior will now be set to " "'identity', instead of 'covariance'. If you still want to use " "the inverse of the covariance matrix as a prior, " "set prior='covariance'. 
This warning will disappear in " "v0.6.0, and `prior` parameter's default value will be set to " "'identity'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: sdml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(balance_param=1e-5) with pytest.warns(ChangedBehaviorWarning) as raised_warning: sdml.fit(pairs, y_pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_deprecation_random_state(self): # test that a deprecation message is thrown if random_state is set at # fit time # TODO: remove in v.0.6 X, y = load_iris(return_X_y=True) sdml_supervised = SDML_Supervised(balance_param=5e-5) msg = ('"random_state" parameter in the `fit` function is ' 'deprecated. Set `random_state` at initialization ' 'instead (when instantiating a new `SDML_Supervised` ' 'object).') with pytest.warns(DeprecationWarning) as raised_warning: sdml_supervised.fit(X, y, random_state=np.random) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning_random_state(self): # test that a ChangedBehavior warning is thrown if the random_state is # not set in fit. # TODO: remove in v.0.6 X, y = load_iris(return_X_y=True) sdml_supervised = SDML_Supervised(balance_param=5e-5) msg = ('As of v0.5.0, `SDML_Supervised` now uses the ' '`random_state` given at initialization to sample ' 'constraints, not the default `np.random` from the `fit` ' 'method, since this argument is now deprecated. 
' 'This warning will disappear in v0.6.0.') with pytest.warns(ChangedBehaviorWarning) as raised_warning: sdml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.skipif(not HAS_SKGGM, reason='The message should be printed only if skggm is ' 'installed.') def test_verbose_has_installed_skggm_sdml(capsys): # Test that if users have installed skggm, a message is printed telling them # skggm's solver is used (when they use SDML) # TODO: remove if we don't need skggm anymore pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(verbose=True, prior='covariance') sdml.fit(pairs, y_pairs) out, _ = capsys.readouterr() assert "SDML will use skggm's graphical lasso solver." in out @pytest.mark.skipif(not HAS_SKGGM, reason='The message should be printed only if skggm is ' 'installed.') def test_verbose_has_installed_skggm_sdml_supervised(capsys): # Test that if users have installed skggm, a message is printed telling them # skggm's solver is used (when they use SDML_Supervised) # TODO: remove if we don't need skggm anymore X, y = load_iris(return_X_y=True) sdml = SDML_Supervised(verbose=True, prior='identity', balance_param=1e-5) sdml.fit(X, y) out, _ = capsys.readouterr() assert "SDML will use skggm's graphical lasso solver." in out @pytest.mark.skipif(HAS_SKGGM, reason='The message should be printed only if skggm is ' 'not installed.') def test_verbose_has_not_installed_skggm_sdml(capsys): # Test that if users have installed skggm, a message is printed telling them # skggm's solver is used (when they use SDML) # TODO: remove if we don't need skggm anymore pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(verbose=True, prior='covariance') sdml.fit(pairs, y_pairs) out, _ = capsys.readouterr() assert "SDML will use scikit-learn's graphical lasso solver." 
in out @pytest.mark.skipif(HAS_SKGGM, reason='The message should be printed only if skggm is ' 'not installed.') def test_verbose_has_not_installed_skggm_sdml_supervised(capsys): # Test that if users have installed skggm, a message is printed telling them # skggm's solver is used (when they use SDML_Supervised) # TODO: remove if we don't need skggm anymore X, y = make_classification(random_state=42) sdml = SDML_Supervised(verbose=True, balance_param=1e-5, prior='identity') sdml.fit(X, y) out, _ = capsys.readouterr() assert "SDML will use scikit-learn's graphical lasso solver." in out class TestNCA(MetricTestCase): def test_iris(self): n = self.iris_points.shape[0] # Without dimension reduction nca = NCA(max_iter=(100000 // n)) nca.fit(self.iris_points, self.iris_labels) csep = class_separation(nca.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.15) # With dimension reduction nca = NCA(max_iter=(100000 // n), n_components=2) nca.fit(self.iris_points, self.iris_labels) csep = class_separation(nca.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.20) def test_finite_differences(self): """Test gradient of loss function Assert that the gradient is almost equal to its finite differences approximation. """ # Initialize the transformation `M`, as well as `X` and `y` and `NCA` X, y = make_classification() M = np.random.randn(np.random.randint(1, X.shape[1] + 1), X.shape[1]) mask = y[:, np.newaxis] == y[np.newaxis, :] nca = NCA() nca.n_iter_ = 0 def fun(M): return nca._loss_grad_lbfgs(M, X, mask)[0] def grad(M): return nca._loss_grad_lbfgs(M, X, mask)[1].ravel() # compute relative error epsilon = np.sqrt(np.finfo(float).eps) rel_diff = (check_grad(fun, grad, M.ravel()) / np.linalg.norm(approx_fprime(M.ravel(), fun, epsilon))) np.testing.assert_almost_equal(rel_diff, 0., decimal=6) def test_simple_example(self): """Test on a simple example. Puts four points in the input space where the opposite labels points are next to each other. 
After transform the same labels points should be next to each other. """ X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) nca = NCA(n_components=2,) nca.fit(X, y) Xansformed = nca.transform(X) np.testing.assert_equal(pairwise_distances(Xansformed).argsort()[:, 1], np.array([2, 3, 0, 1])) def test_singleton_class(self): X = self.iris_points y = self.iris_labels # one singleton class: test fitting works singleton_class = 1 ind_singleton, = np.where(y == singleton_class) y[ind_singleton] = 2 y[ind_singleton[0]] = singleton_class nca = NCA(max_iter=30) nca.fit(X, y) # One non-singleton class: test fitting works ind_1, = np.where(y == 1) ind_2, = np.where(y == 2) y[ind_1] = 0 y[ind_1[0]] = 1 y[ind_2] = 0 y[ind_2[0]] = 2 nca = NCA(max_iter=30) nca.fit(X, y) # Only singleton classes: test fitting does nothing (the gradient # must be null in this case, so the final matrix must stay like # the initialization) ind_0, = np.where(y == 0) ind_1, = np.where(y == 1) ind_2, = np.where(y == 2) X = X[[ind_0[0], ind_1[0], ind_2[0]]] y = y[[ind_0[0], ind_1[0], ind_2[0]]] A = make_spd_matrix(X.shape[1], X.shape[1]) nca = NCA(init=A, max_iter=30, n_components=X.shape[1]) nca.fit(X, y) assert_array_equal(nca.components_, A) def test_one_class(self): # if there is only one class the gradient is null, so the final matrix # must stay like the initialization X = self.iris_points[self.iris_labels == 0] y = self.iris_labels[self.iris_labels == 0] A = make_spd_matrix(X.shape[1], X.shape[1]) nca = NCA(init=A, max_iter=30, n_components=X.shape[1]) nca.fit(X, y) assert_array_equal(nca.components_, A) def test_changed_behaviour_warning(self): # test that a ChangedBehavior warning is thrown about the init, if the # default parameters are used. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) nca = NCA() msg = ("Warning, no init was set (`init=None`). 
As of version 0.5.0, " "the default init will now be set to 'auto', instead of the " "previous scaling matrix. If you still want to use the same " "scaling matrix as before, set " "init=np.eye(X.shape[1])/(np.maximum(X.max(axis=0)-X.min(axis=0)" ", EPS))). This warning will disappear in v0.6.0, and `init` " "parameter's default value will be set to 'auto'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: nca.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.parametrize('num_dims', [None, 2]) def test_deprecation_num_dims_nca(num_dims): # test that a deprecation message is thrown if num_dims is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) nca = NCA(num_dims=num_dims) msg = ('"num_dims" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. Use "n_components" instead') with pytest.warns(DeprecationWarning) as raised_warning: nca.fit(X, y) assert (str(raised_warning[0].message) == msg) class TestLFDA(MetricTestCase): def test_iris(self): lfda = LFDA(k=2, n_components=2) lfda.fit(self.iris_points, self.iris_labels) csep = class_separation(lfda.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.15) # Sanity checks for learned matrices. self.assertEqual(lfda.get_mahalanobis_matrix().shape, (4, 4)) self.assertEqual(lfda.components_.shape, (2, 4)) @pytest.mark.parametrize('num_dims', [None, 2]) def test_deprecation_num_dims_lfda(num_dims): # test that a deprecation message is thrown if num_dims is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lfda = LFDA(num_dims=num_dims) msg = ('"num_dims" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. 
Use "n_components" instead') with pytest.warns(DeprecationWarning) as raised_warning: lfda.fit(X, y) assert (str(raised_warning[0].message) == msg) class TestRCA(MetricTestCase): def test_iris(self): rca = RCA_Supervised(n_components=2, num_chunks=30, chunk_size=2) rca.fit(self.iris_points, self.iris_labels) csep = class_separation(rca.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.29) def test_deprecation_pca_comps(self): # test that a deprecation message is thrown if pca_comps is set at # initialization # TODO: remove in v.0.6 X, y = make_classification(random_state=42, n_samples=100) rca_supervised = RCA_Supervised(pca_comps=X.shape[1], num_chunks=20) msg = ('"pca_comps" parameter is not used. ' 'It has been deprecated in version 0.5.0 and will be' 'removed in 0.6.0. RCA will not do PCA preprocessing anymore. If ' 'you still want to do it, you could use ' '`sklearn.decomposition.PCA` and an `sklearn.pipeline.Pipeline`.') with pytest.warns(ChangedBehaviorWarning) as expected_msg: rca_supervised.fit(X, y) assert any(str(w.message) == msg for w in expected_msg) rca = RCA(pca_comps=X.shape[1]) with pytest.warns(ChangedBehaviorWarning) as expected_msg: rca.fit(X, y) assert any(str(w.message) == msg for w in expected_msg) def test_changedbehaviorwarning_preprocessing(self): # test that a ChangedBehaviorWarning is thrown when using RCA # TODO: remove in v.0.6 msg = ("RCA will no longer center the data before training. If you want " "to do some preprocessing, you should do it manually (you can also " "use an `sklearn.pipeline.Pipeline` for instance). 
This warning " "will disappear in version 0.6.0.") X, y = make_classification(random_state=42, n_samples=100) rca_supervised = RCA_Supervised(num_chunks=20) with pytest.warns(ChangedBehaviorWarning) as expected_msg: rca_supervised.fit(X, y) assert any(str(w.message) == msg for w in expected_msg) rca = RCA() with pytest.warns(ChangedBehaviorWarning) as expected_msg: rca.fit(X, y) assert any(str(w.message) == msg for w in expected_msg) def test_rank_deficient_returns_warning(self): """Checks that if the covariance matrix is not invertible, we raise a warning message advising to use PCA""" X, y = load_iris(return_X_y=True) # we make the fourth column a linear combination of the two first, # so that the covariance matrix will not be invertible: X[:, 3] = X[:, 0] + 3 * X[:, 1] rca = RCA() msg = ('The inner covariance matrix is not invertible, ' 'so the transformation matrix may contain Nan values. ' 'You should reduce the dimensionality of your input,' 'for instance using `sklearn.decomposition.PCA` as a ' 'preprocessing step.') with pytest.warns(None) as raised_warnings: rca.fit(X, y) assert any(str(w.message) == msg for w in raised_warnings) def test_deprecation_random_state(self): # test that a deprecation message is thrown if random_state is set at # fit time # TODO: remove in v.0.6 X, y = make_classification(random_state=42, n_samples=100) rca_supervised = RCA_Supervised(num_chunks=20) msg = ('"random_state" parameter in the `fit` function is ' 'deprecated. Set `random_state` at initialization ' 'instead (when instantiating a new `RCA_Supervised` ' 'object).') with pytest.warns(DeprecationWarning) as raised_warning: rca_supervised.fit(X, y, random_state=np.random) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning_random_state(self): # test that a ChangedBehavior warning is thrown if the random_state is # not set in fit. 
# TODO: remove in v.0.6 X, y = make_classification(random_state=42, n_samples=100) rca_supervised = RCA_Supervised(num_chunks=20) msg = ('As of v0.5.0, `RCA_Supervised` now uses the ' '`random_state` given at initialization to sample ' 'constraints, not the default `np.random` from the `fit` ' 'method, since this argument is now deprecated. ' 'This warning will disappear in v0.6.0.') with pytest.warns(ChangedBehaviorWarning) as raised_warning: rca_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.parametrize('num_dims', [None, 2]) def test_deprecation_num_dims_rca(num_dims): # test that a deprecation message is thrown if num_dims is set at # initialization # TODO: remove in v.0.6 X, y = load_iris(return_X_y=True) rca = RCA(num_dims=num_dims) msg = ('"num_dims" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. Use "n_components" instead') with pytest.warns(DeprecationWarning) as raised_warning: rca.fit(X, y) assert any(str(w.message) == msg for w in raised_warning) # we take a small number of chunks so that RCA works on iris rca_supervised = RCA_Supervised(num_dims=num_dims, num_chunks=10) msg = ('"num_dims" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. Use "n_components" instead') with pytest.warns(DeprecationWarning) as raised_warning: rca_supervised.fit(X, y) assert any(str(w.message) == msg for w in raised_warning) class TestMLKR(MetricTestCase): def test_iris(self): mlkr = MLKR() mlkr.fit(self.iris_points, self.iris_labels) csep = class_separation(mlkr.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.25) def test_finite_differences(self): """Test gradient of loss function Assert that the gradient is almost equal to its finite differences approximation. 
""" # Initialize the transformation `M`, as well as `X`, and `y` and `MLKR` X, y = make_regression(n_features=4, random_state=1, n_samples=20) X, y = check_X_y(X, y) M = np.random.randn(2, X.shape[1]) mlkr = MLKR() mlkr.n_iter_ = 0 def fun(M): return mlkr._loss(M, X, y)[0] def grad_fn(M): return mlkr._loss(M, X, y)[1].ravel() # compute relative error rel_diff = check_grad(fun, grad_fn, M.ravel()) / np.linalg.norm(grad_fn(M)) np.testing.assert_almost_equal(rel_diff, 0.) def test_deprecation_A0(self): # test that a deprecation message is thrown if A0 is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mlkr = MLKR(A0=np.ones_like(X)) msg = ('"A0" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' 'removed in 0.6.0. Use "init" instead.') with pytest.warns(DeprecationWarning) as raised_warning: mlkr.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning(self): # test that a ChangedBehavior warning is thrown about the init, if the # default parameters are used. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([0.1, 0.2, 0.3, 0.4]) mlkr = MLKR() msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, " "the default init will now be set to 'auto', instead of 'pca'. " "If you still want to use PCA as an init, set init='pca'. 
" "This warning will disappear in v0.6.0, and `init` parameter's" " default value will be set to 'auto'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: mlkr.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.parametrize('num_dims', [None, 2]) def test_deprecation_num_dims_mlkr(num_dims): # test that a deprecation message is thrown if num_dims is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mlkr = MLKR(num_dims=num_dims) msg = ('"num_dims" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. Use "n_components" instead') with pytest.warns(DeprecationWarning) as raised_warning: mlkr.fit(X, y) assert (str(raised_warning[0].message) == msg) class TestMMC(MetricTestCase): def test_iris(self): # Generate full set of constraints for comparison with reference # implementation mask = self.iris_labels[None] == self.iris_labels[:, None] a, b = np.nonzero(np.triu(mask, k=1)) c, d = np.nonzero(np.triu(~mask, k=1)) # Full metric n_features = self.iris_points.shape[1] mmc = MMC(convergence_threshold=0.01, init=np.eye(n_features) / 10) mmc.fit(*wrap_pairs(self.iris_points, [a, b, c, d])) expected = [[+0.000514, +0.000868, -0.001195, -0.001703], [+0.000868, +0.001468, -0.002021, -0.002879], [-0.001195, -0.002021, +0.002782, +0.003964], [-0.001703, -0.002879, +0.003964, +0.005648]] assert_array_almost_equal(expected, mmc.get_mahalanobis_matrix(), decimal=6) # Diagonal metric mmc = MMC(diagonal=True) mmc.fit(*wrap_pairs(self.iris_points, [a, b, c, d])) expected = [0, 0, 1.210220, 1.228596] assert_array_almost_equal(np.diag(expected), mmc.get_mahalanobis_matrix(), decimal=6) # Supervised Full mmc = MMC_Supervised() mmc.fit(self.iris_points, self.iris_labels) csep = class_separation(mmc.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.15) # Supervised Diagonal mmc = MMC_Supervised(diagonal=True) 
mmc.fit(self.iris_points, self.iris_labels) csep = class_separation(mmc.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.2) def test_deprecation_num_labeled(self): # test that a deprecation message is thrown if num_labeled is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mmc_supervised = MMC_Supervised(num_labeled=np.inf) msg = ('"num_labeled" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0') assert_warns_message(DeprecationWarning, msg, mmc_supervised.fit, X, y) def test_deprecation_A0(self): # test that a deprecation message is thrown if A0 is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mmc_supervised = MMC_Supervised(A0=np.ones_like(X)) msg = ('"A0" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' 'removed in 0.6.0. Use "init" instead.') with pytest.warns(DeprecationWarning) as raised_warning: mmc_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] mmc = MMC(A0=np.ones_like(X)) with pytest.warns(DeprecationWarning) as raised_warning: mmc.fit(pairs, y_pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning(self): # test that a ChangedBehavior warning is thrown about the init, if the # default parameters are used. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mmc_supervised = MMC_Supervised() msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, " "the default init will now be set to 'identity', instead of the " "identity divided by a scaling factor of 10. 
" "If you still want to use the same init as in previous " "versions, set init=np.eye(d)/10, where d is the dimension " "of your input space (d=pairs.shape[1]). " "This warning will disappear in v0.6.0, and `init` parameter's" " default value will be set to 'auto'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: mmc_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] mmc = MMC() with pytest.warns(ChangedBehaviorWarning) as raised_warning: mmc.fit(pairs, y_pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_deprecation_random_state(self): # test that a deprecation message is thrown if random_state is set at # fit time # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mmc_supervised = MMC_Supervised() msg = ('"random_state" parameter in the `fit` function is ' 'deprecated. Set `random_state` at initialization ' 'instead (when instantiating a new `MMC_Supervised` ' 'object).') with pytest.warns(DeprecationWarning) as raised_warning: mmc_supervised.fit(X, y, random_state=np.random) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning_random_state(self): # test that a ChangedBehavior warning is thrown if the random_state is # not set in fit. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mmc_supervised = MMC_Supervised() msg = ('As of v0.5.0, `MMC_Supervised` now uses the ' '`random_state` given at initialization to sample ' 'constraints, not the default `np.random` from the `fit` ' 'method, since this argument is now deprecated. 
' 'This warning will disappear in v0.6.0.') with pytest.warns(ChangedBehaviorWarning) as raised_warning: mmc_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.parametrize(('algo_class', 'dataset'), [(NCA, make_classification()), (MLKR, make_regression())]) def test_verbose(algo_class, dataset, capsys): # assert there is proper output when verbose = True X, y = dataset model = algo_class(verbose=True) model.fit(X, y) out, _ = capsys.readouterr() # check output lines = re.split('\n+', out) header = '{:>10} {:>20} {:>10}'.format('Iteration', 'Objective Value', 'Time(s)') assert lines[0] == '[{}]'.format(algo_class.__name__) assert lines[1] == '[{}] {}'.format(algo_class.__name__, header) assert lines[2] == '[{}] {}'.format(algo_class.__name__, '-' * len(header)) for line in lines[3:-2]: # The following regex will match for instance: # '[NCA] 0 6.988936e+01 0.01' assert re.match(r"\[" + algo_class.__name__ + r"\]\ *\d+\ *\d\.\d{6}e[+|-]" r"\d+\ *\d+\.\d{2}", line) assert re.match(r"\[" + algo_class.__name__ + r"\] Training took\ *" r"\d+\.\d{2}s\.", lines[-2]) assert lines[-1] == '' @pytest.mark.parametrize(('algo_class', 'dataset'), [(NCA, make_classification()), (MLKR, make_regression(n_features=10))]) def test_no_verbose(dataset, algo_class, capsys): # assert by default there is no output (verbose=False) X, y = dataset model = algo_class() model.fit(X, y) out, _ = capsys.readouterr() # check output assert (out == '') @pytest.mark.parametrize(('algo_class', 'dataset'), [(NCA, make_classification()), (MLKR, make_regression(n_features=10))]) def test_convergence_warning(dataset, algo_class): X, y = dataset model = algo_class(max_iter=2, verbose=True) cls_name = model.__class__.__name__ assert_warns_message(ConvergenceWarning, '[{}] {} did not converge'.format(cls_name, cls_name), model.fit, X, y) if __name__ == '__main__': unittest.main()
41.901754
79
0.635505
import unittest import re import pytest import numpy as np import scipy from scipy.optimize import check_grad, approx_fprime from six.moves import xrange from sklearn.metrics import pairwise_distances, euclidean_distances from sklearn.datasets import (load_iris, make_classification, make_regression, make_spd_matrix) from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_allclose) from sklearn.utils.testing import assert_warns_message from sklearn.exceptions import ConvergenceWarning, ChangedBehaviorWarning from sklearn.utils.validation import check_X_y try: from inverse_covariance import quic assert(quic) except ImportError: HAS_SKGGM = False else: HAS_SKGGM = True from metric_learn import (LMNN, NCA, LFDA, Covariance, MLKR, MMC, LSML_Supervised, ITML_Supervised, SDML_Supervised, RCA_Supervised, MMC_Supervised, SDML, RCA, ITML, LSML) from metric_learn.constraints import wrap_pairs from metric_learn.lmnn import _sum_outer_products def class_separation(X, labels): unique_labels, label_inds = np.unique(labels, return_inverse=True) ratio = 0 for li in xrange(len(unique_labels)): Xc = X[label_inds == li] Xnc = X[label_inds != li] ratio += pairwise_distances(Xc).mean() / pairwise_distances(Xc, Xnc).mean() return ratio / len(unique_labels) class MetricTestCase(unittest.TestCase): @classmethod def setUpClass(self): iris_data = load_iris() self.iris_points = iris_data['data'] self.iris_labels = iris_data['target'] np.random.seed(1234) class TestCovariance(MetricTestCase): def test_iris(self): cov = Covariance() cov.fit(self.iris_points) csep = class_separation(cov.transform(self.iris_points), self.iris_labels) self.assertAlmostEqual(csep, 0.72981476) def test_singular_returns_pseudo_inverse(self): X, y = load_iris(return_X_y=True) X = np.concatenate([X, X[:, :2].dot([[2], [3]])], axis=1) cov_matrix = np.cov(X, rowvar=False) covariance = Covariance() covariance.fit(X) pseudo_inverse = covariance.get_mahalanobis_matrix() 
assert_allclose(cov_matrix.dot(pseudo_inverse).dot(cov_matrix), cov_matrix) assert_allclose(pseudo_inverse.dot(cov_matrix).dot(pseudo_inverse), pseudo_inverse) class TestLSML(MetricTestCase): def test_iris(self): lsml = LSML_Supervised(num_constraints=200) lsml.fit(self.iris_points, self.iris_labels) csep = class_separation(lsml.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.8) def test_deprecation_num_labeled(self): # test that a deprecation message is thrown if num_labeled is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lsml_supervised = LSML_Supervised(num_labeled=np.inf) msg = ('"num_labeled" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0') assert_warns_message(DeprecationWarning, msg, lsml_supervised.fit, X, y) def test_changed_behaviour_warning(self): # test that a ChangedBehavior warning is thrown about the init, if the # default parameters are used. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lsml_supervised = LSML_Supervised() msg = ("Warning, no prior was set (`prior=None`). As of version 0.5.0, " "the default prior will now be set to " "'identity', instead of 'covariance'. If you still want to use " "the inverse of the covariance matrix as a prior, " "set prior='covariance'. 
This warning will disappear in " "v0.6.0, and `prior` parameter's default value will be set to " "'identity'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: lsml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.], [-5., 3.], [5., 0.]], [[0., 50.], [0., -60], [-10., 0.], [10., 0.]]]) lsml = LSML() with pytest.warns(ChangedBehaviorWarning) as raised_warning: lsml.fit(pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_deprecation_random_state(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lsml_supervised = LSML_Supervised() msg = ('"random_state" parameter in the `fit` function is ' 'deprecated. Set `random_state` at initialization ' 'instead (when instantiating a new `LSML_Supervised` ' 'object).') with pytest.warns(DeprecationWarning) as raised_warning: lsml_supervised.fit(X, y, random_state=np.random) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning_random_state(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lsml_supervised = LSML_Supervised() msg = ('As of v0.5.0, `LSML_Supervised` now uses the ' '`random_state` given at initialization to sample ' 'constraints, not the default `np.random` from the `fit` ' 'method, since this argument is now deprecated. 
' 'This warning will disappear in v0.6.0.') with pytest.warns(ChangedBehaviorWarning) as raised_warning: lsml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) class TestITML(MetricTestCase): def test_iris(self): itml = ITML_Supervised(num_constraints=200) itml.fit(self.iris_points, self.iris_labels) csep = class_separation(itml.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.2) def test_deprecation_num_labeled(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised(num_labeled=np.inf) msg = ('"num_labeled" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0') assert_warns_message(DeprecationWarning, msg, itml_supervised.fit, X, y) def test_deprecation_bounds(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised(bounds=None) msg = ('"bounds" parameter from initialization is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. Use the "bounds" parameter of this ' 'fit method instead.') assert_warns_message(DeprecationWarning, msg, itml_supervised.fit, X, y) def test_deprecation_A0(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised(A0=np.ones_like(X)) msg = ('"A0" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' 'removed in 0.6.0. 
Use "prior" instead.') with pytest.warns(DeprecationWarning) as raised_warning: itml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] itml = ITML(A0=np.ones_like(X)) with pytest.warns(DeprecationWarning) as raised_warning: itml.fit(pairs, y_pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_deprecation_random_state(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised() msg = ('"random_state" parameter in the `fit` function is ' 'deprecated. Set `random_state` at initialization ' 'instead (when instantiating a new `ITML_Supervised` ' 'object).') with pytest.warns(DeprecationWarning) as raised_warning: itml_supervised.fit(X, y, random_state=np.random) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning_random_state(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised() msg = ('As of v0.5.0, `ITML_Supervised` now uses the ' '`random_state` given at initialization to sample ' 'constraints, not the default `np.random` from the `fit` ' 'method, since this argument is now deprecated. 
' 'This warning will disappear in v0.6.0.') with pytest.warns(ChangedBehaviorWarning) as raised_warning: itml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.parametrize('bounds', [None, (20., 100.), [20., 100.], np.array([20., 100.]), np.array([[20., 100.]]), np.array([[20], [100]])]) def test_bounds_parameters_valid(bounds): pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] itml = ITML() itml.fit(pairs, y_pairs, bounds=bounds) X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised() itml_supervised.fit(X, y, bounds=bounds) @pytest.mark.parametrize('bounds', ['weird', ['weird1', 'weird2'], np.array([1, 2, 3])]) def test_bounds_parameters_invalid(bounds): pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] itml = ITML() with pytest.raises(Exception): itml.fit(pairs, y_pairs, bounds=bounds) X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) itml_supervised = ITML_Supervised() with pytest.raises(Exception): itml_supervised.fit(X, y, bounds=bounds) class TestLMNN(MetricTestCase): def test_iris(self): lmnn = LMNN(k=5, learn_rate=1e-6, verbose=False) lmnn.fit(self.iris_points, self.iris_labels) csep = class_separation(lmnn.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.25) def test_loss_grad_lbfgs(self): rng = np.random.RandomState(42) X, y = make_classification(random_state=rng) L = rng.randn(rng.randint(1, X.shape[1] + 1), X.shape[1]) lmnn = LMNN() k = lmnn.k reg = lmnn.regularization X, y = lmnn._prepare_inputs(X, y, dtype=float, ensure_min_samples=2) num_pts, n_components = X.shape unique_labels, label_inds = np.unique(y, return_inverse=True) lmnn.labels_ = np.arange(len(unique_labels)) lmnn.components_ = np.eye(n_components) target_neighbors = lmnn._select_targets(X, label_inds) dfG = _sum_outer_products(X, target_neighbors.flatten(), 
np.repeat(np.arange(X.shape[0]), k)) def loss_grad(flat_L): return lmnn._loss_grad(X, flat_L.reshape(-1, X.shape[1]), dfG, k, reg, target_neighbors, label_inds) def fun(x): return loss_grad(x)[1] def grad(x): return loss_grad(x)[0].ravel() epsilon = np.sqrt(np.finfo(float).eps) rel_diff = (check_grad(fun, grad, L.ravel()) / np.linalg.norm(approx_fprime(L.ravel(), fun, epsilon))) np.testing.assert_almost_equal(rel_diff, 0., decimal=5) def test_changed_behaviour_warning(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lmnn = LMNN(k=2) msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, " "the default init will now be set to 'auto', instead of the " "previous identity matrix. If you still want to use the identity " "matrix as before, set init='identity'. This warning " "will disappear in v0.6.0, and `init` parameter's default value " "will be set to 'auto'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: lmnn.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_deprecation_use_pca(self): # test that a DeprecationWarning is thrown about use_pca, if the # default parameters are used. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lmnn = LMNN(k=2, use_pca=True) msg = ('"use_pca" parameter is not used.' 
' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0.') assert_warns_message(DeprecationWarning, msg, lmnn.fit, X, y) def test_loss_func(capsys): # toy dataset to use X, y = make_classification(n_samples=10, n_classes=2, n_features=6, n_redundant=0, shuffle=True, scale=[1, 1, 20, 20, 20, 20], random_state=42) def hinge(a): if a > 0: return a, 1 else: return 0, 0 def loss_fn(L, X, y, target_neighbors, reg): L = L.reshape(-1, X.shape[1]) Lx = np.dot(X, L.T) loss = 0 total_active = 0 grad = np.zeros_like(L) for i in range(X.shape[0]): for j in target_neighbors[i]: loss += (1 - reg) * np.sum((Lx[i] - Lx[j]) ** 2) grad += (1 - reg) * np.outer(Lx[i] - Lx[j], X[i] - X[j]) for l in range(X.shape[0]): if y[i] != y[l]: hin, active = hinge(1 + np.sum((Lx[i] - Lx[j])**2) - np.sum((Lx[i] - Lx[l])**2)) total_active += active if active: loss += reg * hin grad += (reg * (np.outer(Lx[i] - Lx[j], X[i] - X[j]) - np.outer(Lx[i] - Lx[l], X[i] - X[l]))) grad = 2 * grad return grad, loss, total_active # we check that the gradient we have computed in the non-performant implem # is indeed the true gradient on a toy example: def _select_targets(X, y, k): target_neighbors = np.empty((X.shape[0], k), dtype=int) for label in np.unique(y): inds, = np.nonzero(y == label) dd = euclidean_distances(X[inds], squared=True) np.fill_diagonal(dd, np.inf) nn = np.argsort(dd)[..., :k] target_neighbors[inds] = inds[nn] return target_neighbors target_neighbors = _select_targets(X, y, 2) regularization = 0.5 n_features = X.shape[1] x0 = np.random.randn(1, n_features) def loss(x0): return loss_fn(x0.reshape(-1, X.shape[1]), X, y, target_neighbors, regularization)[1] def grad(x0): return loss_fn(x0.reshape(-1, X.shape[1]), X, y, target_neighbors, regularization)[0].ravel() scipy.optimize.check_grad(loss, grad, x0.ravel()) class LMNN_with_callback(LMNN): def __init__(self, callback, *args, **kwargs): self.callback = callback super(LMNN_with_callback, self).__init__(*args, **kwargs) def 
_loss_grad(self, *args, **kwargs): grad, objective, total_active = ( super(LMNN_with_callback, self)._loss_grad(*args, **kwargs)) self.callback.append(grad) return grad, objective, total_active class LMNN_nonperformant(LMNN_with_callback): def fit(self, X, y): self.y = y return super(LMNN_nonperformant, self).fit(X, y) def _loss_grad(self, X, L, dfG, k, reg, target_neighbors, label_inds): grad, loss, total_active = loss_fn(L.ravel(), X, self.y, target_neighbors, self.regularization) self.callback.append(grad) return grad, loss, total_active mem1, mem2 = [], [] lmnn_perf = LMNN_with_callback(verbose=True, random_state=42, init='identity', max_iter=30, callback=mem1) lmnn_nonperf = LMNN_nonperformant(verbose=True, random_state=42, init='identity', max_iter=30, callback=mem2) objectives, obj_diffs, learn_rate, total_active = (dict(), dict(), dict(), dict()) for algo, name in zip([lmnn_perf, lmnn_nonperf], ['perf', 'nonperf']): algo.fit(X, y) out, _ = capsys.readouterr() lines = re.split("\n+", out) # we get every variable that is printed from the algorithm in verbose num = r'(-?\d+.?\d*(e[+|-]\d+)?)' strings = [re.search(r"\d+ (?:{}) (?:{}) (?:(\d+)) (?:{})" .format(num, num, num), s) for s in lines] objectives[name] = [float(match.group(1)) for match in strings if match is not None] obj_diffs[name] = [float(match.group(3)) for match in strings if match is not None] total_active[name] = [float(match.group(5)) for match in strings if match is not None] learn_rate[name] = [float(match.group(6)) for match in strings if match is not None] assert len(strings) >= 10 # we ensure that we actually did more than 10 # iterations assert total_active[name][0] >= 2 # we ensure that we have some active # constraints (that's the case we want to test) for i in range(len(mem1)): np.testing.assert_allclose(lmnn_perf.callback[i], lmnn_nonperf.callback[i], err_msg='Gradient different at position ' '{}'.format(i)) np.testing.assert_allclose(objectives['perf'], objectives['nonperf']) 
np.testing.assert_allclose(obj_diffs['perf'], obj_diffs['nonperf']) np.testing.assert_allclose(total_active['perf'], total_active['nonperf']) np.testing.assert_allclose(learn_rate['perf'], learn_rate['nonperf']) @pytest.mark.parametrize('X, y, loss', [(np.array([[0], [1], [2], [3]]), [1, 1, 0, 0], 3.0), (np.array([[0], [1], [2], [3]]), [1, 0, 0, 1], 26.)]) def test_toy_ex_lmnn(X, y, loss): L = np.array([[1]]) lmnn = LMNN(k=1, regularization=0.5) k = lmnn.k reg = lmnn.regularization X, y = lmnn._prepare_inputs(X, y, dtype=float, ensure_min_samples=2) num_pts, n_components = X.shape unique_labels, label_inds = np.unique(y, return_inverse=True) lmnn.labels_ = np.arange(len(unique_labels)) lmnn.components_ = np.eye(n_components) target_neighbors = lmnn._select_targets(X, label_inds) dfG = _sum_outer_products(X, target_neighbors.flatten(), np.repeat(np.arange(X.shape[0]), k)) a1 = [None] * k a2 = [None] * k for nn_idx in xrange(k): a1[nn_idx] = np.array([]) a2[nn_idx] = np.array([]) assert lmnn._loss_grad(X, L.reshape(-1, X.shape[1]), dfG, k, reg, target_neighbors, label_inds)[1] == loss def test_convergence_simple_example(capsys): X, y = make_classification(random_state=0) lmnn = LMNN(verbose=True) lmnn.fit(X, y) out, _ = capsys.readouterr() assert "LMNN converged with objective" in out def test_no_twice_same_objective(capsys): X, y = make_classification(random_state=0) lmnn = LMNN(verbose=True) lmnn.fit(X, y) out, _ = capsys.readouterr() lines = re.split("\n+", out) objectives = [re.search(r"\d* (?:(\d*.\d*))[ | -]\d*.\d*", s) for s in lines] objectives = [match.group(1) for match in objectives if match is not None] assert len(objectives[:-1]) == len(set(objectives[:-1])) class TestSDML(MetricTestCase): @pytest.mark.skipif(HAS_SKGGM, reason="The warning can be thrown only if skggm is " "not installed.") def test_sdml_supervised_raises_warning_msg_not_installed_skggm(self): # load_iris: dataset where we know scikit-learn's graphical lasso fails X, y = 
load_iris(return_X_y=True) sdml_supervised = SDML_Supervised(balance_param=0.5, use_cov=True, sparsity_param=0.01) msg = ("There was a problem in SDML when using scikit-learn's graphical " "lasso solver. skggm's graphical lasso can sometimes converge on " "non SPD cases where scikit-learn's graphical lasso fails to " "converge. Try to install skggm and rerun the algorithm (see " "the README.md for the right version of skggm). The following " "error message was thrown:") with pytest.raises(RuntimeError) as raised_error: sdml_supervised.fit(X, y) assert str(raised_error.value).startswith(msg) @pytest.mark.skipif(HAS_SKGGM, reason="The warning can be thrown only if skggm is " "not installed.") def test_sdml_raises_warning_msg_not_installed_skggm(self): # TODO: remove if we don't need skggm anymore # because it will return a non SPD matrix pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(prior='identity', balance_param=100, verbose=True) msg = ("There was a problem in SDML when using scikit-learn's graphical " "lasso solver. skggm's graphical lasso can sometimes converge on " "non SPD cases where scikit-learn's graphical lasso fails to " "converge. 
Try to install skggm and rerun the algorithm (see " "the README.md for the right version of skggm).") with pytest.raises(RuntimeError) as raised_error: sdml.fit(pairs, y_pairs) assert msg == str(raised_error.value) @pytest.mark.skipif(not HAS_SKGGM, reason="The warning can be thrown only if skggm is " "installed.") def test_sdml_raises_warning_msg_installed_skggm(self): # case on which we know that skggm's graphical lasso fails pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(prior='identity', balance_param=100, verbose=True) msg = ("There was a problem in SDML when using skggm's graphical " "lasso solver.") with pytest.raises(RuntimeError) as raised_error: sdml.fit(pairs, y_pairs) assert msg == str(raised_error.value) @pytest.mark.skipif(not HAS_SKGGM, reason="The warning can be thrown only if skggm is " "installed.") def test_sdml_supervised_raises_warning_msg_installed_skggm(self): # TODO: remove if we don't need skggm anymore # because it will return non finite values rng = np.random.RandomState(42) # This example will create a diagonal em_cov with a negative coeff ( # pathological case) X = np.array([[-10., 0.], [10., 0.], [5., 0.], [3., 0.]]) y = [0, 0, 1, 1] sdml_supervised = SDML_Supervised(balance_param=0.5, prior='identity', sparsity_param=0.01, random_state=rng) msg = ("There was a problem in SDML when using skggm's graphical " "lasso solver.") with pytest.raises(RuntimeError) as raised_error: sdml_supervised.fit(X, y) assert msg == str(raised_error.value) @pytest.mark.skipif(not HAS_SKGGM, reason="It's only in the case where skggm is installed" "that no warning should be thrown.") def test_raises_no_warning_installed_skggm(self): # otherwise we should be able to instantiate and fit SDML and it # should raise no error and no ConvergenceWarning pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]]) y_pairs = [1, -1] X, y = make_classification(random_state=42) with pytest.warns(None) as 
records: sdml = SDML(prior='covariance') sdml.fit(pairs, y_pairs) for record in records: assert record.category is not ConvergenceWarning with pytest.warns(None) as records: sdml_supervised = SDML_Supervised(prior='identity', balance_param=1e-5) sdml_supervised.fit(X, y) for record in records: assert record.category is not ConvergenceWarning def test_iris(self): # Note: this is a flaky test, which fails for certain seeds. # TODO: un-flake it! rs = np.random.RandomState(5555) sdml = SDML_Supervised(num_constraints=1500, prior='identity', balance_param=5e-5) sdml.fit(self.iris_points, self.iris_labels, random_state=rs) csep = class_separation(sdml.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.22) def test_deprecation_num_labeled(self): # test that a deprecation message is thrown if num_labeled is set at # initialization # TODO: remove in v.0.6 X, y = make_classification(random_state=42) sdml_supervised = SDML_Supervised(num_labeled=np.inf, prior='identity', balance_param=5e-5) msg = ('"num_labeled" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0') assert_warns_message(DeprecationWarning, msg, sdml_supervised.fit, X, y) def test_sdml_raises_warning_non_psd(self): pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y = [1, -1] sdml = SDML(prior='covariance', sparsity_param=0.01, balance_param=0.5) msg = ("Warning, the input matrix of graphical lasso is not " "positive semi-definite (PSD). The algorithm may diverge, " "and lead to degenerate solutions. 
" "To prevent that, try to decrease the balance parameter " "`balance_param` and/or to set prior='identity'.") with pytest.warns(ConvergenceWarning) as raised_warning: try: sdml.fit(pairs, y) except Exception: pass # we assert that this warning is in one of the warning raised by the # estimator assert msg in list(map(lambda w: str(w.message), raised_warning)) def test_sdml_converges_if_psd(self): pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]]) y = [1, -1] sdml = SDML(prior='covariance', sparsity_param=0.01, balance_param=0.5) sdml.fit(pairs, y) assert np.isfinite(sdml.get_mahalanobis_matrix()).all() @pytest.mark.skipif(not HAS_SKGGM, reason="sklearn's graphical_lasso can sometimes not " "work on some non SPD problems. We test that " "is works only if skggm is installed.") def test_sdml_works_on_non_spd_pb_with_skggm(self): X, y = load_iris(return_X_y=True) sdml = SDML_Supervised(balance_param=0.5, sparsity_param=0.01, prior='covariance', random_state=np.random.RandomState(42)) sdml.fit(X, y) def test_deprecation_use_cov(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) sdml_supervised = SDML_Supervised(use_cov=np.ones_like(X), balance_param=1e-5) msg = ('"use_cov" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' 'removed in 0.6.0. 
Use "prior" instead.') with pytest.warns(DeprecationWarning) as raised_warning: sdml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(use_cov=np.ones_like(X), balance_param=1e-5) with pytest.warns(DeprecationWarning) as raised_warning: sdml.fit(pairs, y_pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) sdml_supervised = SDML_Supervised(balance_param=1e-5) msg = ("Warning, no prior was set (`prior=None`). As of version 0.5.0, " "the default prior will now be set to " "'identity', instead of 'covariance'. If you still want to use " "the inverse of the covariance matrix as a prior, " "set prior='covariance'. This warning will disappear in " "v0.6.0, and `prior` parameter's default value will be set to " "'identity'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: sdml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(balance_param=1e-5) with pytest.warns(ChangedBehaviorWarning) as raised_warning: sdml.fit(pairs, y_pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_deprecation_random_state(self): # test that a deprecation message is thrown if random_state is set at # fit time # TODO: remove in v.0.6 X, y = load_iris(return_X_y=True) sdml_supervised = SDML_Supervised(balance_param=5e-5) msg = ('"random_state" parameter in the `fit` function is ' 'deprecated. 
Set `random_state` at initialization ' 'instead (when instantiating a new `SDML_Supervised` ' 'object).') with pytest.warns(DeprecationWarning) as raised_warning: sdml_supervised.fit(X, y, random_state=np.random) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning_random_state(self): # test that a ChangedBehavior warning is thrown if the random_state is # not set in fit. # TODO: remove in v.0.6 X, y = load_iris(return_X_y=True) sdml_supervised = SDML_Supervised(balance_param=5e-5) msg = ('As of v0.5.0, `SDML_Supervised` now uses the ' '`random_state` given at initialization to sample ' 'constraints, not the default `np.random` from the `fit` ' 'method, since this argument is now deprecated. ' 'This warning will disappear in v0.6.0.') with pytest.warns(ChangedBehaviorWarning) as raised_warning: sdml_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.skipif(not HAS_SKGGM, reason='The message should be printed only if skggm is ' 'installed.') def test_verbose_has_installed_skggm_sdml(capsys): # Test that if users have installed skggm, a message is printed telling them # skggm's solver is used (when they use SDML) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(verbose=True, prior='covariance') sdml.fit(pairs, y_pairs) out, _ = capsys.readouterr() assert "SDML will use skggm's graphical lasso solver." in out @pytest.mark.skipif(not HAS_SKGGM, reason='The message should be printed only if skggm is ' 'installed.') def test_verbose_has_installed_skggm_sdml_supervised(capsys): # TODO: remove if we don't need skggm anymore X, y = load_iris(return_X_y=True) sdml = SDML_Supervised(verbose=True, prior='identity', balance_param=1e-5) sdml.fit(X, y) out, _ = capsys.readouterr() assert "SDML will use skggm's graphical lasso solver." 
in out @pytest.mark.skipif(HAS_SKGGM, reason='The message should be printed only if skggm is ' 'not installed.') def test_verbose_has_not_installed_skggm_sdml(capsys): # Test that if users have installed skggm, a message is printed telling them # skggm's solver is used (when they use SDML) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., -55.], [0., -60]]]) y_pairs = [1, -1] sdml = SDML(verbose=True, prior='covariance') sdml.fit(pairs, y_pairs) out, _ = capsys.readouterr() assert "SDML will use scikit-learn's graphical lasso solver." in out @pytest.mark.skipif(HAS_SKGGM, reason='The message should be printed only if skggm is ' 'not installed.') def test_verbose_has_not_installed_skggm_sdml_supervised(capsys): # TODO: remove if we don't need skggm anymore X, y = make_classification(random_state=42) sdml = SDML_Supervised(verbose=True, balance_param=1e-5, prior='identity') sdml.fit(X, y) out, _ = capsys.readouterr() assert "SDML will use scikit-learn's graphical lasso solver." in out class TestNCA(MetricTestCase): def test_iris(self): n = self.iris_points.shape[0] # Without dimension reduction nca = NCA(max_iter=(100000 // n)) nca.fit(self.iris_points, self.iris_labels) csep = class_separation(nca.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.15) # With dimension reduction nca = NCA(max_iter=(100000 // n), n_components=2) nca.fit(self.iris_points, self.iris_labels) csep = class_separation(nca.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.20) def test_finite_differences(self): # Initialize the transformation `M`, as well as `X` and `y` and `NCA` X, y = make_classification() M = np.random.randn(np.random.randint(1, X.shape[1] + 1), X.shape[1]) mask = y[:, np.newaxis] == y[np.newaxis, :] nca = NCA() nca.n_iter_ = 0 def fun(M): return nca._loss_grad_lbfgs(M, X, mask)[0] def grad(M): return nca._loss_grad_lbfgs(M, X, mask)[1].ravel() # compute relative error epsilon = np.sqrt(np.finfo(float).eps) rel_diff = 
(check_grad(fun, grad, M.ravel()) / np.linalg.norm(approx_fprime(M.ravel(), fun, epsilon))) np.testing.assert_almost_equal(rel_diff, 0., decimal=6) def test_simple_example(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) nca = NCA(n_components=2,) nca.fit(X, y) Xansformed = nca.transform(X) np.testing.assert_equal(pairwise_distances(Xansformed).argsort()[:, 1], np.array([2, 3, 0, 1])) def test_singleton_class(self): X = self.iris_points y = self.iris_labels # one singleton class: test fitting works singleton_class = 1 ind_singleton, = np.where(y == singleton_class) y[ind_singleton] = 2 y[ind_singleton[0]] = singleton_class nca = NCA(max_iter=30) nca.fit(X, y) # One non-singleton class: test fitting works ind_1, = np.where(y == 1) ind_2, = np.where(y == 2) y[ind_1] = 0 y[ind_1[0]] = 1 y[ind_2] = 0 y[ind_2[0]] = 2 nca = NCA(max_iter=30) nca.fit(X, y) # Only singleton classes: test fitting does nothing (the gradient # must be null in this case, so the final matrix must stay like # the initialization) ind_0, = np.where(y == 0) ind_1, = np.where(y == 1) ind_2, = np.where(y == 2) X = X[[ind_0[0], ind_1[0], ind_2[0]]] y = y[[ind_0[0], ind_1[0], ind_2[0]]] A = make_spd_matrix(X.shape[1], X.shape[1]) nca = NCA(init=A, max_iter=30, n_components=X.shape[1]) nca.fit(X, y) assert_array_equal(nca.components_, A) def test_one_class(self): # if there is only one class the gradient is null, so the final matrix # must stay like the initialization X = self.iris_points[self.iris_labels == 0] y = self.iris_labels[self.iris_labels == 0] A = make_spd_matrix(X.shape[1], X.shape[1]) nca = NCA(init=A, max_iter=30, n_components=X.shape[1]) nca.fit(X, y) assert_array_equal(nca.components_, A) def test_changed_behaviour_warning(self): # test that a ChangedBehavior warning is thrown about the init, if the # default parameters are used. 
# TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) nca = NCA() msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, " "the default init will now be set to 'auto', instead of the " "previous scaling matrix. If you still want to use the same " "scaling matrix as before, set " "init=np.eye(X.shape[1])/(np.maximum(X.max(axis=0)-X.min(axis=0)" ", EPS))). This warning will disappear in v0.6.0, and `init` " "parameter's default value will be set to 'auto'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: nca.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.parametrize('num_dims', [None, 2]) def test_deprecation_num_dims_nca(num_dims): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) nca = NCA(num_dims=num_dims) msg = ('"num_dims" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. Use "n_components" instead') with pytest.warns(DeprecationWarning) as raised_warning: nca.fit(X, y) assert (str(raised_warning[0].message) == msg) class TestLFDA(MetricTestCase): def test_iris(self): lfda = LFDA(k=2, n_components=2) lfda.fit(self.iris_points, self.iris_labels) csep = class_separation(lfda.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.15) self.assertEqual(lfda.get_mahalanobis_matrix().shape, (4, 4)) self.assertEqual(lfda.components_.shape, (2, 4)) @pytest.mark.parametrize('num_dims', [None, 2]) def test_deprecation_num_dims_lfda(num_dims): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) lfda = LFDA(num_dims=num_dims) msg = ('"num_dims" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. 
Use "n_components" instead') with pytest.warns(DeprecationWarning) as raised_warning: lfda.fit(X, y) assert (str(raised_warning[0].message) == msg) class TestRCA(MetricTestCase): def test_iris(self): rca = RCA_Supervised(n_components=2, num_chunks=30, chunk_size=2) rca.fit(self.iris_points, self.iris_labels) csep = class_separation(rca.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.29) def test_deprecation_pca_comps(self): X, y = make_classification(random_state=42, n_samples=100) rca_supervised = RCA_Supervised(pca_comps=X.shape[1], num_chunks=20) msg = ('"pca_comps" parameter is not used. ' 'It has been deprecated in version 0.5.0 and will be' 'removed in 0.6.0. RCA will not do PCA preprocessing anymore. If ' 'you still want to do it, you could use ' '`sklearn.decomposition.PCA` and an `sklearn.pipeline.Pipeline`.') with pytest.warns(ChangedBehaviorWarning) as expected_msg: rca_supervised.fit(X, y) assert any(str(w.message) == msg for w in expected_msg) rca = RCA(pca_comps=X.shape[1]) with pytest.warns(ChangedBehaviorWarning) as expected_msg: rca.fit(X, y) assert any(str(w.message) == msg for w in expected_msg) def test_changedbehaviorwarning_preprocessing(self): msg = ("RCA will no longer center the data before training. If you want " "to do some preprocessing, you should do it manually (you can also " "use an `sklearn.pipeline.Pipeline` for instance). 
This warning " "will disappear in version 0.6.0.") X, y = make_classification(random_state=42, n_samples=100) rca_supervised = RCA_Supervised(num_chunks=20) with pytest.warns(ChangedBehaviorWarning) as expected_msg: rca_supervised.fit(X, y) assert any(str(w.message) == msg for w in expected_msg) rca = RCA() with pytest.warns(ChangedBehaviorWarning) as expected_msg: rca.fit(X, y) assert any(str(w.message) == msg for w in expected_msg) def test_rank_deficient_returns_warning(self): X, y = load_iris(return_X_y=True) X[:, 3] = X[:, 0] + 3 * X[:, 1] rca = RCA() msg = ('The inner covariance matrix is not invertible, ' 'so the transformation matrix may contain Nan values. ' 'You should reduce the dimensionality of your input,' 'for instance using `sklearn.decomposition.PCA` as a ' 'preprocessing step.') with pytest.warns(None) as raised_warnings: rca.fit(X, y) assert any(str(w.message) == msg for w in raised_warnings) def test_deprecation_random_state(self): X, y = make_classification(random_state=42, n_samples=100) rca_supervised = RCA_Supervised(num_chunks=20) msg = ('"random_state" parameter in the `fit` function is ' 'deprecated. Set `random_state` at initialization ' 'instead (when instantiating a new `RCA_Supervised` ' 'object).') with pytest.warns(DeprecationWarning) as raised_warning: rca_supervised.fit(X, y, random_state=np.random) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning_random_state(self): X, y = make_classification(random_state=42, n_samples=100) rca_supervised = RCA_Supervised(num_chunks=20) msg = ('As of v0.5.0, `RCA_Supervised` now uses the ' '`random_state` given at initialization to sample ' 'constraints, not the default `np.random` from the `fit` ' 'method, since this argument is now deprecated. 
' 'This warning will disappear in v0.6.0.') with pytest.warns(ChangedBehaviorWarning) as raised_warning: rca_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.parametrize('num_dims', [None, 2]) def test_deprecation_num_dims_rca(num_dims): X, y = load_iris(return_X_y=True) rca = RCA(num_dims=num_dims) msg = ('"num_dims" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. Use "n_components" instead') with pytest.warns(DeprecationWarning) as raised_warning: rca.fit(X, y) assert any(str(w.message) == msg for w in raised_warning) rca_supervised = RCA_Supervised(num_dims=num_dims, num_chunks=10) msg = ('"num_dims" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. Use "n_components" instead') with pytest.warns(DeprecationWarning) as raised_warning: rca_supervised.fit(X, y) assert any(str(w.message) == msg for w in raised_warning) class TestMLKR(MetricTestCase): def test_iris(self): mlkr = MLKR() mlkr.fit(self.iris_points, self.iris_labels) csep = class_separation(mlkr.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.25) def test_finite_differences(self): X, y = make_regression(n_features=4, random_state=1, n_samples=20) X, y = check_X_y(X, y) M = np.random.randn(2, X.shape[1]) mlkr = MLKR() mlkr.n_iter_ = 0 def fun(M): return mlkr._loss(M, X, y)[0] def grad_fn(M): return mlkr._loss(M, X, y)[1].ravel() rel_diff = check_grad(fun, grad_fn, M.ravel()) / np.linalg.norm(grad_fn(M)) np.testing.assert_almost_equal(rel_diff, 0.) def test_deprecation_A0(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mlkr = MLKR(A0=np.ones_like(X)) msg = ('"A0" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' 'removed in 0.6.0. 
Use "init" instead.') with pytest.warns(DeprecationWarning) as raised_warning: mlkr.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([0.1, 0.2, 0.3, 0.4]) mlkr = MLKR() msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, " "the default init will now be set to 'auto', instead of 'pca'. " "If you still want to use PCA as an init, set init='pca'. " "This warning will disappear in v0.6.0, and `init` parameter's" " default value will be set to 'auto'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: mlkr.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.parametrize('num_dims', [None, 2]) def test_deprecation_num_dims_mlkr(num_dims): # test that a deprecation message is thrown if num_dims is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mlkr = MLKR(num_dims=num_dims) msg = ('"num_dims" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0. 
Use "n_components" instead') with pytest.warns(DeprecationWarning) as raised_warning: mlkr.fit(X, y) assert (str(raised_warning[0].message) == msg) class TestMMC(MetricTestCase): def test_iris(self): # Generate full set of constraints for comparison with reference # implementation mask = self.iris_labels[None] == self.iris_labels[:, None] a, b = np.nonzero(np.triu(mask, k=1)) c, d = np.nonzero(np.triu(~mask, k=1)) # Full metric n_features = self.iris_points.shape[1] mmc = MMC(convergence_threshold=0.01, init=np.eye(n_features) / 10) mmc.fit(*wrap_pairs(self.iris_points, [a, b, c, d])) expected = [[+0.000514, +0.000868, -0.001195, -0.001703], [+0.000868, +0.001468, -0.002021, -0.002879], [-0.001195, -0.002021, +0.002782, +0.003964], [-0.001703, -0.002879, +0.003964, +0.005648]] assert_array_almost_equal(expected, mmc.get_mahalanobis_matrix(), decimal=6) # Diagonal metric mmc = MMC(diagonal=True) mmc.fit(*wrap_pairs(self.iris_points, [a, b, c, d])) expected = [0, 0, 1.210220, 1.228596] assert_array_almost_equal(np.diag(expected), mmc.get_mahalanobis_matrix(), decimal=6) # Supervised Full mmc = MMC_Supervised() mmc.fit(self.iris_points, self.iris_labels) csep = class_separation(mmc.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.15) # Supervised Diagonal mmc = MMC_Supervised(diagonal=True) mmc.fit(self.iris_points, self.iris_labels) csep = class_separation(mmc.transform(self.iris_points), self.iris_labels) self.assertLess(csep, 0.2) def test_deprecation_num_labeled(self): # test that a deprecation message is thrown if num_labeled is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mmc_supervised = MMC_Supervised(num_labeled=np.inf) msg = ('"num_labeled" parameter is not used.' 
' It has been deprecated in version 0.5.0 and will be' ' removed in 0.6.0') assert_warns_message(DeprecationWarning, msg, mmc_supervised.fit, X, y) def test_deprecation_A0(self): # test that a deprecation message is thrown if A0 is set at # initialization # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mmc_supervised = MMC_Supervised(A0=np.ones_like(X)) msg = ('"A0" parameter is not used.' ' It has been deprecated in version 0.5.0 and will be' 'removed in 0.6.0. Use "init" instead.') with pytest.warns(DeprecationWarning) as raised_warning: mmc_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] mmc = MMC(A0=np.ones_like(X)) with pytest.warns(DeprecationWarning) as raised_warning: mmc.fit(pairs, y_pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning(self): # test that a ChangedBehavior warning is thrown about the init, if the # default parameters are used. # TODO: remove in v.0.6 X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mmc_supervised = MMC_Supervised() msg = ("Warning, no init was set (`init=None`). As of version 0.5.0, " "the default init will now be set to 'identity', instead of the " "identity divided by a scaling factor of 10. " "If you still want to use the same init as in previous " "versions, set init=np.eye(d)/10, where d is the dimension " "of your input space (d=pairs.shape[1]). 
" "This warning will disappear in v0.6.0, and `init` parameter's" " default value will be set to 'auto'.") with pytest.warns(ChangedBehaviorWarning) as raised_warning: mmc_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) pairs = np.array([[[-10., 0.], [10., 0.]], [[0., 50.], [0., -60]]]) y_pairs = [1, -1] mmc = MMC() with pytest.warns(ChangedBehaviorWarning) as raised_warning: mmc.fit(pairs, y_pairs) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_deprecation_random_state(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mmc_supervised = MMC_Supervised() msg = ('"random_state" parameter in the `fit` function is ' 'deprecated. Set `random_state` at initialization ' 'instead (when instantiating a new `MMC_Supervised` ' 'object).') with pytest.warns(DeprecationWarning) as raised_warning: mmc_supervised.fit(X, y, random_state=np.random) assert any(msg == str(wrn.message) for wrn in raised_warning) def test_changed_behaviour_warning_random_state(self): X = np.array([[0, 0], [0, 1], [2, 0], [2, 1]]) y = np.array([1, 0, 1, 0]) mmc_supervised = MMC_Supervised() msg = ('As of v0.5.0, `MMC_Supervised` now uses the ' '`random_state` given at initialization to sample ' 'constraints, not the default `np.random` from the `fit` ' 'method, since this argument is now deprecated. 
' 'This warning will disappear in v0.6.0.') with pytest.warns(ChangedBehaviorWarning) as raised_warning: mmc_supervised.fit(X, y) assert any(msg == str(wrn.message) for wrn in raised_warning) @pytest.mark.parametrize(('algo_class', 'dataset'), [(NCA, make_classification()), (MLKR, make_regression())]) def test_verbose(algo_class, dataset, capsys): X, y = dataset model = algo_class(verbose=True) model.fit(X, y) out, _ = capsys.readouterr() lines = re.split('\n+', out) header = '{:>10} {:>20} {:>10}'.format('Iteration', 'Objective Value', 'Time(s)') assert lines[0] == '[{}]'.format(algo_class.__name__) assert lines[1] == '[{}] {}'.format(algo_class.__name__, header) assert lines[2] == '[{}] {}'.format(algo_class.__name__, '-' * len(header)) for line in lines[3:-2]: assert re.match(r"\[" + algo_class.__name__ + r"\]\ *\d+\ *\d\.\d{6}e[+|-]" r"\d+\ *\d+\.\d{2}", line) assert re.match(r"\[" + algo_class.__name__ + r"\] Training took\ *" r"\d+\.\d{2}s\.", lines[-2]) assert lines[-1] == '' @pytest.mark.parametrize(('algo_class', 'dataset'), [(NCA, make_classification()), (MLKR, make_regression(n_features=10))]) def test_no_verbose(dataset, algo_class, capsys): X, y = dataset model = algo_class() model.fit(X, y) out, _ = capsys.readouterr() assert (out == '') @pytest.mark.parametrize(('algo_class', 'dataset'), [(NCA, make_classification()), (MLKR, make_regression(n_features=10))]) def test_convergence_warning(dataset, algo_class): X, y = dataset model = algo_class(max_iter=2, verbose=True) cls_name = model.__class__.__name__ assert_warns_message(ConvergenceWarning, '[{}] {} did not converge'.format(cls_name, cls_name), model.fit, X, y) if __name__ == '__main__': unittest.main()
true
true