hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3230602dc6ce8125868679fb1c35533fbc07f4 | 264 | py | Python | Exercise-1/Q15_diffNum.py | abhay-lal/18CSC207J-APP | 79a955a99837e6d41c89cb1a9e84eb0230c0fa7b | [
"MIT"
] | null | null | null | Exercise-1/Q15_diffNum.py | abhay-lal/18CSC207J-APP | 79a955a99837e6d41c89cb1a9e84eb0230c0fa7b | [
"MIT"
] | null | null | null | Exercise-1/Q15_diffNum.py | abhay-lal/18CSC207J-APP | 79a955a99837e6d41c89cb1a9e84eb0230c0fa7b | [
"MIT"
] | null | null | null | def check(arr):
for num in arr:
if num in arr[arr.index(num)+1:]:
return False
return True
numbers = list(map(int, input("Enter the numbers: ").split()))
if (check(numbers)):
print("Different")
else:
print("Not Different") | 22 | 62 | 0.590909 | def check(arr):
for num in arr:
if num in arr[arr.index(num)+1:]:
return False
return True
numbers = list(map(int, input("Enter the numbers: ").split()))
if (check(numbers)):
print("Different")
else:
print("Not Different") | true | true |
1c3233bbc704222b68e2f5e756163b08e3d8050e | 1,698 | py | Python | Oblig3/eval_on_test.py | fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing | 4d3b2ed56b56e016413ae1544e19ad2a2c0ef047 | [
"MIT"
] | null | null | null | Oblig3/eval_on_test.py | fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing | 4d3b2ed56b56e016413ae1544e19ad2a2c0ef047 | [
"MIT"
] | null | null | null | Oblig3/eval_on_test.py | fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing | 4d3b2ed56b56e016413ae1544e19ad2a2c0ef047 | [
"MIT"
] | null | null | null | #! python3
# coding: utf-8
from argparse import ArgumentParser
from conllu import parse
from ner_eval import Evaluator
def f1(precision, recall, eps=1e-7):
score = 2 * (precision * recall) / (precision + recall + eps)
return score
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--predictions",
"-p",
required=True,
help="path to a CONLLU file with system predictions",
default="predictions.conllu",
)
parser.add_argument(
"--gold",
"-g",
help="path to a CONLLU file with gold scores",
required=True,
default="norne_test_gold.conllu",
)
args = parser.parse_args()
predictions = parse(open(args.predictions, "r").read())
gold = parse(open(args.gold, "r").read())
print(len(gold))
gold_labels = []
for sentence in gold:
sentence = [token["misc"]["name"] for token in sentence]
gold_labels.append(sentence)
predicted_labels = []
for sentence in predictions:
sentence = [token["misc"]["name"] for token in sentence]
predicted_labels.append(sentence)
entities = ["PER", "ORG", "LOC", "GPE_LOC", "GPE_ORG", "PROD", "EVT", "DRV"]
evaluator = Evaluator(gold_labels, predicted_labels, entities)
results, results_agg = evaluator.evaluate()
print("F1 scores:")
for entity in results_agg:
prec = results_agg[entity]["strict"]["precision"]
rec = results_agg[entity]["strict"]["recall"]
print(f"{entity}:\t{f1(prec, rec):.4f}")
prec = results["strict"]["precision"]
rec = results["strict"]["recall"]
print(f"Overall score: {f1(prec, rec):.4f}")
| 28.3 | 80 | 0.617786 |
from argparse import ArgumentParser
from conllu import parse
from ner_eval import Evaluator
def f1(precision, recall, eps=1e-7):
score = 2 * (precision * recall) / (precision + recall + eps)
return score
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--predictions",
"-p",
required=True,
help="path to a CONLLU file with system predictions",
default="predictions.conllu",
)
parser.add_argument(
"--gold",
"-g",
help="path to a CONLLU file with gold scores",
required=True,
default="norne_test_gold.conllu",
)
args = parser.parse_args()
predictions = parse(open(args.predictions, "r").read())
gold = parse(open(args.gold, "r").read())
print(len(gold))
gold_labels = []
for sentence in gold:
sentence = [token["misc"]["name"] for token in sentence]
gold_labels.append(sentence)
predicted_labels = []
for sentence in predictions:
sentence = [token["misc"]["name"] for token in sentence]
predicted_labels.append(sentence)
entities = ["PER", "ORG", "LOC", "GPE_LOC", "GPE_ORG", "PROD", "EVT", "DRV"]
evaluator = Evaluator(gold_labels, predicted_labels, entities)
results, results_agg = evaluator.evaluate()
print("F1 scores:")
for entity in results_agg:
prec = results_agg[entity]["strict"]["precision"]
rec = results_agg[entity]["strict"]["recall"]
print(f"{entity}:\t{f1(prec, rec):.4f}")
prec = results["strict"]["precision"]
rec = results["strict"]["recall"]
print(f"Overall score: {f1(prec, rec):.4f}")
| true | true |
1c323454c7cfcd8d98b8fce09c225ab1b6acb53f | 161 | py | Python | main.py | AKSHITA-SRIVASTAVA/PhotoOrganiser | f00490219bd05ff977669f30b88afe708e182d9a | [
"MIT"
] | null | null | null | main.py | AKSHITA-SRIVASTAVA/PhotoOrganiser | f00490219bd05ff977669f30b88afe708e182d9a | [
"MIT"
] | null | null | null | main.py | AKSHITA-SRIVASTAVA/PhotoOrganiser | f00490219bd05ff977669f30b88afe708e182d9a | [
"MIT"
] | null | null | null | import os
cwd = os.getcwd()
os.chdir(cwd+'/images')
cwd = os.getcwd()
file_list = os.listdir(cwd)
for i in range(len(file_list)):
print(file_list[i])
| 11.5 | 31 | 0.652174 | import os
cwd = os.getcwd()
os.chdir(cwd+'/images')
cwd = os.getcwd()
file_list = os.listdir(cwd)
for i in range(len(file_list)):
print(file_list[i])
| true | true |
1c32349232bd3da9067aafed3d454e41f9ef4bee | 7,628 | py | Python | test/core/028-dynamic-hierarchy-b/workflow.py | spxiwh/pegasus | ebe3e205ae34c1721c540465712da557979c7437 | [
"Apache-2.0"
] | null | null | null | test/core/028-dynamic-hierarchy-b/workflow.py | spxiwh/pegasus | ebe3e205ae34c1721c540465712da557979c7437 | [
"Apache-2.0"
] | null | null | null | test/core/028-dynamic-hierarchy-b/workflow.py | spxiwh/pegasus | ebe3e205ae34c1721c540465712da557979c7437 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import logging
from pathlib import Path
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)
TOP_DIR = Path(__file__).resolve().parent
# --- Properties ---------------------------------------------------------------
# properties that will be used by both the outer workflow and inner diamond workflow
props = Properties()
props["pegasus.dir.storage.deep"] = "false"
props["pegasus.condor.logs.symlink"] = "false"
props["pegasus.data.configuration"] = "condorio"
props["pegasus.data.configuration"] = "nonsharedfs"
props["pegasus.dir.staging.mapper"] = "Hashed"
props["pegasus.dir.submit.mapper"] = "Hashed"
props.write()
# properties that will be used by inner diamond workflow
inner_props = Properties()
inner_props["pegasus.catalog.transformation.file"] = "inner_diamond_workflow_tc.yml"
inner_props["pegasus.catalog.replica.file"] = "inner_diamond_workflow_rc.yml"
inner_props["pegasus.catalog.site.file"] = "sites.yml"
inner_props["pegasus.dir.storage.deep"] = "false"
inner_props["pegasus.condor.logs.symlink"] = "false"
inner_props.write("inner_diamond_workflow.pegasus.properties")
# --- Sites --------------------------------------------------------------------
sc = SiteCatalog()
# local site
local_site = Site(name="local", arch=Arch.X86_64, os_type=OS.LINUX, os_release="rhel", os_version="7")
local_site.add_directories(
Directory(Directory.SHARED_SCRATCH, TOP_DIR / "work/local-site/scratch")
.add_file_servers(FileServer("file://{}".format(TOP_DIR / "work/local-site/scratch"), Operation.ALL)),
Directory(Directory.LOCAL_STORAGE, TOP_DIR / "outputs/local-site")
.add_file_servers(FileServer("file://{}".format(TOP_DIR / "outputs/local-site"), Operation.ALL))
)
# CCG site
ccg_site = Site(name="CCG", arch=Arch.X86_64, os_type=OS.LINUX)
ccg_site.add_grids(
Grid(grid_type=Grid.GT5, contact="obelix.isi.edu/jobmanager-fork", scheduler_type=Scheduler.FORK, job_type=SupportedJobs.AUXILLARY),
Grid(grid_type=Grid.GT5, contact="obelix.isi.edu/jobmanager-condor", scheduler_type=Scheduler.CONDOR, job_type=SupportedJobs.COMPUTE),
)
ccg_site.add_directories(
Directory(Directory.SHARED_SCRATCH, "/lizard/scratch-90-days/CCG/scratch")
.add_file_servers(FileServer("gsiftp://obelix.isi.edu/lizard/scratch-90-days/CCG/scratch", Operation.ALL)),
Directory(Directory.LOCAL_STORAGE, "/lizard/scratch-90-days/CCG/outputs")
.add_file_servers(FileServer("gsiftp://obelix.isi.edu/lizard/scratch-90-days/CCG/outputs", Operation.ALL))
)
ccg_site.add_env(PEGASUS_HOME="/usr/bin")
sc.add_sites(local_site, ccg_site)
sc.write()
# --- Transformations ----------------------------------------------------------
# create transformation catalog for the outer level workflow
sleep = Transformation(
name="sleep",
site="CCG",
pfn="/bin/sleep",
is_stageable=False,
arch=Arch.X86_64,
os_type=OS.LINUX,
)
generate_diamond_wf = Transformation(
name="generate_inner_diamond_workflow.py",
site="local",
pfn=TOP_DIR / "generate_inner_diamond_workflow.py",
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
os_release="rhel",
os_version="7"
)
tc = TransformationCatalog()
tc.add_transformations(sleep, generate_diamond_wf)
tc.write()
# create transformation catalog for the inner diamond workflow
preprocess = Transformation(
name="preprocess",
site="local",
pfn="/usr/bin/pegasus-keg",
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
os_release="rhel",
os_version="7"
)
findrange = Transformation(
name="findrange",
site="local",
pfn="/usr/bin/pegasus-keg",
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
os_release="rhel",
os_version="7"
)
analyze = Transformation(
name="analyze",
site="local",
pfn="/usr/bin/pegasus-keg",
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
os_release="rhel",
os_version="7"
)
inner_diamond_workflow_tc = TransformationCatalog()
inner_diamond_workflow_tc.add_transformations(preprocess, findrange, analyze)
inner_diamond_workflow_tc.write("inner_diamond_workflow_tc.yml")
# --- Replicas -----------------------------------------------------------------
with open("f.a", "w") as f:
f.write("Sample input file for the first inner dax job.")
# replica catalog for the inner diamond workflow
inner_diamond_workflow_rc = ReplicaCatalog()
inner_diamond_workflow_rc.add_replica(site="local", lfn="f.a", pfn=TOP_DIR / "f.a")
inner_diamond_workflow_rc.write("inner_diamond_workflow_rc.yml")
# replica catalog for the outer workflow
rc = ReplicaCatalog()
rc.add_replica(site="local", lfn="inner_diamond_workflow.pegasus.properties", pfn=TOP_DIR / "inner_diamond_workflow.pegasus.properties")
rc.add_replica(site="local", lfn="inner_diamond_workflow_rc.yml", pfn =TOP_DIR / "inner_diamond_workflow_rc.yml")
rc.add_replica(site="local", lfn="inner_diamond_workflow_tc.yml", pfn=TOP_DIR / "inner_diamond_workflow_tc.yml")
rc.add_replica(site="local", lfn="sites.yml", pfn=TOP_DIR / "sites.yml")
rc.add_replica(site="local", lfn="inner_sleep_workflow.yml", pfn=TOP_DIR / "inner_sleep_workflow.yml")
rc.write()
# --- Workflow -----------------------------------------------------------------
wf = Workflow("hierarchical-workflow")
# job to generate the diamond workflow
diamond_wf_file = File("inner_diamond_workflow.yml")
generate_diamond_wf_job = Job(generate_diamond_wf, _id="diamond_workflow_gen")\
.add_outputs(diamond_wf_file)
# job to plan and run the diamond workflow
diamond_wf_job = SubWorkflow(file=diamond_wf_file, is_planned=False, _id="diamond_subworkflow")\
.add_args(
"--conf",
"inner_diamond_workflow.pegasus.properties",
"--output-sites",
"local",
"-vvv",
"--basename",
"inner"
)\
.add_inputs(
File("inner_diamond_workflow.pegasus.properties"),
File("inner_diamond_workflow_rc.yml"),
File("inner_diamond_workflow_tc.yml"),
File("sites.yml"),
)
sleep_wf_file = File("inner_sleep_workflow.yml")
sleep_wf_job = SubWorkflow(file=sleep_wf_file, is_planned=False, _id="sleep_subworkflow")\
.add_args(
"--output-sites",
"local",
"-vvv"
)
sleep_job = Job(sleep, _id="sleep_job").add_args(5)
wf.add_jobs(generate_diamond_wf_job, diamond_wf_job, sleep_wf_job, sleep_job)
wf.add_dependency(generate_diamond_wf_job, children=[diamond_wf_job])
wf.add_dependency(diamond_wf_job, children=[sleep_wf_job])
wf.add_dependency(sleep_wf_job, children=[sleep_job])
wf.plan(
sites=["local", "CCG"],
output_sites=["local"],
dir="work",
verbose=3,
submit=True
)
| 39.937173 | 138 | 0.613529 |
import logging
from pathlib import Path
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)
TOP_DIR = Path(__file__).resolve().parent
props = Properties()
props["pegasus.dir.storage.deep"] = "false"
props["pegasus.condor.logs.symlink"] = "false"
props["pegasus.data.configuration"] = "condorio"
props["pegasus.data.configuration"] = "nonsharedfs"
props["pegasus.dir.staging.mapper"] = "Hashed"
props["pegasus.dir.submit.mapper"] = "Hashed"
props.write()
inner_props = Properties()
inner_props["pegasus.catalog.transformation.file"] = "inner_diamond_workflow_tc.yml"
inner_props["pegasus.catalog.replica.file"] = "inner_diamond_workflow_rc.yml"
inner_props["pegasus.catalog.site.file"] = "sites.yml"
inner_props["pegasus.dir.storage.deep"] = "false"
inner_props["pegasus.condor.logs.symlink"] = "false"
inner_props.write("inner_diamond_workflow.pegasus.properties")
sc = SiteCatalog()
local_site = Site(name="local", arch=Arch.X86_64, os_type=OS.LINUX, os_release="rhel", os_version="7")
local_site.add_directories(
Directory(Directory.SHARED_SCRATCH, TOP_DIR / "work/local-site/scratch")
.add_file_servers(FileServer("file://{}".format(TOP_DIR / "work/local-site/scratch"), Operation.ALL)),
Directory(Directory.LOCAL_STORAGE, TOP_DIR / "outputs/local-site")
.add_file_servers(FileServer("file://{}".format(TOP_DIR / "outputs/local-site"), Operation.ALL))
)
ccg_site = Site(name="CCG", arch=Arch.X86_64, os_type=OS.LINUX)
ccg_site.add_grids(
Grid(grid_type=Grid.GT5, contact="obelix.isi.edu/jobmanager-fork", scheduler_type=Scheduler.FORK, job_type=SupportedJobs.AUXILLARY),
Grid(grid_type=Grid.GT5, contact="obelix.isi.edu/jobmanager-condor", scheduler_type=Scheduler.CONDOR, job_type=SupportedJobs.COMPUTE),
)
ccg_site.add_directories(
Directory(Directory.SHARED_SCRATCH, "/lizard/scratch-90-days/CCG/scratch")
.add_file_servers(FileServer("gsiftp://obelix.isi.edu/lizard/scratch-90-days/CCG/scratch", Operation.ALL)),
Directory(Directory.LOCAL_STORAGE, "/lizard/scratch-90-days/CCG/outputs")
.add_file_servers(FileServer("gsiftp://obelix.isi.edu/lizard/scratch-90-days/CCG/outputs", Operation.ALL))
)
ccg_site.add_env(PEGASUS_HOME="/usr/bin")
sc.add_sites(local_site, ccg_site)
sc.write()
sleep = Transformation(
name="sleep",
site="CCG",
pfn="/bin/sleep",
is_stageable=False,
arch=Arch.X86_64,
os_type=OS.LINUX,
)
generate_diamond_wf = Transformation(
name="generate_inner_diamond_workflow.py",
site="local",
pfn=TOP_DIR / "generate_inner_diamond_workflow.py",
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
os_release="rhel",
os_version="7"
)
tc = TransformationCatalog()
tc.add_transformations(sleep, generate_diamond_wf)
tc.write()
preprocess = Transformation(
name="preprocess",
site="local",
pfn="/usr/bin/pegasus-keg",
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
os_release="rhel",
os_version="7"
)
findrange = Transformation(
name="findrange",
site="local",
pfn="/usr/bin/pegasus-keg",
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
os_release="rhel",
os_version="7"
)
analyze = Transformation(
name="analyze",
site="local",
pfn="/usr/bin/pegasus-keg",
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
os_release="rhel",
os_version="7"
)
inner_diamond_workflow_tc = TransformationCatalog()
inner_diamond_workflow_tc.add_transformations(preprocess, findrange, analyze)
inner_diamond_workflow_tc.write("inner_diamond_workflow_tc.yml")
with open("f.a", "w") as f:
f.write("Sample input file for the first inner dax job.")
inner_diamond_workflow_rc = ReplicaCatalog()
inner_diamond_workflow_rc.add_replica(site="local", lfn="f.a", pfn=TOP_DIR / "f.a")
inner_diamond_workflow_rc.write("inner_diamond_workflow_rc.yml")
rc = ReplicaCatalog()
rc.add_replica(site="local", lfn="inner_diamond_workflow.pegasus.properties", pfn=TOP_DIR / "inner_diamond_workflow.pegasus.properties")
rc.add_replica(site="local", lfn="inner_diamond_workflow_rc.yml", pfn =TOP_DIR / "inner_diamond_workflow_rc.yml")
rc.add_replica(site="local", lfn="inner_diamond_workflow_tc.yml", pfn=TOP_DIR / "inner_diamond_workflow_tc.yml")
rc.add_replica(site="local", lfn="sites.yml", pfn=TOP_DIR / "sites.yml")
rc.add_replica(site="local", lfn="inner_sleep_workflow.yml", pfn=TOP_DIR / "inner_sleep_workflow.yml")
rc.write()
wf = Workflow("hierarchical-workflow")
diamond_wf_file = File("inner_diamond_workflow.yml")
generate_diamond_wf_job = Job(generate_diamond_wf, _id="diamond_workflow_gen")\
.add_outputs(diamond_wf_file)
diamond_wf_job = SubWorkflow(file=diamond_wf_file, is_planned=False, _id="diamond_subworkflow")\
.add_args(
"--conf",
"inner_diamond_workflow.pegasus.properties",
"--output-sites",
"local",
"-vvv",
"--basename",
"inner"
)\
.add_inputs(
File("inner_diamond_workflow.pegasus.properties"),
File("inner_diamond_workflow_rc.yml"),
File("inner_diamond_workflow_tc.yml"),
File("sites.yml"),
)
sleep_wf_file = File("inner_sleep_workflow.yml")
sleep_wf_job = SubWorkflow(file=sleep_wf_file, is_planned=False, _id="sleep_subworkflow")\
.add_args(
"--output-sites",
"local",
"-vvv"
)
sleep_job = Job(sleep, _id="sleep_job").add_args(5)
wf.add_jobs(generate_diamond_wf_job, diamond_wf_job, sleep_wf_job, sleep_job)
wf.add_dependency(generate_diamond_wf_job, children=[diamond_wf_job])
wf.add_dependency(diamond_wf_job, children=[sleep_wf_job])
wf.add_dependency(sleep_wf_job, children=[sleep_job])
wf.plan(
sites=["local", "CCG"],
output_sites=["local"],
dir="work",
verbose=3,
submit=True
)
| true | true |
1c3234ec71ed87182691177575f951332dc73e55 | 26,635 | py | Python | pysystests/app/rest_client_tasks.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | 1 | 2020-08-31T18:51:45.000Z | 2020-08-31T18:51:45.000Z | pysystests/app/rest_client_tasks.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | null | null | null | pysystests/app/rest_client_tasks.py | couchbaselabs/testrunner-bharath | 96af90070da2140cc11c549db7403f5ea3b76d34 | [
"Apache-2.0"
] | 2 | 2020-07-24T07:12:01.000Z | 2022-03-17T23:43:28.000Z | ##!/usr/bin/env python
"""
rest tasks
"""
import base64
import sys
sys.path=["../lib"] + sys.path
from membase.api.rest_client import RestConnection
from remote.remote_util import RemoteMachineShellConnection
from couchbase_helper.document import View
from app.celery import celery
import app
import testcfg as cfg
import json
import time
import random
import eventlet
from eventlet.green import urllib2
from celery.utils.log import get_task_logger
from cache import ObjCacher, CacheHelper
from testconstants import STANDARD_BUCKET_PORT
logger = get_task_logger(__name__)
if cfg.SERIESLY_IP != '':
from seriesly import Seriesly
###
SDK_IP = '127.0.0.1'
SDK_PORT = 50008
###
@celery.task
def multi_query(count, design_doc_name, view_name, params = None, bucket = "default", password = "", type_ = "view", batch_size = 100, hosts = None):
    """Issue *count* concurrent view/spatial queries against a design doc.

    Fans the same request out over an eventlet green pool of *batch_size*
    workers, stores the latency of the last completed query into seriesly
    (when cfg.SERIESLY_IP is configured) and logs a sample of the last
    response body.  *password* is accepted but currently unused.
    """
    if params is not None:
        # urlencode the view query parameters into the request string
        params = urllib2.urllib.urlencode(params)
    pool = eventlet.GreenPool(batch_size)
    api = '%s/_design/%s/_%s/%s?%s' % (bucket,
                                       design_doc_name, type_,
                                       view_name, params)
    qtime = data = url = None
    # run the identical request `count` times; the loop body is empty —
    # we only keep the last (qtime, data, url) triple for reporting below
    args = dict(api=api, hosts=hosts)
    for qtime, data, url in pool.imap(send_query, [args for i in range(count)]):
        pass
    if cfg.SERIESLY_IP != '' and qtime is not None:
        # store the most recent query response time 'qtime' into seriesly
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        db = None
        if 'fast' in seriesly.list_dbs():
            db = 'fast'
        else:
            # fall back to the per-bucket latency database from the cache
            bucketStatus = app.workload_manager.BucketStatus.from_cache(bucket) or app.workload_manager.BucketStatus(bucket)
            db = bucketStatus.latency_db
        if db not in seriesly.list_dbs():
            seriesly.create_db(db)
        if db is not None:
            seriesly[db].append({'query_latency' : qtime})
    # log to logs/celery-query.log
    try:
        rc = data.read()[0:200]
    except Exception:
        rc = "exception reading query response"
    logger.error('\n')
    logger.error('url: %s' % url)
    logger.error('latency: %s' % qtime)
    logger.error('data: %s' % rc)
def send_query(args):
    """Send one view query and return ``(latency, response, url)``.

    args: dict with 'api' (path below /couchBase/) and 'hosts' (optional
    list of host:port strings).  A random host is picked when hosts are
    given, otherwise the configured couchbase node is used.  On request
    failure latency/response are returned as None.
    """
    api = args['api']
    hosts = args['hosts']
    if hosts and len(hosts) > 0:
        host = hosts[random.randint(0, len(hosts) - 1)]
        capiUrl = "http://%s/couchBase/" % (host)
    else:
        capiUrl = "http://%s:%s/couchBase/" % (cfg.COUCHBASE_IP, cfg.COUCHBASE_PORT)
    url = capiUrl + api
    qtime, data = None, None
    try:
        qtime, data = timed_url_request(url)
    except urllib.error.URLError as ex:
        # NOTE(review): module-level imports bring in eventlet.green.urllib2,
        # not urllib — this handler may itself NameError; confirm py3 imports.
        logger.error("Request error: %s" % ex)
    return qtime, data, url
def timed_url_request(url):
    """Fetch *url* and return ``(elapsed_seconds, response)``."""
    began = time.time()
    response = url_request(url)
    elapsed = time.time() - began
    return elapsed, response
def default_url_headers(username=None, password=None):
    """Build default HTTP headers carrying Basic authentication.

    Args:
        username, password: credentials to encode; when omitted they
            default to the configured couchbase admin credentials.

    Returns:
        dict with Content-Type, Authorization and Accept headers.

    Fixes: ``base64.encodebytes`` requires bytes (passing str raises
    TypeError on py3) and appends a trailing newline that would corrupt
    the Authorization header — encode first, then strip.
    """
    if username is None:
        username = cfg.COUCHBASE_USER
    if password is None:
        password = cfg.COUCHBASE_PWD
    credentials = '%s:%s' % (username, password)
    authorization = base64.encodebytes(credentials.encode('utf-8')).decode('ascii').strip()
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Basic %s' % authorization,
               'Accept': '*/*'}
    return headers
def url_request(url, headers = None):
    """Perform a GET on *url* and return the open response object.

    Args:
        headers: optional dict of HTTP headers; defaults to
            ``default_url_headers()`` (JSON content type + Basic auth).

    Fix: the module imports eventlet's ``urllib2``, never ``urllib``, so
    the references to ``urllib.request`` below raised NameError at call
    time — bind the module locally.
    """
    import urllib.request
    if headers is None:
        headers = default_url_headers()
    req = urllib.request.Request(url, headers = headers)
    data = urllib.request.urlopen(req)
    return data
def _send_msg(message):
    """Serialize *message* as JSON and push it to the local SDK server.

    Fix: sockets require a bytes-like payload on py3 — ``json.dumps``
    returns str, so encode it before ``sendall`` (previously raised
    TypeError).
    """
    sdk_client = eventlet.connect((SDK_IP, SDK_PORT))
    sdk_client.sendall(json.dumps(message).encode('utf-8'))
@celery.task
def perform_bucket_create_tasks(bucketMsg):
    """Create every bucket flavor requested in *bucketMsg*."""
    rest = create_rest()
    # message key -> creator, checked in the original order
    creators = (("default", create_default_buckets),
                ("sasl", create_sasl_buckets),
                ("standard", create_standard_buckets),
                ("tpcc", create_tpcc_buckets))
    for key, make_buckets in creators:
        if key in bucketMsg:
            make_buckets(rest, bucketMsg[key])
def parseBucketMsg(bucket):
    """Normalize a raw bucket-create message into rest-client options.

    Missing keys fall back to sane defaults; numeric fields are coerced
    to int and 'priority' maps 'high' -> 8 (all else -> 3).
    """
    parsed = {'count': 1,
              'ramQuotaMB': 1000,
              'replicas': 1,
              'replica_index': 1,
              'type': 'couchbase',
              'priority': 3,
              'eviction_policy': 'valueOnly'}
    # integer-valued overrides: message key -> parsed key
    int_fields = (("count", "count"),
                  ("quota", "ramQuotaMB"),
                  ("replicas", "replicas"),
                  ("replica_index", "replica_index"))
    for src, dst in int_fields:
        if src in bucket:
            parsed[dst] = int(bucket[src])
    if "type" in bucket:
        parsed['type'] = bucket['type']
    if "priority" in bucket:
        parsed['priority'] = 8 if bucket['priority'] == 'high' else 3
    if "eviction_policy" in bucket:
        parsed['eviction_policy'] = bucket['eviction_policy']
    return parsed
def create_default_buckets(rest, bucketMsg):
    """Create the single no-auth 'default' bucket described by *bucketMsg*."""
    opts = parseBucketMsg(bucketMsg)
    rest.create_bucket(bucket="default",
                       ramQuotaMB=opts['ramQuotaMB'],
                       replicaNumber=opts['replicas'],
                       proxyPort=11211,
                       authType="none",
                       saslPassword=None,
                       bucketType=opts['type'],
                       replica_index=opts['replica_index'],
                       threadsNumber=opts['priority'],
                       evictionPolicy=opts['eviction_policy'])
def create_tpcc_buckets(rest, bucketMsg):
    """Create the fixed set of TPCC table buckets with preset RAM quotas."""
    opts = parseBucketMsg(bucketMsg)
    quotas = {"ITEM": "2000", "ORDERS": "3000", "ORDER_LINE": "3000",
              "NEW_ORDER": "1000", "STOCK": "2000", "CUSTOMER": "2000",
              "DISTRICT": "1000", "WAREHOUSE": "500", "HISTORY": "1000"}
    for name in quotas:
        rest.create_bucket(bucket=name,
                           ramQuotaMB=quotas[name],
                           replicaNumber=opts['replicas'],
                           proxyPort=11211,
                           authType="sasl",
                           bucketType=opts['type'],
                           replica_index=opts['replica_index'],
                           threadsNumber=opts['priority'],
                           evictionPolicy=opts['eviction_policy'])
def create_sasl_buckets(rest, bucketMsg):
    """Create `count` sasl-auth buckets: saslbucket, saslbucket1, ..."""
    opts = parseBucketMsg(bucketMsg)
    for idx in range(opts['count']):
        # first bucket carries no numeric suffix
        suffix = str(idx) if idx else ""
        rest.create_bucket(bucket="saslbucket" + suffix,
                           ramQuotaMB=opts['ramQuotaMB'],
                           replicaNumber=opts['replicas'],
                           proxyPort=11211,
                           authType="sasl",
                           saslPassword="password",
                           bucketType=opts['type'],
                           replica_index=opts['replica_index'],
                           threadsNumber=opts['priority'],
                           evictionPolicy=opts['eviction_policy'])
def create_standard_buckets(rest, bucketMsg):
    """Create `count` no-auth buckets on sequential dedicated proxy ports."""
    opts = parseBucketMsg(bucketMsg)
    for idx in range(opts['count']):
        # first bucket carries no numeric suffix
        suffix = str(idx) if idx else ""
        rest.create_bucket(bucket="standardbucket" + suffix,
                           ramQuotaMB=opts['ramQuotaMB'],
                           replicaNumber=opts['replicas'],
                           proxyPort=STANDARD_BUCKET_PORT + idx,
                           authType="none",
                           saslPassword=None,
                           bucketType=opts['type'],
                           replica_index=opts['replica_index'],
                           threadsNumber=opts['priority'],
                           evictionPolicy=opts['eviction_policy'])
@celery.task
def perform_view_tasks(viewMsgList):
    """Create and/or delete design docs and views described by messages.

    *viewMsgList* is a message dict or a list of them; each message may
    carry a 'create' list of view specs (grouped into design docs) and a
    'delete' list of view specs.
    """
    rest = create_rest()
    # allow a bare dict as shorthand for a single-message list
    if isinstance(viewMsgList, dict):
        viewMsgList = [viewMsgList]
    for viewMsg in viewMsgList:
        if "create" in viewMsg:
            # group requested views by their design-doc name
            ddocMsg = parseDdocMsg(viewMsg['create'])
            for ddoc_name, views in ddocMsg.items():
                view_list = []
                bucket_name = ''
                for view in views:
                    view_list.append(View(view['view_name'], view['map_func'], view['red_func'],
                                          view['dev_view'], view['is_spatial']))
                    # views within one ddoc are assumed to share a bucket;
                    # the last spec's bucket wins
                    bucket_name = view['bucket_name']
                bucket_obj = rest.get_bucket(bucket_name, 2, 2)
                rest.create_ddoc(ddoc_name, bucket_obj, view_list)
        if "delete" in viewMsg:
            for view in viewMsg['delete']:
                viewMsgParsed = parseViewMsg(view)
                bucket_obj = rest.get_bucket(viewMsgParsed['bucket_name'], 2, 2)
                rest.delete_view(bucket_obj, viewMsgParsed['ddoc_name'])
def parseDdocMsg(views):
    """Parse each raw view spec and group them by design-doc name."""
    grouped = {}
    for raw_view in views:
        parsed = parseViewMsg(raw_view)
        grouped.setdefault(parsed['ddoc_name'], []).append(parsed)
    return grouped
def parseViewMsg(view):
    """Expand a raw view spec into a fully-defaulted view description.

    Boolean flags ('dev', 'spatial') arrive as the literal strings
    "True"/"False"; any other value leaves the default untouched.
    """
    parsed = {'ddoc_name': 'ddoc1',
              'view_name': 'view1',
              'map_func': 'function (doc) { emit(null, doc);}',
              'red_func': None,
              'dev_view': True,
              'is_spatial': False,
              'bucket_name': 'default'}
    # straight copy-through fields: message key -> parsed key
    passthrough = (('ddoc', 'ddoc_name'),
                   ('view', 'view_name'),
                   ('map', 'map_func'),
                   ('reduce', 'red_func'),
                   ('bucket', 'bucket_name'))
    for src, dst in passthrough:
        if src in view:
            parsed[dst] = view[src]
    for src, dst in (('dev', 'dev_view'), ('spatial', 'is_spatial')):
        if src in view:
            if view[src] == "True":
                parsed[dst] = True
            elif view[src] == "False":
                parsed[dst] = False
    return parsed
@celery.task
def perform_admin_tasks(adminMsg, cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
    """Apply one cluster-admin message: add/remove/failover/restart nodes.

    adminMsg keys used: rebalance_in, rebalance_out, group, services,
    failover, auto_failover, only_failover, add_back, involve_orchestrator,
    soft_restart, hard_restart.  Rebalance is triggered only when no
    restart was requested and only_failover is falsy.
    """
    app.workload_manager.updateClusterStatus()
    clusterStatus = CacheHelper.clusterstatus(cluster_id)
    if clusterStatus is None:
        logger.error("Unable to fetch clusterStatus from cache: ")
        return
    rest = clusterStatus.node_rest()
    # Add nodes
    servers = adminMsg["rebalance_in"]
    zone_name = adminMsg["group"]
    if adminMsg["services"]:
        add_nodes(rest, servers, cluster_id, zone_name, adminMsg["services"])
    else:
        add_nodes(rest, servers, cluster_id, zone_name)
    # Get all nodes (otp ids) currently known to the cluster
    allNodes = []
    for node in rest.node_statuses():
        allNodes.append(node.id)
    # Remove nodes
    servers = adminMsg["rebalance_out"]
    toBeEjectedNodes = remove_nodes(rest, servers, adminMsg["involve_orchestrator"], cluster_id)
    # Failover Node
    servers = adminMsg["failover"]
    auto_failover_servers = adminMsg["auto_failover"]
    only_failover = adminMsg["only_failover"]
    add_back_servers = adminMsg["add_back"]
    failoverNodes = failover_nodes(rest, servers, only_failover, adminMsg["involve_orchestrator"], cluster_id)
    autoFailoverNodes = auto_failover_nodes(rest, auto_failover_servers, only_failover, adminMsg["involve_orchestrator"], cluster_id)
    # refresh the cached status (membership may have changed above) and
    # re-acquire a rest handle before adding nodes back
    app.workload_manager.updateClusterStatus()
    clusterStatus = CacheHelper.clusterstatus(cluster_id)
    rest = clusterStatus.node_rest()
    addBackNodes = add_back_nodes(rest, add_back_servers, autoFailoverNodes+failoverNodes)
    # failed-over nodes are ejected unless they were added back
    toBeEjectedNodes.extend(failoverNodes)
    toBeEjectedNodes.extend(autoFailoverNodes)
    for node in addBackNodes:
        toBeEjectedNodes.remove(node)
    # SoftRestart a node
    servers = adminMsg["soft_restart"]
    restart(servers, cluster_id=cluster_id)
    # HardRestart a node
    servers = adminMsg["hard_restart"]
    restart(servers, type='hard', cluster_id=cluster_id)
    if adminMsg["soft_restart"] == '' and adminMsg["hard_restart"] == '':
        if not only_failover and (len(allNodes) > 0 or len(toBeEjectedNodes) > 0):
            logger.error("Rebalance")
            logger.error(allNodes)
            logger.error(toBeEjectedNodes)
            rest.rebalance(otpNodes=allNodes, ejectedNodes=toBeEjectedNodes)
            # do a soft rest on ejectedNodes that were failed over
            logger.error(toBeEjectedNodes)
            restartNodes = ""
            for node in toBeEjectedNodes:
                if node in (failoverNodes + autoFailoverNodes):
                    if '@' in node: # ns_X@hostname formated
                        node = node.split('@')[1]
                    restartNodes = "%s %s" % (node, restartNodes)
            if len(restartNodes):
                restart(restartNodes)
def monitorRebalance():
    """Return rest.monitorRebalance()'s result via a fresh connection."""
    return create_rest().monitorRebalance()
@celery.task
def perform_xdcr_tasks(xdcrMsg):
    """Link this cluster to a remote site and start XDCR replications.

    xdcrMsg keys used: dest_cluster_name (also the remote reference name),
    dest_cluster_rest_username, dest_cluster_rest_pwd, buckets, and an
    optional filter_expression dict of bucket -> filter string.
    """
    logger.error(xdcrMsg)
    src_master = create_server_obj()
    remoteHost = None
    remotePort = 8091
    # dest_cluster_name is used as remoteRef
    dest_cluster_name = remoteRef = str(xdcrMsg['dest_cluster_name'])
    if "COUCHBASE_IP" in cfg.REMOTE_SITES[remoteRef]:
        remote_ip = cfg.REMOTE_SITES[remoteRef]["COUCHBASE_IP"]
        if "COUCHBASE_PORT" in cfg.REMOTE_SITES[remoteRef]:
            remote_port = cfg.REMOTE_SITES[remoteRef]["COUCHBASE_PORT"]
        # NOTE(review): when COUCHBASE_PORT is absent, remote_port stays
        # unbound (the remotePort=8091 default above is never read) and the
        # create_server_obj call below raises NameError — likely a naming
        # bug; confirm against testcfg.REMOTE_SITES shape.
    else:
        logger.error("Cannot find remote site %s in testcfg.REMOTE_SITES: " % (remoteRef))
        return
    dest_master = create_server_obj(server_ip=remote_ip, port=remote_port,
                                    username=xdcrMsg['dest_cluster_rest_username'],
                                    password=xdcrMsg['dest_cluster_rest_pwd'])
    xdcr_link_cluster(src_master, dest_master, dest_cluster_name)
    buckets = xdcrMsg.get("buckets")
    replication_filters = xdcrMsg.get("filter_expression")
    for bucket in buckets:
        xdcr_params={}
        # attach a per-bucket filter expression when one was supplied
        if replication_filters and bucket in list(replication_filters.keys()):
            xdcr_params["filterExpression"] = replication_filters[bucket]
        xdcr_start_replication(src_master, dest_cluster_name, bucket, xdcr_params)
def xdcr_link_cluster(src_master, dest_master, dest_cluster_name):
    """Register *dest_master*'s cluster as a remote reference on the source."""
    src_rest = RestConnection(src_master)
    src_rest.add_remote_cluster(dest_master.ip,
                                dest_master.port,
                                dest_master.rest_username,
                                dest_master.rest_password,
                                dest_cluster_name)
def xdcr_start_replication(src_master, dest_cluster_name, bucket_name, xdcr_params):
    """Start continuous XDCR replication for the bucket named *bucket_name*."""
    src_rest = RestConnection(src_master)
    matching = (b for b in src_rest.get_buckets() if b.name == bucket_name)
    for bucket in matching:
        rep_id = src_rest.start_replication("continuous",
                                            bucket.name,
                                            dest_cluster_name,
                                            xdcr_params=xdcr_params)
        logger.error("rep_id: %s" % rep_id)
def add_nodes(rest, servers='', cluster_id=cfg.CB_CLUSTER_TAG+"_status", zone_name = '', services=None):
    """Add nodes to the cluster (they join on the next rebalance).

    servers -- either dotted hostnames separated by spaces, '' for none, or a
               plain integer count, in which case that many hosts are picked
               from the pool of known hosts not yet in the cluster.
    zone_name -- optional server group; created on demand.
    services -- optional service list forwarded to rest.add_node.
    """
    # create zone if it does not exist
    if zone_name != '':
        if rest.is_zone_exist(zone_name) == False:
            rest.add_zone(zone_name)
    # a '.' means explicit hostnames were given rather than a count
    if servers.find('.') != -1 or servers == '':
        servers = servers.split()
    else:
        clusterStatus = CacheHelper.clusterstatus(cluster_id)
        count = int(servers)
        # only proceed when enough spare hosts exist outside the cluster
        if (len(clusterStatus.all_available_hosts) - len(clusterStatus.nodes)) >= int(count):
            servers = list(set(clusterStatus.all_available_hosts) - set(clusterStatus.get_all_hosts()))
        else:
            logger.error("Add nodes request invalid. # of nodes outside cluster is not enough")
            return
        servers = servers[:count]
    for server in servers:
        logger.error("Adding node %s" % server)
        ip, port = parse_server_arg(server)
        if services:
            rest.add_node(cfg.COUCHBASE_USER, cfg.COUCHBASE_PWD, ip, port, zone_name, services)
        else:
            rest.add_node(cfg.COUCHBASE_USER, cfg.COUCHBASE_PWD, ip, port, zone_name)
def pick_nodesToRemove(servers='', involve_orchestrator=False, cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
    """Resolve a removal request into a list of "ip:port" strings.

    servers -- explicit dotted hostnames (space separated), '' for none, or a
               count of nodes to pick from the current cluster.
    involve_orchestrator -- when picking by count, include the orchestrator
               node first (it counts towards the requested total).
    Returns [] when the cluster does not hold enough nodes.
    """
    if servers.find('.') != -1 or servers == '':
        servers = servers.split()
    else:
        clusterStatus = CacheHelper.clusterstatus(cluster_id)
        count = int(servers)
        temp_count = count
        servers = []
        if involve_orchestrator:
            servers.append("%s:%s" % (clusterStatus.orchestrator.ip, clusterStatus.orchestrator.port))
            # the orchestrator consumes one slot of the requested count
            temp_count = temp_count -1
        if len(clusterStatus.nodes) > count:
            non_orchestrator_servers = list(set(clusterStatus.get_all_hosts()) -
                                            {"%s:%s" % (clusterStatus.orchestrator.ip, clusterStatus.orchestrator.port)})
            servers.extend(non_orchestrator_servers[:temp_count])
        else:
            logger.error("Remove nodes request invalid. # of nodes in cluster is not enough")
            return []
    return servers
def remove_nodes(rest, servers='', remove_orchestrator=False, cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
    """Resolve which cluster nodes match the removal request and return their
    otp node ids; the caller performs the actual rebalance-out."""
    ejected = []
    for candidate in pick_nodesToRemove(servers, remove_orchestrator, cluster_id):
        ip, port = parse_server_arg(candidate)
        for node in rest.node_statuses():
            same_ip = "%s" % node.ip == "%s" % ip
            same_port = "%s" % node.port == "%s" % port
            if same_ip and same_port:
                logger.error("Removing node %s" % node.id)
                ejected.append(node.id)
    return ejected
def failover_nodes(rest, servers='', only_failover=False, failover_orchestrator=False, cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
    """Hard-failover the requested nodes via REST.

    Unless only_failover is set, the failed-over otp node ids are returned so
    the caller can eject them during the next rebalance.
    """
    ejected = []
    for candidate in pick_nodesToRemove(servers, failover_orchestrator, cluster_id):
        ip, port = parse_server_arg(candidate)
        matches = [node for node in rest.node_statuses()
                   if "%s" % node.ip == "%s" % ip and "%s" % node.port == "%s" % port]
        for node in matches:
            logger.error("Failing node %s" % node.id)
            rest.fail_over(node.id)
            if not only_failover:
                ejected.append(node.id)
    return ejected
def auto_failover_nodes(rest, servers='', only_failover=False, failover_orchestrator=False, cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
    """Trigger auto-failover of the requested nodes by killing their memcached
    processes while auto-failover (30s timeout) is temporarily enabled.

    Returns the otp node ids to eject afterwards (empty when only_failover).
    """
    toBeEjectedNodes = []
    if servers != '':
        # arm auto-failover just for the duration of this operation
        rest.reset_autofailover()
        rest.update_autofailover_settings(True, 30)
    servers = pick_nodesToRemove(servers, failover_orchestrator, cluster_id)
    for server in servers:
        ip, port = parse_server_arg(server)
        for node in rest.node_statuses():
            if "%s" % node.ip == "%s" % ip and\
               "%s" % node.port == "%s" % port:
                logger.error("Failing node %s" % node.id)
                failover_by_killing_mc(node.ip)
                if not only_failover:
                    toBeEjectedNodes.append(node.id)
    # disarm auto-failover again
    rest.reset_autofailover()
    rest.update_autofailover_settings(False, 30)
    return toBeEjectedNodes
def failover_by_killing_mc(ip):
    """Force a node failure at `ip` by killing its memcached and erlang
    processes over SSH, then wait for auto-failover to take effect."""
    shell, node = create_ssh_conn(ip)
    if cfg.COUCHBASE_OS == "windows":
        kill_cmd = "taskkill /F /T /IM memcached* && taskkill /F /T /IM erl*"
    else:
        kill_cmd = "killall -9 memcached && killall -9 beam.smp"
    logger.error(kill_cmd)
    output = shell.execute_command(kill_cmd, node)
    logger.error(output)
    # in windows, it needs more than 1 min
    time.sleep(70)
def add_back_nodes(rest, servers='', nodes=None):
    """Soft-restart previously failed-over nodes and mark them for add-back.

    servers -- dotted hostnames (space separated) or a count; when a count is
               given, that many entries are taken from `nodes` (otp ids of the
               nodes that were failed over, e.g. "ns_1@host").
    Returns the list of otp node ids that were added back.
    """
    # BUG FIX: `nodes` previously defaulted to a shared mutable list ([]);
    # use None and create a fresh list per call.
    if nodes is None:
        nodes = []
    addBackNodes = []
    if servers.find('.') != -1 or servers == '':
        servers = servers.split()
        for server in servers:
            for node in rest.node_statuses():
                if "%s" % node.ip == "%s" % server:
                    restart(servers=server, type='soft')
                    time.sleep(60)
                    logger.error("Add Back node %s" % node.id)
                    rest.add_back_node(node.id)
                    addBackNodes.append(node.id)
    else:
        count = int(servers)
        servers = nodes[:count]
        for server in servers:
            # otp ids look like "ns_1@hostname"; restart wants the hostname
            restart(servers=server.split('@')[1], type='soft')
            # windows takes noticeably longer to come back up
            if cfg.COUCHBASE_OS == "windows":
                time.sleep(120)
            else:
                time.sleep(60)
            logger.error("Add Back node %s" % server)
            rest.add_back_node(server)
            addBackNodes.append(server)
    return addBackNodes
def parse_server_arg(server):
    """Split "ip[:port]" into (ip, port).

    The port defaults to the integer 8091; when present in the input it is
    returned as the string taken from the input (matching prior behavior).
    """
    parts = server.split(":")
    if len(parts) > 1:
        return parts[0], parts[1]
    return server, 8091
def _dict_to_obj(dict_):
return type('OBJ', (object,), dict_)
def restart(servers='', type='soft', cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
    """Restart couchbase nodes over SSH.

    servers -- dotted hostnames (space separated), '' for none, or a count of
               cluster nodes to restart.
    type    -- 'soft' restarts the couchbase service; anything else reboots
               the whole machine.
    """
    if servers.find('.') != -1 or servers == '':
        servers = servers.split()
    else:
        clusterStatus = CacheHelper.clusterstatus(cluster_id)
        count = int(servers)
        if len(clusterStatus.nodes) >= int(count):
            servers = clusterStatus.get_all_hosts()
        else:
            logger.error("Restart nodes request invalid. # of nodes in cluster is not enough")
            return
        servers = servers[:count]
    for server in servers:
        ip, port = parse_server_arg(server)
        node_ssh, node = create_ssh_conn(ip)
        # BUG FIX: was "type is not 'soft'" -- identity comparison against a
        # string literal; compare by value instead.
        if type != 'soft':
            logger.error('Hard Restart')
            if cfg.COUCHBASE_OS == "windows":
                cmd = "shutdown -r -t 0"
            else:
                cmd = "reboot"
        else:
            logger.error('Soft Restart')
            if cfg.COUCHBASE_OS == "windows":
                # windows has no single restart command: stop, then start
                cmd = "net stop couchbaseserver"
                logger.error(cmd)
                result = node_ssh.execute_command(cmd, node)
                logger.error(result)
                cmd = "net start couchbaseserver"
                logger.error(cmd)
                result = node_ssh.execute_command(cmd, node)
                logger.error(result)
                return
            else:
                cmd = "/etc/init.d/couchbase-server restart"
        # BUG FIX: this execution block was indented under the soft branch,
        # so a hard restart built `cmd` but never ran it; run it for both.
        logger.error(cmd)
        result = node_ssh.execute_command(cmd, node)
        logger.error(result)
def create_server_obj(server_ip=cfg.COUCHBASE_IP, port=cfg.COUCHBASE_PORT,
                      username=cfg.COUCHBASE_USER, password=cfg.COUCHBASE_PWD):
    """Build a server descriptor (attribute access over ip/port/REST
    credentials) compatible with RestConnection; defaults come from testcfg."""
    serverInfo = { "ip" : server_ip,
                   "port" : port,
                   "rest_username" : username,
                   "rest_password" : password
                   }
    node = _dict_to_obj(serverInfo)
    return node
def http_ping(ip, port, timeout=5):
    """Best-effort reachability check of a couchbase REST endpoint.

    Returns True when http://ip:port/pools answers with a non-empty "pools"
    list, False otherwise (including connection errors).

    NOTE(review): `timeout` is accepted for interface compatibility but is
    not currently applied to the request.
    """
    url = "http://%s:%s/pools" % (ip, port)
    try:
        data = url_request(url)
        pools_info = json.loads(data.read())
        if 'pools' in pools_info:
            pools = pools_info["pools"]
            if len(pools) > 0:
                return True
    except Exception:
        # any failure simply means "not reachable"
        pass
    # BUG FIX: previously fell off the end returning None implicitly
    return False
def create_rest(server_ip=cfg.COUCHBASE_IP, port=cfg.COUCHBASE_PORT,
                username=cfg.COUCHBASE_USER, password=cfg.COUCHBASE_PWD):
    """Return a RestConnection to the given node (defaults from testcfg)."""
    return RestConnection(create_server_obj(server_ip, port, username, password))
def create_ssh_conn(server_ip='', port=22, username=cfg.COUCHBASE_SSH_USER,
                    password=cfg.COUCHBASE_SSH_PASSWORD, os=cfg.COUCHBASE_OS):
    """Open an SSH shell to server_ip.

    Returns (shell, node) where node is the descriptor object the shell
    helpers expect alongside the connection.
    """
    # NOTE(review): this branch is a no-op on python 3 (str(server_ip) of a
    # str returns it unchanged) -- likely a python 2 unicode leftover.
    if isinstance(server_ip, str):
        server_ip = str(server_ip)
    serverInfo = {"ip" : server_ip,
                  "port" : port,
                  "ssh_username" : username,
                  "ssh_password" : password,
                  "ssh_key": '',
                  "type": os
                  }
    node = _dict_to_obj(serverInfo)
    shell = RemoteMachineShellConnection(node)
    return shell, node
def perform_teardown_tasks(teardownMsg, rest = None):
    """Dispatch teardown of ddocs, buckets and xdcr remote clusters based on
    which keys are present in teardownMsg."""
    rest = rest or create_rest()
    handlers = (
        ("ddocs", lambda payload: teardown_ddocs(payload, rest)),
        ("buckets", lambda payload: teardown_buckets(payload, rest)),
        ("xdcr_dest_clusters", teardown_xdcr),
    )
    for key, handler in handlers:
        if key in teardownMsg:
            handler(teardownMsg[key])
def teardown_ddocs(ddocList, rest = None):
    """Delete each "bucket/ddoc" design document named in ddocList.

    Malformed entries are logged; already-deleted ddocs are ignored.
    """
    rest = rest or create_rest()
    for ddoc_spec in ddocList:
        try:
            bucket_name, ddoc = [_f for _f in ddoc_spec.split('/') if _f]
            bucket = rest.get_bucket(bucket_name)
            # uses RestConnection's private delete helper (no public API)
            rest._delete_design_doc(bucket, ddoc)
        except ValueError:
            # BUG FIX: previously logged the undefined name `args`, which
            # raised NameError inside this handler
            logger.error("Invalid syntax: %s " % (ddoc_spec))
        except Exception:
            pass  # ddoc already deleted
def teardown_buckets(bucketList, rest = None):
    """Drop every bucket named in bucketList via REST."""
    conn = rest or create_rest()
    for name in bucketList:
        conn.delete_bucket(name)
def teardown_xdcr(xdcrClusters, rest = None):
    """Stop all outbound replications, then drop each remote-cluster
    reference listed in xdcrClusters; failures are ignored (already done)."""
    conn = rest or create_rest()
    try:
        # replications must stop before the references can be removed
        conn.remove_all_replications()
        for cluster_ref in xdcrClusters:
            conn.remove_remote_cluster(cluster_ref)
    except Exception:
        pass  # xdcr already torn down
def perform_cli_task(ssh_command, rest = None):
    """Run one shell command over SSH on each host in the request.

    ssh_command keys: command (required; no-op when missing), hosts
    (default localhost), username/password (default testcfg SSH credentials).
    """
    hosts = ssh_command.get('hosts') or ['127.0.0.1']
    username = ssh_command.get('username') or cfg.COUCHBASE_SSH_USER
    password = ssh_command.get('password') or cfg.COUCHBASE_SSH_PASSWORD
    command = ssh_command.get('command')
    if command is None:
        return
    for host in hosts:
        node_ssh, node = create_ssh_conn(host,
                                         username = username,
                                         password = password)
        logger.error(command)
        #TODO: cache result: CBQE-1329
        result = node_ssh.execute_command(command, node)
        logger.error(result)
| 36.139756 | 180 | 0.602966 | t sys
sys.path=["../lib"] + sys.path
from membase.api.rest_client import RestConnection
from remote.remote_util import RemoteMachineShellConnection
from couchbase_helper.document import View
from app.celery import celery
import app
import testcfg as cfg
import json
import time
import random
import eventlet
from eventlet.green import urllib2
from celery.utils.log import get_task_logger
from cache import ObjCacher, CacheHelper
from testconstants import STANDARD_BUCKET_PORT
logger = get_task_logger(__name__)
if cfg.SERIESLY_IP != '':
from seriesly import Seriesly
K_IP = '127.0.0.1'
SDK_PORT = 50008
@celery.task
def multi_query(count, design_doc_name, view_name, params = None, bucket = "default", password = "", type_ = "view", batch_size = 100, hosts = None):
    """Fire `count` view queries concurrently via an eventlet pool.

    Logs the url/latency/first bytes of the last response; when
    cfg.SERIESLY_IP is configured the latency is also pushed to seriesly.

    BUG FIX: the decorator lost its '@' (a bare `celery.task` expression),
    so this function was never registered as a celery task.
    """
    if params is not None:
        params = urllib2.urllib.urlencode(params)
    pool = eventlet.GreenPool(batch_size)
    api = '%s/_design/%s/_%s/%s?%s' % (bucket,
                                       design_doc_name, type_,
                                       view_name, params)
    qtime = data = url = None
    args = dict(api=api, hosts=hosts)
    # drain the pool; only the last (qtime, data, url) triple is kept
    for qtime, data, url in pool.imap(send_query, [args for i in range(count)]):
        pass
    if cfg.SERIESLY_IP != '' and qtime is not None:
        seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
        db = None
        if 'fast' in seriesly.list_dbs():
            db='fast'
        else:
            # fall back to the per-bucket latency db, creating it on demand
            bucketStatus = app.workload_manager.BucketStatus.from_cache(bucket) or app.workload_manager.BucketStatus(bucket)
            db = bucketStatus.latency_db
            if db not in seriesly.list_dbs():
                seriesly.create_db(db)
        if db is not None:
            seriesly[db].append({'query_latency' : qtime})
    try:
        rc = data.read()[0:200]
    except Exception:
        rc = "exception reading query response"
    logger.error('\n')
    logger.error('url: %s' % url)
    logger.error('latency: %s' % qtime)
    logger.error('data: %s' % rc)
def send_query(args):
    """Issue one timed couchBase view request.

    args -- {"api": path under /couchBase/, "hosts": optional host list}.
    A random host is chosen when hosts are given, otherwise the configured
    cluster node is used. Returns (qtime, data, url); qtime/data stay None
    when the request fails.
    """
    api = args['api']
    hosts = args['hosts']
    if hosts and len(hosts) > 0:
        host = hosts[random.randint(0, len(hosts) - 1)]
        capiUrl = "http://%s/couchBase/" % (host)
    else:
        capiUrl = "http://%s:%s/couchBase/" % (cfg.COUCHBASE_IP, cfg.COUCHBASE_PORT)
    url = capiUrl + api
    qtime, data = None, None
    try:
        qtime, data = timed_url_request(url)
    # NOTE(review): `urllib.error` is not among this module's visible
    # imports -- confirm it is imported elsewhere in the file
    except urllib.error.URLError as ex:
        logger.error("Request error: %s" % ex)
    return qtime, data, url
def timed_url_request(url):
    """Fetch url and return (elapsed_seconds, response_object)."""
    started = time.time()
    response = url_request(url)
    elapsed = time.time() - started
    return elapsed, response
def default_url_headers():
    """Build the default REST headers with HTTP basic auth taken from the
    configured couchbase credentials."""
    # local import: base64 is not imported at module level
    import base64
    # BUG FIX: base64.encodebytes required bytes (a str was passed) and
    # appends a trailing newline that corrupts the Authorization header;
    # b64encode on encoded bytes produces a clean token.
    credentials = "%s:%s" % (cfg.COUCHBASE_USER, cfg.COUCHBASE_PWD)
    authorization = base64.b64encode(credentials.encode("utf-8")).decode("ascii")
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Basic %s' % authorization,
               'Accept': '*/*'}
    return headers
def url_request(url, headers = None):
    """Open url and return the response object; headers default to the
    basic-auth set from default_url_headers().

    NOTE(review): `urllib.request` is not among this module's visible
    imports -- confirm it is imported elsewhere in the file.
    """
    if headers is None:
        headers = default_url_headers()
    req = urllib.request.Request(url, headers = headers)
    data = urllib.request.urlopen(req)
    return data
def _send_msg(message):
    """Serialize message as JSON and push it to the local SDK client socket.

    NOTE(review): SDK_IP is not defined in the visible module scope (only
    K_IP and SDK_PORT are) -- confirm the intended host constant.
    """
    sdk_client = eventlet.connect((SDK_IP, SDK_PORT))
    # BUG FIX: sendall requires bytes on python 3; json.dumps returns str
    sdk_client.sendall(json.dumps(message).encode("utf-8"))
@celery.task
def perform_bucket_create_tasks(bucketMsg):
    """Create buckets of each kind requested in bucketMsg (recognized keys:
    default, sasl, standard, tpcc)."""
    rest = create_rest()
    creators = (("default", create_default_buckets),
                ("sasl", create_sasl_buckets),
                ("standard", create_standard_buckets),
                ("tpcc", create_tpcc_buckets))
    for key, creator in creators:
        if key in bucketMsg:
            creator(rest, bucketMsg[key])
def parseBucketMsg(bucket):
    """Normalize a bucket-spec dict into the full parameter dict used by the
    create_*_buckets helpers, filling defaults for anything missing."""
    parsed = {'count': 1,
              'ramQuotaMB': 1000,
              'replicas': 1,
              'replica_index': 1,
              'type': 'couchbase',
              'priority': 3,
              'eviction_policy': 'valueOnly'}
    # fields that arrive as strings and must become ints
    int_fields = (('count', 'count'), ('quota', 'ramQuotaMB'),
                  ('replicas', 'replicas'), ('replica_index', 'replica_index'))
    for src, dst in int_fields:
        if src in bucket:
            parsed[dst] = int(bucket[src])
    if "type" in bucket:
        parsed['type'] = bucket['type']
    if "priority" in bucket:
        # bucket IO priority: 'high' -> 8 reader/writer threads, else 3
        parsed['priority'] = 8 if bucket['priority'] == 'high' else 3
    if "eviction_policy" in bucket:
        parsed['eviction_policy'] = bucket['eviction_policy']
    return parsed
def create_default_buckets(rest, bucketMsg):
    """Create the single "default" bucket (no auth, proxy port 11211) using
    the quota/replica/priority settings parsed from bucketMsg."""
    bucketMsgParsed = parseBucketMsg(bucketMsg)
    rest.create_bucket(bucket="default",
                       ramQuotaMB = bucketMsgParsed['ramQuotaMB'],
                       replicaNumber = bucketMsgParsed['replicas'],
                       proxyPort = 11211,
                       authType = "none",
                       saslPassword = None,
                       bucketType = bucketMsgParsed['type'],
                       replica_index = bucketMsgParsed['replica_index'],
                       threadsNumber = bucketMsgParsed['priority'],
                       evictionPolicy = bucketMsgParsed['eviction_policy'])
def create_tpcc_buckets(rest, bucketMsg):
    """Create the fixed set of TPCC benchmark buckets with per-table quotas.

    NOTE(review): the quotas are passed as strings ("2000" etc.) straight to
    ramQuotaMB -- confirm the REST client accepts string quotas.
    """
    bucketMsgParsed = parseBucketMsg(bucketMsg)
    tpcc_dict = {"ITEM":"2000", "ORDERS":"3000", "ORDER_LINE":"3000", "NEW_ORDER":"1000", "STOCK":"2000", "CUSTOMER":"2000", "DISTRICT":"1000", "WAREHOUSE":"500", "HISTORY":"1000"}
    for key, value in tpcc_dict.items():
        rest.create_bucket(bucket=key,
                           ramQuotaMB =value,
                           replicaNumber = bucketMsgParsed['replicas'],
                           proxyPort = 11211,
                           authType = "sasl",
                           bucketType = bucketMsgParsed['type'],
                           replica_index = bucketMsgParsed['replica_index'],
                           threadsNumber = bucketMsgParsed['priority'],
                           evictionPolicy = bucketMsgParsed['eviction_policy'])
def create_sasl_buckets(rest, bucketMsg):
    """Create N password-protected ("sasl") buckets named saslbucket,
    saslbucket1, saslbucket2, ..."""
    parsed = parseBucketMsg(bucketMsg)
    for index in range(parsed['count']):
        suffix = "" if index == 0 else str(index)
        rest.create_bucket(bucket = "saslbucket" + suffix,
                           ramQuotaMB = parsed['ramQuotaMB'],
                           replicaNumber = parsed['replicas'],
                           proxyPort = 11211,
                           authType = "sasl",
                           saslPassword = "password",
                           bucketType = parsed['type'],
                           replica_index = parsed['replica_index'],
                           threadsNumber = parsed['priority'],
                           evictionPolicy = parsed['eviction_policy'])
def create_standard_buckets(rest, bucketMsg):
    """Create N unauthenticated "standard" buckets named standardbucket,
    standardbucket1, ... each on its own proxy port."""
    parsed = parseBucketMsg(bucketMsg)
    for index in range(parsed['count']):
        suffix = "" if index == 0 else str(index)
        rest.create_bucket(bucket = "standardbucket" + suffix,
                           ramQuotaMB = parsed['ramQuotaMB'],
                           replicaNumber = parsed['replicas'],
                           proxyPort = STANDARD_BUCKET_PORT + index,
                           authType = "none",
                           saslPassword = None,
                           bucketType = parsed['type'],
                           replica_index = parsed['replica_index'],
                           threadsNumber = parsed['priority'],
                           evictionPolicy = parsed['eviction_policy'])
@celery.task
def perform_view_tasks(viewMsgList):
    """Create and/or delete design documents described by viewMsgList.

    Each message may carry a "create" list of view specs (grouped by ddoc)
    and/or a "delete" list. A single dict is accepted in place of a list.
    """
    rest = create_rest()
    if isinstance(viewMsgList, dict):
        viewMsgList = [viewMsgList]
    for viewMsg in viewMsgList:
        if "create" in viewMsg:
            ddocMsg = parseDdocMsg(viewMsg['create'])
            for ddoc_name, views in ddocMsg.items():
                view_list = []
                bucket_name = ''
                # all views of one ddoc are assumed to target the same
                # bucket; the last spec's bucket wins
                for view in views:
                    view_list.append(View(view['view_name'], view['map_func'], view['red_func'],
                                          view['dev_view'], view['is_spatial']))
                    bucket_name = view['bucket_name']
                bucket_obj = rest.get_bucket(bucket_name, 2, 2)
                rest.create_ddoc(ddoc_name, bucket_obj, view_list)
        if "delete" in viewMsg:
            for view in viewMsg['delete']:
                viewMsgParsed = parseViewMsg(view)
                bucket_obj = rest.get_bucket(viewMsgParsed['bucket_name'], 2, 2)
                rest.delete_view(bucket_obj, viewMsgParsed['ddoc_name'])
def parseDdocMsg(views):
    """Group a list of raw view specs by design-doc name.

    Returns {ddoc_name: [parsed view dicts]} where each entry has been
    normalized by parseViewMsg.
    """
    ddocs = {}
    for view in views:
        viewMsg = parseViewMsg(view)
        # setdefault replaces the manual has-key / create-list / append dance
        ddocs.setdefault(viewMsg['ddoc_name'], []).append(viewMsg)
    return ddocs
def parseViewMsg(view):
    """Normalize a raw view spec dict, filling defaults for missing fields."""
    parsed = {'ddoc_name': 'ddoc1',
              'view_name': 'view1',
              'map_func': 'function (doc) { emit(null, doc);}',
              'red_func': None,
              'dev_view': True,
              'is_spatial': False,
              'bucket_name': 'default'}
    # straight renames of optional input keys
    renames = (('ddoc', 'ddoc_name'), ('view', 'view_name'),
               ('map', 'map_func'), ('reduce', 'red_func'),
               ('bucket', 'bucket_name'))
    for src, dst in renames:
        if src in view:
            parsed[dst] = view[src]
    # boolean flags arrive as the strings "True"/"False"; any other value
    # leaves the default untouched
    for src, dst in (('dev', 'dev_view'), ('spatial', 'is_spatial')):
        if src in view:
            if view[src] == "True":
                parsed[dst] = True
            elif view[src] == "False":
                parsed[dst] = False
    return parsed
@celery.task
def perform_admin_tasks(adminMsg, cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
    """Apply one combined cluster-admin request: add/remove nodes, failover
    (manual and auto), add-back, restarts, and a final rebalance.

    adminMsg keys read: rebalance_in, rebalance_out, group, services,
    failover, auto_failover, only_failover, add_back, involve_orchestrator,
    soft_restart, hard_restart.
    """
    app.workload_manager.updateClusterStatus()
    clusterStatus = CacheHelper.clusterstatus(cluster_id)
    if clusterStatus is None:
        logger.error("Unable to fetch clusterStatus from cache: ")
        return
    rest = clusterStatus.node_rest()
    # stage 1: add requested nodes (joined on the rebalance below)
    servers = adminMsg["rebalance_in"]
    zone_name = adminMsg["group"]
    if adminMsg["services"]:
        add_nodes(rest, servers, cluster_id, zone_name, adminMsg["services"])
    else:
        add_nodes(rest, servers, cluster_id, zone_name)
    allNodes = []
    for node in rest.node_statuses():
        allNodes.append(node.id)
    # stage 2: collect nodes to eject (removal + both failover flavors)
    servers = adminMsg["rebalance_out"]
    toBeEjectedNodes = remove_nodes(rest, servers, adminMsg["involve_orchestrator"], cluster_id)
    servers = adminMsg["failover"]
    auto_failover_servers = adminMsg["auto_failover"]
    only_failover = adminMsg["only_failover"]
    add_back_servers = adminMsg["add_back"]
    failoverNodes = failover_nodes(rest, servers, only_failover, adminMsg["involve_orchestrator"], cluster_id)
    autoFailoverNodes = auto_failover_nodes(rest, auto_failover_servers, only_failover, adminMsg["involve_orchestrator"], cluster_id)
    # refresh the cached view (failovers may have changed the topology)
    app.workload_manager.updateClusterStatus()
    clusterStatus = CacheHelper.clusterstatus(cluster_id)
    rest = clusterStatus.node_rest()
    # stage 3: add back a subset of the failed-over nodes; those must not
    # be ejected afterwards
    addBackNodes = add_back_nodes(rest, add_back_servers, autoFailoverNodes+failoverNodes)
    toBeEjectedNodes.extend(failoverNodes)
    toBeEjectedNodes.extend(autoFailoverNodes)
    for node in addBackNodes:
        toBeEjectedNodes.remove(node)
    # stage 4: restarts (no rebalance is attempted when any were requested)
    servers = adminMsg["soft_restart"]
    restart(servers, cluster_id=cluster_id)
    servers = adminMsg["hard_restart"]
    restart(servers, type='hard', cluster_id=cluster_id)
    if adminMsg["soft_restart"] == '' and adminMsg["hard_restart"] == '':
        if not only_failover and (len(allNodes) > 0 or len(toBeEjectedNodes) > 0):
            logger.error("Rebalance")
            logger.error(allNodes)
            logger.error(toBeEjectedNodes)
            rest.rebalance(otpNodes=allNodes, ejectedNodes=toBeEjectedNodes)
            logger.error(toBeEjectedNodes)
            # soft-restart the machines behind ejected failover nodes so
            # they are usable again; otp ids look like "ns_X@hostname"
            restartNodes = ""
            for node in toBeEjectedNodes:
                if node in (failoverNodes + autoFailoverNodes):
                    if '@' in node:
                        node = node.split('@')[1]
                    restartNodes = "%s %s" % (node, restartNodes)
            if len(restartNodes):
                restart(restartNodes)
def monitorRebalance():
rest = create_rest()
rebalance_success = rest.monitorRebalance()
return rebalance_success
@celery.task
def perform_xdcr_tasks(xdcrMsg):
    """Pair this cluster with a remote site and start XDCR replications.

    See cfg.REMOTE_SITES for the remote reference named by
    xdcrMsg['dest_cluster_name'].
    """
    logger.error(xdcrMsg)
    src_master = create_server_obj()
    # BUG FIX: the default port used to be bound to `remotePort` while the
    # code below read `remote_port`, raising NameError whenever the remote
    # site did not declare COUCHBASE_PORT.
    remote_port = 8091
    dest_cluster_name = remoteRef = str(xdcrMsg['dest_cluster_name'])
    if "COUCHBASE_IP" in cfg.REMOTE_SITES[remoteRef]:
        remote_ip = cfg.REMOTE_SITES[remoteRef]["COUCHBASE_IP"]
        if "COUCHBASE_PORT" in cfg.REMOTE_SITES[remoteRef]:
            remote_port = cfg.REMOTE_SITES[remoteRef]["COUCHBASE_PORT"]
    else:
        logger.error("Cannot find remote site %s in testcfg.REMOTE_SITES: " % (remoteRef))
        return
    dest_master = create_server_obj(server_ip=remote_ip, port=remote_port,
                                    username=xdcrMsg['dest_cluster_rest_username'],
                                    password=xdcrMsg['dest_cluster_rest_pwd'])
    xdcr_link_cluster(src_master, dest_master, dest_cluster_name)
    buckets = xdcrMsg.get("buckets")
    replication_filters = xdcrMsg.get("filter_expression")
    for bucket in buckets:
        xdcr_params = {}
        if replication_filters and bucket in replication_filters:
            xdcr_params["filterExpression"] = replication_filters[bucket]
        xdcr_start_replication(src_master, dest_cluster_name, bucket, xdcr_params)
def xdcr_link_cluster(src_master, dest_master, dest_cluster_name):
rest_conn_src = RestConnection(src_master)
rest_conn_src.add_remote_cluster(dest_master.ip, dest_master.port,
dest_master.rest_username,
dest_master.rest_password, dest_cluster_name)
def xdcr_start_replication(src_master, dest_cluster_name, bucket_name, xdcr_params):
rest_conn_src = RestConnection(src_master)
for bucket in rest_conn_src.get_buckets():
if bucket.name == bucket_name:
rep_id = rest_conn_src.start_replication("continuous",
bucket.name,
dest_cluster_name,
xdcr_params=xdcr_params)
logger.error("rep_id: %s" %rep_id)
def add_nodes(rest, servers='', cluster_id=cfg.CB_CLUSTER_TAG+"_status", zone_name = '', services=None):
if zone_name != '':
if rest.is_zone_exist(zone_name) == False:
rest.add_zone(zone_name)
if servers.find('.') != -1 or servers == '':
servers = servers.split()
else:
clusterStatus = CacheHelper.clusterstatus(cluster_id)
count = int(servers)
if (len(clusterStatus.all_available_hosts) - len(clusterStatus.nodes)) >= int(count):
servers = list(set(clusterStatus.all_available_hosts) - set(clusterStatus.get_all_hosts()))
else:
logger.error("Add nodes request invalid. # of nodes outside cluster is not enough")
return
servers = servers[:count]
for server in servers:
logger.error("Adding node %s" % server)
ip, port = parse_server_arg(server)
if services:
rest.add_node(cfg.COUCHBASE_USER, cfg.COUCHBASE_PWD, ip, port, zone_name, services)
else:
rest.add_node(cfg.COUCHBASE_USER, cfg.COUCHBASE_PWD, ip, port, zone_name)
def pick_nodesToRemove(servers='', involve_orchestrator=False, cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
if servers.find('.') != -1 or servers == '':
servers = servers.split()
else:
clusterStatus = CacheHelper.clusterstatus(cluster_id)
count = int(servers)
temp_count = count
servers = []
if involve_orchestrator:
servers.append("%s:%s" % (clusterStatus.orchestrator.ip, clusterStatus.orchestrator.port))
temp_count = temp_count -1
if len(clusterStatus.nodes) > count:
non_orchestrator_servers = list(set(clusterStatus.get_all_hosts()) -
{"%s:%s" % (clusterStatus.orchestrator.ip, clusterStatus.orchestrator.port)})
servers.extend(non_orchestrator_servers[:temp_count])
else:
logger.error("Remove nodes request invalid. # of nodes in cluster is not enough")
return []
return servers
def remove_nodes(rest, servers='', remove_orchestrator=False, cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
toBeEjectedNodes = []
servers = pick_nodesToRemove(servers, remove_orchestrator, cluster_id)
for server in servers:
ip, port = parse_server_arg(server)
for node in rest.node_statuses():
if "%s" % node.ip == "%s" % ip and\
"%s" % node.port == "%s" % port:
logger.error("Removing node %s" % node.id)
toBeEjectedNodes.append(node.id)
return toBeEjectedNodes
def failover_nodes(rest, servers='', only_failover=False, failover_orchestrator=False, cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
toBeEjectedNodes = []
servers = pick_nodesToRemove(servers, failover_orchestrator, cluster_id)
for server in servers:
ip, port = parse_server_arg(server)
for node in rest.node_statuses():
if "%s" % node.ip == "%s" % ip and\
"%s" % node.port == "%s" % port:
logger.error("Failing node %s" % node.id)
rest.fail_over(node.id)
if not only_failover:
toBeEjectedNodes.append(node.id)
return toBeEjectedNodes
def auto_failover_nodes(rest, servers='', only_failover=False, failover_orchestrator=False, cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
toBeEjectedNodes = []
if servers != '':
rest.reset_autofailover()
rest.update_autofailover_settings(True, 30)
servers = pick_nodesToRemove(servers, failover_orchestrator, cluster_id)
for server in servers:
ip, port = parse_server_arg(server)
for node in rest.node_statuses():
if "%s" % node.ip == "%s" % ip and\
"%s" % node.port == "%s" % port:
logger.error("Failing node %s" % node.id)
failover_by_killing_mc(node.ip)
if not only_failover:
toBeEjectedNodes.append(node.id)
rest.reset_autofailover()
rest.update_autofailover_settings(False, 30)
return toBeEjectedNodes
def failover_by_killing_mc(ip):
node_ssh, node = create_ssh_conn(ip)
if cfg.COUCHBASE_OS == "windows":
cmd = "taskkill /F /T /IM memcached* && taskkill /F /T /IM erl*"
else:
cmd = "killall -9 memcached && killall -9 beam.smp"
logger.error(cmd)
result = node_ssh.execute_command(cmd, node)
logger.error(result)
time.sleep(70)
def add_back_nodes(rest, servers='', nodes=[]):
addBackNodes = []
if servers.find('.') != -1 or servers == '':
servers = servers.split()
for server in servers:
for node in rest.node_statuses():
if "%s" % node.ip == "%s" % server:
restart(servers=server, type='soft')
time.sleep(60)
logger.error("Add Back node %s" % node.id)
rest.add_back_node(node.id)
addBackNodes.append(node.id)
else:
count = int(servers)
servers = nodes[:count]
for server in servers:
restart(servers=server.split('@')[1], type='soft')
if cfg.COUCHBASE_OS == "windows":
time.sleep(120)
else:
time.sleep(60)
logger.error("Add Back node %s" % server)
rest.add_back_node(server)
addBackNodes.append(server)
return addBackNodes
def parse_server_arg(server):
ip = server
port = 8091
addr = server.split(":")
if len(addr) > 1:
ip = addr[0]
port = addr[1]
return ip, port
def _dict_to_obj(dict_):
return type('OBJ', (object,), dict_)
def restart(servers='', type='soft', cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
    """Restart couchbase nodes over SSH ('soft' restarts the service,
    anything else reboots the machine)."""
    if servers.find('.') != -1 or servers == '':
        servers = servers.split()
    else:
        clusterStatus = CacheHelper.clusterstatus(cluster_id)
        count = int(servers)
        if len(clusterStatus.nodes) >= int(count):
            servers = clusterStatus.get_all_hosts()
        else:
            logger.error("Restart nodes request invalid. # of nodes in cluster is not enough")
            return
        servers = servers[:count]
    for server in servers:
        ip, port = parse_server_arg(server)
        node_ssh, node = create_ssh_conn(ip)
        # BUG FIX: was "type is not 'soft'" -- identity comparison against a
        # string literal; compare by value instead.
        if type != 'soft':
            logger.error('Hard Restart')
            if cfg.COUCHBASE_OS == "windows":
                cmd = "shutdown -r -t 0"
            else:
                cmd = "reboot"
        else:
            logger.error('Soft Restart')
            if cfg.COUCHBASE_OS == "windows":
                cmd = "net stop couchbaseserver"
                logger.error(cmd)
                result = node_ssh.execute_command(cmd, node)
                logger.error(result)
                cmd = "net start couchbaseserver"
                logger.error(cmd)
                result = node_ssh.execute_command(cmd, node)
                logger.error(result)
                return
            else:
                cmd = "/etc/init.d/couchbase-server restart"
        # BUG FIX: this execution block was indented under the soft branch,
        # so a hard restart built `cmd` but never ran it; run it for both.
        logger.error(cmd)
        result = node_ssh.execute_command(cmd, node)
        logger.error(result)
def create_server_obj(server_ip=cfg.COUCHBASE_IP, port=cfg.COUCHBASE_PORT,
username=cfg.COUCHBASE_USER, password=cfg.COUCHBASE_PWD):
serverInfo = { "ip" : server_ip,
"port" : port,
"rest_username" : username,
"rest_password" : password
}
node = _dict_to_obj(serverInfo)
return node
def http_ping(ip, port, timeout=5):
    """Best-effort reachability check of a couchbase REST endpoint; True when
    /pools answers with a non-empty pools list, else False.

    NOTE(review): `timeout` is accepted for compatibility but unused.
    """
    url = "http://%s:%s/pools" % (ip, port)
    try:
        data = url_request(url)
        pools_info = json.loads(data.read())
        if 'pools' in pools_info:
            pools = pools_info["pools"]
            if len(pools) > 0:
                return True
    except Exception:
        pass
    # BUG FIX: previously fell off the end returning None implicitly
    return False
def create_rest(server_ip=cfg.COUCHBASE_IP, port=cfg.COUCHBASE_PORT,
username=cfg.COUCHBASE_USER, password=cfg.COUCHBASE_PWD):
return RestConnection(create_server_obj(server_ip, port, username, password))
def create_ssh_conn(server_ip='', port=22, username=cfg.COUCHBASE_SSH_USER,
password=cfg.COUCHBASE_SSH_PASSWORD, os=cfg.COUCHBASE_OS):
if isinstance(server_ip, str):
server_ip = str(server_ip)
serverInfo = {"ip" : server_ip,
"port" : port,
"ssh_username" : username,
"ssh_password" : password,
"ssh_key": '',
"type": os
}
node = _dict_to_obj(serverInfo)
shell = RemoteMachineShellConnection(node)
return shell, node
def perform_teardown_tasks(teardownMsg, rest = None):
rest = rest or create_rest()
if "ddocs" in teardownMsg:
teardown_ddocs(teardownMsg["ddocs"], rest)
if "buckets" in teardownMsg:
teardown_buckets(teardownMsg["buckets"], rest)
if "xdcr_dest_clusters" in teardownMsg:
teardown_xdcr(teardownMsg["xdcr_dest_clusters"])
def teardown_ddocs(ddocList, rest = None):
    """Delete each "bucket/ddoc" design document named in ddocList; malformed
    entries are logged, already-deleted ddocs are ignored."""
    rest = rest or create_rest()
    for ddoc_spec in ddocList:
        try:
            bucket_name, ddoc = [_f for _f in ddoc_spec.split('/') if _f]
            bucket = rest.get_bucket(bucket_name)
            rest._delete_design_doc(bucket, ddoc)
        except ValueError:
            # BUG FIX: previously logged the undefined name `args` (NameError)
            logger.error("Invalid syntax: %s " % (ddoc_spec))
        except Exception:
            pass
def teardown_buckets(bucketList, rest = None):
rest = rest or create_rest()
for bucket in bucketList:
rest.delete_bucket(bucket)
def teardown_xdcr(xdcrClusters, rest = None):
rest = rest or create_rest()
try:
rest.remove_all_replications()
for cluster in xdcrClusters:
rest.remove_remote_cluster(cluster)
except Exception:
pass
def perform_cli_task(ssh_command, rest = None):
command = ""
hosts = ssh_command.get('hosts') or ['127.0.0.1']
username = ssh_command.get('username') or cfg.COUCHBASE_SSH_USER
password = ssh_command.get('password') or cfg.COUCHBASE_SSH_PASSWORD
command = ssh_command.get('command')
if command is not None:
for host in hosts:
node_ssh, node = create_ssh_conn(host,
username = username,
password = password)
logger.error(command)
result = node_ssh.execute_command(command, node)
logger.error(result)
| true | true |
1c3235090113f2ffd6c9298c078e8540cb1f7cfb | 12,039 | py | Python | retinaface/inference.py | markyong97/retinafacetest | b72317d682c9e17492f5418073073e63c4ce2ce2 | [
"MIT"
] | null | null | null | retinaface/inference.py | markyong97/retinafacetest | b72317d682c9e17492f5418073073e63c4ce2ce2 | [
"MIT"
] | null | null | null | retinaface/inference.py | markyong97/retinafacetest | b72317d682c9e17492f5418073073e63c4ce2ce2 | [
"MIT"
] | null | null | null | import argparse
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import albumentations as albu
import cv2
import numpy as np
import torch
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import yaml
from albumentations.core.serialization import from_dict
from iglovikov_helper_functions.config_parsing.utils import object_from_dict
from iglovikov_helper_functions.dl.pytorch.utils import state_dict_from_disk
from iglovikov_helper_functions.utils.image_utils import pad_to_size, unpad_from_size
from PIL import Image
from torch import nn
from torch.nn import functional as F
from torch.utils.data import Dataset
from torch.utils.data.distributed import DistributedSampler
from torchvision.ops import nms
from tqdm import tqdm
from retinaface.box_utils import decode, decode_landm
from retinaface.utils import tensor_from_rgb_image, vis_annotations
def get_args() -> Any:
    """Build and parse the command-line arguments for batch face inference."""
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg("-i", "--input_path", type=Path, help="Path with images.", required=True)
    arg("-c", "--config_path", type=Path, help="Path to config.", required=True)
    arg("-o", "--output_path", type=Path, help="Path to save jsons.", required=True)
    arg("-v", "--visualize", action="store_true", help="Visualize predictions")
    arg("-m", "--max_size", type=int, help="Resize the largest side to this number", default=960)
    arg("-b", "--batch_size", type=int, help="batch_size", default=1)
    arg("-j", "--num_workers", type=int, help="num_workers", default=12)
    arg("--confidence_threshold", default=0.7, type=float, help="confidence_threshold")
    arg("--nms_threshold", default=0.4, type=float, help="nms_threshold")
    arg("-w", "--weight_path", type=str, help="Path to weights.", required=True)
    arg("--keep_top_k", default=750, type=int, help="keep_top_k")
    arg("--world_size", default=-1, type=int, help="number of nodes for distributed training")
    arg("--local_rank", default=-1, type=int, help="node rank for distributed training")
    # BUG FIX: help text said "Use fp6"
    arg("--fp16", action="store_true", help="Use fp16")
    arg("--folder_in_name", action="store_true", help="Add folder to the saved labels.")
    return parser.parse_args()
class InferenceDataset(Dataset):
    """Dataset that loads images, resizes the longest side to max_size, pads
    to a square max_size x max_size canvas, and applies `transform`.

    Each item carries the padding and original size so detections can later
    be mapped back to original image coordinates.
    """

    def __init__(
        self, file_paths: List[Path], max_size: int, transform: albu.Compose
    ) -> None:  # pylint: disable=W0231
        self.file_paths = file_paths
        self.transform = transform
        self.max_size = max_size
        # scales so the longest image side equals max_size, keeping aspect
        self.resize = albu.LongestMaxSize(max_size=max_size, p=1)

    def __len__(self) -> int:
        return len(self.file_paths)

    def __getitem__(self, idx: int) -> Optional[Dict[str, Any]]:
        """Return the transformed image tensor plus bookkeeping metadata."""
        image_path = self.file_paths[idx]
        image = np.array(Image.open(image_path))

        image_height, image_width = image.shape[:2]

        image = self.resize(image=image)["image"]

        paded = pad_to_size(target_size=(self.max_size, self.max_size), image=image)

        image = paded["image"]
        pads = paded["pads"]

        image = self.transform(image=image)["image"]

        return {
            "torched_image": tensor_from_rgb_image(image),
            "image_path": str(image_path),
            "pads": np.array(pads),
            "image_height": image_height,
            "image_width": image_width,
        }
def unnormalize(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
    """Invert ImageNet normalization and rescale values to the 0-255 range.

    NOTE: the input is modified in place and the same object is returned.
    """
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)
    for channel in range(image.shape[-1]):
        # x_norm * std + mean recovers the 0..1 value; then scale to 0..255.
        rescaled = image[:, :, channel] * std[channel]  # type: ignore
        image[:, :, channel] = (rescaled + mean[channel]) * 255  # type: ignore
    return image
def process_predictions(
    prediction: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
    original_shapes: List[Tuple[int, int]],
    input_shape: Tuple[int, int, int, int],
    pads: Optional[np.ndarray],
    confidence_threshold: float,
    nms_threshold: float,
    prior_box: torch.Tensor,
    variance: Tuple[float, float],
    keep_top_k: int,
) -> List[List[Dict[str, Union[float, List[float]]]]]:
    """Turn raw network outputs into per-image face annotations.

    ``prediction`` is the (locations, class scores, landmarks) triple.
    For each image: boxes/landmarks are decoded against ``prior_box``,
    low-confidence detections dropped, NMS applied, the top ``keep_top_k``
    kept, padding removed and coordinates rescaled to the original image.
    Images with no surviving boxes get ``[{"bbox": [], "score": -1, ...}]``.
    """
    loc, conf, land = prediction
    conf = F.softmax(conf, dim=-1)
    result: List[List[Dict[str, Union[List[float], float]]]] = []
    batch_size, _, image_height, image_width = input_shape
    # scale1 maps the 5 (x, y) landmark pairs, scale maps box corners.
    scale1 = torch.from_numpy(np.tile([image_width, image_height], 5)).to(loc.device)
    scale = torch.from_numpy(np.tile([image_width, image_height], 2)).to(loc.device)
    for batch_id in range(batch_size):
        annotations: List[Dict[str, Union[List, float]]] = []
        boxes = decode(loc.data[batch_id], prior_box.to(loc.device), variance)
        boxes *= scale
        scores = conf[batch_id][:, 1]  # probability of the "face" class
        landmarks = decode_landm(land.data[batch_id], prior_box.to(land.device), variance)
        landmarks *= scale1
        # ignore low scores
        valid_index = torch.where(scores > confidence_threshold)[0]
        boxes = boxes[valid_index]
        landmarks = landmarks[valid_index]
        scores = scores[valid_index]
        # Sort by descending confidence before NMS.
        order = scores.argsort(descending=True)
        boxes = boxes[order]
        landmarks = landmarks[order]
        scores = scores[order]
        # do NMS
        keep = nms(boxes, scores, nms_threshold)
        boxes = boxes[keep, :].int()
        if boxes.shape[0] == 0:
            # Sentinel entry so the result stays aligned with the batch.
            result += [[{"bbox": [], "score": -1, "landmarks": []}]]
            continue
        landmarks = landmarks[keep]
        scores = scores[keep].cpu().numpy().astype(np.float64)[:keep_top_k]
        boxes = boxes.cpu().numpy()[:keep_top_k, :]
        landmarks = landmarks.cpu().numpy()[:keep_top_k, :]
        landmarks = landmarks.reshape([-1, 2])
        if pads is None:
            pads_numpy = np.array([0, 0, 0, 0])
        else:
            pads_numpy = pads[batch_id]
        unpadded = unpad_from_size(pads_numpy, bboxes=boxes, keypoints=landmarks)
        # Rescale from the network's padded square back to original size.
        resize_coeff = max(original_shapes[batch_id]) / max(image_height, image_width)
        boxes = (unpadded["bboxes"] * resize_coeff).astype(int)
        landmarks = (unpadded["keypoints"].reshape(-1, 10) * resize_coeff).astype(int)
        for crop_id, bbox in enumerate(boxes):
            annotations += [
                {
                    "bbox": bbox.tolist(),
                    "score": float(scores[crop_id]),
                    "landmarks": landmarks[crop_id].reshape(-1, 2).tolist(),
                }
            ]
        result += [annotations]
    return result
def main() -> None:
    """Entry point: set up distributed inference and run prediction.

    Loads config + weights, wraps the model in DistributedDataParallel and
    runs :func:`predict` over every ``*.jpg`` under ``--input_path``.
    """
    args = get_args()
    torch.distributed.init_process_group(backend="nccl")
    with args.config_path.open() as f:
        hparams = yaml.load(f, Loader=yaml.SafeLoader)
    # Fold CLI options into the config so downstream code has one dict.
    hparams.update(
        {
            "json_path": args.output_path,
            "visualize": args.visualize,
            "confidence_threshold": args.confidence_threshold,
            "nms_threshold": args.nms_threshold,
            "keep_top_k": args.keep_top_k,
            "local_rank": args.local_rank,
            "prior_box": object_from_dict(hparams["prior_box"], image_size=[args.max_size, args.max_size]),
            "fp16": args.fp16,
            "folder_in_name": args.folder_in_name,
        }
    )
    if args.visualize:
        output_vis_path = args.output_path / "viz"
        output_vis_path.mkdir(parents=True, exist_ok=True)
        hparams["output_vis_path"] = output_vis_path
    output_label_path = args.output_path / "labels"
    output_label_path.mkdir(parents=True, exist_ok=True)
    hparams["output_label_path"] = output_label_path
    device = torch.device("cuda", args.local_rank)
    model = object_from_dict(hparams["model"])
    model = model.to(device)
    if args.fp16:
        model = model.half()
    # Checkpoint keys are prefixed with "model."; strip it before loading.
    corrections: Dict[str, str] = {"model.": ""}
    state_dict = state_dict_from_disk(file_path=args.weight_path, rename_in_layers=corrections)
    model.load_state_dict(state_dict)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args.local_rank], output_device=args.local_rank
    )
    file_paths = list(args.input_path.rglob("*.jpg"))
    dataset = InferenceDataset(file_paths, max_size=args.max_size, transform=from_dict(hparams["test_aug"]))
    # shuffle=False so each rank sees a deterministic shard of the files.
    sampler: DistributedSampler = DistributedSampler(dataset, shuffle=False)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,
        shuffle=False,
        drop_last=False,
        sampler=sampler,
    )
    predict(dataloader, model, hparams, device)
def predict(dataloader: torch.utils.data.DataLoader, model: nn.Module, hparams: dict, device: torch.device) -> None:
    """Run the model over ``dataloader`` and write one JSON per image.

    Labels go to ``output_label_path/<folder>/<stem>.json``; when
    ``visualize`` is set an annotated JPEG is also written under
    ``output_vis_path``. Images with no detections are skipped.
    """
    model.eval()
    # Only rank 0 shows a progress bar.
    if hparams["local_rank"] == 0:
        loader = tqdm(dataloader)
    else:
        loader = dataloader
    with torch.no_grad():
        for batch in loader:
            torched_images = batch["torched_image"]  # images that are rescaled and padded
            if hparams["fp16"]:
                torched_images = torched_images.half()
            pads = batch["pads"]
            image_paths = batch["image_path"]
            image_heights = batch["image_height"]
            image_widths = batch["image_width"]
            batch_size = torched_images.shape[0]
            image_heights = image_heights.cpu().numpy()
            image_widths = image_widths.cpu().numpy()
            original_shapes = list(zip(image_heights, image_widths))
            prediction = model(torched_images.to(device))
            output_annotations = process_predictions(
                prediction=prediction,
                original_shapes=original_shapes,
                input_shape=torched_images.shape,
                pads=pads.cpu().numpy(),
                confidence_threshold=hparams["confidence_threshold"],
                nms_threshold=hparams["nms_threshold"],
                prior_box=hparams["prior_box"],
                variance=hparams["test_parameters"]["variance"],
                keep_top_k=hparams["keep_top_k"],
            )
            for batch_id in range(batch_size):
                annotations = output_annotations[batch_id]
                # Empty bbox is the "no detections" sentinel — skip writing.
                if not annotations[0]["bbox"]:
                    continue
                folder_name = Path(image_paths[batch_id]).parent.name
                file_name = Path(image_paths[batch_id]).name
                file_id = Path(image_paths[batch_id]).stem
                predictions = {
                    "file_name": file_name,
                    "annotations": annotations,
                    "file_path": str(Path(folder_name) / file_name),
                }
                (hparams["output_label_path"] / folder_name).mkdir(exist_ok=True, parents=True)
                result_path = hparams["output_label_path"] / folder_name / f"{file_id}.json"
                with result_path.open("w") as f:
                    json.dump(predictions, f, indent=2)
                if hparams["visualize"]:
                    # Rebuild a displayable image from the network input:
                    # CHW -> HWC, undo normalization, strip padding, resize.
                    normalized_image = np.transpose(torched_images[batch_id].cpu().numpy(), (1, 2, 0))
                    image = unnormalize(normalized_image)
                    unpadded = unpad_from_size(pads[batch_id].cpu().numpy(), image)
                    original_image_height = image_heights[batch_id].item()
                    original_image_width = image_widths[batch_id].item()
                    image = cv2.resize(
                        unpadded["image"].astype(np.uint8), (original_image_width, original_image_height)
                    )
                    image = vis_annotations(image, annotations=annotations)  # type: ignore
                    (hparams["output_vis_path"] / folder_name).mkdir(exist_ok=True, parents=True)
                    result_path = hparams["output_vis_path"] / folder_name / f"{file_id}.jpg"
                    cv2.imwrite(str(result_path), cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Script entry point: each process launched by torch.distributed runs main().
if __name__ == "__main__":
    main()
| 35.937313 | 116 | 0.627212 | import argparse
import json
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import albumentations as albu
import cv2
import numpy as np
import torch
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import yaml
from albumentations.core.serialization import from_dict
from iglovikov_helper_functions.config_parsing.utils import object_from_dict
from iglovikov_helper_functions.dl.pytorch.utils import state_dict_from_disk
from iglovikov_helper_functions.utils.image_utils import pad_to_size, unpad_from_size
from PIL import Image
from torch import nn
from torch.nn import functional as F
from torch.utils.data import Dataset
from torch.utils.data.distributed import DistributedSampler
from torchvision.ops import nms
from tqdm import tqdm
from retinaface.box_utils import decode, decode_landm
from retinaface.utils import tensor_from_rgb_image, vis_annotations
def get_args() -> Any:
    """Parse command-line options for distributed RetinaFace inference."""
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg("-i", "--input_path", type=Path, help="Path with images.", required=True)
    arg("-c", "--config_path", type=Path, help="Path to config.", required=True)
    arg("-o", "--output_path", type=Path, help="Path to save jsons.", required=True)
    arg("-v", "--visualize", action="store_true", help="Visualize predictions")
    arg("-m", "--max_size", type=int, help="Resize the largest side to this number", default=960)
    arg("-b", "--batch_size", type=int, help="batch_size", default=1)
    arg("-j", "--num_workers", type=int, help="num_workers", default=12)
    arg("--confidence_threshold", default=0.7, type=float, help="confidence_threshold")
    arg("--nms_threshold", default=0.4, type=float, help="nms_threshold")
    arg("-w", "--weight_path", type=str, help="Path to weights.", required=True)
    arg("--keep_top_k", default=750, type=int, help="keep_top_k")
    arg("--world_size", default=-1, type=int, help="number of nodes for distributed training")
    arg("--local_rank", default=-1, type=int, help="node rank for distributed training")
    # NOTE(review): "fp6" in the help text looks like a typo for "fp16".
    arg("--fp16", action="store_true", help="Use fp6")
    arg("--folder_in_name", action="store_true", help="Add folder to the saved labels.")
    return parser.parse_args()
class InferenceDataset(Dataset):
    """Dataset of image files: each image is resized so its longest side is
    ``max_size``, padded to a square, transformed, and returned as a tensor."""

    def __init__(
        self, file_paths: List[Path], max_size: int, transform: albu.Compose
    ) -> None:
        self.file_paths = file_paths
        self.transform = transform
        self.max_size = max_size
        # Aspect-ratio-preserving resize of the longest side.
        self.resize = albu.LongestMaxSize(max_size=max_size, p=1)

    def __len__(self) -> int:
        return len(self.file_paths)

    def __getitem__(self, idx: int) -> Optional[Dict[str, Any]]:
        image_path = self.file_paths[idx]
        image = np.array(Image.open(image_path))
        image_height, image_width = image.shape[:2]
        image = self.resize(image=image)["image"]
        paded = pad_to_size(target_size=(self.max_size, self.max_size), image=image)
        image = paded["image"]
        pads = paded["pads"]
        image = self.transform(image=image)["image"]
        # Pads and the original size are returned so predictions can be
        # mapped back to the original image later.
        return {
            "torched_image": tensor_from_rgb_image(image),
            "image_path": str(image_path),
            "pads": np.array(pads),
            "image_height": image_height,
            "image_width": image_width,
        }
def unnormalize(image: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
    """Invert ImageNet normalization in place and rescale to 0-255.

    NOTE: mutates ``image`` and returns the same object.
    """
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    for c in range(image.shape[-1]):
        image[:, :, c] *= std[c]
        image[:, :, c] += mean[c]
        image[:, :, c] *= 255
    return image
def process_predictions(
    prediction: Tuple[torch.Tensor, torch.Tensor, torch.Tensor],
    original_shapes: List[Tuple[int, int]],
    input_shape: Tuple[int, int, int, int],
    pads: Optional[np.ndarray],
    confidence_threshold: float,
    nms_threshold: float,
    prior_box: torch.Tensor,
    variance: Tuple[float, float],
    keep_top_k: int,
) -> List[List[Dict[str, Union[float, List[float]]]]]:
    """Decode raw (loc, conf, landmark) outputs into per-image annotations.

    Per image: decode against priors, drop low-confidence boxes, sort, run
    NMS, keep the top ``keep_top_k``, undo padding and rescale to the
    original image size. Empty results get a ``score: -1`` sentinel entry.
    """
    loc, conf, land = prediction
    conf = F.softmax(conf, dim=-1)
    result: List[List[Dict[str, Union[List[float], float]]]] = []
    batch_size, _, image_height, image_width = input_shape
    # scale1 maps 5 landmark (x, y) pairs; scale maps box corners.
    scale1 = torch.from_numpy(np.tile([image_width, image_height], 5)).to(loc.device)
    scale = torch.from_numpy(np.tile([image_width, image_height], 2)).to(loc.device)
    for batch_id in range(batch_size):
        annotations: List[Dict[str, Union[List, float]]] = []
        boxes = decode(loc.data[batch_id], prior_box.to(loc.device), variance)
        boxes *= scale
        scores = conf[batch_id][:, 1]  # "face" class probability
        landmarks = decode_landm(land.data[batch_id], prior_box.to(land.device), variance)
        landmarks *= scale1
        # Drop detections below the confidence threshold.
        valid_index = torch.where(scores > confidence_threshold)[0]
        boxes = boxes[valid_index]
        landmarks = landmarks[valid_index]
        scores = scores[valid_index]
        # Sort by descending confidence before NMS.
        order = scores.argsort(descending=True)
        boxes = boxes[order]
        landmarks = landmarks[order]
        scores = scores[order]
        keep = nms(boxes, scores, nms_threshold)
        boxes = boxes[keep, :].int()
        if boxes.shape[0] == 0:
            # Sentinel keeps results aligned with batch positions.
            result += [[{"bbox": [], "score": -1, "landmarks": []}]]
            continue
        landmarks = landmarks[keep]
        scores = scores[keep].cpu().numpy().astype(np.float64)[:keep_top_k]
        boxes = boxes.cpu().numpy()[:keep_top_k, :]
        landmarks = landmarks.cpu().numpy()[:keep_top_k, :]
        landmarks = landmarks.reshape([-1, 2])
        if pads is None:
            pads_numpy = np.array([0, 0, 0, 0])
        else:
            pads_numpy = pads[batch_id]
        unpadded = unpad_from_size(pads_numpy, bboxes=boxes, keypoints=landmarks)
        # Map coordinates from the padded square back to original size.
        resize_coeff = max(original_shapes[batch_id]) / max(image_height, image_width)
        boxes = (unpadded["bboxes"] * resize_coeff).astype(int)
        landmarks = (unpadded["keypoints"].reshape(-1, 10) * resize_coeff).astype(int)
        for crop_id, bbox in enumerate(boxes):
            annotations += [
                {
                    "bbox": bbox.tolist(),
                    "score": float(scores[crop_id]),
                    "landmarks": landmarks[crop_id].reshape(-1, 2).tolist(),
                }
            ]
        result += [annotations]
    return result
def main() -> None:
    """Set up distributed inference (config, weights, DDP) and run predict()."""
    args = get_args()
    torch.distributed.init_process_group(backend="nccl")
    with args.config_path.open() as f:
        hparams = yaml.load(f, Loader=yaml.SafeLoader)
    # Merge CLI options into the config dict used downstream.
    hparams.update(
        {
            "json_path": args.output_path,
            "visualize": args.visualize,
            "confidence_threshold": args.confidence_threshold,
            "nms_threshold": args.nms_threshold,
            "keep_top_k": args.keep_top_k,
            "local_rank": args.local_rank,
            "prior_box": object_from_dict(hparams["prior_box"], image_size=[args.max_size, args.max_size]),
            "fp16": args.fp16,
            "folder_in_name": args.folder_in_name,
        }
    )
    if args.visualize:
        output_vis_path = args.output_path / "viz"
        output_vis_path.mkdir(parents=True, exist_ok=True)
        hparams["output_vis_path"] = output_vis_path
    output_label_path = args.output_path / "labels"
    output_label_path.mkdir(parents=True, exist_ok=True)
    hparams["output_label_path"] = output_label_path
    device = torch.device("cuda", args.local_rank)
    model = object_from_dict(hparams["model"])
    model = model.to(device)
    if args.fp16:
        model = model.half()
    # Strip the "model." prefix from checkpoint keys before loading.
    corrections: Dict[str, str] = {"model.": ""}
    state_dict = state_dict_from_disk(file_path=args.weight_path, rename_in_layers=corrections)
    model.load_state_dict(state_dict)
    model = torch.nn.parallel.DistributedDataParallel(
        model, device_ids=[args.local_rank], output_device=args.local_rank
    )
    file_paths = list(args.input_path.rglob("*.jpg"))
    dataset = InferenceDataset(file_paths, max_size=args.max_size, transform=from_dict(hparams["test_aug"]))
    # Deterministic sharding of files across ranks.
    sampler: DistributedSampler = DistributedSampler(dataset, shuffle=False)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=True,
        shuffle=False,
        drop_last=False,
        sampler=sampler,
    )
    predict(dataloader, model, hparams, device)
def predict(dataloader: torch.utils.data.DataLoader, model: nn.Module, hparams: dict, device: torch.device) -> None:
    """Run inference over ``dataloader``; write one JSON (and optionally one
    annotated JPEG) per image with detections."""
    model.eval()
    # Progress bar only on rank 0.
    if hparams["local_rank"] == 0:
        loader = tqdm(dataloader)
    else:
        loader = dataloader
    with torch.no_grad():
        for batch in loader:
            torched_images = batch["torched_image"]  # resized + padded inputs
            if hparams["fp16"]:
                torched_images = torched_images.half()
            pads = batch["pads"]
            image_paths = batch["image_path"]
            image_heights = batch["image_height"]
            image_widths = batch["image_width"]
            batch_size = torched_images.shape[0]
            image_heights = image_heights.cpu().numpy()
            image_widths = image_widths.cpu().numpy()
            original_shapes = list(zip(image_heights, image_widths))
            prediction = model(torched_images.to(device))
            output_annotations = process_predictions(
                prediction=prediction,
                original_shapes=original_shapes,
                input_shape=torched_images.shape,
                pads=pads.cpu().numpy(),
                confidence_threshold=hparams["confidence_threshold"],
                nms_threshold=hparams["nms_threshold"],
                prior_box=hparams["prior_box"],
                variance=hparams["test_parameters"]["variance"],
                keep_top_k=hparams["keep_top_k"],
            )
            for batch_id in range(batch_size):
                annotations = output_annotations[batch_id]
                # Empty bbox marks "no detections"; nothing to save.
                if not annotations[0]["bbox"]:
                    continue
                folder_name = Path(image_paths[batch_id]).parent.name
                file_name = Path(image_paths[batch_id]).name
                file_id = Path(image_paths[batch_id]).stem
                predictions = {
                    "file_name": file_name,
                    "annotations": annotations,
                    "file_path": str(Path(folder_name) / file_name),
                }
                (hparams["output_label_path"] / folder_name).mkdir(exist_ok=True, parents=True)
                result_path = hparams["output_label_path"] / folder_name / f"{file_id}.json"
                with result_path.open("w") as f:
                    json.dump(predictions, f, indent=2)
                if hparams["visualize"]:
                    # CHW -> HWC, undo normalization, strip pads, resize back.
                    normalized_image = np.transpose(torched_images[batch_id].cpu().numpy(), (1, 2, 0))
                    image = unnormalize(normalized_image)
                    unpadded = unpad_from_size(pads[batch_id].cpu().numpy(), image)
                    original_image_height = image_heights[batch_id].item()
                    original_image_width = image_widths[batch_id].item()
                    image = cv2.resize(
                        unpadded["image"].astype(np.uint8), (original_image_width, original_image_height)
                    )
                    image = vis_annotations(image, annotations=annotations)
                    (hparams["output_vis_path"] / folder_name).mkdir(exist_ok=True, parents=True)
                    result_path = hparams["output_vis_path"] / folder_name / f"{file_id}.jpg"
                    cv2.imwrite(str(result_path), cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Script entry point: each distributed worker process runs main().
if __name__ == "__main__":
    main()
| true | true |
1c32366e0506e618798d03ee9041626145d5f87a | 259,512 | py | Python | xarray/tests/test_dataset.py | pentschev/xarray | de6144c0e8c8fc316cfc412a2057af4d1a04edfd | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | xarray/tests/test_dataset.py | pentschev/xarray | de6144c0e8c8fc316cfc412a2057af4d1a04edfd | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | xarray/tests/test_dataset.py | pentschev/xarray | de6144c0e8c8fc316cfc412a2057af4d1a04edfd | [
"CC-BY-4.0",
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import pickle
import sys
import warnings
from copy import copy, deepcopy
from io import StringIO
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
from pandas.core.computation.ops import UndefinedVariableError
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.tseries.frequencies import to_offset
import xarray as xr
from xarray import (
DataArray,
Dataset,
IndexVariable,
MergeError,
Variable,
align,
backends,
broadcast,
open_dataset,
set_options,
)
from xarray.coding.cftimeindex import CFTimeIndex
from xarray.core import dtypes, indexing, utils
from xarray.core.common import duck_array_ops, full_like
from xarray.core.indexes import Index
from xarray.core.pycompat import integer_types
from xarray.core.utils import is_scalar
from . import (
InaccessibleArray,
UnexpectedDataAccess,
assert_allclose,
assert_array_equal,
assert_equal,
assert_identical,
has_cftime,
has_dask,
requires_bottleneck,
requires_cftime,
requires_dask,
requires_numbagg,
requires_numexpr,
requires_scipy,
requires_sparse,
source_ndarray,
)
# dask is an optional dependency; tests that need it are gated elsewhere
# (see the requires_dask import above), so a missing dask is silently ignored.
try:
    import dask.array as da
except ImportError:
    pass
# Promote these numpy warnings to errors for every test in this module.
pytestmark = [
    pytest.mark.filterwarnings("error:Mean of empty slice"),
    pytest.mark.filterwarnings("error:All-NaN (slice|axis) encountered"),
]
def create_test_data(seed=None, add_attrs=True):
    """Build the standard synthetic Dataset used throughout these tests."""
    rng = np.random.RandomState(seed)
    dim_sizes = {"dim1": 8, "dim2": 9, "dim3": 10}
    var_dims = {
        "var1": ["dim1", "dim2"],
        "var2": ["dim1", "dim2"],
        "var3": ["dim3", "dim1"],
    }

    ds = Dataset()
    ds["dim2"] = ("dim2", 0.5 * np.arange(dim_sizes["dim2"]))
    ds["dim3"] = ("dim3", list("abcdefghij"))
    ds["time"] = ("time", pd.date_range("2000-01-01", periods=20))

    # Sorted iteration keeps the order of RNG draws deterministic.
    for name, dims in sorted(var_dims.items()):
        shape = tuple(dim_sizes[d] for d in dims)
        ds[name] = (dims, rng.normal(size=shape))
        if add_attrs:
            ds[name].attrs = {"foo": "variable"}

    ds.coords["numbers"] = (
        "dim3",
        np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3], dtype="int64"),
    )
    ds.encoding = {"foo": "bar"}

    assert all(v.data.flags.writeable for v in ds.variables.values())
    return ds
def create_append_test_data(seed=None):
    """Build a base Dataset, a Dataset to append along "time", and a Dataset
    carrying a brand-new variable spanning both time ranges."""
    rs = np.random.RandomState(seed)
    lat = [2, 1, 0]
    lon = [0, 1, 2]
    nt1 = 3
    nt2 = 2
    time1 = pd.date_range("2000-01-01", periods=nt1)
    time2 = pd.date_range("2000-02-01", periods=nt2)
    # Variables of several dtypes to exercise append across types.
    string_var = np.array(["ae", "bc", "df"], dtype=object)
    string_var_to_append = np.array(["asdf", "asdfg"], dtype=object)
    unicode_var = ["áó", "áó", "áó"]
    datetime_var = np.array(
        ["2019-01-01", "2019-01-02", "2019-01-03"], dtype="datetime64[s]"
    )
    datetime_var_to_append = np.array(
        ["2019-01-04", "2019-01-05"], dtype="datetime64[s]"
    )
    bool_var = np.array([True, False, True], dtype=bool)
    bool_var_to_append = np.array([False, True], dtype=bool)
    ds = xr.Dataset(
        data_vars={
            "da": xr.DataArray(
                rs.rand(3, 3, nt1),
                coords=[lat, lon, time1],
                dims=["lat", "lon", "time"],
            ),
            "string_var": xr.DataArray(string_var, coords=[time1], dims=["time"]),
            "unicode_var": xr.DataArray(
                unicode_var, coords=[time1], dims=["time"]
            ).astype(np.unicode_),
            "datetime_var": xr.DataArray(datetime_var, coords=[time1], dims=["time"]),
            "bool_var": xr.DataArray(bool_var, coords=[time1], dims=["time"]),
        }
    )
    ds_to_append = xr.Dataset(
        data_vars={
            "da": xr.DataArray(
                rs.rand(3, 3, nt2),
                coords=[lat, lon, time2],
                dims=["lat", "lon", "time"],
            ),
            "string_var": xr.DataArray(
                string_var_to_append, coords=[time2], dims=["time"]
            ),
            "unicode_var": xr.DataArray(
                unicode_var[:nt2], coords=[time2], dims=["time"]
            ).astype(np.unicode_),
            "datetime_var": xr.DataArray(
                datetime_var_to_append, coords=[time2], dims=["time"]
            ),
            "bool_var": xr.DataArray(bool_var_to_append, coords=[time2], dims=["time"]),
        }
    )
    # A variable absent from ``ds`` but covering the full time span.
    ds_with_new_var = xr.Dataset(
        data_vars={
            "new_var": xr.DataArray(
                rs.rand(3, 3, nt1 + nt2),
                coords=[lat, lon, time1.append(time2)],
                dims=["lat", "lon", "time"],
            )
        }
    )
    assert all(objp.data.flags.writeable for objp in ds.variables.values())
    assert all(objp.data.flags.writeable for objp in ds_to_append.variables.values())
    return ds, ds_to_append, ds_with_new_var
def create_test_multiindex():
    """Return an empty Dataset whose "x" coordinate is a 2x2 MultiIndex."""
    levels = [["a", "b"], [1, 2]]
    midx = pd.MultiIndex.from_product(levels, names=("level_1", "level_2"))
    return Dataset({}, {"x": midx})
def create_test_stacked_array():
    """Return two 2-d products of integer "x"/"y" index coordinates."""
    x = DataArray(pd.Index(np.r_[:10], name="x"))
    y = DataArray(pd.Index(np.r_[:20], name="y"))
    product = x * y
    return product, product * y
class InaccessibleVariableDataStore(backends.InMemoryDataStore):
    """In-memory store whose non-index variables raise on data access,
    used to verify that xarray keeps variable data lazy."""

    def __init__(self):
        super().__init__()
        # Names of variables stored as indexes; these stay readable.
        self._indexvars = set()

    def store(self, variables, *args, **kwargs):
        super().store(variables, *args, **kwargs)
        for k, v in variables.items():
            if isinstance(v, IndexVariable):
                self._indexvars.add(k)

    def get_variables(self):
        # Wrap every non-index variable so touching its values raises
        # UnexpectedDataAccess (via InaccessibleArray).
        def lazy_inaccessible(k, v):
            if k in self._indexvars:
                return v
            data = indexing.LazilyIndexedArray(InaccessibleArray(v.values))
            return Variable(v.dims, data, v.attrs)

        return {k: lazy_inaccessible(k, v) for k, v in self._variables.items()}
class TestDataset:
    def test_repr(self):
        """Dataset repr: full dataset, width option, empty/scalar cases,
        and truncation of very long attributes."""
        data = create_test_data(seed=123)
        data.attrs["foo"] = "bar"
        # need to insert str dtype at runtime to handle different endianness
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (dim2: 9, dim3: 10, time: 20, dim1: 8)
            Coordinates:
            * dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0
            * dim3 (dim3) %s 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
            * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-20
            numbers (dim3) int64 0 1 2 0 0 1 1 2 2 3
            Dimensions without coordinates: dim1
            Data variables:
            var1 (dim1, dim2) float64 -1.086 0.9973 0.283 ... 0.1995 0.4684 -0.8312
            var2 (dim1, dim2) float64 1.162 -1.097 -2.123 ... 0.1302 1.267 0.3328
            var3 (dim3, dim1) float64 0.5565 -0.2121 0.4563 ... -0.2452 -0.3616
            Attributes:
            foo: bar"""
            % data["dim3"].dtype
        )
        actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
        print(actual)
        assert expected == actual
        # The display_width option bounds the repr line length.
        with set_options(display_width=100):
            max_len = max(map(len, repr(data).split("\n")))
            assert 90 < max_len < 100
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: ()
            Data variables:
            *empty*"""
        )
        actual = "\n".join(x.rstrip() for x in repr(Dataset()).split("\n"))
        print(actual)
        assert expected == actual
        # verify that ... doesn't appear for scalar coordinates
        data = Dataset({"foo": ("x", np.ones(10))}).mean()
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: ()
            Data variables:
            foo float64 1.0"""
        )
        actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
        print(actual)
        assert expected == actual
        # verify long attributes are truncated
        data = Dataset(attrs={"foo": "bar" * 1000})
        assert len(repr(data)) < 1000
    def test_repr_multiindex(self):
        """MultiIndex coordinates render their levels in the Dataset repr."""
        data = create_test_multiindex()
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (x: 4)
            Coordinates:
            * x (x) MultiIndex
            - level_1 (x) object 'a' 'a' 'b' 'b'
            - level_2 (x) int64 1 2 1 2
            Data variables:
            *empty*"""
        )
        actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
        print(actual)
        assert expected == actual
        # verify that long level names are not truncated
        mindex = pd.MultiIndex.from_product(
            [["a", "b"], [1, 2]], names=("a_quite_long_level_name", "level_2")
        )
        data = Dataset({}, {"x": mindex})
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (x: 4)
            Coordinates:
            * x (x) MultiIndex
            - a_quite_long_level_name (x) object 'a' 'a' 'b' 'b'
            - level_2 (x) int64 1 2 1 2
            Data variables:
            *empty*"""
        )
        actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
        print(actual)
        assert expected == actual
def test_repr_period_index(self):
data = create_test_data(seed=456)
data.coords["time"] = pd.period_range("2000-01-01", periods=20, freq="B")
# check that creating the repr doesn't raise an error #GH645
repr(data)
    def test_unicode_data(self):
        """Non-ASCII names/values render correctly in the repr (GH834)."""
        # regression test for GH834
        data = Dataset({"foø": ["ba®"]}, attrs={"å": "∑"})
        repr(data)  # should not raise
        # The dtype string encodes endianness, so build it at runtime.
        byteorder = "<" if sys.byteorder == "little" else ">"
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (foø: 1)
            Coordinates:
            * foø (foø) %cU3 %r
            Data variables:
            *empty*
            Attributes:
            å: ∑"""
            % (byteorder, "ba®")
        )
        actual = str(data)
        assert expected == actual
    def test_repr_nep18(self):
        """A duck array implementing __array_function__ (NEP 18) shows its
        own repr inside the Dataset repr instead of being coerced."""

        class Array:
            def __init__(self):
                self.shape = (2,)
                self.dtype = np.dtype(np.float64)

            def __array_function__(self, *args, **kwargs):
                pass

            def __repr__(self):
                return "Custom\nArray"

        dataset = Dataset({"foo": ("x", Array())})
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (x: 2)
            Dimensions without coordinates: x
            Data variables:
            foo (x) float64 Custom Array"""
        )
        assert expected == repr(dataset)
    def test_info(self):
        """Dataset.info writes a netCDF-CDL-style summary to the buffer."""
        ds = create_test_data(seed=123)
        ds = ds.drop_vars("dim3")  # string type prints differently in PY2 vs PY3
        ds.attrs["unicode_attr"] = "ba®"
        ds.attrs["string_attr"] = "bar"
        buf = StringIO()
        ds.info(buf=buf)
        expected = dedent(
            """\
            xarray.Dataset {
            dimensions:
            \tdim2 = 9 ;
            \ttime = 20 ;
            \tdim1 = 8 ;
            \tdim3 = 10 ;
            variables:
            \tfloat64 dim2(dim2) ;
            \tdatetime64[ns] time(time) ;
            \tfloat64 var1(dim1, dim2) ;
            \t\tvar1:foo = variable ;
            \tfloat64 var2(dim1, dim2) ;
            \t\tvar2:foo = variable ;
            \tfloat64 var3(dim3, dim1) ;
            \t\tvar3:foo = variable ;
            \tint64 numbers(dim3) ;
            // global attributes:
            \t:unicode_attr = ba® ;
            \t:string_attr = bar ;
            }"""
        )
        actual = buf.getvalue()
        assert expected == actual
        buf.close()
def test_constructor(self):
x1 = ("x", 2 * np.arange(100))
x2 = ("x", np.arange(1000))
z = (["x", "y"], np.arange(1000).reshape(100, 10))
with pytest.raises(ValueError, match=r"conflicting sizes"):
Dataset({"a": x1, "b": x2})
with pytest.raises(ValueError, match=r"disallows such variables"):
Dataset({"a": x1, "x": z})
with pytest.raises(TypeError, match=r"tuple of form"):
Dataset({"x": (1, 2, 3, 4, 5, 6, 7)})
with pytest.raises(ValueError, match=r"already exists as a scalar"):
Dataset({"x": 0, "y": ("x", [1, 2, 3])})
# verify handling of DataArrays
expected = Dataset({"x": x1, "z": z})
actual = Dataset({"z": expected["z"]})
assert_identical(expected, actual)
def test_constructor_invalid_dims(self):
# regression for GH1120
with pytest.raises(MergeError):
Dataset(
data_vars=dict(v=("y", [1, 2, 3, 4])),
coords=dict(y=DataArray([0.1, 0.2, 0.3, 0.4], dims="x")),
)
def test_constructor_1d(self):
expected = Dataset({"x": (["x"], 5.0 + np.arange(5))})
actual = Dataset({"x": 5.0 + np.arange(5)})
assert_identical(expected, actual)
actual = Dataset({"x": [5, 6, 7, 8, 9]})
assert_identical(expected, actual)
def test_constructor_0d(self):
expected = Dataset({"x": ([], 1)})
for arg in [1, np.array(1), expected["x"]]:
actual = Dataset({"x": arg})
assert_identical(expected, actual)
class Arbitrary:
pass
d = pd.Timestamp("2000-01-01T12")
args = [
True,
None,
3.4,
np.nan,
"hello",
b"raw",
np.datetime64("2000-01-01"),
d,
d.to_pydatetime(),
Arbitrary(),
]
for arg in args:
print(arg)
expected = Dataset({"x": ([], arg)})
actual = Dataset({"x": arg})
assert_identical(expected, actual)
def test_constructor_deprecated(self):
with pytest.raises(ValueError, match=r"DataArray dimensions"):
DataArray([1, 2, 3], coords={"x": [0, 1, 2]})
    def test_constructor_auto_align(self):
        """Misaligned DataArrays are outer-joined by the constructor."""
        a = DataArray([1, 2], [("x", [0, 1])])
        b = DataArray([3, 4], [("x", [1, 2])])
        # verify align uses outer join
        expected = Dataset(
            {"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]}
        )
        actual = Dataset({"a": a, "b": b})
        assert_identical(expected, actual)
        # regression test for GH346
        assert isinstance(actual.variables["x"], IndexVariable)
        # variable with different dimensions
        c = ("y", [3, 4])
        expected2 = expected.merge({"c": c})
        actual = Dataset({"a": a, "b": b, "c": c})
        assert_identical(expected2, actual)
        # variable that is only aligned against the aligned variables
        d = ("x", [3, 2, 1])
        expected3 = expected.merge({"d": d})
        actual = Dataset({"a": a, "b": b, "d": d})
        assert_identical(expected3, actual)
        # A raw tuple of the wrong length cannot be aligned and must raise.
        e = ("x", [0, 0])
        with pytest.raises(ValueError, match=r"conflicting sizes"):
            Dataset({"a": a, "b": b, "e": e})
    def test_constructor_pandas_sequence(self):
        """Datasets built from pandas objects match the originals, including
        after the pandas index has been reordered (alignment kicks in)."""
        ds = self.make_example_math_dataset()
        pandas_objs = {
            var_name: ds[var_name].to_pandas() for var_name in ["foo", "bar"]
        }
        ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)
        # "x" is re-added from coords; drop it to compare data vars only.
        del ds_based_on_pandas["x"]
        assert_equal(ds, ds_based_on_pandas)
        # reindex pandas obj, check align works
        rearranged_index = reversed(pandas_objs["foo"].index)
        pandas_objs["foo"] = pandas_objs["foo"].reindex(rearranged_index)
        ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)
        del ds_based_on_pandas["x"]
        assert_equal(ds, ds_based_on_pandas)
def test_constructor_pandas_single(self):
das = [
DataArray(np.random.rand(4), dims=["a"]), # series
DataArray(np.random.rand(4, 3), dims=["a", "b"]), # df
]
for a in das:
pandas_obj = a.to_pandas()
ds_based_on_pandas = Dataset(pandas_obj)
for dim in ds_based_on_pandas.data_vars:
assert_array_equal(ds_based_on_pandas[dim], pandas_obj[dim])
    def test_constructor_compat(self):
        """Compatible coords merge regardless of insertion order; truly
        conflicting coord values raise MergeError."""
        data = {"x": DataArray(0, coords={"y": 1}), "y": ("z", [1, 1, 1])}
        expected = Dataset({"x": 0}, {"y": ("z", [1, 1, 1])})
        actual = Dataset(data)
        assert_identical(expected, actual)
        # Same inputs, reversed dict order — result must be identical.
        data = {"y": ("z", [1, 1, 1]), "x": DataArray(0, coords={"y": 1})}
        actual = Dataset(data)
        assert_identical(expected, actual)
        original = Dataset(
            {"a": (("x", "y"), np.ones((2, 3)))},
            {"c": (("x", "y"), np.zeros((2, 3))), "x": [0, 1]},
        )
        expected = Dataset(
            {"a": ("x", np.ones(2)), "b": ("y", np.ones(3))},
            {"c": (("x", "y"), np.zeros((2, 3))), "x": [0, 1]},
        )
        actual = Dataset(
            {"a": original["a"][:, 0], "b": original["a"][0].drop_vars("x")}
        )
        assert_identical(expected, actual)
        # Scalar coord y=3 conflicts with the y variable's values.
        data = {"x": DataArray(0, coords={"y": 3}), "y": ("z", [1, 1, 1])}
        with pytest.raises(MergeError):
            Dataset(data)
        data = {"x": DataArray(0, coords={"y": 1}), "y": [1, 1]}
        actual = Dataset(data)
        expected = Dataset({"x": 0}, {"y": [1, 1]})
        assert_identical(expected, actual)
def test_constructor_with_coords(self):
with pytest.raises(ValueError, match=r"found in both data_vars and"):
Dataset({"a": ("x", [1])}, {"a": ("x", [1])})
ds = Dataset({}, {"a": ("x", [1])})
assert not ds.data_vars
assert list(ds.coords.keys()) == ["a"]
mindex = pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=("level_1", "level_2")
)
with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
Dataset({}, {"x": mindex, "y": mindex})
Dataset({}, {"x": mindex, "level_1": range(4)})
    def test_properties(self):
        """Smoke-test basic Dataset properties: dims, data_vars, coords, indexes."""
        ds = create_test_data()
        assert ds.dims == {"dim1": 8, "dim2": 9, "dim3": 10, "time": 20}
        assert ds.sizes == ds.dims
        # These exact types aren't public API, but this makes sure we don't
        # change them inadvertently:
        assert isinstance(ds.dims, utils.Frozen)
        assert isinstance(ds.dims.mapping, dict)
        assert type(ds.dims.mapping) is dict
        # iterating/len on the Dataset mirrors data_vars, not all variables
        assert list(ds) == list(ds.data_vars)
        assert list(ds.keys()) == list(ds.data_vars)
        assert "aasldfjalskdfj" not in ds.variables
        assert "dim1" in repr(ds.variables)
        assert len(ds) == 3
        assert bool(ds)
        assert list(ds.data_vars) == ["var1", "var2", "var3"]
        assert list(ds.data_vars.keys()) == ["var1", "var2", "var3"]
        assert "var1" in ds.data_vars
        assert "dim1" not in ds.data_vars
        assert "numbers" not in ds.data_vars
        assert len(ds.data_vars) == 3
        # xindexes holds xarray Index objects; indexes holds pandas ones
        assert set(ds.xindexes) == {"dim2", "dim3", "time"}
        assert len(ds.xindexes) == 3
        assert "dim2" in repr(ds.xindexes)
        assert all([isinstance(idx, Index) for idx in ds.xindexes.values()])
        assert set(ds.indexes) == {"dim2", "dim3", "time"}
        assert len(ds.indexes) == 3
        assert "dim2" in repr(ds.indexes)
        assert all([isinstance(idx, pd.Index) for idx in ds.indexes.values()])
        assert list(ds.coords) == ["dim2", "dim3", "time", "numbers"]
        assert "dim2" in ds.coords
        assert "numbers" in ds.coords
        assert "var1" not in ds.coords
        assert "dim1" not in ds.coords
        assert len(ds.coords) == 4
        # one int64 scalar (8 bytes) + two float32 values (8 bytes) = 16
        assert Dataset({"x": np.int64(1), "y": np.float32([1, 2])}).nbytes == 16
def test_asarray(self):
ds = Dataset({"x": 0})
with pytest.raises(TypeError, match=r"cannot directly convert"):
np.asarray(ds)
def test_get_index(self):
ds = Dataset({"foo": (("x", "y"), np.zeros((2, 3)))}, coords={"x": ["a", "b"]})
assert ds.get_index("x").equals(pd.Index(["a", "b"]))
assert ds.get_index("y").equals(pd.Index([0, 1, 2]))
with pytest.raises(KeyError):
ds.get_index("z")
    def test_attr_access(self):
        """Attribute access resolves variables first, then Dataset attrs."""
        ds = Dataset(
            {"tmin": ("x", [42], {"units": "Celcius"})}, attrs={"title": "My test data"}
        )
        assert_identical(ds.tmin, ds["tmin"])
        assert_identical(ds.tmin.x, ds.x)
        assert ds.title == ds.attrs["title"]
        assert ds.tmin.units == ds["tmin"].attrs["units"]
        # dir() should advertise variables and attrs for tab-completion
        assert {"tmin", "title"} <= set(dir(ds))
        assert "units" in set(dir(ds.tmin))
        # should defer to variable of same name
        ds.attrs["tmin"] = -999
        assert ds.attrs["tmin"] == -999
        assert_identical(ds.tmin, ds["tmin"])
def test_variable(self):
a = Dataset()
d = np.random.random((10, 3))
a["foo"] = (("time", "x"), d)
assert "foo" in a.variables
assert "foo" in a
a["bar"] = (("time", "x"), d)
# order of creation is preserved
assert list(a.variables) == ["foo", "bar"]
assert_array_equal(a["foo"].values, d)
# try to add variable with dim (10,3) with data that's (3,10)
with pytest.raises(ValueError):
a["qux"] = (("time", "x"), d.T)
    def test_modify_inplace(self):
        """In-place assignment of dimension coordinates and size-conflict checks."""
        a = Dataset()
        vec = np.random.random((10,))
        attributes = {"foo": "bar"}
        a["x"] = ("x", vec, attributes)
        assert "x" in a.coords
        assert isinstance(a.coords["x"].to_index(), pd.Index)
        assert_identical(a.coords["x"].variable, a.variables["x"])
        b = Dataset()
        b["x"] = ("x", vec, attributes)
        assert_identical(a["x"], b["x"])
        assert a.dims == b.dims
        # this should work
        a["x"] = ("x", vec[:5])
        a["z"] = ("x", np.arange(5))
        with pytest.raises(ValueError):
            # now it shouldn't, since there is a conflicting length
            a["x"] = ("x", vec[:4])
        arr = np.random.random((10, 1))
        scal = np.array(0)
        with pytest.raises(ValueError):
            # 2-d data cannot back the single dimension "y"
            a["y"] = ("y", arr)
        with pytest.raises(ValueError):
            # 0-d data cannot back the 1-d dimension "y"
            a["y"] = ("y", scal)
        assert "y" not in a.dims
    def test_coords_properties(self):
        """Coords mapping: ordering, membership, KeyError behavior, repr, dims."""
        # use int64 for repr consistency on windows
        data = Dataset(
            {
                "x": ("x", np.array([-1, -2], "int64")),
                "y": ("y", np.array([0, 1, 2], "int64")),
                "foo": (["x", "y"], np.random.randn(2, 3)),
            },
            {"a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10)},
        )
        assert 4 == len(data.coords)
        # dimension coordinates list first, then non-dimension ones
        assert ["x", "y", "a", "b"] == list(data.coords)
        assert_identical(data.coords["x"].variable, data["x"].variable)
        assert_identical(data.coords["y"].variable, data["y"].variable)
        assert "x" in data.coords
        assert "a" in data.coords
        assert 0 not in data.coords
        assert "foo" not in data.coords
        with pytest.raises(KeyError):
            data.coords["foo"]
        with pytest.raises(KeyError):
            data.coords[0]
        expected = dedent(
            """\
        Coordinates:
          * x        (x) int64 -1 -2
          * y        (y) int64 0 1 2
            a        (x) int64 4 5
            b        int64 -10"""
        )
        actual = repr(data.coords)
        assert expected == actual
        assert {"x": 2, "y": 3} == data.coords.dims
    def test_coords_modify(self):
        """Mutating the coords mapping: replace, add, delete, update."""
        data = Dataset(
            {
                "x": ("x", [-1, -2]),
                "y": ("y", [0, 1, 2]),
                "foo": (["x", "y"], np.random.randn(2, 3)),
            },
            {"a": ("x", [4, 5]), "b": -10},
        )
        actual = data.copy(deep=True)
        actual.coords["x"] = ("x", ["a", "b"])
        assert_array_equal(actual["x"], ["a", "b"])
        actual = data.copy(deep=True)
        actual.coords["z"] = ("z", ["a", "b"])
        assert_array_equal(actual["z"], ["a", "b"])
        actual = data.copy(deep=True)
        with pytest.raises(ValueError, match=r"conflicting sizes"):
            actual.coords["x"] = ("x", [-1])
        assert_identical(actual, data)  # should not be modified
        actual = data.copy()
        del actual.coords["b"]
        expected = data.reset_coords("b", drop=True)
        assert_identical(expected, actual)
        with pytest.raises(KeyError):
            del data.coords["not_found"]
        with pytest.raises(KeyError):
            # "foo" is a data variable, not a coordinate
            del data.coords["foo"]
        actual = data.copy(deep=True)
        actual.coords.update({"c": 11})
        expected = data.merge({"c": 11}).set_coords("c")
        assert_identical(expected, actual)
        # regression test for GH3746
        del actual.coords["x"]
        assert "x" not in actual.xindexes
def test_update_index(self):
actual = Dataset(coords={"x": [1, 2, 3]})
actual["x"] = ["a", "b", "c"]
assert actual.xindexes["x"].equals(pd.Index(["a", "b", "c"]))
def test_coords_setitem_with_new_dimension(self):
actual = Dataset()
actual.coords["foo"] = ("x", [1, 2, 3])
expected = Dataset(coords={"foo": ("x", [1, 2, 3])})
assert_identical(expected, actual)
def test_coords_setitem_multiindex(self):
data = create_test_multiindex()
with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
data.coords["level_1"] = range(4)
    def test_coords_set(self):
        """Round-trip variables <-> coordinates via set_coords/reset_coords."""
        one_coord = Dataset({"x": ("x", [0]), "yy": ("x", [1]), "zzz": ("x", [2])})
        two_coords = Dataset({"zzz": ("x", [2])}, {"x": ("x", [0]), "yy": ("x", [1])})
        all_coords = Dataset(
            coords={"x": ("x", [0]), "yy": ("x", [1]), "zzz": ("x", [2])}
        )
        actual = one_coord.set_coords("x")
        assert_identical(one_coord, actual)
        actual = one_coord.set_coords(["x"])
        assert_identical(one_coord, actual)
        actual = one_coord.set_coords("yy")
        assert_identical(two_coords, actual)
        actual = one_coord.set_coords(["yy", "zzz"])
        assert_identical(all_coords, actual)
        # reset_coords() with no args demotes every non-index coordinate
        actual = one_coord.reset_coords()
        assert_identical(one_coord, actual)
        actual = two_coords.reset_coords()
        assert_identical(one_coord, actual)
        actual = all_coords.reset_coords()
        assert_identical(one_coord, actual)
        actual = all_coords.reset_coords(["yy", "zzz"])
        assert_identical(one_coord, actual)
        actual = all_coords.reset_coords("zzz")
        assert_identical(two_coords, actual)
        # dimension (index) coordinates cannot be reset
        with pytest.raises(ValueError, match=r"cannot remove index"):
            one_coord.reset_coords("x")
        actual = all_coords.reset_coords("zzz", drop=True)
        expected = all_coords.drop_vars("zzz")
        assert_identical(expected, actual)
        expected = two_coords.drop_vars("zzz")
        assert_identical(expected, actual)
def test_coords_to_dataset(self):
orig = Dataset({"foo": ("y", [-1, 0, 1])}, {"x": 10, "y": [2, 3, 4]})
expected = Dataset(coords={"x": 10, "y": [2, 3, 4]})
actual = orig.coords.to_dataset()
assert_identical(expected, actual)
    def test_coords_merge(self):
        """Merging Coordinates: union, index conflicts, dropped non-index coords."""
        orig_coords = Dataset(coords={"a": ("x", [1, 2]), "x": [0, 1]}).coords
        other_coords = Dataset(coords={"b": ("x", ["a", "b"]), "x": [0, 1]}).coords
        expected = Dataset(
            coords={"a": ("x", [1, 2]), "b": ("x", ["a", "b"]), "x": [0, 1]}
        )
        actual = orig_coords.merge(other_coords)
        assert_identical(expected, actual)
        actual = other_coords.merge(orig_coords)
        assert_identical(expected, actual)
        # incompatible "x" indexes must raise, whatever their length
        other_coords = Dataset(coords={"x": ("x", ["a"])}).coords
        with pytest.raises(MergeError):
            orig_coords.merge(other_coords)
        other_coords = Dataset(coords={"x": ("x", ["a", "b"])}).coords
        with pytest.raises(MergeError):
            orig_coords.merge(other_coords)
        other_coords = Dataset(coords={"x": ("x", ["a", "b", "c"])}).coords
        with pytest.raises(MergeError):
            orig_coords.merge(other_coords)
        # conflicting non-index coordinate "a" is dropped from the result
        other_coords = Dataset(coords={"a": ("x", [8, 9])}).coords
        expected = Dataset(coords={"x": range(2)})
        actual = orig_coords.merge(other_coords)
        assert_identical(expected, actual)
        actual = other_coords.merge(orig_coords)
        assert_identical(expected, actual)
        # a NaN scalar coordinate merges without conflict in either direction
        other_coords = Dataset(coords={"x": np.nan}).coords
        actual = orig_coords.merge(other_coords)
        assert_identical(orig_coords.to_dataset(), actual)
        actual = other_coords.merge(orig_coords)
        assert_identical(orig_coords.to_dataset(), actual)
    def test_coords_merge_mismatched_shape(self):
        """Merging coords of different shapes broadcasts or keeps the original."""
        orig_coords = Dataset(coords={"a": ("x", [1, 1])}).coords
        other_coords = Dataset(coords={"a": 1}).coords
        expected = orig_coords.to_dataset()
        actual = orig_coords.merge(other_coords)
        assert_identical(expected, actual)
        # merging along a new dimension broadcasts "a" to 2-d
        other_coords = Dataset(coords={"a": ("y", [1])}).coords
        expected = Dataset(coords={"a": (["x", "y"], [[1], [1]])})
        actual = orig_coords.merge(other_coords)
        assert_identical(expected, actual)
        # merge order determines the resulting dimension order
        actual = other_coords.merge(orig_coords)
        assert_identical(expected.transpose(), actual)
        # NaN values are treated as compatible for merging
        orig_coords = Dataset(coords={"a": ("x", [np.nan])}).coords
        other_coords = Dataset(coords={"a": np.nan}).coords
        expected = orig_coords.to_dataset()
        actual = orig_coords.merge(other_coords)
        assert_identical(expected, actual)
    def test_data_vars_properties(self):
        """data_vars mapping excludes coordinates and has a stable repr."""
        ds = Dataset()
        ds["foo"] = (("x",), [1.0])
        ds["bar"] = 2.0
        assert set(ds.data_vars) == {"foo", "bar"}
        assert "foo" in ds.data_vars
        assert "x" not in ds.data_vars
        assert_identical(ds["foo"], ds.data_vars["foo"])
        expected = dedent(
            """\
        Data variables:
            foo      (x) float64 1.0
            bar      float64 2.0"""
        )
        actual = repr(ds.data_vars)
        assert expected == actual
    def test_equals_and_identical(self):
        """equals() ignores attrs; identical() also compares attrs and names."""
        data = create_test_data(seed=42)
        assert data.equals(data)
        assert data.identical(data)
        data2 = create_test_data(seed=42)
        data2.attrs["foobar"] = "baz"
        # attrs differences break identical() but not equals()
        assert data.equals(data2)
        assert not data.identical(data2)
        del data2["time"]
        assert not data.equals(data2)
        # a None variable name is still handled consistently
        data = create_test_data(seed=42).rename({"var1": None})
        assert data.equals(data)
        assert data.identical(data)
        data2 = data.reset_coords()
        assert not data2.equals(data)
        assert not data2.identical(data)
def test_equals_failures(self):
data = create_test_data()
assert not data.equals("foo")
assert not data.identical(123)
assert not data.broadcast_equals({1: 2})
def test_broadcast_equals(self):
data1 = Dataset(coords={"x": 0})
data2 = Dataset(coords={"x": [0]})
assert data1.broadcast_equals(data2)
assert not data1.equals(data2)
assert not data1.identical(data2)
def test_attrs(self):
data = create_test_data(seed=42)
data.attrs = {"foobar": "baz"}
assert data.attrs["foobar"], "baz"
assert isinstance(data.attrs, dict)
    @requires_dask
    def test_chunk(self):
        """chunk() converts data variables (not dim coords) to dask arrays."""
        data = create_test_data()
        for v in data.variables.values():
            assert isinstance(v.data, np.ndarray)
        assert data.chunks == {}
        reblocked = data.chunk()
        for k, v in reblocked.variables.items():
            if k in reblocked.dims:
                # dimension coordinates stay as numpy arrays
                assert isinstance(v.data, np.ndarray)
            else:
                assert isinstance(v.data, da.Array)
        expected_chunks = {"dim1": (8,), "dim2": (9,), "dim3": (10,)}
        assert reblocked.chunks == expected_chunks
        def get_dask_names(ds):
            # variable name -> dask graph key name
            return {k: v.data.name for k, v in ds.items()}
        orig_dask_names = get_dask_names(reblocked)
        reblocked = data.chunk({"time": 5, "dim1": 5, "dim2": 5, "dim3": 5})
        # time is not a dim in any of the data_vars, so it
        # doesn't get chunked
        expected_chunks = {"dim1": (5, 3), "dim2": (5, 4), "dim3": (5, 5)}
        assert reblocked.chunks == expected_chunks
        # make sure dask names change when rechunking by different amounts
        # regression test for GH3350
        new_dask_names = get_dask_names(reblocked)
        for k, v in new_dask_names.items():
            assert v != orig_dask_names[k]
        reblocked = data.chunk(expected_chunks)
        assert reblocked.chunks == expected_chunks
        # reblock on already blocked data
        orig_dask_names = get_dask_names(reblocked)
        reblocked = reblocked.chunk(expected_chunks)
        new_dask_names = get_dask_names(reblocked)
        assert reblocked.chunks == expected_chunks
        assert_identical(reblocked, data)
        # rechunking with same chunk sizes should not change names
        for k, v in new_dask_names.items():
            assert v == orig_dask_names[k]
        with pytest.raises(ValueError, match=r"some chunks"):
            data.chunk({"foo": 10})
    @requires_dask
    def test_dask_is_lazy(self):
        """Lazy (dask-backed) operations must not access the underlying store."""
        store = InaccessibleVariableDataStore()
        create_test_data().dump_to_store(store)
        ds = open_dataset(store).chunk()
        # load()/.values force computation and must hit the guard
        with pytest.raises(UnexpectedDataAccess):
            ds.load()
        with pytest.raises(UnexpectedDataAccess):
            ds["var1"].values
        # these should not raise UnexpectedDataAccess:
        ds.var1.data
        ds.isel(time=10)
        ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
        ds.transpose()
        ds.mean()
        ds.fillna(0)
        ds.rename({"dim1": "foobar"})
        ds.set_coords("var1")
        ds.drop_vars("var1")
    def test_isel(self):
        """Positional indexing with slices/ints plus missing_dims handling."""
        data = create_test_data()
        slicers = {"dim1": slice(None, None, 2), "dim2": slice(0, 2)}
        ret = data.isel(**slicers)
        # Verify that only the specified dimension was altered
        assert list(data.dims) == list(ret.dims)
        for d in data.dims:
            if d in slicers:
                assert ret.dims[d] == np.arange(data.dims[d])[slicers[d]].size
            else:
                assert data.dims[d] == ret.dims[d]
        # Verify that the data is what we expect
        for v in data.variables:
            assert data[v].dims == ret[v].dims
            assert data[v].attrs == ret[v].attrs
            slice_list = [slice(None)] * data[v].values.ndim
            for d, s in slicers.items():
                if d in data[v].dims:
                    inds = np.nonzero(np.array(data[v].dims) == d)[0]
                    for ind in inds:
                        slice_list[ind] = s
            expected = data[v].values[tuple(slice_list)]
            actual = ret[v].values
            np.testing.assert_array_equal(expected, actual)
        with pytest.raises(ValueError):
            data.isel(not_a_dim=slice(0, 2))
        with pytest.raises(
            ValueError,
            match=r"Dimensions {'not_a_dim'} do not exist. Expected "
            r"one or more of "
            r"[\w\W]*'dim\d'[\w\W]*'dim\d'[\w\W]*'time'[\w\W]*'dim\d'[\w\W]*",
        ):
            data.isel(not_a_dim=slice(0, 2))
        # missing_dims="warn" downgrades the error to a UserWarning
        with pytest.warns(
            UserWarning,
            match=r"Dimensions {'not_a_dim'} do not exist. "
            r"Expected one or more of "
            r"[\w\W]*'dim\d'[\w\W]*'dim\d'[\w\W]*'time'[\w\W]*'dim\d'[\w\W]*",
        ):
            data.isel(not_a_dim=slice(0, 2), missing_dims="warn")
        assert_identical(data, data.isel(not_a_dim=slice(0, 2), missing_dims="ignore"))
        # integer (scalar) indexing drops the indexed dimension
        ret = data.isel(dim1=0)
        assert {"time": 20, "dim2": 9, "dim3": 10} == ret.dims
        assert set(data.data_vars) == set(ret.data_vars)
        assert set(data.coords) == set(ret.coords)
        assert set(data.xindexes) == set(ret.xindexes)
        ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))
        assert {"time": 2, "dim2": 5, "dim3": 10} == ret.dims
        assert set(data.data_vars) == set(ret.data_vars)
        assert set(data.coords) == set(ret.coords)
        assert set(data.xindexes) == set(ret.xindexes)
        ret = data.isel(time=0, dim1=0, dim2=slice(5))
        assert {"dim2": 5, "dim3": 10} == ret.dims
        assert set(data.data_vars) == set(ret.data_vars)
        assert set(data.coords) == set(ret.coords)
        assert set(data.xindexes) == set(list(ret.xindexes) + ["time"])
    def test_isel_fancy(self):
        """Vectorized (pointwise) isel with array, Variable and DataArray indexers."""
        # isel with fancy indexing.
        data = create_test_data()
        pdim1 = [1, 2, 3]
        pdim2 = [4, 5, 1]
        pdim3 = [1, 2, 3]
        actual = data.isel(
            dim1=(("test_coord",), pdim1),
            dim2=(("test_coord",), pdim2),
            dim3=(("test_coord",), pdim3),
        )
        assert "test_coord" in actual.dims
        assert actual.coords["test_coord"].shape == (len(pdim1),)
        # Should work with DataArray
        actual = data.isel(
            dim1=DataArray(pdim1, dims="test_coord"),
            dim2=(("test_coord",), pdim2),
            dim3=(("test_coord",), pdim3),
        )
        assert "test_coord" in actual.dims
        assert actual.coords["test_coord"].shape == (len(pdim1),)
        expected = data.isel(
            dim1=(("test_coord",), pdim1),
            dim2=(("test_coord",), pdim2),
            dim3=(("test_coord",), pdim3),
        )
        assert_identical(actual, expected)
        # DataArray with coordinate
        idx1 = DataArray(pdim1, dims=["a"], coords={"a": np.random.randn(3)})
        idx2 = DataArray(pdim2, dims=["b"], coords={"b": np.random.randn(3)})
        idx3 = DataArray(pdim3, dims=["c"], coords={"c": np.random.randn(3)})
        # Should work with DataArray
        actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3)
        assert "a" in actual.dims
        assert "b" in actual.dims
        assert "c" in actual.dims
        assert "time" in actual.coords
        assert "dim2" in actual.coords
        assert "dim3" in actual.coords
        expected = data.isel(
            dim1=(("a",), pdim1), dim2=(("b",), pdim2), dim3=(("c",), pdim3)
        )
        expected = expected.assign_coords(a=idx1["a"], b=idx2["b"], c=idx3["c"])
        assert_identical(actual, expected)
        idx1 = DataArray(pdim1, dims=["a"], coords={"a": np.random.randn(3)})
        idx2 = DataArray(pdim2, dims=["a"])
        idx3 = DataArray(pdim3, dims=["a"])
        # Should work with DataArray
        actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3)
        assert "a" in actual.dims
        assert "time" in actual.coords
        assert "dim2" in actual.coords
        assert "dim3" in actual.coords
        expected = data.isel(
            dim1=(("a",), pdim1), dim2=(("a",), pdim2), dim3=(("a",), pdim3)
        )
        expected = expected.assign_coords(a=idx1["a"])
        assert_identical(actual, expected)
        actual = data.isel(dim1=(("points",), pdim1), dim2=(("points",), pdim2))
        assert "points" in actual.dims
        assert "dim3" in actual.dims
        assert "dim3" not in actual.data_vars
        np.testing.assert_array_equal(data["dim2"][pdim2], actual["dim2"])
        # test that the order of the indexers doesn't matter
        assert_identical(
            data.isel(dim1=(("points",), pdim1), dim2=(("points",), pdim2)),
            data.isel(dim2=(("points",), pdim2), dim1=(("points",), pdim1)),
        )
        # make sure we're raising errors in the right places
        with pytest.raises(IndexError, match=r"Dimensions of indexers mismatch"):
            data.isel(dim1=(("points",), [1, 2]), dim2=(("points",), [1, 2, 3]))
        with pytest.raises(TypeError, match=r"cannot use a Dataset"):
            data.isel(dim1=Dataset({"points": [1, 2]}))
        # test to be sure we keep around variables that were not indexed
        ds = Dataset({"x": [1, 2, 3, 4], "y": 0})
        actual = ds.isel(x=(("points",), [0, 1, 2]))
        assert_identical(ds["y"], actual["y"])
        # tests using index or DataArray as indexers
        stations = Dataset()
        stations["station"] = (("station",), ["A", "B", "C"])
        stations["dim1s"] = (("station",), [1, 2, 3])
        stations["dim2s"] = (("station",), [4, 5, 1])
        actual = data.isel(dim1=stations["dim1s"], dim2=stations["dim2s"])
        assert "station" in actual.coords
        assert "station" in actual.dims
        assert_identical(actual["station"].drop_vars(["dim2"]), stations["station"])
        # indexers carrying conflicting coordinate values must raise
        with pytest.raises(ValueError, match=r"conflicting values for "):
            data.isel(
                dim1=DataArray(
                    [0, 1, 2], dims="station", coords={"station": [0, 1, 2]}
                ),
                dim2=DataArray(
                    [0, 1, 2], dims="station", coords={"station": [0, 1, 3]}
                ),
            )
        # multi-dimensional selection
        stations = Dataset()
        stations["a"] = (("a",), ["A", "B", "C"])
        stations["b"] = (("b",), [0, 1])
        stations["dim1s"] = (("a", "b"), [[1, 2], [2, 3], [3, 4]])
        stations["dim2s"] = (("a",), [4, 5, 1])
        actual = data.isel(dim1=stations["dim1s"], dim2=stations["dim2s"])
        assert "a" in actual.coords
        assert "a" in actual.dims
        assert "b" in actual.coords
        assert "b" in actual.dims
        assert "dim2" in actual.coords
        assert "a" in actual["dim2"].dims
        assert_identical(actual["a"].drop_vars(["dim2"]), stations["a"])
        assert_identical(actual["b"], stations["b"])
        # cross-check against direct Variable-level fancy indexing
        expected_var1 = data["var1"].variable[
            stations["dim1s"].variable, stations["dim2s"].variable
        ]
        expected_var2 = data["var2"].variable[
            stations["dim1s"].variable, stations["dim2s"].variable
        ]
        expected_var3 = data["var3"].variable[slice(None), stations["dim1s"].variable]
        assert_equal(actual["a"].drop_vars("dim2"), stations["a"])
        assert_array_equal(actual["var1"], expected_var1)
        assert_array_equal(actual["var2"], expected_var2)
        assert_array_equal(actual["var3"], expected_var3)
    def test_isel_dataarray(self):
        """Indexing by DataArray: coordinate propagation and conflict errors."""
        data = create_test_data()
        # indexing with DataArray with same-name coordinates.
        indexing_da = DataArray(
            np.arange(1, 4), dims=["dim1"], coords={"dim1": np.random.randn(3)}
        )
        actual = data.isel(dim1=indexing_da)
        assert_identical(indexing_da["dim1"], actual["dim1"])
        assert_identical(data["dim2"], actual["dim2"])
        # Conflict in the dimension coordinate
        indexing_da = DataArray(
            np.arange(1, 4), dims=["dim2"], coords={"dim2": np.random.randn(3)}
        )
        with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
            actual = data.isel(dim2=indexing_da)
        # Also the case for DataArray
        with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
            actual = data["var2"].isel(dim2=indexing_da)
        with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
            data["dim2"].isel(dim2=indexing_da)
        # same name coordinate which does not conflict
        indexing_da = DataArray(
            np.arange(1, 4), dims=["dim2"], coords={"dim2": data["dim2"].values[1:4]}
        )
        actual = data.isel(dim2=indexing_da)
        assert_identical(actual["dim2"], indexing_da["dim2"])
        # Silently drop conflicted (non-dimensional) coordinate of indexer
        indexing_da = DataArray(
            np.arange(1, 4),
            dims=["dim2"],
            coords={
                "dim2": data["dim2"].values[1:4],
                "numbers": ("dim2", np.arange(2, 5)),
            },
        )
        actual = data.isel(dim2=indexing_da)
        assert_identical(actual["numbers"], data["numbers"])
        # boolean data array with coordinate with the same name
        indexing_da = DataArray(
            np.arange(1, 10), dims=["dim2"], coords={"dim2": data["dim2"].values}
        )
        indexing_da = indexing_da < 3
        actual = data.isel(dim2=indexing_da)
        assert_identical(actual["dim2"], data["dim2"][:2])
        # boolean data array with non-dimension coordinate
        indexing_da = DataArray(
            np.arange(1, 10),
            dims=["dim2"],
            coords={
                "dim2": data["dim2"].values,
                "non_dim": (("dim2",), np.random.randn(9)),
                "non_dim2": 0,
            },
        )
        indexing_da = indexing_da < 3
        actual = data.isel(dim2=indexing_da)
        assert_identical(
            actual["dim2"].drop_vars("non_dim").drop_vars("non_dim2"), data["dim2"][:2]
        )
        assert_identical(actual["non_dim"], indexing_da["non_dim"][:2])
        assert_identical(actual["non_dim2"], indexing_da["non_dim2"])
        # non-dimension coordinate will be also attached
        indexing_da = DataArray(
            np.arange(1, 4),
            dims=["dim2"],
            coords={"non_dim": (("dim2",), np.random.randn(3))},
        )
        actual = data.isel(dim2=indexing_da)
        assert "non_dim" in actual
        assert "non_dim" in actual.coords
        # Index by a scalar DataArray
        indexing_da = DataArray(3, dims=[], coords={"station": 2})
        actual = data.isel(dim2=indexing_da)
        assert "station" in actual
        actual = data.isel(dim2=indexing_da["station"])
        assert "station" in actual
        # indexer generated from coordinates
        indexing_ds = Dataset({}, coords={"dim2": [0, 1, 2]})
        with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
            actual = data.isel(dim2=indexing_ds["dim2"])
    def test_sel(self):
        """Label-based selection with slices, datetimes and timedeltas."""
        data = create_test_data()
        int_slicers = {"dim1": slice(None, None, 2), "dim2": slice(2), "dim3": slice(3)}
        loc_slicers = {
            "dim1": slice(None, None, 2),
            "dim2": slice(0, 0.5),
            "dim3": slice("a", "c"),
        }
        # label-based slicing must match the equivalent positional slicing
        assert_equal(data.isel(**int_slicers), data.sel(**loc_slicers))
        data["time"] = ("time", pd.date_range("2000-01-01", periods=20))
        assert_equal(data.isel(time=0), data.sel(time="2000-01-01"))
        assert_equal(
            data.isel(time=slice(10)), data.sel(time=slice("2000-01-01", "2000-01-10"))
        )
        assert_equal(data, data.sel(time=slice("1999", "2005")))
        times = pd.date_range("2000-01-01", periods=3)
        assert_equal(data.isel(time=slice(3)), data.sel(time=times))
        assert_equal(
            data.isel(time=slice(3)), data.sel(time=(data["time.dayofyear"] <= 3))
        )
        # timedelta indexes accept string and pd.Timedelta labels
        td = pd.to_timedelta(np.arange(3), unit="days")
        data = Dataset({"x": ("td", np.arange(3)), "td": td})
        assert_equal(data, data.sel(td=td))
        assert_equal(data, data.sel(td=slice("3 days")))
        assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta("0 days")))
        assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta("0h")))
        assert_equal(data.isel(td=slice(1, 3)), data.sel(td=slice("1 days", "2 days")))
    def test_sel_dataarray(self):
        """sel() with DataArray indexers: new dims, coords and conflicts."""
        data = create_test_data()
        ind = DataArray([0.0, 0.5, 1.0], dims=["dim2"])
        actual = data.sel(dim2=ind)
        assert_equal(actual, data.isel(dim2=[0, 1, 2]))
        # with different dimension
        ind = DataArray([0.0, 0.5, 1.0], dims=["new_dim"])
        actual = data.sel(dim2=ind)
        expected = data.isel(dim2=Variable("new_dim", [0, 1, 2]))
        assert "new_dim" in actual.dims
        assert_equal(actual, expected)
        # Multi-dimensional
        ind = DataArray([[0.0], [0.5], [1.0]], dims=["new_dim", "new_dim2"])
        actual = data.sel(dim2=ind)
        expected = data.isel(dim2=Variable(("new_dim", "new_dim2"), [[0], [1], [2]]))
        assert "new_dim" in actual.dims
        assert "new_dim2" in actual.dims
        assert_equal(actual, expected)
        # with coordinate
        ind = DataArray(
            [0.0, 0.5, 1.0], dims=["new_dim"], coords={"new_dim": ["a", "b", "c"]}
        )
        actual = data.sel(dim2=ind)
        expected = data.isel(dim2=[0, 1, 2]).rename({"dim2": "new_dim"})
        assert "new_dim" in actual.dims
        assert "new_dim" in actual.coords
        assert_equal(
            actual.drop_vars("new_dim").drop_vars("dim2"), expected.drop_vars("new_dim")
        )
        assert_equal(actual["new_dim"].drop_vars("dim2"), ind["new_dim"])
        # with conflicted coordinate (silently ignored)
        ind = DataArray(
            [0.0, 0.5, 1.0], dims=["dim2"], coords={"dim2": ["a", "b", "c"]}
        )
        actual = data.sel(dim2=ind)
        expected = data.isel(dim2=[0, 1, 2])
        assert_equal(actual, expected)
        # with conflicted coordinate (silently ignored)
        ind = DataArray(
            [0.0, 0.5, 1.0],
            dims=["new_dim"],
            coords={"new_dim": ["a", "b", "c"], "dim2": 3},
        )
        actual = data.sel(dim2=ind)
        assert_equal(
            actual["new_dim"].drop_vars("dim2"), ind["new_dim"].drop_vars("dim2")
        )
        expected = data.isel(dim2=[0, 1, 2])
        expected["dim2"] = (("new_dim"), expected["dim2"].values)
        assert_equal(actual["dim2"].drop_vars("new_dim"), expected["dim2"])
        assert actual["var1"].dims == ("dim1", "new_dim")
        # with non-dimensional coordinate
        ind = DataArray(
            [0.0, 0.5, 1.0],
            dims=["dim2"],
            coords={
                "dim2": ["a", "b", "c"],
                "numbers": ("dim2", [0, 1, 2]),
                "new_dim": ("dim2", [1.1, 1.2, 1.3]),
            },
        )
        actual = data.sel(dim2=ind)
        expected = data.isel(dim2=[0, 1, 2])
        assert_equal(actual.drop_vars("new_dim"), expected)
        assert np.allclose(actual["new_dim"].values, ind["new_dim"].values)
    def test_sel_dataarray_mindex(self):
        """sel/isel with array indexers against a MultiIndex dimension."""
        midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two"))
        mds = xr.Dataset(
            {"var": (("x", "y"), np.random.rand(6, 3))},
            coords={"x": midx, "y": range(3)},
        )
        actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims="x"))
        actual_sel = mds.sel(x=DataArray(midx[:3], dims="x"))
        assert actual_isel["x"].dims == ("x",)
        assert actual_sel["x"].dims == ("x",)
        assert_identical(actual_isel, actual_sel)
        # indexer along a new dimension "z" renames the result dimension
        actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims="z"))
        actual_sel = mds.sel(x=Variable("z", midx[:3]))
        assert actual_isel["x"].dims == ("z",)
        assert actual_sel["x"].dims == ("z",)
        assert_identical(actual_isel, actual_sel)
        # with coordinate
        actual_isel = mds.isel(
            x=xr.DataArray(np.arange(3), dims="z", coords={"z": [0, 1, 2]})
        )
        actual_sel = mds.sel(
            x=xr.DataArray(midx[:3], dims="z", coords={"z": [0, 1, 2]})
        )
        assert actual_isel["x"].dims == ("z",)
        assert actual_sel["x"].dims == ("z",)
        assert_identical(actual_isel, actual_sel)
        # Vectorized indexing with level-variables raises an error
        with pytest.raises(ValueError, match=r"Vectorized selection is "):
            mds.sel(one=["a", "b"])
        with pytest.raises(
            ValueError,
            match=r"Vectorized selection is not available along MultiIndex variable: x",
        ):
            mds.sel(
                x=xr.DataArray(
                    [np.array(midx[:2]), np.array(midx[-2:])], dims=["a", "b"]
                )
            )
def test_sel_categorical(self):
ind = pd.Series(["foo", "bar"], dtype="category")
df = pd.DataFrame({"ind": ind, "values": [1, 2]})
ds = df.set_index("ind").to_xarray()
actual = ds.sel(ind="bar")
expected = ds.isel(ind=1)
assert_identical(expected, actual)
def test_sel_categorical_error(self):
ind = pd.Series(["foo", "bar"], dtype="category")
df = pd.DataFrame({"ind": ind, "values": [1, 2]})
ds = df.set_index("ind").to_xarray()
with pytest.raises(ValueError):
ds.sel(ind="bar", method="nearest")
with pytest.raises(ValueError):
ds.sel(ind="bar", tolerance="nearest")
    def test_categorical_index(self):
        """Selection and unstacking with a pandas CategoricalIndex coordinate."""
        cat = pd.CategoricalIndex(
            ["foo", "bar", "foo"],
            categories=["foo", "bar", "baz", "qux", "quux", "corge"],
        )
        ds = xr.Dataset(
            {"var": ("cat", np.arange(3))},
            coords={"cat": ("cat", cat), "c": ("cat", [0, 1, 1])},
        )
        # test slice
        actual = ds.sel(cat="foo")
        expected = ds.isel(cat=[0, 2])
        assert_identical(expected, actual)
        # make sure the conversion to the array works
        actual = ds.sel(cat="foo")["cat"].values
        assert (actual == np.array(["foo", "foo"])).all()
        ds = ds.set_index(index=["cat", "c"])
        actual = ds.unstack("index")
        # unstacking yields the 2x2 product of observed (cat, c) values
        assert actual["var"].shape == (2, 2)
def test_categorical_reindex(self):
cat = pd.CategoricalIndex(
["foo", "bar", "baz"],
categories=["foo", "bar", "baz", "qux", "quux", "corge"],
)
ds = xr.Dataset(
{"var": ("cat", np.arange(3))},
coords={"cat": ("cat", cat), "c": ("cat", [0, 1, 2])},
)
actual = ds.reindex(cat=["foo"])["cat"].values
assert (actual == np.array(["foo"])).all()
def test_categorical_multiindex(self):
i1 = pd.Series([0, 0])
cat = pd.CategoricalDtype(categories=["foo", "baz", "bar"])
i2 = pd.Series(["baz", "bar"], dtype=cat)
df = pd.DataFrame({"i1": i1, "i2": i2, "values": [1, 2]}).set_index(
["i1", "i2"]
)
actual = df.to_xarray()
assert actual["values"].shape == (1, 2)
    def test_sel_drop(self):
        """sel(drop=True) drops the resulting scalar coordinate."""
        data = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]})
        expected = Dataset({"foo": 1})
        selected = data.sel(x=0, drop=True)
        assert_identical(expected, selected)
        expected = Dataset({"foo": 1}, {"x": 0})
        selected = data.sel(x=0, drop=False)
        assert_identical(expected, selected)
        # drop=True is a no-op when there is no coordinate to drop
        data = Dataset({"foo": ("x", [1, 2, 3])})
        expected = Dataset({"foo": 1})
        selected = data.sel(x=0, drop=True)
        assert_identical(expected, selected)
def test_isel_drop(self):
data = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]})
expected = Dataset({"foo": 1})
selected = data.isel(x=0, drop=True)
assert_identical(expected, selected)
expected = Dataset({"foo": 1}, {"x": 0})
selected = data.isel(x=0, drop=False)
assert_identical(expected, selected)
    def test_head(self):
        """head() is isel with leading slices and validates its arguments."""
        data = create_test_data()
        expected = data.isel(time=slice(5), dim2=slice(6))
        actual = data.head(time=5, dim2=6)
        assert_equal(expected, actual)
        expected = data.isel(time=slice(0))
        actual = data.head(time=0)
        assert_equal(expected, actual)
        # a single int applies to every dimension
        expected = data.isel({dim: slice(6) for dim in data.dims})
        actual = data.head(6)
        assert_equal(expected, actual)
        # default is the first 5 elements along each dimension
        expected = data.isel({dim: slice(5) for dim in data.dims})
        actual = data.head()
        assert_equal(expected, actual)
        with pytest.raises(TypeError, match=r"either dict-like or a single int"):
            data.head([3])
        with pytest.raises(TypeError, match=r"expected integer type"):
            data.head(dim2=3.1)
        with pytest.raises(ValueError, match=r"expected positive int"):
            data.head(time=-3)
    def test_tail(self):
        """tail() is isel with trailing slices and validates its arguments."""
        data = create_test_data()
        expected = data.isel(time=slice(-5, None), dim2=slice(-6, None))
        actual = data.tail(time=5, dim2=6)
        assert_equal(expected, actual)
        expected = data.isel(dim1=slice(0))
        actual = data.tail(dim1=0)
        assert_equal(expected, actual)
        # a single int applies to every dimension
        expected = data.isel({dim: slice(-6, None) for dim in data.dims})
        actual = data.tail(6)
        assert_equal(expected, actual)
        # default is the last 5 elements along each dimension
        expected = data.isel({dim: slice(-5, None) for dim in data.dims})
        actual = data.tail()
        assert_equal(expected, actual)
        with pytest.raises(TypeError, match=r"either dict-like or a single int"):
            data.tail([3])
        with pytest.raises(TypeError, match=r"expected integer type"):
            data.tail(dim2=3.1)
        with pytest.raises(ValueError, match=r"expected positive int"):
            data.tail(time=-3)
    def test_thin(self):
        """thin() is isel with strided slices and validates its arguments."""
        data = create_test_data()
        expected = data.isel(time=slice(None, None, 5), dim2=slice(None, None, 6))
        actual = data.thin(time=5, dim2=6)
        assert_equal(expected, actual)
        # a single int applies to every dimension
        expected = data.isel({dim: slice(None, None, 6) for dim in data.dims})
        actual = data.thin(6)
        assert_equal(expected, actual)
        with pytest.raises(TypeError, match=r"either dict-like or a single int"):
            data.thin([3])
        with pytest.raises(TypeError, match=r"expected integer type"):
            data.thin(dim2=3.1)
        # a zero stride is meaningless, unlike head/tail's zero length
        with pytest.raises(ValueError, match=r"cannot be zero"):
            data.thin(time=0)
        with pytest.raises(ValueError, match=r"expected positive int"):
            data.thin(time=-3)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_sel_fancy(self):
data = create_test_data()
# add in a range() index
data["dim1"] = data.dim1
pdim1 = [1, 2, 3]
pdim2 = [4, 5, 1]
pdim3 = [1, 2, 3]
expected = data.isel(
dim1=Variable(("test_coord",), pdim1),
dim2=Variable(("test_coord",), pdim2),
dim3=Variable(("test_coord"), pdim3),
)
actual = data.sel(
dim1=Variable(("test_coord",), data.dim1[pdim1]),
dim2=Variable(("test_coord",), data.dim2[pdim2]),
dim3=Variable(("test_coord",), data.dim3[pdim3]),
)
assert_identical(expected, actual)
# DataArray Indexer
idx_t = DataArray(
data["time"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
idx_2 = DataArray(
data["dim2"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
idx_3 = DataArray(
data["dim3"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3)
expected = data.isel(
time=Variable(("a",), [3, 2, 1]),
dim2=Variable(("a",), [3, 2, 1]),
dim3=Variable(("a",), [3, 2, 1]),
)
expected = expected.assign_coords(a=idx_t["a"])
assert_identical(expected, actual)
idx_t = DataArray(
data["time"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
idx_2 = DataArray(
data["dim2"][[2, 1, 3]].values, dims=["b"], coords={"b": [0, 1, 2]}
)
idx_3 = DataArray(
data["dim3"][[1, 2, 1]].values, dims=["c"], coords={"c": [0.0, 1.1, 2.2]}
)
actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3)
expected = data.isel(
time=Variable(("a",), [3, 2, 1]),
dim2=Variable(("b",), [2, 1, 3]),
dim3=Variable(("c",), [1, 2, 1]),
)
expected = expected.assign_coords(a=idx_t["a"], b=idx_2["b"], c=idx_3["c"])
assert_identical(expected, actual)
# test from sel_points
data = Dataset({"foo": (("x", "y"), np.arange(9).reshape(3, 3))})
data.coords.update({"x": [0, 1, 2], "y": [0, 1, 2]})
expected = Dataset(
{"foo": ("points", [0, 4, 8])},
coords={
"x": Variable(("points",), [0, 1, 2]),
"y": Variable(("points",), [0, 1, 2]),
},
)
actual = data.sel(
x=Variable(("points",), [0, 1, 2]), y=Variable(("points",), [0, 1, 2])
)
assert_identical(expected, actual)
expected.coords.update({"x": ("points", [0, 1, 2]), "y": ("points", [0, 1, 2])})
actual = data.sel(
x=Variable(("points",), [0.1, 1.1, 2.5]),
y=Variable(("points",), [0, 1.2, 2.0]),
method="pad",
)
assert_identical(expected, actual)
idx_x = DataArray([0, 1, 2], dims=["a"], coords={"a": ["a", "b", "c"]})
idx_y = DataArray([0, 2, 1], dims=["b"], coords={"b": [0, 3, 6]})
expected_ary = data["foo"][[0, 1, 2], [0, 2, 1]]
actual = data.sel(x=idx_x, y=idx_y)
assert_array_equal(expected_ary, actual["foo"])
assert_identical(actual["a"].drop_vars("x"), idx_x["a"])
assert_identical(actual["b"].drop_vars("y"), idx_y["b"])
with pytest.raises(KeyError):
data.sel(x=[2.5], y=[2.0], method="pad", tolerance=1e-3)
    def test_sel_method(self):
        """sel with method='nearest'/'backfill', tolerance handling, and the
        errors raised for slice indexers and coordinate-less dimensions."""
        data = create_test_data()
        expected = data.sel(dim2=1)
        actual = data.sel(dim2=0.95, method="nearest")
        assert_identical(expected, actual)
        actual = data.sel(dim2=0.95, method="nearest", tolerance=1)
        assert_identical(expected, actual)
        # a zero tolerance cannot match an off-grid value
        with pytest.raises(KeyError):
            actual = data.sel(dim2=np.pi, method="nearest", tolerance=0)
        expected = data.sel(dim2=[1.5])
        actual = data.sel(dim2=[1.45], method="backfill")
        assert_identical(expected, actual)
        with pytest.raises(NotImplementedError, match=r"slice objects"):
            data.sel(dim2=slice(1, 3), method="ffill")
        with pytest.raises(TypeError, match=r"``method``"):
            # this should not pass silently
            data.sel(method=data)
        # cannot pass method if there is no associated coordinate
        with pytest.raises(ValueError, match=r"cannot supply"):
            data.sel(dim1=0, method="nearest")
def test_loc(self):
data = create_test_data()
expected = data.sel(dim3="a")
actual = data.loc[dict(dim3="a")]
assert_identical(expected, actual)
with pytest.raises(TypeError, match=r"can only lookup dict"):
data.loc["a"]
    def test_selection_multiindex(self):
        """Label-based selection on a pandas MultiIndex: full/partial tuples,
        slices, dict indexers, and equivalence of ``.loc`` with ``.sel``."""
        mindex = pd.MultiIndex.from_product(
            [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
        )
        mdata = Dataset(data_vars={"var": ("x", range(8))}, coords={"x": mindex})
        def test_sel(lab_indexer, pos_indexer, replaced_idx=False, renamed_dim=None):
            # Compare label-based selection against the equivalent positional
            # one.  replaced_idx=True marks partial selections where the
            # MultiIndex is replaced, so only data (not the index) can match.
            ds = mdata.sel(x=lab_indexer)
            expected_ds = mdata.isel(x=pos_indexer)
            if not replaced_idx:
                assert_identical(ds, expected_ds)
            else:
                if renamed_dim:
                    # a single remaining level renames the dimension to it
                    assert ds["var"].dims[0] == renamed_dim
                    ds = ds.rename({renamed_dim: "x"})
                assert_identical(ds["var"].variable, expected_ds["var"].variable)
                assert not ds["x"].equals(expected_ds["x"])
        test_sel(("a", 1, -1), 0)
        test_sel(("b", 2, -2), -1)
        test_sel(("a", 1), [0, 1], replaced_idx=True, renamed_dim="three")
        test_sel(("a",), range(4), replaced_idx=True)
        test_sel("a", range(4), replaced_idx=True)
        test_sel([("a", 1, -1), ("b", 2, -2)], [0, 7])
        test_sel(slice("a", "b"), range(8))
        test_sel(slice(("a", 1), ("b", 1)), range(6))
        test_sel({"one": "a", "two": 1, "three": -1}, 0)
        test_sel({"one": "a", "two": 1}, [0, 1], replaced_idx=True, renamed_dim="three")
        test_sel({"one": "a"}, range(4), replaced_idx=True)
        assert_identical(mdata.loc[{"x": {"one": "a"}}], mdata.sel(x={"one": "a"}))
        assert_identical(mdata.loc[{"x": "a"}], mdata.sel(x="a"))
        assert_identical(mdata.loc[{"x": ("a", 1)}], mdata.sel(x=("a", 1)))
        assert_identical(mdata.loc[{"x": ("a", 1, -1)}], mdata.sel(x=("a", 1, -1)))
        assert_identical(mdata.sel(x={"one": "a", "two": 1}), mdata.sel(one="a", two=1))
def test_broadcast_like(self):
original1 = DataArray(
np.random.randn(5), [("x", range(5))], name="a"
).to_dataset()
original2 = DataArray(np.random.randn(6), [("y", range(6))], name="b")
expected1, expected2 = broadcast(original1, original2)
assert_identical(
original1.broadcast_like(original2), expected1.transpose("y", "x")
)
assert_identical(original2.broadcast_like(original1), expected2)
    def test_to_pandas(self):
        """to_pandas: 0-D -> Series, 1-D -> DataFrame, 2-D -> ValueError."""
        # 0D -> series
        actual = Dataset({"a": 1, "b": 2}).to_pandas()
        expected = pd.Series([1, 2], ["a", "b"])
        assert_array_equal(actual, expected)
        # 1D -> dataframe
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
        actual = ds.to_pandas()
        expected = ds.to_dataframe()
        assert expected.equals(actual), (expected, actual)
        # 2D -> error
        x2d = np.random.randn(10, 10)
        y2d = np.random.randn(10, 10)
        with pytest.raises(ValueError, match=r"cannot convert Datasets"):
            Dataset({"a": (["t", "r"], x2d), "b": (["t", "r"], y2d)}).to_pandas()
    def test_reindex_like(self):
        """reindex_like against a pure subset and against a shifted index that
        introduces NaN fill values (forcing dtype promotion)."""
        data = create_test_data()
        data["letters"] = ("dim3", 10 * ["a"])
        expected = data.isel(dim1=slice(10), time=slice(13))
        actual = data.reindex_like(expected)
        assert_identical(actual, expected)
        # build the expected result of reindexing onto a shifted dim3 by hand
        expected = data.copy(deep=True)
        expected["dim3"] = ("dim3", list("cdefghijkl"))
        expected["var3"][:-2] = expected["var3"][2:].values
        expected["var3"][-2:] = np.nan
        # string/int variables must be promoted so NaN can be inserted
        expected["letters"] = expected["letters"].astype(object)
        expected["letters"][-2:] = np.nan
        expected["numbers"] = expected["numbers"].astype(float)
        expected["numbers"][:-2] = expected["numbers"][2:].values
        expected["numbers"][-2:] = np.nan
        actual = data.reindex_like(expected)
        assert_identical(actual, expected)
    def test_reindex(self):
        """Dataset.reindex: no-op call, indexer forms (DataArray, ndarray,
        pandas Index, dict), error cases, ordering, and fill_value handling."""
        data = create_test_data()
        # reindex with no arguments is the identity
        assert_identical(data, data.reindex())
        expected = data.assign_coords(dim1=data["dim1"])
        actual = data.reindex(dim1=data["dim1"])
        assert_identical(actual, expected)
        actual = data.reindex(dim1=data["dim1"].values)
        assert_identical(actual, expected)
        actual = data.reindex(dim1=data["dim1"].to_index())
        assert_identical(actual, expected)
        # dim1 has no coordinate, so reindexing along it must fail
        with pytest.raises(
            ValueError, match=r"cannot reindex or align along dimension"
        ):
            data.reindex(dim1=data["dim1"][:5])
        expected = data.isel(dim2=slice(5))
        actual = data.reindex(dim2=data["dim2"][:5])
        assert_identical(actual, expected)
        # test dict-like argument
        actual = data.reindex({"dim2": data["dim2"]})
        expected = data
        assert_identical(actual, expected)
        with pytest.raises(ValueError, match=r"cannot specify both"):
            data.reindex({"x": 0}, x=0)
        with pytest.raises(ValueError, match=r"dictionary"):
            data.reindex("foo")
        # invalid dimension
        with pytest.raises(ValueError, match=r"invalid reindex dim"):
            data.reindex(invalid=0)
        # out of order
        expected = data.sel(dim2=data["dim2"][:5:-1])
        actual = data.reindex(dim2=data["dim2"][:5:-1])
        assert_identical(actual, expected)
        # multiple fill values
        expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(
            var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),
            var2=lambda ds: ds.var2.copy(data=[[-20, -20, -20, -20]] * len(ds.dim1)),
        )
        actual = data.reindex(
            dim2=[0.1, 2.1, 3.1, 4.1], fill_value={"var1": -10, "var2": -20}
        )
        assert_identical(actual, expected)
        # use the default value
        expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(
            var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),
            var2=lambda ds: ds.var2.copy(
                data=[[np.nan, np.nan, np.nan, np.nan]] * len(ds.dim1)
            ),
        )
        actual = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1], fill_value={"var1": -10})
        assert_identical(actual, expected)
        # regression test for #279
        expected = Dataset({"x": ("time", np.random.randn(5))}, {"time": range(5)})
        time2 = DataArray(np.arange(5), dims="time2")
        with pytest.raises(ValueError):
            actual = expected.reindex(time=time2)
        # another regression test
        ds = Dataset(
            {"foo": (["x", "y"], np.zeros((3, 4)))}, {"x": range(3), "y": range(4)}
        )
        expected = Dataset(
            {"foo": (["x", "y"], np.zeros((3, 2)))}, {"x": [0, 1, 3], "y": [0, 1]}
        )
        expected["foo"][-1] = np.nan
        actual = ds.reindex(x=[0, 1, 3], y=[0, 1])
        assert_identical(expected, actual)
def test_reindex_warning(self):
data = create_test_data()
with pytest.raises(ValueError):
# DataArray with different dimension raises Future warning
ind = xr.DataArray([0.0, 1.0], dims=["new_dim"], name="ind")
data.reindex(dim2=ind)
# Should not warn
ind = xr.DataArray([0.0, 1.0], dims=["dim2"], name="ind")
with pytest.warns(None) as ws:
data.reindex(dim2=ind)
assert len(ws) == 0
def test_reindex_variables_copied(self):
data = create_test_data()
reindexed_data = data.reindex(copy=False)
for k in data.variables:
assert reindexed_data.variables[k] is not data.variables[k]
    def test_reindex_method(self):
        """reindex with method='backfill'/'pad', tolerance, and reindex_like."""
        ds = Dataset({"x": ("y", [10, 20]), "y": [0, 1]})
        y = [-0.5, 0.5, 1.5]
        actual = ds.reindex(y=y, method="backfill")
        expected = Dataset({"x": ("y", [10, 20, np.nan]), "y": y})
        assert_identical(expected, actual)
        # with a tight tolerance nothing matches, so all values are NaN
        actual = ds.reindex(y=y, method="backfill", tolerance=0.1)
        expected = Dataset({"x": ("y", 3 * [np.nan]), "y": y})
        assert_identical(expected, actual)
        actual = ds.reindex(y=y, method="pad")
        expected = Dataset({"x": ("y", [np.nan, 10, 20]), "y": y})
        assert_identical(expected, actual)
        # reindex_like forwards the method argument
        alt = Dataset({"y": y})
        actual = ds.reindex_like(alt, method="pad")
        assert_identical(expected, actual)
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"x": 2, "z": 1}])
    def test_reindex_fill_value(self, fill_value):
        """reindex fill_value: default NA, scalar, and per-variable dict."""
        ds = Dataset({"x": ("y", [10, 20]), "z": ("y", [-20, -10]), "y": [0, 1]})
        y = [0, 1, 2]
        actual = ds.reindex(y=y, fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_x = fill_value_z = np.nan
        elif isinstance(fill_value, dict):
            # per-variable fill values
            fill_value_x = fill_value["x"]
            fill_value_z = fill_value["z"]
        else:
            fill_value_x = fill_value_z = fill_value
        expected = Dataset(
            {
                "x": ("y", [10, 20, fill_value_x]),
                "z": ("y", [-20, -10, fill_value_z]),
                "y": y,
            }
        )
        assert_identical(expected, actual)
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"x": 2, "z": 1}])
    def test_reindex_like_fill_value(self, fill_value):
        """reindex_like fill_value: default NA, scalar, and per-variable dict
        (mirrors test_reindex_fill_value)."""
        ds = Dataset({"x": ("y", [10, 20]), "z": ("y", [-20, -10]), "y": [0, 1]})
        y = [0, 1, 2]
        alt = Dataset({"y": y})
        actual = ds.reindex_like(alt, fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_x = fill_value_z = np.nan
        elif isinstance(fill_value, dict):
            # per-variable fill values
            fill_value_x = fill_value["x"]
            fill_value_z = fill_value["z"]
        else:
            fill_value_x = fill_value_z = fill_value
        expected = Dataset(
            {
                "x": ("y", [10, 20, fill_value_x]),
                "z": ("y", [-20, -10, fill_value_z]),
                "y": y,
            }
        )
        assert_identical(expected, actual)
@pytest.mark.parametrize("dtype", [str, bytes])
def test_reindex_str_dtype(self, dtype):
data = Dataset({"data": ("x", [1, 2]), "x": np.array(["a", "b"], dtype=dtype)})
actual = data.reindex(x=data.x)
expected = data
assert_identical(expected, actual)
assert actual.x.dtype == expected.x.dtype
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": 2, "bar": 1}])
    def test_align_fill_value(self, fill_value):
        """Outer align fill_value: default NA, scalar, and per-variable dict."""
        x = Dataset({"foo": DataArray([1, 2], dims=["x"], coords={"x": [1, 2]})})
        y = Dataset({"bar": DataArray([1, 2], dims=["x"], coords={"x": [1, 3]})})
        x2, y2 = align(x, y, join="outer", fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_foo = fill_value_bar = np.nan
        elif isinstance(fill_value, dict):
            # per-variable fill values
            fill_value_foo = fill_value["foo"]
            fill_value_bar = fill_value["bar"]
        else:
            fill_value_foo = fill_value_bar = fill_value
        expected_x2 = Dataset(
            {
                "foo": DataArray(
                    [1, 2, fill_value_foo], dims=["x"], coords={"x": [1, 2, 3]}
                )
            }
        )
        expected_y2 = Dataset(
            {
                "bar": DataArray(
                    [1, fill_value_bar, 2], dims=["x"], coords={"x": [1, 2, 3]}
                )
            }
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_align(self):
        """align with join='inner'/'outer'/'left'/'right' on partially
        overlapping indexes, plus invalid join values and kwargs."""
        left = create_test_data()
        right = left.copy(deep=True)
        # shift right's dim3 so the two datasets only partially overlap
        right["dim3"] = ("dim3", list("cdefghijkl"))
        right["var3"][:-2] = right["var3"][2:].values
        right["var3"][-2:] = np.random.randn(*right["var3"][-2:].shape)
        right["numbers"][:-2] = right["numbers"][2:].values
        right["numbers"][-2:] = -10
        intersection = list("cdefghij")
        union = list("abcdefghijkl")
        left2, right2 = align(left, right, join="inner")
        assert_array_equal(left2["dim3"], intersection)
        assert_identical(left2, right2)
        left2, right2 = align(left, right, join="outer")
        assert_array_equal(left2["dim3"], union)
        assert_equal(left2["dim3"].variable, right2["dim3"].variable)
        assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))
        # the non-overlapping ends are filled with NaN
        assert np.isnan(left2["var3"][-2:]).all()
        assert np.isnan(right2["var3"][:2]).all()
        left2, right2 = align(left, right, join="left")
        assert_equal(left2["dim3"].variable, right2["dim3"].variable)
        assert_equal(left2["dim3"].variable, left["dim3"].variable)
        assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))
        assert np.isnan(right2["var3"][:2]).all()
        left2, right2 = align(left, right, join="right")
        assert_equal(left2["dim3"].variable, right2["dim3"].variable)
        assert_equal(left2["dim3"].variable, right["dim3"].variable)
        assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))
        assert np.isnan(left2["var3"][-2:]).all()
        with pytest.raises(ValueError, match=r"invalid value for join"):
            align(left, right, join="foobar")
        with pytest.raises(TypeError):
            align(left, right, foo="bar")
def test_align_exact(self):
left = xr.Dataset(coords={"x": [0, 1]})
right = xr.Dataset(coords={"x": [1, 2]})
left1, left2 = xr.align(left, left, join="exact")
assert_identical(left1, left)
assert_identical(left2, left)
with pytest.raises(ValueError, match=r"indexes .* not equal"):
xr.align(left, right, join="exact")
    def test_align_override(self):
        """join='override' copies indexes from the first object; dimension
        sizes must still match unless excluded."""
        left = xr.Dataset(coords={"x": [0, 1, 2]})
        right = xr.Dataset(coords={"x": [0.1, 1.1, 2.1], "y": [1, 2, 3]})
        expected_right = xr.Dataset(coords={"x": [0, 1, 2], "y": [1, 2, 3]})
        new_left, new_right = xr.align(left, right, join="override")
        assert_identical(left, new_left)
        assert_identical(new_right, expected_right)
        # excluding x leaves both objects untouched
        new_left, new_right = xr.align(left, right, exclude="x", join="override")
        assert_identical(left, new_left)
        assert_identical(right, new_right)
        new_left, new_right = xr.align(
            left.isel(x=0, drop=True), right, exclude="x", join="override"
        )
        assert_identical(left.isel(x=0, drop=True), new_left)
        assert_identical(right, new_right)
        # mismatched sizes along a non-excluded dimension must fail
        with pytest.raises(ValueError, match=r"Indexes along dimension 'x' don't have"):
            xr.align(left.isel(x=0).expand_dims("x"), right, join="override")
    def test_align_exclude(self):
        """Outer align with exclude=['y']: x is unioned (with NaN padding)
        while each object keeps its own y index."""
        x = Dataset(
            {
                "foo": DataArray(
                    [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4]}
                )
            }
        )
        y = Dataset(
            {
                "bar": DataArray(
                    [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 3], "y": [5, 6]}
                )
            }
        )
        x2, y2 = align(x, y, exclude=["y"], join="outer")
        expected_x2 = Dataset(
            {
                "foo": DataArray(
                    [[1, 2], [3, 4], [np.nan, np.nan]],
                    dims=["x", "y"],
                    coords={"x": [1, 2, 3], "y": [3, 4]},
                )
            }
        )
        expected_y2 = Dataset(
            {
                "bar": DataArray(
                    [[1, 2], [np.nan, np.nan], [3, 4]],
                    dims=["x", "y"],
                    coords={"x": [1, 2, 3], "y": [5, 6]},
                )
            }
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_align_nocopy(self):
        """align(copy=False) shares the underlying ndarray when no reindexing
        is needed; copy=True always duplicates it."""
        x = Dataset({"foo": DataArray([1, 2, 3], coords=[("x", [1, 2, 3])])})
        y = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])})
        expected_x2 = x
        expected_y2 = Dataset(
            {"foo": DataArray([1, 2, np.nan], coords=[("x", [1, 2, 3])])}
        )
        x2, y2 = align(x, y, copy=False, join="outer")
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
        # x needed no reindexing, so its data buffer is shared
        assert source_ndarray(x["foo"].data) is source_ndarray(x2["foo"].data)
        x2, y2 = align(x, y, copy=True, join="outer")
        assert source_ndarray(x["foo"].data) is not source_ndarray(x2["foo"].data)
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
def test_align_indexes(self):
x = Dataset({"foo": DataArray([1, 2, 3], dims="x", coords=[("x", [1, 2, 3])])})
(x2,) = align(x, indexes={"x": [2, 3, 1]})
expected_x2 = Dataset(
{"foo": DataArray([2, 3, 1], dims="x", coords={"x": [2, 3, 1]})}
)
assert_identical(expected_x2, x2)
    def test_align_non_unique(self):
        """Aligning identical non-unique indexes works; mismatched ones raise."""
        x = Dataset({"foo": ("x", [3, 4, 5]), "x": [0, 0, 1]})
        x1, x2 = align(x, x)
        assert_identical(x1, x)
        assert_identical(x2, x)
        # reindexing onto a non-unique index is not possible
        y = Dataset({"bar": ("x", [6, 7]), "x": [0, 1]})
        with pytest.raises(ValueError, match=r"cannot reindex or align"):
            align(x, y)
def test_align_str_dtype(self):
a = Dataset({"foo": ("x", [0, 1]), "x": ["a", "b"]})
b = Dataset({"foo": ("x", [1, 2]), "x": ["b", "c"]})
expected_a = Dataset({"foo": ("x", [0, 1, np.NaN]), "x": ["a", "b", "c"]})
expected_b = Dataset({"foo": ("x", [np.NaN, 1, 2]), "x": ["a", "b", "c"]})
actual_a, actual_b = xr.align(a, b, join="outer")
assert_identical(expected_a, actual_a)
assert expected_a.x.dtype == actual_a.x.dtype
assert_identical(expected_b, actual_b)
assert expected_b.x.dtype == actual_b.x.dtype
    def test_broadcast(self):
        """broadcast: single dataset, two datasets, and dataset + DataArray."""
        ds = Dataset(
            {"foo": 0, "bar": ("x", [1]), "baz": ("y", [2, 3])}, {"c": ("x", [4])}
        )
        # all variables are expanded to the union of dims (x, y)
        expected = Dataset(
            {
                "foo": (("x", "y"), [[0, 0]]),
                "bar": (("x", "y"), [[1, 1]]),
                "baz": (("x", "y"), [[2, 3]]),
            },
            {"c": ("x", [4])},
        )
        (actual,) = broadcast(ds)
        assert_identical(expected, actual)
        ds_x = Dataset({"foo": ("x", [1])})
        ds_y = Dataset({"bar": ("y", [2, 3])})
        expected_x = Dataset({"foo": (("x", "y"), [[1, 1]])})
        expected_y = Dataset({"bar": (("x", "y"), [[2, 3]])})
        actual_x, actual_y = broadcast(ds_x, ds_y)
        assert_identical(expected_x, actual_x)
        assert_identical(expected_y, actual_y)
        # mixing Dataset and DataArray also works
        array_y = ds_y["bar"]
        expected_y = expected_y["bar"]
        actual_x, actual_y = broadcast(ds_x, array_y)
        assert_identical(expected_x, actual_x)
        assert_identical(expected_y, actual_y)
    def test_broadcast_nocopy(self):
        # Test that data is not copied if not needed
        x = Dataset({"foo": (("x", "y"), [[1, 1]])})
        y = Dataset({"bar": ("y", [2, 3])})
        (actual_x,) = broadcast(x)
        assert_identical(x, actual_x)
        # x already spans (x, y), so its buffer must be shared, not copied
        assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data)
        actual_x, actual_y = broadcast(x, y)
        assert_identical(x, actual_x)
        assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data)
    def test_broadcast_exclude(self):
        """broadcast with exclude=['y']: x/z are broadcast while each object
        keeps its own y dimension and index."""
        x = Dataset(
            {
                "foo": DataArray(
                    [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4]}
                ),
                "bar": DataArray(5),
            }
        )
        y = Dataset(
            {
                "foo": DataArray(
                    [[1, 2]], dims=["z", "y"], coords={"z": [1], "y": [5, 6]}
                )
            }
        )
        x2, y2 = broadcast(x, y, exclude=["y"])
        expected_x2 = Dataset(
            {
                "foo": DataArray(
                    [[[1, 2]], [[3, 4]]],
                    dims=["x", "z", "y"],
                    coords={"z": [1], "x": [1, 2], "y": [3, 4]},
                ),
                "bar": DataArray(
                    [[5], [5]], dims=["x", "z"], coords={"x": [1, 2], "z": [1]}
                ),
            }
        )
        expected_y2 = Dataset(
            {
                "foo": DataArray(
                    [[[1, 2]], [[1, 2]]],
                    dims=["x", "z", "y"],
                    coords={"z": [1], "x": [1, 2], "y": [5, 6]},
                )
            }
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_broadcast_misaligned(self):
        """broadcast aligns mismatched indexes first (outer join with NaN
        padding), then broadcasts the dimensions."""
        x = Dataset({"foo": DataArray([1, 2, 3], coords=[("x", [-1, -2, -3])])})
        y = Dataset(
            {
                "bar": DataArray(
                    [[1, 2], [3, 4]],
                    dims=["y", "x"],
                    coords={"y": [1, 2], "x": [10, -3]},
                )
            }
        )
        x2, y2 = broadcast(x, y)
        # union of x indexes, sorted: [-3, -2, -1, 10]
        expected_x2 = Dataset(
            {
                "foo": DataArray(
                    [[3, 3], [2, 2], [1, 1], [np.nan, np.nan]],
                    dims=["x", "y"],
                    coords={"y": [1, 2], "x": [-3, -2, -1, 10]},
                )
            }
        )
        expected_y2 = Dataset(
            {
                "bar": DataArray(
                    [[2, 4], [np.nan, np.nan], [np.nan, np.nan], [1, 3]],
                    dims=["x", "y"],
                    coords={"y": [1, 2], "x": [-3, -2, -1, 10]},
                )
            }
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_variable_indexing(self):
        """Indexing a DataArray with other DataArrays, boolean conditions,
        ranges, and label-based .loc lookups."""
        data = create_test_data()
        v = data["var1"]
        d1 = data["dim1"]
        d2 = data["dim2"]
        # dim1 is a range() coordinate, so indexing by it is the identity
        assert_equal(v, v[d1.values])
        assert_equal(v, v[d1])
        assert_equal(v[:3], v[d1 < 3])
        assert_equal(v[:, 3:], v[:, d2 >= 1.5])
        assert_equal(v[:3, 3:], v[d1 < 3, d2 >= 1.5])
        assert_equal(v[:3, :2], v[range(3), range(2)])
        assert_equal(v[:3, :2], v.loc[d1[:3], d2[:2]])
    def test_drop_variables(self):
        """drop_vars with str/list input, errors='raise'/'ignore', and the
        deprecated Dataset.drop alias."""
        data = create_test_data()
        assert_identical(data, data.drop_vars([]))
        expected = Dataset({k: data[k] for k in data.variables if k != "time"})
        actual = data.drop_vars("time")
        assert_identical(expected, actual)
        actual = data.drop_vars(["time"])
        assert_identical(expected, actual)
        with pytest.raises(ValueError, match=r"cannot be found"):
            data.drop_vars("not_found_here")
        actual = data.drop_vars("not_found_here", errors="ignore")
        assert_identical(data, actual)
        actual = data.drop_vars(["not_found_here"], errors="ignore")
        assert_identical(data, actual)
        actual = data.drop_vars(["time", "not_found_here"], errors="ignore")
        assert_identical(expected, actual)
        # deprecated approach with `drop` works (straight copy paste from above)
        with pytest.warns(PendingDeprecationWarning):
            actual = data.drop("not_found_here", errors="ignore")
        assert_identical(data, actual)
        with pytest.warns(PendingDeprecationWarning):
            actual = data.drop(["not_found_here"], errors="ignore")
        assert_identical(data, actual)
        with pytest.warns(PendingDeprecationWarning):
            actual = data.drop(["time", "not_found_here"], errors="ignore")
        assert_identical(expected, actual)
        with pytest.warns(PendingDeprecationWarning):
            actual = data.drop({"time", "not_found_here"}, errors="ignore")
        assert_identical(expected, actual)
    def test_drop_index_labels(self):
        """Dropping index labels via the deprecated drop(..., dim=...) API and
        its drop_sel replacement, including error handling."""
        data = Dataset({"A": (["x", "y"], np.random.randn(2, 3)), "x": ["a", "b"]})
        with pytest.warns(DeprecationWarning):
            actual = data.drop(["a"], dim="x")
        expected = data.isel(x=[1])
        assert_identical(expected, actual)
        with pytest.warns(DeprecationWarning):
            actual = data.drop(["a", "b"], dim="x")
        expected = data.isel(x=slice(0, 0))
        assert_identical(expected, actual)
        with pytest.raises(KeyError):
            # not contained in axis
            with pytest.warns(DeprecationWarning):
                data.drop(["c"], dim="x")
        with pytest.warns(DeprecationWarning):
            actual = data.drop(["c"], dim="x", errors="ignore")
        assert_identical(data, actual)
        with pytest.raises(ValueError):
            with pytest.warns(DeprecationWarning):
                data.drop(["c"], dim="x", errors="wrong_value")
        with pytest.warns(DeprecationWarning):
            actual = data.drop(["a", "b", "c"], "x", errors="ignore")
        expected = data.isel(x=slice(0, 0))
        assert_identical(expected, actual)
        # DataArrays as labels are a nasty corner case as they are not
        # Iterable[Hashable] - DataArray.__iter__ yields scalar DataArrays.
        actual = data.drop_sel(x=DataArray(["a", "b", "c"]), errors="ignore")
        expected = data.isel(x=slice(0, 0))
        assert_identical(expected, actual)
        with pytest.warns(DeprecationWarning):
            data.drop(DataArray(["a", "b", "c"]), dim="x", errors="ignore")
        assert_identical(expected, actual)
        actual = data.drop_sel(y=[1])
        expected = data.isel(y=[0, 2])
        assert_identical(expected, actual)
        # scalar labels not present in the index must raise
        with pytest.raises(KeyError, match=r"not found in axis"):
            data.drop_sel(x=0)
    def test_drop_labels_by_keyword(self):
        """drop_sel keyword forms vs. the deprecated drop(..., dim=...), plus
        error handling when both styles are mixed."""
        data = Dataset(
            {"A": (["x", "y"], np.random.randn(2, 6)), "x": ["a", "b"], "y": range(6)}
        )
        # Basic functionality.
        assert len(data.coords["x"]) == 2
        with pytest.warns(DeprecationWarning):
            ds1 = data.drop(["a"], dim="x")
        ds2 = data.drop_sel(x="a")
        ds3 = data.drop_sel(x=["a"])
        ds4 = data.drop_sel(x=["a", "b"])
        ds5 = data.drop_sel(x=["a", "b"], y=range(0, 6, 2))
        arr = DataArray(range(3), dims=["c"])
        with pytest.warns(FutureWarning):
            data.drop(arr.coords)
        with pytest.warns(FutureWarning):
            data.drop(arr.xindexes)
        assert_array_equal(ds1.coords["x"], ["b"])
        assert_array_equal(ds2.coords["x"], ["b"])
        assert_array_equal(ds3.coords["x"], ["b"])
        assert ds4.coords["x"].size == 0
        assert ds5.coords["x"].size == 0
        assert_array_equal(ds5.coords["y"], [1, 3, 5])
        # Error handling if user tries both approaches.
        with pytest.raises(ValueError):
            data.drop(labels=["a"], x="a")
        with pytest.raises(ValueError):
            data.drop(labels=["a"], dim="x", x="a")
        # NOTE(review): this bare filterwarnings call mutates global warning
        # state for the rest of the session — presumably intentional here,
        # but consider a catch_warnings context; confirm before changing.
        warnings.filterwarnings("ignore", r"\W*drop")
        with pytest.raises(ValueError):
            data.drop(dim="x", x="a")
    def test_drop_labels_by_position(self):
        """drop_isel must match the equivalent drop_sel for every input form."""
        data = Dataset(
            {"A": (["x", "y"], np.random.randn(2, 6)), "x": ["a", "b"], "y": range(6)}
        )
        # Basic functionality.
        assert len(data.coords["x"]) == 2
        actual = data.drop_isel(x=0)
        expected = data.drop_sel(x="a")
        assert_identical(expected, actual)
        actual = data.drop_isel(x=[0])
        expected = data.drop_sel(x=["a"])
        assert_identical(expected, actual)
        actual = data.drop_isel(x=[0, 1])
        expected = data.drop_sel(x=["a", "b"])
        assert_identical(expected, actual)
        assert actual.coords["x"].size == 0
        actual = data.drop_isel(x=[0, 1], y=range(0, 6, 2))
        expected = data.drop_sel(x=["a", "b"], y=range(0, 6, 2))
        assert_identical(expected, actual)
        assert actual.coords["x"].size == 0
        # unknown dimension name
        with pytest.raises(KeyError):
            data.drop_isel(z=1)
    def test_drop_dims(self):
        """drop_dims with str/list input and errors='raise'/'ignore'."""
        data = xr.Dataset(
            {
                "A": (["x", "y"], np.random.randn(2, 3)),
                "B": ("x", np.random.randn(2)),
                "x": ["a", "b"],
                "z": np.pi,
            }
        )
        # dropping a dim removes every variable that uses it
        actual = data.drop_dims("x")
        expected = data.drop_vars(["A", "B", "x"])
        assert_identical(expected, actual)
        actual = data.drop_dims("y")
        expected = data.drop_vars("A")
        assert_identical(expected, actual)
        actual = data.drop_dims(["x", "y"])
        expected = data.drop_vars(["A", "B", "x"])
        assert_identical(expected, actual)
        with pytest.raises((ValueError, KeyError)):
            data.drop_dims("z")  # not a dimension
        with pytest.raises((ValueError, KeyError)):
            data.drop_dims(None)
        actual = data.drop_dims("z", errors="ignore")
        assert_identical(data, actual)
        actual = data.drop_dims(None, errors="ignore")
        assert_identical(data, actual)
        with pytest.raises(ValueError):
            actual = data.drop_dims("z", errors="wrong_value")
        actual = data.drop_dims(["x", "y", "z"], errors="ignore")
        expected = data.drop_vars(["A", "B", "x"])
        assert_identical(expected, actual)
    def test_copy(self):
        """Shallow copies share data buffers and attr objects; deep copies
        share neither.  New variables/attrs never leak back to the original."""
        data = create_test_data()
        data.attrs["Test"] = [1, 2, 3]
        for copied in [data.copy(deep=False), copy(data)]:
            assert_identical(data, copied)
            assert data.encoding == copied.encoding
            # Note: IndexVariable objects with string dtype are always
            # copied because of xarray.core.util.safe_cast_to_index.
            # Limiting the test to data variables.
            for k in data.data_vars:
                v0 = data.variables[k]
                v1 = copied.variables[k]
                assert source_ndarray(v0.data) is source_ndarray(v1.data)
            copied["foo"] = ("z", np.arange(5))
            assert "foo" not in data
            copied.attrs["foo"] = "bar"
            assert "foo" not in data.attrs
            # shallow copy shares the mutable attr object
            assert data.attrs["Test"] is copied.attrs["Test"]
        for copied in [data.copy(deep=True), deepcopy(data)]:
            assert_identical(data, copied)
            for k, v0 in data.variables.items():
                v1 = copied.variables[k]
                assert v0 is not v1
            # deep copy duplicates the mutable attr object
            assert data.attrs["Test"] is not copied.attrs["Test"]
def test_copy_with_data(self):
orig = create_test_data()
new_data = {k: np.random.randn(*v.shape) for k, v in orig.data_vars.items()}
actual = orig.copy(data=new_data)
expected = orig.copy()
for k, v in new_data.items():
expected[k].data = v
assert_identical(expected, actual)
    @pytest.mark.xfail(raises=AssertionError)
    @pytest.mark.parametrize(
        "deep, expected_orig",
        [
            [
                True,
                xr.DataArray(
                    xr.IndexVariable("a", np.array([1, 2])),
                    coords={"a": [1, 2]},
                    dims=["a"],
                ),
            ],
            [
                False,
                xr.DataArray(
                    xr.IndexVariable("a", np.array([999, 2])),
                    coords={"a": [999, 2]},
                    dims=["a"],
                ),
            ],
        ],
    )
    def test_copy_coords(self, deep, expected_orig):
        """The test fails for the shallow copy, and apparently only on Windows
        for some reason. In windows coords seem to be immutable unless it's one
        dataset deep copied from another."""
        ds = xr.DataArray(
            np.ones([2, 2, 2]),
            coords={"a": [1, 2], "b": ["x", "y"], "c": [0, 1]},
            dims=["a", "b", "c"],
            name="value",
        ).to_dataset()
        ds_cp = ds.copy(deep=deep)
        # mutate the copy's coord in place: a deep copy must leave the
        # original untouched, a shallow copy shares the buffer
        ds_cp.coords["a"].data[0] = 999
        expected_cp = xr.DataArray(
            xr.IndexVariable("a", np.array([999, 2])),
            coords={"a": [999, 2]},
            dims=["a"],
        )
        assert_identical(ds_cp.coords["a"], expected_cp)
        assert_identical(ds.coords["a"], expected_orig)
    def test_copy_with_data_errors(self):
        """copy(data=...) rejects non-dicts and dicts whose keys do not match
        the original's variables exactly."""
        orig = create_test_data()
        new_var1 = np.arange(orig["var1"].size).reshape(orig["var1"].shape)
        with pytest.raises(ValueError, match=r"Data must be dict-like"):
            orig.copy(data=new_var1)
        with pytest.raises(ValueError, match=r"only contain variables in original"):
            orig.copy(data={"not_in_original": new_var1})
        with pytest.raises(ValueError, match=r"contain all variables in original"):
            orig.copy(data={"var1": new_var1})
    def test_rename(self):
        """rename of variables and dims: renamed variables/dims are verified
        one-by-one, invalid/conflicting names raise, data is not loaded, and
        keyword form matches dict form."""
        data = create_test_data()
        newnames = {"var1": "renamed_var1", "dim2": "renamed_dim2"}
        renamed = data.rename(newnames)
        # rebuild the expected mapping by hand and compare variable-wise
        variables = dict(data.variables)
        for k, v in newnames.items():
            variables[v] = variables.pop(k)
        for k, v in variables.items():
            dims = list(v.dims)
            for name, newname in newnames.items():
                if name in dims:
                    dims[dims.index(name)] = newname
            assert_equal(
                Variable(dims, v.values, v.attrs),
                renamed[k].variable.to_base_variable(),
            )
            assert v.encoding == renamed[k].encoding
            assert type(v) is type(renamed.variables[k])  # noqa: E721
        assert "var1" not in renamed
        assert "dim2" not in renamed
        with pytest.raises(ValueError, match=r"cannot rename 'not_a_var'"):
            data.rename({"not_a_var": "nada"})
        with pytest.raises(ValueError, match=r"'var1' conflicts"):
            data.rename({"var2": "var1"})
        # verify that we can rename a variable without accessing the data
        var1 = data["var1"]
        data["var1"] = (var1.dims, InaccessibleArray(var1.values))
        renamed = data.rename(newnames)
        with pytest.raises(UnexpectedDataAccess):
            renamed["renamed_var1"].values
        # keyword-argument form is equivalent to the dict form
        renamed_kwargs = data.rename(**newnames)
        assert_identical(renamed, renamed_kwargs)
def test_rename_old_name(self):
# regtest for GH1477
data = create_test_data()
with pytest.raises(ValueError, match=r"'samecol' conflicts"):
data.rename({"var1": "samecol", "var2": "samecol"})
# This shouldn't cause any problems.
data.rename({"var1": "var2", "var2": "var1"})
def test_rename_same_name(self):
data = create_test_data()
newnames = {"var1": "var1", "dim2": "dim2"}
renamed = data.rename(newnames)
assert_identical(renamed, data)
    def test_rename_dims(self):
        """rename_dims: dict and kwargs forms, unknown-dim and
        name-collision errors."""
        original = Dataset({"x": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42})
        # renaming the dim but not the variable turns x into a non-index coord
        expected = Dataset(
            {"x": ("x_new", [0, 1, 2]), "y": ("x_new", [10, 11, 12]), "z": 42}
        )
        expected = expected.set_coords("x")
        dims_dict = {"x": "x_new"}
        actual = original.rename_dims(dims_dict)
        assert_identical(expected, actual)
        actual_2 = original.rename_dims(**dims_dict)
        assert_identical(expected, actual_2)
        # Test to raise ValueError
        dims_dict_bad = {"x_bad": "x_new"}
        with pytest.raises(ValueError):
            original.rename_dims(dims_dict_bad)
        with pytest.raises(ValueError):
            original.rename_dims({"x": "z"})
    def test_rename_vars(self):
        """rename_vars: dict and kwargs forms, unknown-variable error."""
        original = Dataset({"x": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42})
        # renaming the variable but not the dim turns x_new into a coord
        expected = Dataset(
            {"x_new": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42}
        )
        expected = expected.set_coords("x_new")
        name_dict = {"x": "x_new"}
        actual = original.rename_vars(name_dict)
        assert_identical(expected, actual)
        actual_2 = original.rename_vars(**name_dict)
        assert_identical(expected, actual_2)
        # Test to raise ValueError
        names_dict_bad = {"x_bad": "x_new"}
        with pytest.raises(ValueError):
            original.rename_vars(names_dict_bad)
def test_rename_multiindex(self):
    """Renaming a coord onto one of its own MultiIndex level names must raise."""
    mindex = pd.MultiIndex.from_tuples(
        [([1, 2]), ([3, 4])], names=["level0", "level1"]
    )
    data = Dataset({}, {"x": mindex})
    with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
        data.rename({"x": "level0"})
@requires_cftime
def test_rename_does_not_change_CFTimeIndex_type(self):
    """Renaming must not downcast a CFTimeIndex to a DatetimeIndex."""
    # make sure CFTimeIndex is not converted to DatetimeIndex #3522
    time = xr.cftime_range(start="2000", periods=6, freq="2MS", calendar="noleap")
    orig = Dataset(coords={"time": time})
    renamed = orig.rename(time="time_new")
    assert "time_new" in renamed.xindexes
    # TODO: benbovy - flexible indexes: update when CFTimeIndex
    # inherits from xarray.Index
    assert isinstance(renamed.xindexes["time_new"].to_pandas_index(), CFTimeIndex)
    assert renamed.xindexes["time_new"].to_pandas_index().name == "time_new"
    # check original has not changed
    assert "time" in orig.xindexes
    assert isinstance(orig.xindexes["time"].to_pandas_index(), CFTimeIndex)
    assert orig.xindexes["time"].to_pandas_index().name == "time"
    # note: rename_dims(time="time_new") drops "ds.indexes"
    renamed = orig.rename_dims()
    assert isinstance(renamed.xindexes["time"].to_pandas_index(), CFTimeIndex)
    renamed = orig.rename_vars()
    assert isinstance(renamed.xindexes["time"].to_pandas_index(), CFTimeIndex)
def test_rename_does_not_change_DatetimeIndex_type(self):
    """Renaming must preserve the DatetimeIndex type of a time coordinate."""
    # make sure DatetimeIndex is conserved on rename
    time = pd.date_range(start="2000", periods=6, freq="2MS")
    orig = Dataset(coords={"time": time})
    renamed = orig.rename(time="time_new")
    assert "time_new" in renamed.xindexes
    # TODO: benbovy - flexible indexes: update when DatetimeIndex
    # inherits from xarray.Index?
    assert isinstance(renamed.xindexes["time_new"].to_pandas_index(), DatetimeIndex)
    assert renamed.xindexes["time_new"].to_pandas_index().name == "time_new"
    # check original has not changed
    assert "time" in orig.xindexes
    assert isinstance(orig.xindexes["time"].to_pandas_index(), DatetimeIndex)
    assert orig.xindexes["time"].to_pandas_index().name == "time"
    # note: rename_dims(time="time_new") drops "ds.indexes"
    renamed = orig.rename_dims()
    assert isinstance(renamed.xindexes["time"].to_pandas_index(), DatetimeIndex)
    renamed = orig.rename_vars()
    assert isinstance(renamed.xindexes["time"].to_pandas_index(), DatetimeIndex)
def test_swap_dims(self):
    """swap_dims re-dimensions variables, round-trips, validates targets,
    accepts kwargs, and handles a MultiIndex-valued replacement coord."""
    original = Dataset({"x": [1, 2, 3], "y": ("x", list("abc")), "z": 42})
    expected = Dataset({"z": 42}, {"x": ("y", [1, 2, 3]), "y": list("abc")})
    actual = original.swap_dims({"x": "y"})
    assert_identical(expected, actual)
    assert isinstance(actual.variables["y"], IndexVariable)
    assert isinstance(actual.variables["x"], Variable)
    pd.testing.assert_index_equal(
        actual.xindexes["y"].to_pandas_index(),
        expected.xindexes["y"].to_pandas_index(),
    )
    roundtripped = actual.swap_dims({"y": "x"})
    assert_identical(original.set_coords("y"), roundtripped)
    with pytest.raises(ValueError, match=r"cannot swap"):
        original.swap_dims({"y": "x"})
    with pytest.raises(ValueError, match=r"replacement dimension"):
        original.swap_dims({"x": "z"})
    # swapping onto a name with no matching variable keeps "x" as a coord
    expected = Dataset(
        {"y": ("u", list("abc")), "z": 42}, coords={"x": ("u", [1, 2, 3])}
    )
    actual = original.swap_dims({"x": "u"})
    assert_identical(expected, actual)
    # as kwargs
    expected = Dataset(
        {"y": ("u", list("abc")), "z": 42}, coords={"x": ("u", [1, 2, 3])}
    )
    actual = original.swap_dims(x="u")
    assert_identical(expected, actual)
    # handle multiindex case
    idx = pd.MultiIndex.from_arrays([list("aab"), list("yzz")], names=["y1", "y2"])
    original = Dataset({"x": [1, 2, 3], "y": ("x", idx), "z": 42})
    expected = Dataset({"z": 42}, {"x": ("y", [1, 2, 3]), "y": idx})
    actual = original.swap_dims({"x": "y"})
    assert_identical(expected, actual)
    assert isinstance(actual.variables["y"], IndexVariable)
    assert isinstance(actual.variables["x"], Variable)
    pd.testing.assert_index_equal(
        actual.xindexes["y"].to_pandas_index(),
        expected.xindexes["y"].to_pandas_index(),
    )
def test_expand_dims_error(self):
    """expand_dims rejects existing dims/coords, non-int scalar sizes, and
    mixing positional with keyword dimension arguments."""
    original = Dataset(
        {
            "x": ("a", np.random.randn(3)),
            "y": (["b", "a"], np.random.randn(4, 3)),
            "z": ("a", np.random.randn(3)),
        },
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    with pytest.raises(ValueError, match=r"already exists"):
        original.expand_dims(dim=["x"])
    # Make sure it raises true error also for non-dimensional coordinates
    # which has dimension.
    original = original.set_coords("z")
    with pytest.raises(ValueError, match=r"already exists"):
        original.expand_dims(dim=["z"])
    original = Dataset(
        {
            "x": ("a", np.random.randn(3)),
            "y": (["b", "a"], np.random.randn(4, 3)),
            "z": ("a", np.random.randn(3)),
        },
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    with pytest.raises(TypeError, match=r"value of new dimension"):
        original.expand_dims({"d": 3.2})
    with pytest.raises(ValueError, match=r"both keyword and positional"):
        original.expand_dims({"d": 4}, e=4)
def test_expand_dims_int(self):
    """expand_dims with an integer axis (positive and negative) inserts a
    length-1 dimension that squeeze() can undo."""
    original = Dataset(
        {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))},
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    actual = original.expand_dims(["z"], [1])
    expected = Dataset(
        {
            "x": original["x"].expand_dims("z", 1),
            "y": original["y"].expand_dims("z", 1),
        },
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    assert_identical(expected, actual)
    # make sure squeeze restores the original data set.
    roundtripped = actual.squeeze("z")
    assert_identical(original, roundtripped)
    # another test with a negative axis
    actual = original.expand_dims(["z"], [-1])
    expected = Dataset(
        {
            "x": original["x"].expand_dims("z", -1),
            "y": original["y"].expand_dims("z", -1),
        },
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    assert_identical(expected, actual)
    # make sure squeeze restores the original data set.
    roundtripped = actual.squeeze("z")
    assert_identical(original, roundtripped)
def test_expand_dims_coords(self):
    """expand_dims with coordinate values broadcasts the data along the new
    dimension and leaves the input dataset unmodified."""
    ds = Dataset({"x": ("a", np.array([1, 2, 3]))})
    result = ds.expand_dims({"b": [1, 2]})
    want = Dataset(
        {"x": (("b", "a"), np.array([[1, 2, 3], [1, 2, 3]]))}, coords={"b": [1, 2]}
    )
    assert_identical(want, result)
    assert "b" not in ds._coord_names
def test_expand_dims_existing_scalar_coord(self):
    """expand_dims promotes an existing scalar coordinate to a length-1 dim."""
    ds = Dataset({"x": 1}, {"a": 2})
    want = Dataset({"x": (("a",), [1])}, {"a": [2]})
    assert_identical(want, ds.expand_dims("a"))
def test_isel_expand_dims_roundtrip(self):
    """isel followed by expand_dims on the same dim restores the original."""
    ds = Dataset({"x": (("a",), [1])}, {"a": [2]})
    roundtripped = ds.isel(a=0).expand_dims("a")
    assert_identical(roundtripped, ds)
def test_expand_dims_mixed_int_and_coords(self):
    # Test expanding one dimension to have size > 1 that doesn't have
    # coordinates, and also expanding another dimension to have size > 1
    # that DOES have coordinates.
    original = Dataset(
        {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))},
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
    )
    actual = original.expand_dims({"d": 4, "e": ["l", "m", "n"]})
    # "d" (int-sized) gets no coordinate, hence drop_vars("d") below;
    # "e" keeps its explicit coordinate values.
    expected = Dataset(
        {
            "x": xr.DataArray(
                original["x"].values * np.ones([4, 3, 3]),
                coords=dict(d=range(4), e=["l", "m", "n"], a=np.linspace(0, 1, 3)),
                dims=["d", "e", "a"],
            ).drop_vars("d"),
            "y": xr.DataArray(
                original["y"].values * np.ones([4, 3, 4, 3]),
                coords=dict(
                    d=range(4),
                    e=["l", "m", "n"],
                    b=np.linspace(0, 1, 4),
                    a=np.linspace(0, 1, 3),
                ),
                dims=["d", "e", "b", "a"],
            ).drop_vars("d"),
        },
        coords={"c": np.linspace(0, 1, 5)},
    )
    assert_identical(actual, expected)
def test_expand_dims_kwargs_python36plus(self):
    """expand_dims accepts keyword-argument form (e=[...]) and matches the
    expected broadcast result with attrs preserved."""
    original = Dataset(
        {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))},
        coords={
            "a": np.linspace(0, 1, 3),
            "b": np.linspace(0, 1, 4),
            "c": np.linspace(0, 1, 5),
        },
        attrs={"key": "entry"},
    )
    other_way = original.expand_dims(e=["l", "m", "n"])
    other_way_expected = Dataset(
        {
            "x": xr.DataArray(
                original["x"].values * np.ones([3, 3]),
                coords=dict(e=["l", "m", "n"], a=np.linspace(0, 1, 3)),
                dims=["e", "a"],
            ),
            "y": xr.DataArray(
                original["y"].values * np.ones([3, 4, 3]),
                coords=dict(
                    e=["l", "m", "n"],
                    b=np.linspace(0, 1, 4),
                    a=np.linspace(0, 1, 3),
                ),
                dims=["e", "b", "a"],
            ),
        },
        coords={"c": np.linspace(0, 1, 5)},
        attrs={"key": "entry"},
    )
    assert_identical(other_way_expected, other_way)
def test_set_index(self):
    """set_index builds a MultiIndex from level coords, keeps single-var
    indexes flat, and gives a clear error for unknown variables."""
    expected = create_test_multiindex()
    mindex = expected["x"].to_index()
    indexes = [mindex.get_level_values(n) for n in mindex.names]
    coords = {idx.name: ("x", idx) for idx in indexes}
    ds = Dataset({}, coords=coords)
    obj = ds.set_index(x=mindex.names)
    assert_identical(obj, expected)
    # ensure set_index with no existing index and a single data var given
    # doesn't return multi-index
    ds = Dataset(data_vars={"x_var": ("x", [0, 1, 2])})
    expected = Dataset(coords={"x": [0, 1, 2]})
    assert_identical(ds.set_index(x="x_var"), expected)
    # Issue 3176: Ensure clear error message on key error.
    with pytest.raises(ValueError) as excinfo:
        ds.set_index(foo="bar")
    assert str(excinfo.value) == "bar is not the name of an existing variable."
def test_reset_index(self):
    """reset_index('x') turns MultiIndex levels back into plain coordinates."""
    ds = create_test_multiindex()
    mindex = ds["x"].to_index()
    indexes = [mindex.get_level_values(n) for n in mindex.names]
    coords = {idx.name: ("x", idx) for idx in indexes}
    expected = Dataset({}, coords=coords)
    obj = ds.reset_index("x")
    assert_identical(obj, expected)
def test_reset_index_keep_attrs(self):
    """reset_index keeps coordinate attrs on the renamed ("_"-suffixed) coord."""
    coord = DataArray([1, 2], dims=["coord_1"], attrs={"attrs": True})
    ds = Dataset({}, {"coord_1": coord})
    want = Dataset({}, {"coord_1_": coord})
    assert_identical(want, ds.reset_index("coord_1"))
def test_reorder_levels(self):
    """reorder_levels permutes MultiIndex levels and raises on flat indexes."""
    ds = create_test_multiindex()
    mindex = ds["x"].to_index()
    midx = mindex.reorder_levels(["level_2", "level_1"])
    expected = Dataset({}, coords={"x": midx})
    reindexed = ds.reorder_levels(x=["level_2", "level_1"])
    assert_identical(reindexed, expected)
    ds = Dataset({}, coords={"x": [1, 2]})
    with pytest.raises(ValueError, match=r"has no MultiIndex"):
        ds.reorder_levels(x=["level_1", "level_2"])
def test_stack(self):
    """stack builds a MultiIndex dim from explicit dims, ellipsis forms, and
    respects the requested dim ordering."""
    ds = Dataset(
        {"a": ("x", [0, 1]), "b": (("x", "y"), [[0, 1], [2, 3]]), "y": ["a", "b"]}
    )
    exp_index = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["x", "y"])
    expected = Dataset(
        {"a": ("z", [0, 0, 1, 1]), "b": ("z", [0, 1, 2, 3]), "z": exp_index}
    )
    actual = ds.stack(z=["x", "y"])
    assert_identical(expected, actual)
    actual = ds.stack(z=[...])
    assert_identical(expected, actual)
    # non list dims with ellipsis
    actual = ds.stack(z=(...,))
    assert_identical(expected, actual)
    # ellipsis with given dim
    actual = ds.stack(z=[..., "y"])
    assert_identical(expected, actual)
    # reversed dim order changes the stacking order accordingly
    exp_index = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["y", "x"])
    expected = Dataset(
        {"a": ("z", [0, 1, 0, 1]), "b": ("z", [0, 2, 1, 3]), "z": exp_index}
    )
    actual = ds.stack(z=["y", "x"])
    assert_identical(expected, actual)
def test_unstack(self):
    """unstack accepts a dim name, a list of dims, or None (all stacked dims)."""
    index = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["x", "y"])
    stacked = Dataset({"b": ("z", [0, 1, 2, 3]), "z": index})
    want = Dataset(
        {"b": (("x", "y"), [[0, 1], [2, 3]]), "x": [0, 1], "y": ["a", "b"]}
    )
    for dim_arg in ("z", ["z"], None):
        assert_identical(stacked.unstack(dim_arg), want)
def test_unstack_errors(self):
    """unstack raises for missing dims and for dims without a MultiIndex."""
    ds = Dataset({"x": [1, 2, 3]})
    with pytest.raises(ValueError, match=r"does not contain the dimensions"):
        ds.unstack("foo")
    with pytest.raises(ValueError, match=r"do not have a MultiIndex"):
        ds.unstack("x")
def test_unstack_fill_value(self):
    """unstack fill_value (scalar or per-variable dict) replaces the NaNs
    that would otherwise appear for missing index combinations."""
    ds = xr.Dataset(
        {"var": (("x",), np.arange(6)), "other_var": (("x",), np.arange(3, 9))},
        coords={"x": [0, 1, 2] * 2, "y": (("x",), ["a"] * 3 + ["b"] * 3)},
    )
    # make ds incomplete
    ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=["x", "y"])
    # test fill_value
    actual = ds.unstack("index", fill_value=-1)
    expected = ds.unstack("index").fillna(-1).astype(int)
    assert actual["var"].dtype == int
    assert_equal(actual, expected)
    actual = ds["var"].unstack("index", fill_value=-1)
    expected = ds["var"].unstack("index").fillna(-1).astype(int)
    assert_equal(actual, expected)
    actual = ds.unstack("index", fill_value={"var": -1, "other_var": 1})
    expected = ds.unstack("index").fillna({"var": -1, "other_var": 1}).astype(int)
    assert_equal(actual, expected)
@requires_sparse
def test_unstack_sparse(self):
    """unstack(sparse=True) yields sparse arrays equal to the dense result
    (with density < 1 because the index is incomplete)."""
    ds = xr.Dataset(
        {"var": (("x",), np.arange(6))},
        coords={"x": [0, 1, 2] * 2, "y": (("x",), ["a"] * 3 + ["b"] * 3)},
    )
    # make ds incomplete
    ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=["x", "y"])
    # test fill_value
    actual = ds.unstack("index", sparse=True)
    expected = ds.unstack("index")
    assert actual["var"].variable._to_dense().equals(expected["var"].variable)
    assert actual["var"].data.density < 1.0
    actual = ds["var"].unstack("index", sparse=True)
    expected = ds["var"].unstack("index")
    assert actual.variable._to_dense().equals(expected.variable)
    assert actual.data.density < 1.0
def test_stack_unstack_fast(self):
    """stack then unstack round-trips via the fast (pandas reshape) path."""
    ds = Dataset(
        {
            "a": ("x", [0, 1]),
            "b": (("x", "y"), [[0, 1], [2, 3]]),
            "x": [0, 1],
            "y": ["a", "b"],
        }
    )
    actual = ds.stack(z=["x", "y"]).unstack("z")
    assert actual.broadcast_equals(ds)
    actual = ds[["b"]].stack(z=["x", "y"]).unstack("z")
    assert actual.identical(ds[["b"]])
def test_stack_unstack_slow(self):
    """stack then unstack round-trips on the slow path (reversed order
    defeats the full-reshape fast path)."""
    ds = Dataset(
        {
            "a": ("x", [0, 1]),
            "b": (("x", "y"), [[0, 1], [2, 3]]),
            "x": [0, 1],
            "y": ["a", "b"],
        }
    )
    stacked = ds.stack(z=["x", "y"])
    actual = stacked.isel(z=slice(None, None, -1)).unstack("z")
    assert actual.broadcast_equals(ds)
    stacked = ds[["b"]].stack(z=["x", "y"])
    actual = stacked.isel(z=slice(None, None, -1)).unstack("z")
    assert actual.identical(ds[["b"]])
def test_to_stacked_array_invalid_sample_dims(self):
    """sample_dims that do not cover every variable's dims must raise."""
    data = xr.Dataset(
        data_vars={"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])},
        coords={"y": ["u", "v", "w"]},
    )
    with pytest.raises(ValueError):
        data.to_stacked_array("features", sample_dims=["y"])
def test_to_stacked_array_name(self):
    """The name argument is carried onto the resulting DataArray."""
    name = "adf9d"
    # make a two dimensional dataset
    a, b = create_test_stacked_array()
    D = xr.Dataset({"a": a, "b": b})
    sample_dims = ["x"]
    y = D.to_stacked_array("features", sample_dims, name=name)
    assert y.name == name
def test_to_stacked_array_dtype_dims(self):
    """The stacked dim goes last and level dtypes come from the source coords."""
    # make a two dimensional dataset
    a, b = create_test_stacked_array()
    D = xr.Dataset({"a": a, "b": b})
    sample_dims = ["x"]
    y = D.to_stacked_array("features", sample_dims)
    # TODO: benbovy - flexible indexes: update when MultiIndex has its own class
    # inherited from xarray.Index
    assert y.xindexes["features"].to_pandas_index().levels[1].dtype == D.y.dtype
    assert y.dims == ("x", "features")
def test_to_stacked_array_to_unstacked_dataset(self):
    """to_stacked_array round-trips via to_unstacked_dataset, including for
    a 1-D dataset and for a single sample slice."""
    # single dimension: regression test for GH4049
    arr = xr.DataArray(np.arange(3), coords=[("x", [0, 1, 2])])
    data = xr.Dataset({"a": arr, "b": arr})
    stacked = data.to_stacked_array("y", sample_dims=["x"])
    unstacked = stacked.to_unstacked_dataset("y")
    assert_identical(unstacked, data)
    # make a two dimensional dataset
    a, b = create_test_stacked_array()
    D = xr.Dataset({"a": a, "b": b})
    sample_dims = ["x"]
    y = D.to_stacked_array("features", sample_dims).transpose("x", "features")
    x = y.to_unstacked_dataset("features")
    assert_identical(D, x)
    # test on just one sample
    x0 = y[0].to_unstacked_dataset("features")
    d0 = D.isel(x=0)
    assert_identical(d0, x0)
def test_to_stacked_array_to_unstacked_dataset_different_dimension(self):
    """Round-trip works when variables differ in dimensionality."""
    # test when variables have different dimensionality
    a, b = create_test_stacked_array()
    sample_dims = ["x"]
    D = xr.Dataset({"a": a, "b": b.isel(y=0)})
    y = D.to_stacked_array("features", sample_dims)
    x = y.to_unstacked_dataset("features")
    assert_identical(D, x)
def test_update(self):
    """update adds/overwrites variables in place and returns the receiver;
    updating with an attrs-only dataset leaves variables unchanged."""
    data = create_test_data(seed=0)
    expected = data.copy()
    var2 = Variable("dim1", np.arange(8))
    actual = data.update({"var2": var2})
    expected["var2"] = var2
    assert_identical(expected, actual)
    actual = data.copy()
    actual_result = actual.update(data)
    # update mutates and returns the same object
    assert actual_result is actual
    assert_identical(expected, actual)
    other = Dataset(attrs={"new": "attr"})
    actual = data.copy()
    actual.update(other)
    assert_identical(expected, actual)
def test_update_overwrite_coords(self):
    """Dataset coords in `other` overwrite; DataArray-private coords don't."""
    data = Dataset({"a": ("x", [1, 2])}, {"b": 3})
    data.update(Dataset(coords={"b": 4}))
    expected = Dataset({"a": ("x", [1, 2])}, {"b": 4})
    assert_identical(data, expected)
    data = Dataset({"a": ("x", [1, 2])}, {"b": 3})
    data.update(Dataset({"c": 5}, coords={"b": 4}))
    expected = Dataset({"a": ("x", [1, 2]), "c": 5}, {"b": 4})
    assert_identical(data, expected)
    data = Dataset({"a": ("x", [1, 2])}, {"b": 3})
    # coords attached to a DataArray value do not clobber dataset coords
    data.update({"c": DataArray(5, coords={"b": 4})})
    expected = Dataset({"a": ("x", [1, 2]), "c": 5}, {"b": 3})
    assert_identical(data, expected)
def test_update_auto_align(self):
    """update aligns Dataset arguments to the receiver's index (filling with
    NaN), but a raw dict with mismatched sizes raises."""
    ds = Dataset({"x": ("t", [3, 4])}, {"t": [0, 1]})
    expected = Dataset({"x": ("t", [3, 4]), "y": ("t", [np.nan, 5])}, {"t": [0, 1]})
    actual = ds.copy()
    other = {"y": ("t", [5]), "t": [1]}
    with pytest.raises(ValueError, match=r"conflicting sizes"):
        actual.update(other)
    actual.update(Dataset(other))
    assert_identical(expected, actual)
    actual = ds.copy()
    other = Dataset({"y": ("t", [5]), "t": [100]})
    # no overlap with receiver's index -> all-NaN after alignment
    actual.update(other)
    expected = Dataset(
        {"x": ("t", [3, 4]), "y": ("t", [np.nan] * 2)}, {"t": [0, 1]}
    )
    assert_identical(expected, actual)
def test_getitem(self):
    """__getitem__ supports variable names, name lists, and dict indexing."""
    data = create_test_data()
    assert isinstance(data["var1"], DataArray)
    assert_equal(data["var1"].variable, data.variables["var1"])
    with pytest.raises(KeyError):
        data["notfound"]
    with pytest.raises(KeyError):
        data[["var1", "notfound"]]
    actual = data[["var1", "var2"]]
    expected = Dataset({"var1": data["var1"], "var2": data["var2"]})
    assert_equal(expected, actual)
    actual = data["numbers"]
    expected = DataArray(
        data["numbers"].variable,
        {"dim3": data["dim3"], "numbers": data["numbers"]},
        dims="dim3",
        name="numbers",
    )
    assert_identical(expected, actual)
    # dict argument behaves like isel
    actual = data[dict(dim1=0)]
    expected = data.isel(dim1=0)
    assert_identical(expected, actual)
def test_getitem_hashable(self):
    """Any hashable (e.g. a tuple) works as a variable key; a tuple of
    existing names is NOT treated as a list selection."""
    data = create_test_data()
    data[(3, 4)] = data["var1"] + 1
    expected = data["var1"] + 1
    expected.name = (3, 4)
    assert_identical(expected, data[(3, 4)])
    with pytest.raises(KeyError, match=r"('var1', 'var2')"):
        data[("var1", "var2")]
def test_virtual_variables_default_coords(self):
    """Accessing an unset dimension name yields a default range coordinate."""
    dataset = Dataset({"foo": ("x", range(10))})
    expected = DataArray(range(10), dims="x", name="x")
    actual = dataset["x"]
    assert_identical(expected, actual)
    assert isinstance(actual.variable, IndexVariable)
    actual = dataset[["x", "foo"]]
    expected = dataset.assign_coords(x=range(10))
    assert_identical(expected, actual)
def test_virtual_variables_time(self):
    """Datetime accessors ('time.month', 'time.season', ...) work as virtual
    variables, support math, and become coordinates when selected."""
    # access virtual variables
    data = create_test_data()
    expected = DataArray(
        1 + np.arange(20), coords=[data["time"]], dims="time", name="dayofyear"
    )
    assert_array_equal(
        data["time.month"].values, data.variables["time"].to_index().month
    )
    assert_array_equal(data["time.season"].values, "DJF")
    # test virtual variable math
    assert_array_equal(data["time.dayofyear"] + 1, 2 + np.arange(20))
    assert_array_equal(np.sin(data["time.dayofyear"]), np.sin(1 + np.arange(20)))
    # ensure they become coordinates
    expected = Dataset({}, {"dayofyear": data["time.dayofyear"]})
    actual = data[["time.dayofyear"]]
    assert_equal(expected, actual)
    # non-coordinate variables
    ds = Dataset({"t": ("x", pd.date_range("2000-01-01", periods=3))})
    assert (ds["t.year"] == 2000).all()
def test_virtual_variable_same_name(self):
    """'time.time' resolves to the time-of-day accessor, not the coord itself."""
    # regression test for GH367
    times = pd.date_range("2000-01-01", freq="H", periods=5)
    data = Dataset({"time": times})
    actual = data["time.time"]
    expected = DataArray(times.time, [("time", times)], name="time")
    assert_identical(actual, expected)
def test_virtual_variable_multiindex(self):
    """MultiIndex levels act as virtual variables, including datetime
    accessors on datetime-valued levels and attribute-style access."""
    # access multi-index levels as virtual variables
    data = create_test_multiindex()
    expected = DataArray(
        ["a", "a", "b", "b"],
        name="level_1",
        coords=[data["x"].to_index()],
        dims="x",
    )
    assert_identical(expected, data["level_1"])
    # combine multi-index level and datetime
    dr_index = pd.date_range("1/1/2011", periods=4, freq="H")
    mindex = pd.MultiIndex.from_arrays(
        [["a", "a", "b", "b"], dr_index], names=("level_str", "level_date")
    )
    data = Dataset({}, {"x": mindex})
    expected = DataArray(
        mindex.get_level_values("level_date").hour,
        name="hour",
        coords=[mindex],
        dims="x",
    )
    assert_identical(expected, data["level_date.hour"])
    # attribute style access
    assert_identical(data.level_str, data["level_str"])
def test_time_season(self):
    """The 't.season' virtual variable maps months to meteorological seasons."""
    months = pd.date_range("2000-01-01", periods=12, freq="M")
    ds = Dataset({"t": months})
    want = ["DJF"] * 2 + ["MAM"] * 3 + ["JJA"] * 3 + ["SON"] * 3 + ["DJF"]
    assert_array_equal(want, ds["t.season"])
def test_slice_virtual_variable(self):
    """Virtual variables support slicing and scalar indexing."""
    data = create_test_data()
    assert_equal(
        data["time.dayofyear"][:10].variable, Variable(["time"], 1 + np.arange(10))
    )
    assert_equal(data["time.dayofyear"][0].variable, Variable([], 1))
def test_setitem(self):
    """Exhaustive __setitem__ checks: Variable/DataArray/ndarray/scalar
    assignment, dimension conflicts, and dict-style positional/label
    assignment with its error cases.  Assertion order matters: later steps
    reuse datasets mutated by earlier ones."""
    # assign a variable
    var = Variable(["dim1"], np.random.randn(8))
    data1 = create_test_data()
    data1["A"] = var
    data2 = data1.copy()
    data2["A"] = var
    assert_identical(data1, data2)
    # assign a dataset array
    dv = 2 * data2["A"]
    data1["B"] = dv.variable
    data2["B"] = dv
    assert_identical(data1, data2)
    # can't assign an ND array without dimensions
    with pytest.raises(ValueError, match=r"without explicit dimension names"):
        data2["C"] = var.values.reshape(2, 4)
    # but can assign a 1D array
    data1["C"] = var.values
    data2["C"] = ("C", var.values)
    assert_identical(data1, data2)
    # can assign a scalar
    data1["scalar"] = 0
    data2["scalar"] = ([], 0)
    assert_identical(data1, data2)
    # can't use the same dimension name as a scalar var
    with pytest.raises(ValueError, match=r"already exists as a scalar"):
        data1["newvar"] = ("scalar", [3, 4, 5])
    # can't resize a used dimension
    with pytest.raises(ValueError, match=r"arguments without labels"):
        data1["dim1"] = data1["dim1"][:5]
    # override an existing value
    data1["A"] = 3 * data2["A"]
    assert_equal(data1["A"], 3 * data2["A"])
    # test assignment with positional and label-based indexing
    data3 = data1[["var1", "var2"]]
    data3["var3"] = data3.var1.isel(dim1=0)
    data4 = data3.copy()
    err_msg = (
        "can only set locations defined by dictionaries from Dataset.loc. Got: a"
    )
    with pytest.raises(TypeError, match=err_msg):
        data1.loc["a"] = 0
    err_msg = r"Variables \['A', 'B', 'scalar'\] in new values not available in original dataset:"
    with pytest.raises(ValueError, match=err_msg):
        data4[{"dim2": 1}] = data1[{"dim2": 2}]
    err_msg = "Variable 'var3': indexer {'dim2': 0} not available"
    with pytest.raises(ValueError, match=err_msg):
        data1[{"dim2": 0}] = 0.0
    err_msg = "Variable 'var1': indexer {'dim2': 10} not available"
    with pytest.raises(ValueError, match=err_msg):
        data4[{"dim2": 10}] = data3[{"dim2": 2}]
    err_msg = "Variable 'var1': dimension 'dim2' appears in new values"
    with pytest.raises(KeyError, match=err_msg):
        data4[{"dim2": 2}] = data3[{"dim2": [2]}]
    err_msg = (
        "Variable 'var2': dimension order differs between original and new data"
    )
    # transpose var2 to trigger the order mismatch, then transpose it back
    data3["var2"] = data3["var2"].T
    with pytest.raises(ValueError, match=err_msg):
        data4[{"dim2": [2, 3]}] = data3[{"dim2": [2, 3]}]
    data3["var2"] = data3["var2"].T
    err_msg = "indexes along dimension 'dim2' are not equal"
    with pytest.raises(ValueError, match=err_msg):
        data4[{"dim2": [2, 3]}] = data3[{"dim2": [2, 3, 4]}]
    err_msg = "Dataset assignment only accepts DataArrays, Datasets, and scalars."
    with pytest.raises(TypeError, match=err_msg):
        data4[{"dim2": [2, 3]}] = data3["var1"][{"dim2": [3, 4]}].values
    data5 = data4.astype(str)
    data5["var4"] = data4["var1"]
    err_msg = "could not convert string to float: 'a'"
    with pytest.raises(ValueError, match=err_msg):
        data5[{"dim2": 1}] = "a"
    # successful positional and label-based assignments
    data4[{"dim2": 0}] = 0.0
    data4[{"dim2": 1}] = data3[{"dim2": 2}]
    data4.loc[{"dim2": 1.5}] = 1.0
    data4.loc[{"dim2": 2.0}] = data3.loc[{"dim2": 2.5}]
    for v, dat3 in data3.items():
        dat4 = data4[v]
        assert_array_equal(dat4[{"dim2": 0}], 0.0)
        assert_array_equal(dat4[{"dim2": 1}], dat3[{"dim2": 2}])
        assert_array_equal(dat4.loc[{"dim2": 1.5}], 1.0)
        assert_array_equal(dat4.loc[{"dim2": 2.0}], dat3.loc[{"dim2": 2.5}])
        unchanged = [1.0, 2.5, 3.0, 3.5, 4.0]
        assert_identical(
            dat4.loc[{"dim2": unchanged}], dat3.loc[{"dim2": unchanged}]
        )
def test_setitem_pandas(self):
    """Assigning a pandas object round-trips to the same xarray variable."""
    ds = self.make_example_math_dataset()
    ds["x"] = np.arange(3)
    clone = ds.copy()
    clone["bar"] = ds["bar"].to_pandas()
    assert_equal(ds, clone)
def test_setitem_auto_align(self):
    """__setitem__ aligns DataArray values to existing indexes: shorter
    inputs pad with NaN, longer inputs are truncated."""
    ds = Dataset()
    ds["x"] = ("y", range(3))
    ds["y"] = 1 + np.arange(3)
    expected = Dataset({"x": ("y", range(3)), "y": 1 + np.arange(3)})
    assert_identical(ds, expected)
    ds["y"] = DataArray(range(3), dims="y")
    expected = Dataset({"x": ("y", range(3))}, {"y": range(3)})
    assert_identical(ds, expected)
    ds["x"] = DataArray([1, 2], coords=[("y", [0, 1])])
    expected = Dataset({"x": ("y", [1, 2, np.nan])}, {"y": range(3)})
    assert_identical(ds, expected)
    ds["x"] = 42
    expected = Dataset({"x": 42, "y": range(3)})
    assert_identical(ds, expected)
    ds["x"] = DataArray([4, 5, 6, 7], coords=[("y", [0, 1, 2, 3])])
    expected = Dataset({"x": ("y", [4, 5, 6])}, {"y": range(3)})
    assert_identical(ds, expected)
def test_setitem_dimension_override(self):
    """Assigning a shorter value to a dimension coordinate resizes the dim."""
    # regression test for GH-3377
    ds = xr.Dataset({"x": [0, 1, 2]})
    ds["x"] = ds["x"][:2]
    expected = Dataset({"x": [0, 1]})
    assert_identical(ds, expected)
    ds = xr.Dataset({"x": [0, 1, 2]})
    ds["x"] = np.array([0, 1])
    assert_identical(ds, expected)
    ds = xr.Dataset({"x": [0, 1, 2]})
    ds.coords["x"] = [0, 1]
    assert_identical(ds, expected)
def test_setitem_with_coords(self):
    """Assigning a DataArray drops coords that duplicate the dataset's,
    keeps genuinely new coords, and never mutates the assigned object."""
    # Regression test for GH:2068
    ds = create_test_data()
    other = DataArray(
        np.arange(10), dims="dim3", coords={"numbers": ("dim3", np.arange(10))}
    )
    expected = ds.copy()
    expected["var3"] = other.drop_vars("numbers")
    actual = ds.copy()
    actual["var3"] = other
    assert_identical(expected, actual)
    assert "numbers" in other.coords  # should not change other
    # with alignment
    other = ds["var3"].isel(dim3=slice(1, -1))
    other["numbers"] = ("dim3", np.arange(8))
    actual = ds.copy()
    actual["var3"] = other
    assert "numbers" in other.coords  # should not change other
    expected = ds.copy()
    expected["var3"] = ds["var3"].isel(dim3=slice(1, -1))
    assert_identical(expected, actual)
    # with non-duplicate coords
    other = ds["var3"].isel(dim3=slice(1, -1))
    other["numbers"] = ("dim3", np.arange(8))
    other["position"] = ("dim3", np.arange(8))
    actual = ds.copy()
    actual["var3"] = other
    assert "position" in actual
    assert "position" in other.coords
    # assigning a coordinate-only dataarray
    actual = ds.copy()
    other = actual["numbers"]
    other[0] = 10
    actual["numbers"] = other
    assert actual["numbers"][0] == 10
    # GH: 2099
    ds = Dataset(
        {"var": ("x", [1, 2, 3])},
        coords={"x": [0, 1, 2], "z1": ("x", [1, 2, 3]), "z2": ("x", [1, 2, 3])},
    )
    ds["var"] = ds["var"] * 2
    assert np.allclose(ds["var"], [2, 4, 6])
def test_setitem_align_new_indexes(self):
    """A new variable with its own index is aligned to the dataset's index,
    with non-overlapping labels filled with NaN."""
    ds = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]})
    ds["bar"] = DataArray([2, 3, 4], [("x", [1, 2, 3])])
    expected = Dataset(
        {"foo": ("x", [1, 2, 3]), "bar": ("x", [np.nan, 2, 3])}, {"x": [0, 1, 2]}
    )
    assert_identical(ds, expected)
@pytest.mark.parametrize("dtype", [str, bytes])
def test_setitem_str_dtype(self, dtype):
    """Assigning along a str/bytes coordinate preserves the coord's dtype."""
    ds = xr.Dataset(coords={"x": np.array(["x", "y"], dtype=dtype)})
    ds["foo"] = xr.DataArray(np.array([0, 0]), dims=["x"])
    assert np.issubdtype(ds.x.dtype, dtype)
def test_setitem_using_list(self):
    """A list of names on the left accepts a matching list of values."""
    # assign a list of variables
    var1 = Variable(["dim1"], np.random.randn(8))
    var2 = Variable(["dim1"], np.random.randn(8))
    actual = create_test_data()
    expected = actual.copy()
    expected["A"] = var1
    expected["B"] = var2
    actual[["A", "B"]] = [var1, var2]
    assert_identical(actual, expected)
    # assign a list of dataset arrays
    dv = 2 * expected[["A", "B"]]
    actual[["C", "D"]] = [d.variable for d in dv.data_vars.values()]
    expected[["C", "D"]] = dv
    assert_identical(actual, expected)
@pytest.mark.parametrize(
    "var_list, data, error_regex",
    [
        (
            ["A", "B"],
            [Variable(["dim1"], np.random.randn(8))],
            r"Different lengths",
        ),
        ([], [Variable(["dim1"], np.random.randn(8))], r"Empty list of variables"),
        (["A", "B"], xr.DataArray([1, 2]), r"assign single DataArray"),
    ],
)
def test_setitem_using_list_errors(self, var_list, data, error_regex):
    """List-of-names assignment raises on length mismatch, an empty name
    list, or a single DataArray supplied for multiple names."""
    actual = create_test_data()
    with pytest.raises(ValueError, match=error_regex):
        actual[var_list] = data
def test_assign(self):
    """assign returns a new dataset (original untouched), accepts callables,
    and composes with groupby.assign / assign_coords."""
    ds = Dataset()
    actual = ds.assign(x=[0, 1, 2], y=2)
    expected = Dataset({"x": [0, 1, 2], "y": 2})
    assert_identical(actual, expected)
    assert list(actual.variables) == ["x", "y"]
    assert_identical(ds, Dataset())
    actual = actual.assign(y=lambda ds: ds.x ** 2)
    expected = Dataset({"y": ("x", [0, 1, 4]), "x": [0, 1, 2]})
    assert_identical(actual, expected)
    actual = actual.assign_coords(z=2)
    expected = Dataset({"y": ("x", [0, 1, 4])}, {"z": 2, "x": [0, 1, 2]})
    assert_identical(actual, expected)
    ds = Dataset({"a": ("x", range(3))}, {"b": ("x", ["A"] * 2 + ["B"])})
    actual = ds.groupby("b").assign(c=lambda ds: 2 * ds.a)
    expected = ds.merge({"c": ("x", [0, 2, 4])})
    assert_identical(actual, expected)
    actual = ds.groupby("b").assign(c=lambda ds: ds.a.sum())
    expected = ds.merge({"c": ("x", [1, 1, 2])})
    assert_identical(actual, expected)
    actual = ds.groupby("b").assign_coords(c=lambda ds: ds.a.sum())
    expected = expected.set_coords("c")
    assert_identical(actual, expected)
def test_assign_coords(self):
    """assign_coords accepts both keyword and dict forms."""
    ds = Dataset()
    actual = ds.assign(x=[0, 1, 2], y=2)
    actual = actual.assign_coords(x=list("abc"))
    expected = Dataset({"x": list("abc"), "y": 2})
    assert_identical(actual, expected)
    actual = ds.assign(x=[0, 1, 2], y=[2, 3])
    actual = actual.assign_coords({"y": [2.0, 3.0]})
    expected = ds.assign(x=[0, 1, 2], y=[2.0, 3.0])
    assert_identical(actual, expected)
def test_assign_attrs(self):
    """assign_attrs returns a new object; the receiver's attrs are untouched."""
    base = Dataset()
    first = base.assign_attrs(a=1, b=2)
    assert_identical(first, Dataset(attrs=dict(a=1, b=2)))
    assert base.attrs == {}
    second = first.assign_attrs({"c": 3})
    assert_identical(second, Dataset(attrs=dict(a=1, b=2, c=3)))
    assert first.attrs == dict(a=1, b=2)
def test_assign_multiindex_level(self):
    """Assigning over a MultiIndex level name must raise a conflict error."""
    data = create_test_multiindex()
    with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
        data.assign(level_1=range(4))
        data.assign_coords(level_1=range(4))
    # raise an Error when any level name is used as dimension GH:2299
    with pytest.raises(ValueError):
        data["y"] = ("level_1", [0, 1])
def test_merge_multiindex_level(self):
    """merge rejects datasets whose dims or variables collide with a
    MultiIndex level name."""
    data = create_test_multiindex()
    other = Dataset({"z": ("level_1", [0, 1])})  # conflict dimension
    with pytest.raises(ValueError):
        data.merge(other)
    other = Dataset({"level_1": ("x", [0, 1])})  # conflict variable name
    with pytest.raises(ValueError):
        data.merge(other)
def test_setitem_original_non_unique_index(self):
    """Replacing a non-unique index coordinate works via all three forms."""
    # regression test for GH943
    original = Dataset({"data": ("x", np.arange(5))}, coords={"x": [0, 1, 2, 0, 1]})
    expected = Dataset({"data": ("x", np.arange(5))}, {"x": range(5)})
    actual = original.copy()
    actual["x"] = list(range(5))
    assert_identical(actual, expected)
    actual = original.copy()
    actual["x"] = ("x", list(range(5)))
    assert_identical(actual, expected)
    actual = original.copy()
    actual.coords["x"] = list(range(5))
    assert_identical(actual, expected)
def test_setitem_both_non_unique_index(self):
    """Assignment works when both sides share the same non-unique index."""
    # regression test for GH956
    names = ["joaquin", "manolo", "joaquin"]
    values = np.random.randint(0, 256, (3, 4, 4))
    array = DataArray(
        values, dims=["name", "row", "column"], coords=[names, range(4), range(4)]
    )
    expected = Dataset({"first": array, "second": array})
    actual = array.rename("first").to_dataset()
    actual["second"] = array
    assert_identical(expected, actual)
def test_setitem_multiindex_level(self):
    """Assigning directly to a MultiIndex level name must raise."""
    ds = create_test_multiindex()
    with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
        ds["level_1"] = range(4)
def test_delitem(self):
    """del removes the variable and any coord it implied (e.g. 'numbers')."""
    data = create_test_data()
    all_items = set(data.variables)
    assert set(data.variables) == all_items
    del data["var1"]
    assert set(data.variables) == all_items - {"var1"}
    del data["numbers"]
    assert set(data.variables) == all_items - {"var1", "numbers"}
    assert "numbers" not in data.coords
    expected = Dataset()
    actual = Dataset({"y": ("x", [1, 2])})
    del actual["y"]
    assert_identical(expected, actual)
    def test_squeeze(self):
        """Dataset.squeeze matches squeezing each variable individually."""
        data = Dataset({"foo": (["x", "y", "z"], [[[1], [2]]])})
        for args in [[], [["x"]], [["x", "z"]]]:
            def get_args(v):
                # restrict the requested dims to those the variable has
                return [set(args[0]) & set(v.dims)] if args else []
            expected = Dataset(
                {k: v.squeeze(*get_args(v)) for k, v in data.variables.items()}
            )
            expected = expected.set_coords(data.coords)
            assert_identical(expected, data.squeeze(*args))
        # invalid squeeze: "y" has length 2, not 1
        with pytest.raises(ValueError, match=r"cannot select a dimension"):
            data.squeeze("y")
    def test_squeeze_drop(self):
        """squeeze(drop=True) drops scalar coords left over from squeezed dims."""
        data = Dataset({"foo": ("x", [1])}, {"x": [0]})
        expected = Dataset({"foo": 1})
        selected = data.squeeze(drop=True)
        assert_identical(expected, selected)
        # drop=False keeps the now-scalar coordinate
        expected = Dataset({"foo": 1}, {"x": 0})
        selected = data.squeeze(drop=False)
        assert_identical(expected, selected)
        data = Dataset({"foo": (("x", "y"), [[1]])}, {"x": [0], "y": [0]})
        expected = Dataset({"foo": 1})
        selected = data.squeeze(drop=True)
        assert_identical(expected, selected)
        expected = Dataset({"foo": ("x", [1])}, {"x": [0]})
        selected = data.squeeze(dim="y", drop=True)
        assert_identical(expected, selected)
        # nothing to squeeze: empty dims are left alone
        data = Dataset({"foo": (("x",), [])}, {"x": []})
        selected = data.squeeze(drop=True)
        assert_identical(data, selected)
    def test_groupby(self):
        """Basic groupby: length, groups mapping, iteration, and identity map."""
        data = Dataset(
            {"z": (["x", "y"], np.random.randn(3, 5))},
            {"x": ("x", list("abc")), "c": ("x", [0, 1, 0]), "y": range(5)},
        )
        groupby = data.groupby("x")
        assert len(groupby) == 3
        expected_groups = {"a": 0, "b": 1, "c": 2}
        assert groupby.groups == expected_groups
        expected_items = [
            ("a", data.isel(x=0)),
            ("b", data.isel(x=1)),
            ("c", data.isel(x=2)),
        ]
        for actual, expected in zip(groupby, expected_items):
            assert actual[0] == expected[0]
            assert_equal(actual[1], expected[1])
        def identity(x):
            return x
        # mapping identity over any grouping reconstructs the dataset
        for k in ["x", "c", "y"]:
            actual = data.groupby(k, squeeze=False).map(identity)
            assert_equal(data, actual)
def test_groupby_returns_new_type(self):
data = Dataset({"z": (["x", "y"], np.random.randn(3, 5))})
actual = data.groupby("x").map(lambda ds: ds["z"])
expected = data["z"]
assert_identical(expected, actual)
actual = data["z"].groupby("x").map(lambda x: x.to_dataset())
expected = data
assert_identical(expected, actual)
    def test_groupby_iter(self):
        """Iterating a groupby yields (label, sub-dataset) pairs in order."""
        data = create_test_data()
        for n, (t, sub) in enumerate(list(data.groupby("dim1"))[:3]):
            assert data["dim1"][n] == t
            assert_equal(data["var1"][n], sub["var1"])
            assert_equal(data["var2"][n], sub["var2"])
            assert_equal(data["var3"][:, n], sub["var3"])
    def test_groupby_errors(self):
        """Invalid ``group`` arguments raise informative errors."""
        data = create_test_data()
        with pytest.raises(TypeError, match=r"`group` must be"):
            data.groupby(np.arange(10))
        with pytest.raises(ValueError, match=r"length does not match"):
            data.groupby(data["dim1"][:3])
        with pytest.raises(TypeError, match=r"`group` must be"):
            data.groupby(data.coords["dim1"].to_index())
    def test_groupby_reduce(self):
        """groupby reductions broadcast variables lacking the grouped dim."""
        data = Dataset(
            {
                "xy": (["x", "y"], np.random.randn(3, 4)),
                "xonly": ("x", np.random.randn(3)),
                "yonly": ("y", np.random.randn(4)),
                "letters": ("y", ["a", "a", "b", "b"]),
            }
        )
        expected = data.mean("y")
        expected["yonly"] = expected["yonly"].variable.set_dims({"x": 3})
        actual = data.groupby("x").mean(...)
        assert_allclose(expected, actual)
        actual = data.groupby("x").mean("y")
        assert_allclose(expected, actual)
        # grouping by a non-index variable ("letters")
        letters = data["letters"]
        expected = Dataset(
            {
                "xy": data["xy"].groupby(letters).mean(...),
                "xonly": (data["xonly"].mean().variable.set_dims({"letters": 2})),
                "yonly": data["yonly"].groupby(letters).mean(),
            }
        )
        actual = data.groupby("letters").mean(...)
        assert_allclose(expected, actual)
    def test_groupby_math(self):
        """Binary arithmetic between a groupby object and datasets/arrays."""
        def reorder_dims(x):
            # binary ops on groupbys can change dim order; normalize it
            return x.transpose("dim1", "dim2", "dim3", "time")
        ds = create_test_data()
        ds["dim1"] = ds["dim1"]
        for squeeze in [True, False]:
            grouped = ds.groupby("dim1", squeeze=squeeze)
            expected = reorder_dims(ds + ds.coords["dim1"])
            actual = grouped + ds.coords["dim1"]
            assert_identical(expected, reorder_dims(actual))
            actual = ds.coords["dim1"] + grouped
            assert_identical(expected, reorder_dims(actual))
            ds2 = 2 * ds
            expected = reorder_dims(ds + ds2)
            actual = grouped + ds2
            assert_identical(expected, reorder_dims(actual))
            actual = ds2 + grouped
            assert_identical(expected, reorder_dims(actual))
        grouped = ds.groupby("numbers")
        zeros = DataArray([0, 0, 0, 0], [("numbers", range(4))])
        expected = (ds + Variable("dim3", np.zeros(10))).transpose(
            "dim3", "dim1", "dim2", "time"
        )
        actual = grouped + zeros
        assert_equal(expected, actual)
        actual = zeros + grouped
        assert_equal(expected, actual)
        # incompatible operands raise
        with pytest.raises(ValueError, match=r"incompat.* grouped binary"):
            grouped + ds
        with pytest.raises(ValueError, match=r"incompat.* grouped binary"):
            ds + grouped
        with pytest.raises(TypeError, match=r"only support binary ops"):
            grouped + 1
        with pytest.raises(TypeError, match=r"only support binary ops"):
            grouped + grouped
        with pytest.raises(TypeError, match=r"in-place operations"):
            ds += grouped
        ds = Dataset(
            {
                "x": ("time", np.arange(100)),
                "time": pd.date_range("2000-01-01", periods=100),
            }
        )
        with pytest.raises(ValueError, match=r"incompat.* grouped binary"):
            ds + ds.groupby("time.month")
def test_groupby_math_virtual(self):
ds = Dataset(
{"x": ("t", [1, 2, 3])}, {"t": pd.date_range("20100101", periods=3)}
)
grouped = ds.groupby("t.day")
actual = grouped - grouped.mean(...)
expected = Dataset({"x": ("t", [0, 0, 0])}, ds[["t", "t.day"]])
assert_identical(actual, expected)
    def test_groupby_nan(self):
        """NaN group labels are excluded from the groupby."""
        # nan should be excluded from groupby
        ds = Dataset({"foo": ("x", [1, 2, 3, 4])}, {"bar": ("x", [1, 1, 2, np.nan])})
        actual = ds.groupby("bar").mean(...)
        expected = Dataset({"foo": ("bar", [1.5, 3]), "bar": [1, 2]})
        assert_identical(actual, expected)
    def test_groupby_order(self):
        """groupby reductions preserve the order of data variables."""
        # groupby should preserve variables order
        ds = Dataset()
        for vn in ["a", "b", "c"]:
            ds[vn] = DataArray(np.arange(10), dims=["t"])
        data_vars_ref = list(ds.data_vars.keys())
        ds = ds.groupby("t").mean(...)
        data_vars = list(ds.data_vars.keys())
        assert data_vars == data_vars_ref
        # coords are now at the end of the list, so the test below fails
        # all_vars = list(ds.variables.keys())
        # all_vars_ref = list(ds.variables.keys())
        # self.assertEqual(all_vars, all_vars_ref)
    def test_resample_and_first(self):
        """Downsampling with first() and upsampling reductions vs reindex."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        actual = ds.resample(time="1D").first(keep_attrs=True)
        expected = ds.isel(time=[0, 4, 8])
        assert_identical(expected, actual)
        # upsampling: every 3H bin holds at most one sample, so all
        # reductions are equivalent to a reindex
        expected_time = pd.date_range("2000-01-01", freq="3H", periods=19)
        expected = ds.reindex(time=expected_time)
        actual = ds.resample(time="3H")
        for how in ["mean", "sum", "first", "last"]:
            method = getattr(actual, how)
            result = method()
            assert_equal(expected, result)
        for method in [np.mean]:
            result = actual.reduce(method)
            assert_equal(expected, result)
    def test_resample_min_count(self):
        """resample().sum honors ``min_count`` in the presence of NaNs."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        # inject nan
        ds["foo"] = xr.where(ds["foo"] > 2.0, np.nan, ds["foo"])
        actual = ds.resample(time="1D").sum(min_count=1)
        # compare against summing each daily window by hand
        expected = xr.concat(
            [
                ds.isel(time=slice(i * 4, (i + 1) * 4)).sum("time", min_count=1)
                for i in range(3)
            ],
            dim=actual["time"],
        )
        assert_equal(expected, actual)
    def test_resample_by_mean_with_keep_attrs(self):
        """mean(keep_attrs=True) preserves attrs; passing it to resample warns."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        ds.attrs["dsmeta"] = "dsdata"
        resampled_ds = ds.resample(time="1D").mean(keep_attrs=True)
        actual = resampled_ds["bar"].attrs
        expected = ds["bar"].attrs
        assert expected == actual
        actual = resampled_ds.attrs
        expected = ds.attrs
        assert expected == actual
        # keep_attrs belongs on the reduction, not on resample itself
        with pytest.warns(
            UserWarning, match="Passing ``keep_attrs`` to ``resample`` has no effect."
        ):
            ds.resample(time="1D", keep_attrs=True)
    def test_resample_loffset(self):
        """resample ``loffset`` shifts result labels like pandas' loffset."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        ds.attrs["dsmeta"] = "dsdata"
        # Our use of `loffset` may change if we align our API with pandas' changes.
        # ref https://github.com/pydata/xarray/pull/4537
        actual = ds.resample(time="24H", loffset="-12H").mean().bar
        expected_ = ds.bar.to_series().resample("24H").mean()
        expected_.index += to_offset("-12H")
        expected = DataArray.from_series(expected_)
        assert_identical(actual, expected)
    def test_resample_by_mean_discarding_attrs(self):
        """mean(keep_attrs=False) drops dataset and variable attrs."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        ds.attrs["dsmeta"] = "dsdata"
        resampled_ds = ds.resample(time="1D").mean(keep_attrs=False)
        assert resampled_ds["bar"].attrs == {}
        assert resampled_ds.attrs == {}
    def test_resample_by_last_discarding_attrs(self):
        """last(keep_attrs=False) drops dataset and variable attrs."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        ds.attrs["dsmeta"] = "dsdata"
        resampled_ds = ds.resample(time="1D").last(keep_attrs=False)
        assert resampled_ds["bar"].attrs == {}
        assert resampled_ds.attrs == {}
    @requires_scipy
    def test_resample_drop_nondim_coords(self):
        """Resampling drops non-dimension coords that depend on the time dim."""
        xs = np.arange(6)
        ys = np.arange(3)
        times = pd.date_range("2000-01-01", freq="6H", periods=5)
        data = np.tile(np.arange(5), (6, 3, 1))
        xx, yy = np.meshgrid(xs * 5, ys * 2.5)
        tt = np.arange(len(times), dtype=int)
        array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time"))
        xcoord = DataArray(xx.T, {"x": xs, "y": ys}, ("x", "y"))
        ycoord = DataArray(yy.T, {"x": xs, "y": ys}, ("x", "y"))
        tcoord = DataArray(tt, {"time": times}, ("time",))
        ds = Dataset({"data": array, "xc": xcoord, "yc": ycoord, "tc": tcoord})
        ds = ds.set_coords(["xc", "yc", "tc"])
        # Re-sample
        actual = ds.resample(time="12H").mean("time")
        assert "tc" not in actual.coords
        # Up-sample - filling
        actual = ds.resample(time="1H").ffill()
        assert "tc" not in actual.coords
        # Up-sample - interpolation
        actual = ds.resample(time="1H").interpolate("linear")
        assert "tc" not in actual.coords
    def test_resample_old_api(self):
        """The pre-0.10 positional resample API raises TypeError."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        with pytest.raises(TypeError, match=r"resample\(\) no longer supports"):
            ds.resample("1D", "time")
        with pytest.raises(TypeError, match=r"resample\(\) no longer supports"):
            ds.resample("1D", dim="time", how="mean")
        with pytest.raises(TypeError, match=r"resample\(\) no longer supports"):
            ds.resample("1D", dim="time")
    def test_resample_ds_da_are_the_same(self):
        """Dataset and DataArray resampling produce identical results."""
        time = pd.date_range("2000-01-01", freq="6H", periods=365 * 4)
        ds = xr.Dataset(
            {
                "foo": (("time", "x"), np.random.randn(365 * 4, 5)),
                "time": time,
                "x": np.arange(5),
            }
        )
        assert_identical(
            ds.resample(time="M").mean()["foo"], ds.foo.resample(time="M").mean()
        )
    def test_ds_resample_apply_func_args(self):
        """resample().map forwards positional args and kwargs to the function."""
        def func(arg1, arg2, arg3=0.0):
            return arg1.mean("time") + arg2 + arg3
        times = pd.date_range("2000", freq="D", periods=3)
        ds = xr.Dataset({"foo": ("time", [1.0, 1.0, 1.0]), "time": times})
        expected = xr.Dataset({"foo": ("time", [3.0, 3.0, 3.0]), "time": times})
        actual = ds.resample(time="D").map(func, args=(1.0,), arg3=1.0)
        assert_identical(expected, actual)
    def test_to_array(self):
        """to_array stacks data variables along a new dimension."""
        ds = Dataset(
            {"a": 1, "b": ("x", [1, 2, 3])},
            coords={"c": 42},
            attrs={"Conventions": "None"},
        )
        # scalar "a" is broadcast against "b" along x
        data = [[1, 1, 1], [1, 2, 3]]
        coords = {"c": 42, "variable": ["a", "b"]}
        dims = ("variable", "x")
        expected = DataArray(data, coords, dims, attrs=ds.attrs)
        actual = ds.to_array()
        assert_identical(expected, actual)
        # custom dimension and result name
        actual = ds.to_array("abc", name="foo")
        expected = expected.rename({"variable": "abc"}).rename("foo")
        assert_identical(expected, actual)
    def test_to_and_from_dataframe(self):
        """Round-trip between Dataset and pandas DataFrame, incl. MultiIndex."""
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
        expected = pd.DataFrame(
            np.array([x, y]).T, columns=["a", "b"], index=pd.Index(t, name="t")
        )
        actual = ds.to_dataframe()
        # use the .equals method to check all DataFrame metadata
        assert expected.equals(actual), (expected, actual)
        # verify coords are included
        actual = ds.set_coords("b").to_dataframe()
        assert expected.equals(actual), (expected, actual)
        # check roundtrip
        assert_identical(ds, Dataset.from_dataframe(actual))
        # test a case with a MultiIndex
        w = np.random.randn(2, 3)
        ds = Dataset({"w": (("x", "y"), w)})
        ds["y"] = ("y", list("abc"))
        exp_index = pd.MultiIndex.from_arrays(
            [[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"]
        )
        expected = pd.DataFrame(w.reshape(-1), columns=["w"], index=exp_index)
        actual = ds.to_dataframe()
        assert expected.equals(actual)
        # check roundtrip
        assert_identical(ds.assign_coords(x=[0, 1]), Dataset.from_dataframe(actual))
        # Check multiindex reordering
        new_order = ["x", "y"]
        actual = ds.to_dataframe(dim_order=new_order)
        assert expected.equals(actual)
        new_order = ["y", "x"]
        exp_index = pd.MultiIndex.from_arrays(
            [["a", "a", "b", "b", "c", "c"], [0, 1, 0, 1, 0, 1]], names=["y", "x"]
        )
        expected = pd.DataFrame(
            w.transpose().reshape(-1), columns=["w"], index=exp_index
        )
        actual = ds.to_dataframe(dim_order=new_order)
        assert expected.equals(actual)
        # dim_order must name every dimension exactly once
        invalid_order = ["x"]
        with pytest.raises(
            ValueError, match="does not match the set of dimensions of this"
        ):
            ds.to_dataframe(dim_order=invalid_order)
        invalid_order = ["x", "z"]
        with pytest.raises(
            ValueError, match="does not match the set of dimensions of this"
        ):
            ds.to_dataframe(dim_order=invalid_order)
        # check pathological cases
        df = pd.DataFrame([1])
        actual = Dataset.from_dataframe(df)
        expected = Dataset({0: ("index", [1])}, {"index": [0]})
        assert_identical(expected, actual)
        df = pd.DataFrame()
        actual = Dataset.from_dataframe(df)
        expected = Dataset(coords={"index": []})
        assert_identical(expected, actual)
        # GH697
        df = pd.DataFrame({"A": []})
        actual = Dataset.from_dataframe(df)
        expected = Dataset({"A": DataArray([], dims=("index",))}, {"index": []})
        assert_identical(expected, actual)
        # regression test for GH278
        # use int64 to ensure consistent results for the pandas .equals method
        # on windows (which requires the same dtype)
        ds = Dataset({"x": pd.Index(["bar"]), "a": ("y", np.array([1], "int64"))}).isel(
            x=0
        )
        # use .loc to ensure consistent results on Python 3
        actual = ds.to_dataframe().loc[:, ["a", "x"]]
        expected = pd.DataFrame(
            [[1, "bar"]], index=pd.Index([0], name="y"), columns=["a", "x"]
        )
        assert expected.equals(actual), (expected, actual)
        # a dataset with only index coordinates yields an empty-column frame
        ds = Dataset({"x": np.array([0], "int64"), "y": np.array([1], "int64")})
        actual = ds.to_dataframe()
        idx = pd.MultiIndex.from_arrays([[0], [1]], names=["x", "y"])
        expected = pd.DataFrame([[]], index=idx)
        assert expected.equals(actual), (expected, actual)
    def test_from_dataframe_categorical(self):
        """Categorical indexes only keep categories present in the data."""
        cat = pd.CategoricalDtype(
            categories=["foo", "bar", "baz", "qux", "quux", "corge"]
        )
        i1 = pd.Series(["foo", "bar", "foo"], dtype=cat)
        i2 = pd.Series(["bar", "bar", "baz"], dtype=cat)
        df = pd.DataFrame({"i1": i1, "i2": i2, "values": [1, 2, 3]})
        ds = df.set_index("i1").to_xarray()
        assert len(ds["i1"]) == 3
        # MultiIndex levels drop duplicates: two unique values per level
        ds = df.set_index(["i1", "i2"]).to_xarray()
        assert len(ds["i1"]) == 2
        assert len(ds["i2"]) == 2
    @requires_sparse
    def test_from_dataframe_sparse(self):
        """from_dataframe(sparse=True) yields sparse.COO-backed variables."""
        import sparse
        df_base = pd.DataFrame(
            {"x": range(10), "y": list("abcdefghij"), "z": np.arange(0, 100, 10)}
        )
        ds_sparse = Dataset.from_dataframe(df_base.set_index("x"), sparse=True)
        ds_dense = Dataset.from_dataframe(df_base.set_index("x"), sparse=False)
        assert isinstance(ds_sparse["y"].data, sparse.COO)
        assert isinstance(ds_sparse["z"].data, sparse.COO)
        # densify so the two datasets can be compared directly
        ds_sparse["y"].data = ds_sparse["y"].data.todense()
        ds_sparse["z"].data = ds_sparse["z"].data.todense()
        assert_identical(ds_dense, ds_sparse)
        ds_sparse = Dataset.from_dataframe(df_base.set_index(["x", "y"]), sparse=True)
        ds_dense = Dataset.from_dataframe(df_base.set_index(["x", "y"]), sparse=False)
        assert isinstance(ds_sparse["z"].data, sparse.COO)
        ds_sparse["z"].data = ds_sparse["z"].data.todense()
        assert_identical(ds_dense, ds_sparse)
    def test_to_and_from_empty_dataframe(self):
        """An empty DataFrame round-trips through Dataset (GH697)."""
        # GH697
        expected = pd.DataFrame({"foo": []})
        ds = Dataset.from_dataframe(expected)
        assert len(ds["foo"]) == 0
        actual = ds.to_dataframe()
        assert len(actual) == 0
        assert expected.equals(actual)
    def test_from_dataframe_multiindex(self):
        """from_dataframe unstacks a MultiIndex into separate dimensions."""
        index = pd.MultiIndex.from_product([["a", "b"], [1, 2, 3]], names=["x", "y"])
        df = pd.DataFrame({"z": np.arange(6)}, index=index)
        expected = Dataset(
            {"z": (("x", "y"), [[0, 1, 2], [3, 4, 5]])},
            coords={"x": ["a", "b"], "y": [1, 2, 3]},
        )
        actual = Dataset.from_dataframe(df)
        assert_identical(actual, expected)
        # row order should not matter
        df2 = df.iloc[[3, 2, 1, 0, 4, 5], :]
        actual = Dataset.from_dataframe(df2)
        assert_identical(actual, expected)
        # missing combinations are filled with NaN
        df3 = df.iloc[:4, :]
        expected3 = Dataset(
            {"z": (("x", "y"), [[0, 1, 2], [3, np.nan, np.nan]])},
            coords={"x": ["a", "b"], "y": [1, 2, 3]},
        )
        actual = Dataset.from_dataframe(df3)
        assert_identical(actual, expected3)
        df_nonunique = df.iloc[[0, 0], :]
        with pytest.raises(ValueError, match=r"non-unique MultiIndex"):
            Dataset.from_dataframe(df_nonunique)
    def test_from_dataframe_unsorted_levels(self):
        """MultiIndex levels that are not lexically sorted survive (GH-4186)."""
        # regression test for GH-4186
        index = pd.MultiIndex(
            levels=[["b", "a"], ["foo"]], codes=[[0, 1], [0, 0]], names=["lev1", "lev2"]
        )
        df = pd.DataFrame({"c1": [0, 2], "c2": [1, 3]}, index=index)
        expected = Dataset(
            {
                "c1": (("lev1", "lev2"), [[0], [2]]),
                "c2": (("lev1", "lev2"), [[1], [3]]),
            },
            coords={"lev1": ["b", "a"], "lev2": ["foo"]},
        )
        actual = Dataset.from_dataframe(df)
        assert_identical(actual, expected)
    def test_from_dataframe_non_unique_columns(self):
        """Duplicate DataFrame column names are rejected (GH449)."""
        # regression test for GH449
        df = pd.DataFrame(np.zeros((2, 2)))
        df.columns = ["foo", "foo"]
        with pytest.raises(ValueError, match=r"non-unique columns"):
            Dataset.from_dataframe(df)
    def test_convert_dataframe_with_many_types_and_multiindex(self):
        """Mixed dtypes + MultiIndex round-trip at least as well as np.asarray (GH737)."""
        # regression test for GH737
        df = pd.DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
            }
        )
        df.index = pd.MultiIndex.from_product([["a"], range(3)], names=["one", "two"])
        roundtripped = Dataset.from_dataframe(df).to_dataframe()
        # we can't do perfectly, but we should be at least as faithful as
        # np.asarray
        expected = df.apply(np.asarray)
        assert roundtripped.equals(expected)
    def test_to_and_from_dict(self):
        """to_dict/from_dict round trip, data=False mode, and partial dicts."""
        # <xarray.Dataset>
        # Dimensions:  (t: 10)
        # Coordinates:
        #   * t        (t) <U1 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
        # Data variables:
        #     a        (t) float64 0.6916 -1.056 -1.163 0.9792 -0.7865 ...
        #     b        (t) float64 1.32 0.1954 1.91 1.39 0.519 -0.2772 ...
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
        expected = {
            "coords": {"t": {"dims": ("t",), "data": t, "attrs": {}}},
            "attrs": {},
            "dims": {"t": 10},
            "data_vars": {
                "a": {"dims": ("t",), "data": x.tolist(), "attrs": {}},
                "b": {"dims": ("t",), "data": y.tolist(), "attrs": {}},
            },
        }
        actual = ds.to_dict()
        # check that they are identical
        assert expected == actual
        # check roundtrip
        assert_identical(ds, Dataset.from_dict(actual))
        # check the data=False option
        expected_no_data = expected.copy()
        del expected_no_data["coords"]["t"]["data"]
        del expected_no_data["data_vars"]["a"]["data"]
        del expected_no_data["data_vars"]["b"]["data"]
        endiantype = "<U1" if sys.byteorder == "little" else ">U1"
        expected_no_data["coords"]["t"].update({"dtype": endiantype, "shape": (10,)})
        expected_no_data["data_vars"]["a"].update({"dtype": "float64", "shape": (10,)})
        expected_no_data["data_vars"]["b"].update({"dtype": "float64", "shape": (10,)})
        actual_no_data = ds.to_dict(data=False)
        assert expected_no_data == actual_no_data
        # verify coords are included roundtrip
        expected_ds = ds.set_coords("b")
        actual = Dataset.from_dict(expected_ds.to_dict())
        assert_identical(expected_ds, actual)
        # test some incomplete dicts:
        # this one has no attrs field, the dims are strings, and x, y are
        # np.arrays
        d = {
            "coords": {"t": {"dims": "t", "data": t}},
            "dims": "t",
            "data_vars": {"a": {"dims": "t", "data": x}, "b": {"dims": "t", "data": y}},
        }
        assert_identical(ds, Dataset.from_dict(d))
        # this is kind of a flattened version with no coords, or data_vars
        d = {
            "a": {"dims": "t", "data": x},
            "t": {"data": t, "dims": "t"},
            "b": {"dims": "t", "data": y},
        }
        assert_identical(ds, Dataset.from_dict(d))
        # this one is missing some necessary information
        d = {
            "a": {"data": x},
            "t": {"data": t, "dims": "t"},
            "b": {"dims": "t", "data": y},
        }
        with pytest.raises(
            ValueError, match=r"cannot convert dict without the key 'dims'"
        ):
            Dataset.from_dict(d)
    def test_to_and_from_dict_with_time_dim(self):
        """Datetime coordinates survive a to_dict/from_dict round trip."""
        x = np.random.randn(10, 3)
        y = np.random.randn(10, 3)
        t = pd.date_range("20130101", periods=10)
        lat = [77.7, 83.2, 76]
        ds = Dataset(
            {
                "a": (["t", "lat"], x),
                "b": (["t", "lat"], y),
                "t": ("t", t),
                "lat": ("lat", lat),
            }
        )
        roundtripped = Dataset.from_dict(ds.to_dict())
        assert_identical(ds, roundtripped)
    def test_to_and_from_dict_with_nan_nat(self):
        """NaN data and NaT timestamps survive a to_dict/from_dict round trip."""
        x = np.random.randn(10, 3)
        y = np.random.randn(10, 3)
        y[2] = np.nan
        t = pd.Series(pd.date_range("20130101", periods=10))
        t[2] = np.nan
        lat = [77.7, 83.2, 76]
        ds = Dataset(
            {
                "a": (["t", "lat"], x),
                "b": (["t", "lat"], y),
                "t": ("t", t),
                "lat": ("lat", lat),
            }
        )
        roundtripped = Dataset.from_dict(ds.to_dict())
        assert_identical(ds, roundtripped)
    def test_to_dict_with_numpy_attrs(self):
        """to_dict converts numpy scalar/array attrs to plain Python types."""
        # this doesn't need to roundtrip
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        attrs = {
            "created": np.float64(1998),
            "coords": np.array([37, -110.1, 100]),
            "maintainer": "bar",
        }
        ds = Dataset({"a": ("t", x, attrs), "b": ("t", y, attrs), "t": ("t", t)})
        expected_attrs = {
            "created": attrs["created"].item(),
            "coords": attrs["coords"].tolist(),
            "maintainer": "bar",
        }
        actual = ds.to_dict()
        # check that they are identical
        assert expected_attrs == actual["data_vars"]["a"]["attrs"]
def test_pickle(self):
data = create_test_data()
roundtripped = pickle.loads(pickle.dumps(data))
assert_identical(data, roundtripped)
# regression test for #167:
assert data.dims == roundtripped.dims
    def test_lazy_load(self):
        """Indexing stays lazy: only load()/values touch the backing store."""
        store = InaccessibleVariableDataStore()
        create_test_data().dump_to_store(store)
        for decode_cf in [True, False]:
            ds = open_dataset(store, decode_cf=decode_cf)
            with pytest.raises(UnexpectedDataAccess):
                ds.load()
            with pytest.raises(UnexpectedDataAccess):
                ds["var1"].values
            # these should not raise UnexpectedDataAccess:
            ds.isel(time=10)
            ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
def test_dropna(self):
x = np.random.randn(4, 4)
x[::2, 0] = np.nan
y = np.random.randn(4)
y[-1] = np.nan
ds = Dataset({"foo": (("a", "b"), x), "bar": (("b", y))})
expected = ds.isel(a=slice(1, None, 2))
actual = ds.dropna("a")
assert_identical(actual, expected)
expected = ds.isel(b=slice(1, 3))
actual = ds.dropna("b")
assert_identical(actual, expected)
actual = ds.dropna("b", subset=["foo", "bar"])
assert_identical(actual, expected)
expected = ds.isel(b=slice(1, None))
actual = ds.dropna("b", subset=["foo"])
assert_identical(actual, expected)
expected = ds.isel(b=slice(3))
actual = ds.dropna("b", subset=["bar"])
assert_identical(actual, expected)
actual = ds.dropna("a", subset=[])
assert_identical(actual, ds)
actual = ds.dropna("a", subset=["bar"])
assert_identical(actual, ds)
actual = ds.dropna("a", how="all")
assert_identical(actual, ds)
actual = ds.dropna("b", how="all", subset=["bar"])
expected = ds.isel(b=[0, 1, 2])
assert_identical(actual, expected)
actual = ds.dropna("b", thresh=1, subset=["bar"])
assert_identical(actual, expected)
actual = ds.dropna("b", thresh=2)
assert_identical(actual, ds)
actual = ds.dropna("b", thresh=4)
expected = ds.isel(b=[1, 2, 3])
assert_identical(actual, expected)
actual = ds.dropna("a", thresh=3)
expected = ds.isel(a=[1, 3])
assert_identical(actual, ds)
with pytest.raises(ValueError, match=r"a single dataset dimension"):
ds.dropna("foo")
with pytest.raises(ValueError, match=r"invalid how"):
ds.dropna("a", how="somehow")
with pytest.raises(TypeError, match=r"must specify how or thresh"):
ds.dropna("a", how=None)
    def test_fillna(self):
        """fillna with scalars, dicts, datasets, groupby, and attr handling."""
        ds = Dataset({"a": ("x", [np.nan, 1, np.nan, 3])}, {"x": [0, 1, 2, 3]})
        # fill with -1
        actual = ds.fillna(-1)
        expected = Dataset({"a": ("x", [-1, 1, -1, 3])}, {"x": [0, 1, 2, 3]})
        assert_identical(expected, actual)
        actual = ds.fillna({"a": -1})
        assert_identical(expected, actual)
        other = Dataset({"a": -1})
        actual = ds.fillna(other)
        assert_identical(expected, actual)
        actual = ds.fillna({"a": other.a})
        assert_identical(expected, actual)
        # fill with range(4)
        b = DataArray(range(4), coords=[("x", range(4))])
        actual = ds.fillna(b)
        expected = b.rename("a").to_dataset()
        assert_identical(expected, actual)
        actual = ds.fillna(expected)
        assert_identical(expected, actual)
        actual = ds.fillna(range(4))
        assert_identical(expected, actual)
        actual = ds.fillna(b[:3])
        assert_identical(expected, actual)
        # okay to only include some data variables
        ds["b"] = np.nan
        actual = ds.fillna({"a": -1})
        expected = Dataset(
            {"a": ("x", [-1, 1, -1, 3]), "b": np.nan}, {"x": [0, 1, 2, 3]}
        )
        assert_identical(expected, actual)
        # but new data variables is not okay
        with pytest.raises(ValueError, match=r"must be contained"):
            ds.fillna({"x": 0})
        # empty argument should be OK
        result = ds.fillna({})
        assert_identical(ds, result)
        result = ds.fillna(Dataset(coords={"c": 42}))
        expected = ds.assign_coords(c=42)
        assert_identical(expected, result)
        # groupby
        expected = Dataset({"a": ("x", range(4))}, {"x": [0, 1, 2, 3]})
        for target in [ds, expected]:
            target.coords["b"] = ("x", [0, 0, 1, 1])
        actual = ds.groupby("b").fillna(DataArray([0, 2], dims="b"))
        assert_identical(expected, actual)
        actual = ds.groupby("b").fillna(Dataset({"a": ("b", [0, 2])}))
        assert_identical(expected, actual)
        # attrs with groupby
        ds.attrs["attr"] = "ds"
        ds.a.attrs["attr"] = "da"
        actual = ds.groupby("b").fillna(Dataset({"a": ("b", [0, 2])}))
        assert actual.attrs == ds.attrs
        assert actual.a.name == "a"
        assert actual.a.attrs == ds.a.attrs
        # attrs are preserved on the plain DataArray/Dataset paths too
        da = DataArray(range(5), name="a", attrs={"attr": "da"})
        actual = da.fillna(1)
        assert actual.name == "a"
        assert actual.attrs == da.attrs
        ds = Dataset({"a": da}, attrs={"attr": "ds"})
        actual = ds.fillna({"a": 1})
        assert actual.attrs == ds.attrs
        assert actual.a.name == "a"
        assert actual.a.attrs == ds.a.attrs
@pytest.mark.parametrize(
"func", [lambda x: x.clip(0, 1), lambda x: np.float64(1.0) * x, np.abs, abs]
)
def test_propagate_attrs(self, func):
da = DataArray(range(5), name="a", attrs={"attr": "da"})
ds = Dataset({"a": da}, attrs={"attr": "ds"})
# test defaults
assert func(ds).attrs == ds.attrs
with set_options(keep_attrs=False):
assert func(ds).attrs != ds.attrs
assert func(ds).a.attrs != ds.a.attrs
with set_options(keep_attrs=False):
assert func(ds).attrs != ds.attrs
assert func(ds).a.attrs != ds.a.attrs
with set_options(keep_attrs=True):
assert func(ds).attrs == ds.attrs
assert func(ds).a.attrs == ds.a.attrs
    def test_where(self):
        """where masks with NaN for scalar, array, groupby and callable conds."""
        ds = Dataset({"a": ("x", range(5))})
        expected = Dataset({"a": ("x", [np.nan, np.nan, 2, 3, 4])})
        actual = ds.where(ds > 1)
        assert_identical(expected, actual)
        actual = ds.where(ds.a > 1)
        assert_identical(expected, actual)
        actual = ds.where(ds.a.values > 1)
        assert_identical(expected, actual)
        actual = ds.where(True)
        assert_identical(ds, actual)
        expected = ds.copy(deep=True)
        expected["a"].values = [np.nan] * 5
        actual = ds.where(False)
        assert_identical(expected, actual)
        # 2d
        ds = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])})
        expected = Dataset({"a": (("x", "y"), [[np.nan, 1], [2, 3]])})
        actual = ds.where(ds > 0)
        assert_identical(expected, actual)
        # groupby
        ds = Dataset({"a": ("x", range(5))}, {"c": ("x", [0, 0, 1, 1, 1])})
        cond = Dataset({"a": ("c", [True, False])})
        expected = ds.copy(deep=True)
        expected["a"].values = [0, 1] + [np.nan] * 3
        actual = ds.groupby("c").where(cond)
        assert_identical(expected, actual)
        # attrs with groupby
        ds.attrs["attr"] = "ds"
        ds.a.attrs["attr"] = "da"
        actual = ds.groupby("c").where(cond)
        assert actual.attrs == ds.attrs
        assert actual.a.name == "a"
        assert actual.a.attrs == ds.a.attrs
        # attrs
        da = DataArray(range(5), name="a", attrs={"attr": "da"})
        actual = da.where(da.values > 1)
        assert actual.name == "a"
        assert actual.attrs == da.attrs
        ds = Dataset({"a": da}, attrs={"attr": "ds"})
        actual = ds.where(ds > 0)
        assert actual.attrs == ds.attrs
        assert actual.a.name == "a"
        assert actual.a.attrs == ds.a.attrs
        # lambda
        ds = Dataset({"a": ("x", range(5))})
        expected = Dataset({"a": ("x", [np.nan, np.nan, 2, 3, 4])})
        actual = ds.where(lambda x: x > 1)
        assert_identical(expected, actual)
    def test_where_other(self):
        """where with an ``other`` fill value preserves dtype; bad combos raise."""
        ds = Dataset({"a": ("x", range(5))}, {"x": range(5)})
        expected = Dataset({"a": ("x", [-1, -1, 2, 3, 4])}, {"x": range(5)})
        actual = ds.where(ds > 1, -1)
        assert_equal(expected, actual)
        # an integer fill keeps the integer dtype (no NaN promotion)
        assert actual.a.dtype == int
        actual = ds.where(lambda x: x > 1, -1)
        assert_equal(expected, actual)
        with pytest.raises(ValueError, match=r"cannot set"):
            ds.where(ds > 1, other=0, drop=True)
        with pytest.raises(ValueError, match=r"indexes .* are not equal"):
            ds.where(ds > 1, ds.isel(x=slice(3)))
        with pytest.raises(ValueError, match=r"exact match required"):
            ds.where(ds > 1, ds.assign(b=2))
    def test_where_drop(self):
        """where(drop=True) trims labels that are entirely masked out."""
        # if drop=True
        # 1d
        # data array case
        array = DataArray(range(5), coords=[range(5)], dims=["x"])
        expected = DataArray(range(5)[2:], coords=[range(5)[2:]], dims=["x"])
        actual = array.where(array > 1, drop=True)
        assert_identical(expected, actual)
        # dataset case
        ds = Dataset({"a": array})
        expected = Dataset({"a": expected})
        actual = ds.where(ds > 1, drop=True)
        assert_identical(expected, actual)
        actual = ds.where(ds.a > 1, drop=True)
        assert_identical(expected, actual)
        with pytest.raises(TypeError, match=r"must be a"):
            ds.where(np.arange(5) > 1, drop=True)
        # 1d with odd coordinates
        array = DataArray(
            np.array([2, 7, 1, 8, 3]), coords=[np.array([3, 1, 4, 5, 9])], dims=["x"]
        )
        expected = DataArray(
            np.array([7, 8, 3]), coords=[np.array([1, 5, 9])], dims=["x"]
        )
        actual = array.where(array > 2, drop=True)
        assert_identical(expected, actual)
        # 1d multiple variables
        ds = Dataset({"a": (("x"), [0, 1, 2, 3]), "b": (("x"), [4, 5, 6, 7])})
        expected = Dataset(
            {"a": (("x"), [np.nan, 1, 2, 3]), "b": (("x"), [4, 5, 6, np.nan])}
        )
        actual = ds.where((ds > 0) & (ds < 7), drop=True)
        assert_identical(expected, actual)
        # 2d
        ds = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])})
        expected = Dataset({"a": (("x", "y"), [[np.nan, 1], [2, 3]])})
        actual = ds.where(ds > 0, drop=True)
        assert_identical(expected, actual)
        # 2d with odd coordinates
        ds = Dataset(
            {"a": (("x", "y"), [[0, 1], [2, 3]])},
            coords={
                "x": [4, 3],
                "y": [1, 2],
                "z": (["x", "y"], [[np.e, np.pi], [np.pi * np.e, np.pi * 3]]),
            },
        )
        expected = Dataset(
            {"a": (("x", "y"), [[3]])},
            coords={"x": [3], "y": [2], "z": (["x", "y"], [[np.pi * 3]])},
        )
        actual = ds.where(ds > 2, drop=True)
        assert_identical(expected, actual)
        # 2d multiple variables
        ds = Dataset(
            {"a": (("x", "y"), [[0, 1], [2, 3]]), "b": (("x", "y"), [[4, 5], [6, 7]])}
        )
        expected = Dataset(
            {
                "a": (("x", "y"), [[np.nan, 1], [2, 3]]),
                "b": (("x", "y"), [[4, 5], [6, 7]]),
            }
        )
        actual = ds.where(ds > 0, drop=True)
        assert_identical(expected, actual)
    def test_where_drop_empty(self):
        """An all-False mask with drop=True yields an empty dimension (GH1341)."""
        # regression test for GH1341
        array = DataArray(np.random.rand(100, 10), dims=["nCells", "nVertLevels"])
        mask = DataArray(np.zeros((100,), dtype="bool"), dims="nCells")
        actual = array.where(mask, drop=True)
        expected = DataArray(np.zeros((0, 10)), dims=["nCells", "nVertLevels"])
        assert_identical(expected, actual)
def test_where_drop_no_indexes(self):
ds = Dataset({"foo": ("x", [0.0, 1.0])})
expected = Dataset({"foo": ("x", [1.0])})
actual = ds.where(ds == 1, drop=True)
assert_identical(expected, actual)
    def test_reduce(self):
        """Basic reductions: coord dropping, per-variable results, dim handling."""
        data = create_test_data()
        # a full reduction drops every (dimension) coordinate
        assert len(data.mean().coords) == 0
        actual = data.max()
        expected = Dataset({k: v.max() for k, v in data.data_vars.items()})
        assert_equal(expected, actual)
        # a single dim may be spelled as a string or a one-element list
        assert_equal(data.min(dim=["dim1"]), data.min(dim="dim1"))
        # remaining dims after reducing over various dim arguments
        for reduct, expected in [
            ("dim2", ["dim3", "time", "dim1"]),
            (["dim2", "time"], ["dim3", "dim1"]),
            (("dim2", "time"), ["dim3", "dim1"]),
            ((), ["dim2", "dim3", "time", "dim1"]),
        ]:
            actual = list(data.min(dim=reduct).dims)
            assert actual == expected
        # reducing over an empty list of dims is a no-op
        assert_equal(data.mean(dim=[]), data)
        # numpy-style 'axis' is not accepted by Dataset reductions
        with pytest.raises(ValueError):
            data.mean(axis=0)
    def test_reduce_coords(self):
        """Scalar (non-dimension) coords survive a reduction (GH1470)."""
        # regression test for GH1470
        data = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"b": 4})
        expected = xr.Dataset({"a": 2}, coords={"b": 4})
        actual = data.mean("x")
        assert_identical(actual, expected)
        # should be consistent with the DataArray code path
        actual = data["a"].mean("x").to_dataset()
        assert_identical(actual, expected)
    def test_mean_uint_dtype(self):
        """mean works on unsigned-int data alongside a float variable with NaN."""
        data = xr.Dataset(
            {
                "a": (("x", "y"), np.arange(6).reshape(3, 2).astype("uint")),
                "b": (("x",), np.array([0.1, 0.2, np.nan])),
            }
        )
        actual = data.mean("x", skipna=True)
        # per-variable means must match the Dataset-level reduction
        expected = xr.Dataset(
            {"a": data["a"].mean("x"), "b": data["b"].mean("x", skipna=True)}
        )
        assert_identical(actual, expected)
def test_reduce_bad_dim(self):
data = create_test_data()
with pytest.raises(ValueError, match=r"Dataset does not contain"):
data.mean(dim="bad_dim")
    def test_reduce_cumsum(self):
        """cumsum over one dim or all dims; NaN entries act like zero."""
        data = xr.Dataset(
            {"a": 1, "b": ("x", [1, 2]), "c": (("x", "y"), [[np.nan, 3], [0, 4]])}
        )
        # cumsum along "y" leaves "a"/"b" alone and zeroes the NaN in "c"
        assert_identical(data.fillna(0), data.cumsum("y"))
        expected = xr.Dataset(
            {"a": 1, "b": ("x", [1, 3]), "c": (("x", "y"), [[0, 3], [0, 7]])}
        )
        assert_identical(expected, data.cumsum())
    @pytest.mark.parametrize(
        "reduct, expected",
        [
            ("dim1", ["dim2", "dim3", "time", "dim1"]),
            ("dim2", ["dim3", "time", "dim1", "dim2"]),
            ("dim3", ["dim2", "time", "dim1", "dim3"]),
            ("time", ["dim2", "dim3", "dim1"]),
        ],
    )
    @pytest.mark.parametrize("func", ["cumsum", "cumprod"])
    def test_reduce_cumsum_test_dims(self, reduct, expected, func):
        """cumsum/cumprod keep the scanned dim and reject unknown dims."""
        data = create_test_data()
        with pytest.raises(ValueError, match=r"Dataset does not contain"):
            getattr(data, func)(dim="bad_dim")
        # ensure dimensions are correct
        actual = getattr(data, func)(dim=reduct).dims
        assert list(actual) == expected
    def test_reduce_non_numeric(self):
        """Non-numeric (string) variables are silently dropped by mean."""
        data1 = create_test_data(seed=44)
        data2 = create_test_data(seed=44)
        # add a string-typed variable to data1 only
        add_vars = {"var4": ["dim1", "dim2"]}
        for v, dims in sorted(add_vars.items()):
            size = tuple(data1.dims[d] for d in dims)
            data = np.random.randint(0, 100, size=size).astype(np.str_)
            data1[v] = (dims, data, {"foo": "variable"})
        # the string variable is excluded from the reduction...
        assert "var4" not in data1.mean()
        # ...so both datasets reduce to the same result
        assert_equal(data1.mean(), data2.mean())
        assert_equal(data1.mean(dim="dim1"), data2.mean(dim="dim1"))
    @pytest.mark.filterwarnings(
        "ignore:Once the behaviour of DataArray:DeprecationWarning"
    )
    def test_reduce_strings(self):
        """min/max/argmin/argmax/idxmin/idxmax on string-typed variables."""
        expected = Dataset({"x": "a"})
        ds = Dataset({"x": ("y", ["a", "b"])})
        ds.coords["y"] = [-10, 10]
        actual = ds.min()
        assert_identical(expected, actual)
        expected = Dataset({"x": "b"})
        actual = ds.max()
        assert_identical(expected, actual)
        expected = Dataset({"x": 0})
        actual = ds.argmin()
        assert_identical(expected, actual)
        expected = Dataset({"x": 1})
        actual = ds.argmax()
        assert_identical(expected, actual)
        # idxmin/idxmax return the coordinate label, not the position
        expected = Dataset({"x": -10})
        actual = ds.idxmin()
        assert_identical(expected, actual)
        expected = Dataset({"x": 10})
        actual = ds.idxmax()
        assert_identical(expected, actual)
        # bytes (S1) and unicode (U1) dtypes behave the same way
        expected = Dataset({"x": b"a"})
        ds = Dataset({"x": ("y", np.array(["a", "b"], "S1"))})
        actual = ds.min()
        assert_identical(expected, actual)
        expected = Dataset({"x": "a"})
        ds = Dataset({"x": ("y", np.array(["a", "b"], "U1"))})
        actual = ds.min()
        assert_identical(expected, actual)
    def test_reduce_dtypes(self):
        """Reductions handle bool, unsigned, and complex dtypes correctly."""
        # regression test for GH342
        expected = Dataset({"x": 1})
        actual = Dataset({"x": True}).sum()
        assert_identical(expected, actual)
        # regression test for GH505
        expected = Dataset({"x": 3})
        actual = Dataset({"x": ("y", np.array([1, 2], "uint16"))}).sum()
        assert_identical(expected, actual)
        # complex values are summed componentwise
        expected = Dataset({"x": 1 + 1j})
        actual = Dataset({"x": ("y", [1, 1j])}).sum()
        assert_identical(expected, actual)
    def test_reduce_keep_attrs(self):
        """Attrs are dropped by default and preserved with keep_attrs=True."""
        data = create_test_data()
        _attrs = {"attr1": "value1", "attr2": 2929}
        attrs = dict(_attrs)
        data.attrs = attrs
        # Test dropped attrs
        ds = data.mean()
        assert ds.attrs == {}
        for v in ds.data_vars.values():
            assert v.attrs == {}
        # Test kept attrs
        ds = data.mean(keep_attrs=True)
        assert ds.attrs == attrs
        for k, v in ds.data_vars.items():
            assert v.attrs == data[k].attrs
@pytest.mark.filterwarnings(
"ignore:Once the behaviour of DataArray:DeprecationWarning"
)
def test_reduce_argmin(self):
# regression test for #205
ds = Dataset({"a": ("x", [0, 1])})
expected = Dataset({"a": ([], 0)})
actual = ds.argmin()
assert_identical(expected, actual)
actual = ds.argmin("x")
assert_identical(expected, actual)
    def test_reduce_scalars(self):
        """var() over missing/present dims leaves unrelated dims untouched."""
        ds = Dataset({"x": ("a", [2, 2]), "y": 2, "z": ("b", [2])})
        # full reduction: constant data has zero variance everywhere
        expected = Dataset({"x": 0, "y": 0, "z": 0})
        actual = ds.var()
        assert_identical(expected, actual)
        # reducing over "a" only; "z" keeps its "b" dimension
        expected = Dataset({"x": 0, "y": 0, "z": ("b", [0])})
        actual = ds.var("a")
        assert_identical(expected, actual)
    def test_reduce_only_one_axis(self):
        """reduce() passes a single integer axis to functions that require one."""
        # helper that refuses anything except a scalar integer axis
        def mean_only_one_axis(x, axis):
            if not isinstance(axis, integer_types):
                raise TypeError("non-integer axis")
            return x.mean(axis)
        ds = Dataset({"a": (["x", "y"], [[0, 1, 2, 3, 4]])})
        expected = Dataset({"a": ("x", [2])})
        actual = ds.reduce(mean_only_one_axis, "y")
        assert_identical(expected, actual)
        # without a dim, reduce() calls the function with no axis at all
        with pytest.raises(
            TypeError, match=r"missing 1 required positional argument: 'axis'"
        ):
            ds.reduce(mean_only_one_axis)
    def test_reduce_no_axis(self):
        """reduce() works with functions that take no axis argument at all."""
        # helper with no axis parameter: always reduces everything
        def total_sum(x):
            return np.sum(x.flatten())
        ds = Dataset({"a": (["x", "y"], [[0, 1, 2, 3, 4]])})
        expected = Dataset({"a": ((), 10)})
        actual = ds.reduce(total_sum)
        assert_identical(expected, actual)
        # supplying a dim forwards axis=..., which the function rejects
        with pytest.raises(TypeError, match=r"unexpected keyword argument 'axis'"):
            ds.reduce(total_sum, dim="x")
    def test_reduce_keepdims(self):
        """keepdims=True keeps reduced dims at length 1, like numpy."""
        ds = Dataset(
            {"a": (["x", "y"], [[0, 1, 2, 3, 4]])},
            coords={
                "y": [0, 1, 2, 3, 4],
                "x": [0],
                "lat": (["x", "y"], [[0, 1, 2, 3, 4]]),
                "c": -999.0,
            },
        )
        # Shape should match behaviour of numpy reductions with keepdims=True
        # Coordinates involved in the reduction should be removed
        actual = ds.mean(keepdims=True)
        expected = Dataset(
            {"a": (["x", "y"], np.mean(ds.a, keepdims=True).data)}, coords={"c": ds.c}
        )
        assert_identical(expected, actual)
        # reducing one dim keeps the other dim's coordinate ("y")
        actual = ds.mean("x", keepdims=True)
        expected = Dataset(
            {"a": (["x", "y"], np.mean(ds.a, axis=0, keepdims=True).data)},
            coords={"y": ds.y, "c": ds.c},
        )
        assert_identical(expected, actual)
    @pytest.mark.parametrize("skipna", [True, False])
    @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
    def test_quantile(self, q, skipna):
        """quantile matches the per-variable DataArray results and dim rules."""
        ds = create_test_data(seed=123)
        for dim in [None, "dim1", ["dim1"]]:
            ds_quantile = ds.quantile(q, dim=dim, skipna=skipna)
            # a scalar q produces no "quantile" dimension; a list does
            if is_scalar(q):
                assert "quantile" not in ds_quantile.dims
            else:
                assert "quantile" in ds_quantile.dims
            for var, dar in ds.data_vars.items():
                assert var in ds_quantile
                assert_identical(
                    ds_quantile[var], dar.quantile(q, dim=dim, skipna=skipna)
                )
        # reducing over two dims removes both and keeps the rest
        dim = ["dim1", "dim2"]
        ds_quantile = ds.quantile(q, dim=dim, skipna=skipna)
        assert "dim3" in ds_quantile.dims
        assert all(d not in ds_quantile.dims for d in dim)
@pytest.mark.parametrize("skipna", [True, False])
def test_quantile_skipna(self, skipna):
q = 0.1
dim = "time"
ds = Dataset({"a": ([dim], np.arange(0, 11))})
ds = ds.where(ds >= 1)
result = ds.quantile(q=q, dim=dim, skipna=skipna)
value = 1.9 if skipna else np.nan
expected = Dataset({"a": value}, coords={"quantile": q})
assert_identical(result, expected)
    @requires_bottleneck
    def test_rank(self):
        """rank() keeps only variables on the ranked dim, matching DataArray."""
        ds = create_test_data(seed=1234)
        # only ds.var3 depends on dim3
        z = ds.rank("dim3")
        assert ["var3"] == list(z.data_vars)
        # same as dataarray version
        x = z.var3
        y = ds.var3.rank("dim3")
        assert_equal(x, y)
        # coordinates stick
        assert list(z.coords) == list(ds.coords)
        assert list(x.coords) == list(y.coords)
        # invalid dim
        with pytest.raises(ValueError, match=r"does not contain"):
            x.rank("invalid_dim")
def test_count(self):
ds = Dataset({"x": ("a", [np.nan, 1]), "y": 0, "z": np.nan})
expected = Dataset({"x": 1, "y": 1, "z": 0})
actual = ds.count()
assert_identical(expected, actual)
    def test_map(self):
        """map() applies a function per variable, with keep_attrs and kwargs."""
        data = create_test_data()
        data.attrs["foo"] = "bar"
        assert_identical(data.map(np.mean), data.mean())
        expected = data.mean(keep_attrs=True)
        actual = data.map(lambda x: x.mean(keep_attrs=True), keep_attrs=True)
        assert_identical(expected, actual)
        # map drops variables not used by any data var (here: "time")
        assert_identical(data.map(lambda x: x, keep_attrs=True), data.drop_vars("time"))
        # extra keyword arguments are forwarded to the mapped function
        def scale(x, multiple=1):
            return multiple * x
        actual = data.map(scale, multiple=2)
        assert_equal(actual["var1"], 2 * data["var1"])
        assert_identical(actual["numbers"], data["numbers"])
        actual = data.map(np.asarray)
        expected = data.drop_vars("time")  # time is not used on a data var
        assert_equal(expected, actual)
def test_apply_pending_deprecated_map(self):
data = create_test_data()
data.attrs["foo"] = "bar"
with pytest.warns(PendingDeprecationWarning):
assert_identical(data.apply(np.mean), data.mean())
    def make_example_math_dataset(self):
        """Build the small dataset (with one NaN) shared by the math tests."""
        variables = {
            "bar": ("x", np.arange(100, 400, 100)),
            "foo": (("x", "y"), 1.0 * np.arange(12).reshape(3, 4)),
        }
        coords = {"abc": ("x", ["a", "b", "c"]), "y": 10 * np.arange(4)}
        ds = Dataset(variables, coords)
        # introduce a NaN so NaN-sensitive behaviour is exercised
        ds["foo"][0, 0] = np.nan
        return ds
def test_dataset_number_math(self):
ds = self.make_example_math_dataset()
assert_identical(ds, +ds)
assert_identical(ds, ds + 0)
assert_identical(ds, 0 + ds)
assert_identical(ds, ds + np.array(0))
assert_identical(ds, np.array(0) + ds)
actual = ds.copy(deep=True)
actual += 0
assert_identical(ds, actual)
    def test_unary_ops(self):
        """Unary/elementwise operators match map()-ing the same function."""
        ds = self.make_example_math_dataset()
        assert_identical(ds.map(abs), abs(ds))
        assert_identical(ds.map(lambda x: x + 4), ds + 4)
        for func in [
            lambda x: x.isnull(),
            lambda x: x.round(),
            lambda x: x.astype(int),
        ]:
            assert_identical(ds.map(func), func(ds))
        assert_identical(ds.isnull(), ~ds.notnull())
        # don't actually patch these methods in
        with pytest.raises(AttributeError):
            ds.item
        with pytest.raises(AttributeError):
            ds.searchsorted
    def test_dataset_array_math(self):
        """Dataset +/- DataArray (or Variable, or ndarray) matches map()."""
        ds = self.make_example_math_dataset()
        expected = ds.map(lambda x: x - ds["foo"])
        assert_identical(expected, ds - ds["foo"])
        assert_identical(expected, -ds["foo"] + ds)
        assert_identical(expected, ds - ds["foo"].variable)
        assert_identical(expected, -ds["foo"].variable + ds)
        # in-place subtraction agrees with the out-of-place result
        actual = ds.copy(deep=True)
        actual -= ds["foo"]
        assert_identical(expected, actual)
        expected = ds.map(lambda x: x + ds["bar"])
        assert_identical(expected, ds + ds["bar"])
        actual = ds.copy(deep=True)
        actual += ds["bar"]
        assert_identical(expected, actual)
        # plain ndarrays broadcast against each variable
        expected = Dataset({"bar": ds["bar"] + np.arange(3)})
        assert_identical(expected, ds[["bar"]] + np.arange(3))
        assert_identical(expected, np.arange(3) + ds[["bar"]])
    def test_dataset_dataset_math(self):
        """Dataset-with-Dataset arithmetic, including in-place and alignment."""
        ds = self.make_example_math_dataset()
        assert_identical(ds, ds + 0 * ds)
        # mappings of scalars are accepted on the right-hand side
        assert_identical(ds, ds + {"foo": 0, "bar": 0})
        expected = ds.map(lambda x: 2 * x)
        assert_identical(expected, 2 * ds)
        assert_identical(expected, ds + ds)
        assert_identical(expected, ds + ds.data_vars)
        assert_identical(expected, ds + dict(ds.data_vars))
        # in-place addition must not replace the underlying object
        actual = ds.copy(deep=True)
        expected_id = id(actual)
        actual += ds
        assert_identical(expected, actual)
        assert expected_id == id(actual)
        assert_identical(ds == ds, ds.notnull())
        # misaligned operands are inner-joined before the operation
        subsampled = ds.isel(y=slice(2))
        expected = 2 * subsampled
        assert_identical(expected, subsampled + ds)
        assert_identical(expected, ds + subsampled)
    def test_dataset_math_auto_align(self):
        """Binary ops automatically align operands (default inner join)."""
        ds = self.make_example_math_dataset()
        subset = ds.isel(y=[1, 3])
        expected = 2 * subset
        actual = ds + subset
        assert_identical(expected, actual)
        # disjoint index ranges intersect to an empty dimension
        actual = ds.isel(y=slice(1)) + ds.isel(y=slice(1, None))
        expected = 2 * ds.drop_sel(y=ds.y)
        assert_equal(actual, expected)
        # missing variables are dropped; coords are merged back in
        actual = ds + ds[["bar"]]
        expected = (2 * ds[["bar"]]).merge(ds.coords)
        assert_identical(expected, actual)
        assert_identical(ds + Dataset(), ds.coords.to_dataset())
        assert_identical(Dataset() + Dataset(), Dataset())
        ds2 = Dataset(coords={"bar": 42})
        assert_identical(ds + ds2, ds.coords.merge(ds2))
        # maybe unary arithmetic with empty datasets should raise instead?
        assert_identical(Dataset() + 1, Dataset())
        # in-place ops align the right operand to the left's indexes
        actual = ds.copy(deep=True)
        other = ds.isel(y=slice(2))
        actual += other
        expected = ds + other.reindex_like(ds)
        assert_identical(expected, actual)
    def test_dataset_math_errors(self):
        """Invalid math combinations raise, and failed in-place ops roll back."""
        ds = self.make_example_math_dataset()
        # a DataArray/Variable cannot be in-place combined with a whole Dataset
        with pytest.raises(TypeError):
            ds["foo"] += ds
        with pytest.raises(TypeError):
            ds["foo"].variable += ds
        with pytest.raises(ValueError, match=r"must have the same"):
            ds += ds[["bar"]]
        # verify we can rollback in-place operations if something goes wrong
        # nb. inplace datetime64 math actually will work with an integer array
        # but not floats thanks to numpy's inconsistent handling
        other = DataArray(np.datetime64("2000-01-01"), coords={"c": 2})
        actual = ds.copy(deep=True)
        with pytest.raises(TypeError):
            actual += other
        # dataset must be unchanged after the failed in-place op
        assert_identical(actual, ds)
    def test_dataset_transpose(self):
        """transpose() reorders dims per variable, with and without ellipsis."""
        ds = Dataset(
            {
                "a": (("x", "y"), np.random.randn(3, 4)),
                "b": (("y", "x"), np.random.randn(4, 3)),
            },
            coords={
                "x": range(3),
                "y": range(4),
                "xy": (("x", "y"), np.random.randn(3, 4)),
            },
        )
        # with no arguments, every variable's dims are fully reversed
        actual = ds.transpose()
        expected = Dataset(
            {"a": (("y", "x"), ds.a.values.T), "b": (("x", "y"), ds.b.values.T)},
            coords={
                "x": ds.x.values,
                "y": ds.y.values,
                "xy": (("y", "x"), ds.xy.values.T),
            },
        )
        assert_identical(expected, actual)
        # a bare ellipsis keeps the existing order
        actual = ds.transpose(...)
        expected = ds
        assert_identical(expected, actual)
        actual = ds.transpose("x", "y")
        expected = ds.map(lambda x: x.transpose("x", "y", transpose_coords=True))
        assert_identical(expected, actual)
        ds = create_test_data()
        actual = ds.transpose()
        for k in ds.variables:
            assert actual[k].dims[::-1] == ds[k].dims
        # per-variable order follows the requested global order
        new_order = ("dim2", "dim3", "dim1", "time")
        actual = ds.transpose(*new_order)
        for k in ds.variables:
            expected_dims = tuple(d for d in new_order if d in ds[k].dims)
            assert actual[k].dims == expected_dims
        # same as above but with ellipsis
        new_order = ("dim2", "dim3", "dim1", "time")
        actual = ds.transpose("dim2", "dim3", ...)
        for k in ds.variables:
            expected_dims = tuple(d for d in new_order if d in ds[k].dims)
            assert actual[k].dims == expected_dims
        # the dim list must cover all dataset dims exactly
        with pytest.raises(ValueError, match=r"permuted"):
            ds.transpose("dim1", "dim2", "dim3")
        with pytest.raises(ValueError, match=r"permuted"):
            ds.transpose("dim1", "dim2", "dim3", "time", "extra_dim")
        # Dataset has no ".T" shortcut, unlike DataArray
        assert "T" not in dir(ds)
    def test_dataset_ellipsis_transpose_different_ordered_vars(self):
        """Ellipsis transpose works when variables have differently ordered dims."""
        # https://github.com/pydata/xarray/issues/1081#issuecomment-544350457
        ds = Dataset(
            dict(
                a=(("w", "x", "y", "z"), np.ones((2, 3, 4, 5))),
                b=(("x", "w", "y", "z"), np.zeros((3, 2, 4, 5))),
            )
        )
        # "..." keeps each variable's own leading order; "z", "y" go last
        result = ds.transpose(..., "z", "y")
        assert list(result["a"].dims) == list("wxzy")
        assert list(result["b"].dims) == list("xwzy")
def test_dataset_retains_period_index_on_transpose(self):
ds = create_test_data()
ds["time"] = pd.period_range("2000-01-01", periods=20)
transposed = ds.transpose()
assert isinstance(transposed.time.to_index(), pd.PeriodIndex)
def test_dataset_diff_n1_simple(self):
ds = Dataset({"foo": ("x", [5, 5, 6, 6])})
actual = ds.diff("x")
expected = Dataset({"foo": ("x", [0, 1, 0])})
assert_equal(expected, actual)
    def test_dataset_diff_n1_label(self):
        """diff's label= picks whether lower or upper coordinate labels remain."""
        ds = Dataset({"foo": ("x", [5, 5, 6, 6])}, {"x": [0, 1, 2, 3]})
        actual = ds.diff("x", label="lower")
        expected = Dataset({"foo": ("x", [0, 1, 0])}, {"x": [0, 1, 2]})
        assert_equal(expected, actual)
        actual = ds.diff("x", label="upper")
        expected = Dataset({"foo": ("x", [0, 1, 0])}, {"x": [1, 2, 3]})
        assert_equal(expected, actual)
    def test_dataset_diff_n1(self):
        """First-order diff matches np.diff per variable; other vars unchanged."""
        ds = create_test_data(seed=1)
        actual = ds.diff("dim2")
        expected = {}
        expected["var1"] = DataArray(
            np.diff(ds["var1"].values, axis=1),
            {"dim2": ds["dim2"].values[1:]},
            ["dim1", "dim2"],
        )
        expected["var2"] = DataArray(
            np.diff(ds["var2"].values, axis=1),
            {"dim2": ds["dim2"].values[1:]},
            ["dim1", "dim2"],
        )
        # var3 does not depend on dim2, so it passes through untouched
        expected["var3"] = ds["var3"]
        expected = Dataset(expected, coords={"time": ds["time"].values})
        expected.coords["numbers"] = ("dim3", ds["numbers"].values)
        assert_equal(expected, actual)
    def test_dataset_diff_n2(self):
        """Second-order diff (n=2) matches np.diff(..., n=2) per variable."""
        ds = create_test_data(seed=1)
        actual = ds.diff("dim2", n=2)
        expected = {}
        expected["var1"] = DataArray(
            np.diff(ds["var1"].values, axis=1, n=2),
            {"dim2": ds["dim2"].values[2:]},
            ["dim1", "dim2"],
        )
        expected["var2"] = DataArray(
            np.diff(ds["var2"].values, axis=1, n=2),
            {"dim2": ds["dim2"].values[2:]},
            ["dim1", "dim2"],
        )
        # var3 does not depend on dim2, so it passes through untouched
        expected["var3"] = ds["var3"]
        expected = Dataset(expected, coords={"time": ds["time"].values})
        expected.coords["numbers"] = ("dim3", ds["numbers"].values)
        assert_equal(expected, actual)
def test_dataset_diff_exception_n_neg(self):
ds = create_test_data(seed=1)
with pytest.raises(ValueError, match=r"must be non-negative"):
ds.diff("dim2", n=-1)
def test_dataset_diff_exception_label_str(self):
ds = create_test_data(seed=1)
with pytest.raises(ValueError, match=r"'label' argument has to"):
ds.diff("dim2", label="raise_me")
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": -10}])
    def test_shift(self, fill_value):
        """shift moves data along a dim, filling vacated slots with fill_value."""
        coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
        attrs = {"meta": "data"}
        ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
        actual = ds.shift(x=1, fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value = np.nan
        elif isinstance(fill_value, dict):
            fill_value = fill_value.get("foo", np.nan)
        expected = Dataset({"foo": ("x", [fill_value, 1, 2])}, coords, attrs)
        assert_identical(expected, actual)
        # shifting an unknown dimension is an error
        with pytest.raises(ValueError, match=r"dimensions"):
            ds.shift(foo=123)
    def test_roll_coords(self):
        """roll with roll_coords=True rotates data and coordinates together."""
        coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
        attrs = {"meta": "data"}
        ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
        actual = ds.roll(x=1, roll_coords=True)
        # both the data and the "bar"/"x" coords rotate by one position
        ex_coords = {"bar": ("x", list("cab")), "x": [2, -4, 3]}
        expected = Dataset({"foo": ("x", [3, 1, 2])}, ex_coords, attrs)
        assert_identical(expected, actual)
        # rolling an unknown dimension is an error
        with pytest.raises(ValueError, match=r"dimensions"):
            ds.roll(foo=123, roll_coords=True)
def test_roll_no_coords(self):
coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
attrs = {"meta": "data"}
ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
actual = ds.roll(x=1, roll_coords=False)
expected = Dataset({"foo": ("x", [3, 1, 2])}, coords, attrs)
assert_identical(expected, actual)
with pytest.raises(ValueError, match=r"dimensions"):
ds.roll(abc=321, roll_coords=False)
    def test_roll_coords_none(self):
        """roll_coords=None warns (future default change) and rolls the coords."""
        coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
        attrs = {"meta": "data"}
        ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
        with pytest.warns(FutureWarning):
            actual = ds.roll(x=1, roll_coords=None)
        # current behaviour with None matches roll_coords=True
        ex_coords = {"bar": ("x", list("cab")), "x": [2, -4, 3]}
        expected = Dataset({"foo": ("x", [3, 1, 2])}, ex_coords, attrs)
        assert_identical(expected, actual)
    def test_roll_multidim(self):
        """Rolling one dim of a 2-D array only rotates along that dim."""
        # regression test for 2445
        arr = xr.DataArray(
            [[1, 2, 3], [4, 5, 6]],
            coords={"x": range(3), "y": range(2)},
            dims=("y", "x"),
        )
        actual = arr.roll(x=1, roll_coords=True)
        expected = xr.DataArray(
            [[3, 1, 2], [6, 4, 5]], coords=[("y", [0, 1]), ("x", [2, 0, 1])]
        )
        assert_identical(expected, actual)
def test_real_and_imag(self):
attrs = {"foo": "bar"}
ds = Dataset({"x": ((), 1 + 2j, attrs)}, attrs=attrs)
expected_re = Dataset({"x": ((), 1, attrs)}, attrs=attrs)
assert_identical(ds.real, expected_re)
expected_im = Dataset({"x": ((), 2, attrs)}, attrs=attrs)
assert_identical(ds.imag, expected_im)
def test_setattr_raises(self):
ds = Dataset({}, coords={"scalar": 1}, attrs={"foo": "bar"})
with pytest.raises(AttributeError, match=r"cannot set attr"):
ds.scalar = 2
with pytest.raises(AttributeError, match=r"cannot set attr"):
ds.foo = 2
with pytest.raises(AttributeError, match=r"cannot set attr"):
ds.other = 2
    def test_filter_by_attrs(self):
        """filter_by_attrs selects variables by attr value or by callable."""
        precip = dict(standard_name="convective_precipitation_flux")
        temp0 = dict(standard_name="air_potential_temperature", height="0 m")
        temp10 = dict(standard_name="air_potential_temperature", height="10 m")
        ds = Dataset(
            {
                "temperature_0": (["t"], [0], temp0),
                "temperature_10": (["t"], [0], temp10),
                "precipitation": (["t"], [0], precip),
            },
            coords={"time": (["t"], [0], dict(axis="T", long_name="time_in_seconds"))},
        )
        # Test return empty Dataset.
        ds.filter_by_attrs(standard_name="invalid_standard_name")
        new_ds = ds.filter_by_attrs(standard_name="invalid_standard_name")
        assert not bool(new_ds.data_vars)
        # Test return one DataArray.
        new_ds = ds.filter_by_attrs(standard_name="convective_precipitation_flux")
        assert new_ds["precipitation"].standard_name == "convective_precipitation_flux"
        assert_equal(new_ds["precipitation"], ds["precipitation"])
        # Test filter coordinates
        new_ds = ds.filter_by_attrs(long_name="time_in_seconds")
        assert new_ds["time"].long_name == "time_in_seconds"
        assert not bool(new_ds.data_vars)
        # Test return more than one DataArray.
        new_ds = ds.filter_by_attrs(standard_name="air_potential_temperature")
        assert len(new_ds.data_vars) == 2
        for var in new_ds.data_vars:
            assert new_ds[var].standard_name == "air_potential_temperature"
        # Test callable.
        new_ds = ds.filter_by_attrs(height=lambda v: v is not None)
        assert len(new_ds.data_vars) == 2
        for var in new_ds.data_vars:
            assert new_ds[var].standard_name == "air_potential_temperature"
        new_ds = ds.filter_by_attrs(height="10 m")
        assert len(new_ds.data_vars) == 1
        for var in new_ds.data_vars:
            assert new_ds[var].height == "10 m"
        # Test return empty Dataset due to conflicting filters
        new_ds = ds.filter_by_attrs(
            standard_name="convective_precipitation_flux", height="0 m"
        )
        assert not bool(new_ds.data_vars)
        # Test return one DataArray with two filter conditions
        new_ds = ds.filter_by_attrs(
            standard_name="air_potential_temperature", height="0 m"
        )
        for var in new_ds.data_vars:
            assert new_ds[var].standard_name == "air_potential_temperature"
            assert new_ds[var].height == "0 m"
            assert new_ds[var].height != "10 m"
        # Test return empty Dataset due to conflicting callables
        new_ds = ds.filter_by_attrs(
            standard_name=lambda v: False, height=lambda v: True
        )
        assert not bool(new_ds.data_vars)
def test_binary_op_propagate_indexes(self):
ds = Dataset(
{"d1": DataArray([1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]})}
)
expected = ds.xindexes["x"]
actual = (ds * 2).xindexes["x"]
assert expected is actual
    def test_binary_op_join_setting(self):
        """set_options(arithmetic_join=...) controls coord and data_vars joins."""
        # arithmetic_join applies to data array coordinates
        missing_2 = xr.Dataset({"x": [0, 1]})
        missing_0 = xr.Dataset({"x": [1, 2]})
        with xr.set_options(arithmetic_join="outer"):
            actual = missing_2 + missing_0
        expected = xr.Dataset({"x": [0, 1, 2]})
        assert_equal(actual, expected)
        # arithmetic join also applies to data_vars
        ds1 = xr.Dataset({"foo": 1, "bar": 2})
        ds2 = xr.Dataset({"bar": 2, "baz": 3})
        expected = xr.Dataset({"bar": 4})  # default is inner joining
        actual = ds1 + ds2
        assert_equal(actual, expected)
        with xr.set_options(arithmetic_join="outer"):
            expected = xr.Dataset({"foo": np.nan, "bar": 4, "baz": np.nan})
            actual = ds1 + ds2
            assert_equal(actual, expected)
        with xr.set_options(arithmetic_join="left"):
            expected = xr.Dataset({"foo": np.nan, "bar": 4})
            actual = ds1 + ds2
            assert_equal(actual, expected)
        with xr.set_options(arithmetic_join="right"):
            expected = xr.Dataset({"bar": 4, "baz": np.nan})
            actual = ds1 + ds2
            assert_equal(actual, expected)
    def test_full_like(self):
        """full_like fills per-variable values, with scalar/dict fill and dtype."""
        # For more thorough tests, see test_variable.py
        # Note: testing data_vars with mismatched dtypes
        ds = Dataset(
            {
                "d1": DataArray([1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]}),
                "d2": DataArray([1.1, 2.2, 3.3], dims=["y"]),
            },
            attrs={"foo": "bar"},
        )
        actual = full_like(ds, 2)
        expected = ds.copy(deep=True)
        expected["d1"].values = [2, 2, 2]
        expected["d2"].values = [2.0, 2.0, 2.0]
        # each variable keeps its own dtype when filling with a scalar
        assert expected["d1"].dtype == int
        assert expected["d2"].dtype == float
        assert_identical(expected, actual)
        # override dtype
        actual = full_like(ds, fill_value=True, dtype=bool)
        expected = ds.copy(deep=True)
        expected["d1"].values = [True, True, True]
        expected["d2"].values = [True, True, True]
        assert expected["d1"].dtype == bool
        assert expected["d2"].dtype == bool
        assert_identical(expected, actual)
        # with multiple fill values
        actual = full_like(ds, {"d1": 1, "d2": 2.3})
        expected = ds.assign(d1=("x", [1, 1, 1]), d2=("y", [2.3, 2.3, 2.3]))
        assert expected["d1"].dtype == int
        assert expected["d2"].dtype == float
        assert_identical(expected, actual)
        # override multiple dtypes
        actual = full_like(ds, fill_value={"d1": 1, "d2": 2.3}, dtype={"d1": bool})
        expected = ds.assign(d1=("x", [True, True, True]), d2=("y", [2.3, 2.3, 2.3]))
        assert expected["d1"].dtype == bool
        assert expected["d2"].dtype == float
        assert_identical(expected, actual)
    def test_combine_first(self):
        """combine_first outer-joins datasets, preferring values from self."""
        dsx0 = DataArray([0, 0], [("x", ["a", "b"])]).to_dataset(name="dsx0")
        dsx1 = DataArray([1, 1], [("x", ["b", "c"])]).to_dataset(name="dsx1")
        actual = dsx0.combine_first(dsx1)
        # union of the "x" labels; missing positions become NaN
        expected = Dataset(
            {"dsx0": ("x", [0, 0, np.nan]), "dsx1": ("x", [np.nan, 1, 1])},
            coords={"x": ["a", "b", "c"]},
        )
        assert_equal(actual, expected)
        assert_equal(actual, xr.merge([dsx0, dsx1]))
        # works just like xr.merge([self, other])
        dsy2 = DataArray([2, 2, 2], [("x", ["b", "c", "d"])]).to_dataset(name="dsy2")
        actual = dsx0.combine_first(dsy2)
        expected = xr.merge([dsy2, dsx0])
        assert_equal(actual, expected)
    def test_sortby(self):
        """sortby over 1-D/multi-D keys, lexsort, MultiIndex, and error cases."""
        ds = Dataset(
            {
                "A": DataArray(
                    [[1, 2], [3, 4], [5, 6]], [("x", ["c", "b", "a"]), ("y", [1, 0])]
                ),
                "B": DataArray([[5, 6], [7, 8], [9, 10]], dims=["x", "y"]),
            }
        )
        # ds sorted on "x" only
        sorted1d = Dataset(
            {
                "A": DataArray(
                    [[5, 6], [3, 4], [1, 2]], [("x", ["a", "b", "c"]), ("y", [1, 0])]
                ),
                "B": DataArray([[9, 10], [7, 8], [5, 6]], dims=["x", "y"]),
            }
        )
        # ds sorted on both "x" and "y"
        sorted2d = Dataset(
            {
                "A": DataArray(
                    [[6, 5], [4, 3], [2, 1]], [("x", ["a", "b", "c"]), ("y", [0, 1])]
                ),
                "B": DataArray([[10, 9], [8, 7], [6, 5]], dims=["x", "y"]),
            }
        )
        expected = sorted1d
        dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])])
        actual = ds.sortby(dax)
        assert_equal(actual, expected)
        # test descending order sort
        actual = ds.sortby(dax, ascending=False)
        assert_equal(actual, ds)
        # test alignment (fills in nan for 'c')
        dax_short = DataArray([98, 97], [("x", ["b", "a"])])
        actual = ds.sortby(dax_short)
        assert_equal(actual, expected)
        # test 1-D lexsort
        # dax0 is sorted first to give indices of [1, 2, 0]
        # and then dax1 would be used to move index 2 ahead of 1
        dax0 = DataArray([100, 95, 95], [("x", ["c", "b", "a"])])
        dax1 = DataArray([0, 1, 0], [("x", ["c", "b", "a"])])
        actual = ds.sortby([dax0, dax1])  # lexsort underneath gives [2, 1, 0]
        assert_equal(actual, expected)
        expected = sorted2d
        # test multi-dim sort by 1D dataarray values
        day = DataArray([90, 80], [("y", [1, 0])])
        actual = ds.sortby([day, dax])
        assert_equal(actual, expected)
        # test exception-raising
        with pytest.raises(KeyError) as excinfo:
            actual = ds.sortby("z")
        with pytest.raises(ValueError) as excinfo:
            actual = ds.sortby(ds["A"])
        assert "DataArray is not 1-D" in str(excinfo.value)
        expected = sorted1d
        actual = ds.sortby("x")
        assert_equal(actual, expected)
        # test pandas.MultiIndex
        indices = (("b", 1), ("b", 0), ("a", 1), ("a", 0))
        midx = pd.MultiIndex.from_tuples(indices, names=["one", "two"])
        ds_midx = Dataset(
            {
                "A": DataArray(
                    [[1, 2], [3, 4], [5, 6], [7, 8]], [("x", midx), ("y", [1, 0])]
                ),
                "B": DataArray([[5, 6], [7, 8], [9, 10], [11, 12]], dims=["x", "y"]),
            }
        )
        actual = ds_midx.sortby("x")
        midx_reversed = pd.MultiIndex.from_tuples(
            tuple(reversed(indices)), names=["one", "two"]
        )
        expected = Dataset(
            {
                "A": DataArray(
                    [[7, 8], [5, 6], [3, 4], [1, 2]],
                    [("x", midx_reversed), ("y", [1, 0])],
                ),
                "B": DataArray([[11, 12], [9, 10], [7, 8], [5, 6]], dims=["x", "y"]),
            }
        )
        assert_equal(actual, expected)
        # multi-dim sort by coordinate objects
        expected = sorted2d
        actual = ds.sortby(["x", "y"])
        assert_equal(actual, expected)
        # test descending order sort
        actual = ds.sortby(["x", "y"], ascending=False)
        assert_equal(actual, ds)
    def test_attribute_access(self):
        """Variables, coords, and attrs are reachable via attribute access."""
        ds = create_test_data(seed=1)
        for key in ["var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers"]:
            assert_equal(ds[key], getattr(ds, key))
            assert key in dir(ds)
        # same holds on a DataArray pulled out of the dataset
        for key in ["dim3", "dim1", "numbers"]:
            assert_equal(ds["var3"][key], getattr(ds.var3, key))
            assert key in dir(ds["var3"])
        # attrs
        assert ds["var3"].attrs["foo"] == ds.var3.foo
        assert "foo" in dir(ds["var3"])
    def test_ipython_key_completion(self):
        """_ipython_key_completions_ lists exactly the valid [] keys."""
        ds = create_test_data(seed=1)
        actual = ds._ipython_key_completions_()
        expected = ["var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers"]
        for item in actual:
            ds[item]  # should not raise
        assert sorted(actual) == sorted(expected)
        # for dataarray
        actual = ds["var3"]._ipython_key_completions_()
        expected = ["dim3", "dim1", "numbers"]
        for item in actual:
            ds["var3"][item]  # should not raise
        assert sorted(actual) == sorted(expected)
        # MultiIndex
        ds_midx = ds.stack(dim12=["dim1", "dim2"])
        actual = ds_midx._ipython_key_completions_()
        expected = [
            "var1",
            "var2",
            "var3",
            "time",
            "dim1",
            "dim2",
            "dim3",
            "numbers",
            "dim12",
        ]
        for item in actual:
            ds_midx[item]  # should not raise
        assert sorted(actual) == sorted(expected)
        # coords
        actual = ds.coords._ipython_key_completions_()
        expected = ["time", "dim1", "dim2", "dim3", "numbers"]
        for item in actual:
            ds.coords[item]  # should not raise
        assert sorted(actual) == sorted(expected)
        actual = ds["var3"].coords._ipython_key_completions_()
        expected = ["dim1", "dim3", "numbers"]
        for item in actual:
            ds["var3"].coords[item]  # should not raise
        assert sorted(actual) == sorted(expected)
        # data_vars
        actual = ds.data_vars._ipython_key_completions_()
        expected = ["var1", "var2", "var3", "dim1"]
        for item in actual:
            ds.data_vars[item]  # should not raise
        assert sorted(actual) == sorted(expected)
    def test_polyfit_output(self):
        """polyfit names outputs per variable; full=True adds diagnostics."""
        ds = create_test_data(seed=1)
        out = ds.polyfit("dim2", 2, full=False)
        assert "var1_polyfit_coefficients" in out
        out = ds.polyfit("dim1", 2, full=True)
        assert "var1_polyfit_coefficients" in out
        assert "dim1_matrix_rank" in out
        # no data variable depends on "time" here, so the result is empty
        out = ds.polyfit("time", 2)
        assert len(out.data_vars) == 0
def test_polyfit_warnings(self):
ds = create_test_data(seed=1)
with warnings.catch_warnings(record=True) as ws:
ds.var1.polyfit("dim2", 10, full=False)
assert len(ws) == 1
assert ws[0].category == np.RankWarning
ds.var1.polyfit("dim2", 10, full=True)
assert len(ws) == 1
    def test_pad(self):
        """pad widens dim2 by one on each side: 42 fills data, NaN fills coords."""
        ds = create_test_data(seed=1)
        padded = ds.pad(dim2=(1, 1), constant_values=42)
        assert padded["dim2"].shape == (11,)
        assert padded["var1"].shape == (8, 11)
        assert padded["var2"].shape == (8, 11)
        # var3 does not depend on dim2, so its shape is unchanged
        assert padded["var3"].shape == (10, 8)
        assert dict(padded.dims) == {"dim1": 8, "dim2": 11, "dim3": 10, "time": 20}
        np.testing.assert_equal(padded["var1"].isel(dim2=[0, -1]).data, 42)
        # the index coordinate is padded with NaN, not with constant_values
        np.testing.assert_equal(padded["dim2"][[0, -1]].data, np.nan)
def test_astype_attrs(self):
data = create_test_data(seed=123)
data.attrs["foo"] = "bar"
assert data.attrs == data.astype(float).attrs
assert data.var1.attrs == data.astype(float).var1.attrs
assert not data.astype(float, keep_attrs=False).attrs
assert not data.astype(float, keep_attrs=False).var1.attrs
    @pytest.mark.parametrize("parser", ["pandas", "python"])
    @pytest.mark.parametrize(
        "engine", ["python", None, pytest.param("numexpr", marks=[requires_numexpr])]
    )
    @pytest.mark.parametrize(
        "backend", ["numpy", pytest.param("dask", marks=[requires_dask])]
    )
    def test_query(self, backend, engine, parser):
        """Test querying a dataset.

        Each query is checked against the equivalent boolean ``isel`` over the
        raw numpy arrays, across numpy/dask backends and both query parsers.
        """
        # setup test data
        np.random.seed(42)
        a = np.arange(0, 10, 1)
        b = np.random.randint(0, 100, size=10)
        c = np.linspace(0, 1, 20)
        d = np.random.choice(["foo", "bar", "baz"], size=30, replace=True).astype(
            object
        )
        e = np.arange(0, 10 * 20).reshape(10, 20)
        f = np.random.normal(0, 1, size=(10, 20, 30))
        if backend == "numpy":
            ds = Dataset(
                {
                    "a": ("x", a),
                    "b": ("x", b),
                    "c": ("y", c),
                    "d": ("z", d),
                    "e": (("x", "y"), e),
                    "f": (("x", "y", "z"), f),
                }
            )
        elif backend == "dask":
            ds = Dataset(
                {
                    "a": ("x", da.from_array(a, chunks=3)),
                    "b": ("x", da.from_array(b, chunks=3)),
                    "c": ("y", da.from_array(c, chunks=7)),
                    "d": ("z", da.from_array(d, chunks=12)),
                    "e": (("x", "y"), da.from_array(e, chunks=(3, 7))),
                    "f": (("x", "y", "z"), da.from_array(f, chunks=(3, 7, 12))),
                }
            )
        # query single dim, single variable
        actual = ds.query(x="a > 5", engine=engine, parser=parser)
        expect = ds.isel(x=(a > 5))
        assert_identical(expect, actual)
        # query single dim, single variable, via dict
        actual = ds.query(dict(x="a > 5"), engine=engine, parser=parser)
        expect = ds.isel(dict(x=(a > 5)))
        assert_identical(expect, actual)
        # query single dim, single variable
        actual = ds.query(x="b > 50", engine=engine, parser=parser)
        expect = ds.isel(x=(b > 50))
        assert_identical(expect, actual)
        # query single dim, single variable
        actual = ds.query(y="c < .5", engine=engine, parser=parser)
        expect = ds.isel(y=(c < 0.5))
        assert_identical(expect, actual)
        # query single dim, single string variable
        if parser == "pandas":
            # N.B., this query currently only works with the pandas parser
            # xref https://github.com/pandas-dev/pandas/issues/40436
            actual = ds.query(z='d == "bar"', engine=engine, parser=parser)
            expect = ds.isel(z=(d == "bar"))
            assert_identical(expect, actual)
        # query single dim, multiple variables
        actual = ds.query(x="(a > 5) & (b > 50)", engine=engine, parser=parser)
        expect = ds.isel(x=((a > 5) & (b > 50)))
        assert_identical(expect, actual)
        # query single dim, multiple variables with computation
        actual = ds.query(x="(a * b) > 250", engine=engine, parser=parser)
        expect = ds.isel(x=(a * b) > 250)
        assert_identical(expect, actual)
        # check pandas query syntax is supported
        if parser == "pandas":
            actual = ds.query(x="(a > 5) and (b > 50)", engine=engine, parser=parser)
            expect = ds.isel(x=((a > 5) & (b > 50)))
            assert_identical(expect, actual)
        # query multiple dims via kwargs
        actual = ds.query(x="a > 5", y="c < .5", engine=engine, parser=parser)
        expect = ds.isel(x=(a > 5), y=(c < 0.5))
        assert_identical(expect, actual)
        # query multiple dims via kwargs
        if parser == "pandas":
            actual = ds.query(
                x="a > 5", y="c < .5", z="d == 'bar'", engine=engine, parser=parser
            )
            expect = ds.isel(x=(a > 5), y=(c < 0.5), z=(d == "bar"))
            assert_identical(expect, actual)
        # query multiple dims via dict
        actual = ds.query(dict(x="a > 5", y="c < .5"), engine=engine, parser=parser)
        expect = ds.isel(dict(x=(a > 5), y=(c < 0.5)))
        assert_identical(expect, actual)
        # query multiple dims via dict
        if parser == "pandas":
            actual = ds.query(
                dict(x="a > 5", y="c < .5", z="d == 'bar'"),
                engine=engine,
                parser=parser,
            )
            expect = ds.isel(dict(x=(a > 5), y=(c < 0.5), z=(d == "bar")))
            assert_identical(expect, actual)
        # test error handling
        with pytest.raises(ValueError):
            ds.query("a > 5")  # must be dict or kwargs
        with pytest.raises(ValueError):
            ds.query(x=(a > 5))  # must be query string
        with pytest.raises(IndexError):
            ds.query(y="a > 5")  # wrong length dimension
        with pytest.raises(IndexError):
            ds.query(x="c < .5")  # wrong length dimension
        with pytest.raises(IndexError):
            ds.query(x="e > 100")  # wrong number of dimensions
        with pytest.raises(UndefinedVariableError):
            ds.query(x="spam > 50")  # name not present
# Py.test tests
@pytest.fixture(params=[None])
def data_set(request):
    """Default test Dataset built from ``create_test_data`` (seed from the param)."""
    return create_test_data(request.param)
@pytest.mark.parametrize("test_elements", ([1, 2], np.array([1, 2]), DataArray([1, 2])))
def test_isin(test_elements):
    """``Dataset.isin`` accepts list, ndarray and DataArray element containers."""
    expected = Dataset(
        data_vars={
            "var1": (("dim1",), [0, 1]),
            "var2": (("dim1",), [1, 1]),
            "var3": (("dim1",), [0, 1]),
        }
    ).astype("bool")
    result = Dataset(
        data_vars={
            "var1": (("dim1",), [0, 1]),
            "var2": (("dim1",), [1, 2]),
            "var3": (("dim1",), [0, 1]),
        }
    ).isin(test_elements)
    assert_equal(result, expected)
@pytest.mark.skipif(not has_dask, reason="requires dask")
@pytest.mark.parametrize("test_elements", ([1, 2], np.array([1, 2]), DataArray([1, 2])))
def test_isin_dask(test_elements):
    """Same as ``test_isin`` but with the dataset chunked through dask first."""
    expected = Dataset(
        data_vars={
            "var1": (("dim1",), [0, 1]),
            "var2": (("dim1",), [1, 1]),
            "var3": (("dim1",), [0, 1]),
        }
    ).astype("bool")
    result = (
        Dataset(
            data_vars={
                "var1": (("dim1",), [0, 1]),
                "var2": (("dim1",), [1, 2]),
                "var3": (("dim1",), [0, 1]),
            }
        )
        .chunk(1)
        .isin(test_elements)
        .compute()
    )
    assert_equal(result, expected)
def test_isin_dataset():
    """Passing a Dataset (rather than array-like elements) to isin raises TypeError."""
    ds = Dataset({"x": [1, 2]})
    with pytest.raises(TypeError):
        ds.isin(ds)
@pytest.mark.parametrize(
    "unaligned_coords",
    (
        {"x": [2, 1, 0]},
        {"x": (["x"], np.asarray([2, 1, 0]))},
        {"x": (["x"], np.asarray([1, 2, 0]))},
        {"x": pd.Index([2, 1, 0])},
        {"x": Variable(dims="x", data=[0, 2, 1])},
        {"x": IndexVariable(dims="x", data=[0, 1, 2])},
        {"y": 42},
        {"y": ("x", [2, 1, 0])},
        {"y": ("x", np.asarray([2, 1, 0]))},
        {"y": (["x"], np.asarray([2, 1, 0]))},
    ),
)
@pytest.mark.parametrize("coords", ({"x": ("x", [0, 1, 2])}, {"x": [0, 1, 2]}))
def test_dataset_constructor_aligns_to_explicit_coords(unaligned_coords, coords):
    """Constructor output matches assigning the DataArray after the coords exist."""
    a = xr.DataArray([1, 2, 3], dims=["x"], coords=unaligned_coords)
    expected = xr.Dataset(coords=coords)
    expected["a"] = a
    result = xr.Dataset({"a": a}, coords=coords)
    assert_equal(expected, result)
def test_error_message_on_set_supplied():
    """A ``set`` as variable data is rejected with a message naming the bad type."""
    with pytest.raises(TypeError, match="has invalid type <class 'set'>"):
        xr.Dataset(dict(date=[1, 2, 3], sec={4}))
@pytest.mark.parametrize("unaligned_coords", ({"y": ("b", np.asarray([2, 1, 0]))},))
def test_constructor_raises_with_invalid_coords(unaligned_coords):
    """A coord on a dimension ("b") absent from the DataArray dims raises ValueError."""
    with pytest.raises(ValueError, match="not a subset of the DataArray dimensions"):
        xr.DataArray([1, 2, 3], dims=["x"], coords=unaligned_coords)
def test_dir_expected_attrs(data_set):
    """dir() exposes methods plus variable, dimension and coordinate names."""
    some_expected_attrs = {"pipe", "mean", "isnull", "var1", "dim2", "numbers"}
    result = dir(data_set)
    assert set(result) >= some_expected_attrs
def test_dir_non_string(data_set):
    """Non-string keys must neither appear in nor break dir()."""
    # add a numbered key to ensure this doesn't break dir
    data_set[5] = "foo"
    result = dir(data_set)
    assert 5 not in result
    # GH2172
    sample_data = np.random.uniform(size=[2, 2000, 10000])
    x = xr.Dataset({"sample_data": (sample_data.shape, sample_data)})
    x2 = x["sample_data"]
    dir(x2)
def test_dir_unicode(data_set):
    """String keys show up in dir() completions."""
    data_set["unicode"] = "uni"
    result = dir(data_set)
    assert "unicode" in result
@pytest.fixture(params=[1])
def ds(request):
    """Rolling/coarsen test Dataset.

    param 1: variables over (y, x) and (time, y); param 2 (used via
    ``indirect=True``): variables over (time, y), (time,) and (x, time).
    """
    if request.param == 1:
        return Dataset(
            dict(
                z1=(["y", "x"], np.random.randn(2, 8)),
                z2=(["time", "y"], np.random.randn(10, 2)),
            ),
            dict(
                x=("x", np.linspace(0, 1.0, 8)),
                time=("time", np.linspace(0, 1.0, 10)),
                c=("y", ["a", "b"]),
                y=range(2),
            ),
        )
    if request.param == 2:
        return Dataset(
            {
                "z1": (["time", "y"], np.random.randn(10, 2)),
                "z2": (["time"], np.random.randn(10)),
                "z3": (["x", "time"], np.random.randn(8, 10)),
            },
            {
                "x": ("x", np.linspace(0, 1.0, 8)),
                "time": ("time", np.linspace(0, 1.0, 10)),
                "c": ("y", ["a", "b"]),
                "y": range(2),
            },
        )
def test_coarsen_absent_dims_error(ds):
    """Coarsening over a dimension not in the Dataset raises ValueError."""
    with pytest.raises(ValueError, match=r"not found in Dataset."):
        ds.coarsen(foo=2)
@pytest.mark.parametrize("dask", [True, False])
@pytest.mark.parametrize(("boundary", "side"), [("trim", "left"), ("pad", "right")])
def test_coarsen(ds, dask, boundary, side):
    """Dataset.coarsen matches per-variable DataArray.coarsen results."""
    if dask and has_dask:
        ds = ds.chunk({"x": 4})
    actual = ds.coarsen(time=2, x=3, boundary=boundary, side=side).max()
    assert_equal(
        actual["z1"], ds["z1"].coarsen(x=3, boundary=boundary, side=side).max()
    )
    # coordinate should be mean by default
    assert_equal(
        actual["time"], ds["time"].coarsen(time=2, boundary=boundary, side=side).mean()
    )
@pytest.mark.parametrize("dask", [True, False])
def test_coarsen_coords(ds, dask):
    """coord_func overrides the default mean aggregation applied to coordinates."""
    if dask and has_dask:
        ds = ds.chunk({"x": 4})
    # check if coord_func works
    actual = ds.coarsen(time=2, x=3, boundary="trim", coord_func={"time": "max"}).max()
    assert_equal(actual["z1"], ds["z1"].coarsen(x=3, boundary="trim").max())
    assert_equal(actual["time"], ds["time"].coarsen(time=2, boundary="trim").max())
    # raise if exact
    with pytest.raises(ValueError):
        ds.coarsen(x=3).mean()
    # should be no error
    ds.isel(x=slice(0, 3 * (len(ds["x"]) // 3))).coarsen(x=3).mean()
    # working test with pd.time
    # NOTE(review): "15/12/1999" is presumably parsed day-first (15 Dec 1999)
    # since 15 cannot be a month — the exact dates don't matter here
    da = xr.DataArray(
        np.linspace(0, 365, num=364),
        dims="time",
        coords={"time": pd.date_range("15/12/1999", periods=364)},
    )
    actual = da.coarsen(time=2).mean()
@requires_cftime
def test_coarsen_coords_cftime():
    """Coarsening a cftime coordinate yields the window-mean timestamps."""
    times = xr.cftime_range("2000", periods=6)
    da = xr.DataArray(range(6), [("time", times)])
    actual = da.coarsen(time=3).mean()
    expected_times = xr.cftime_range("2000-01-02", freq="3D", periods=2)
    np.testing.assert_array_equal(actual.time, expected_times)
@pytest.mark.parametrize(
    "funcname, argument",
    [
        ("reduce", (np.mean,)),
        ("mean", ()),
    ],
)
def test_coarsen_keep_attrs(funcname, argument):
    """keep_attrs on coarsen reductions: kept by default, keyword beats global option."""
    global_attrs = {"units": "test", "long_name": "testing"}
    da_attrs = {"da_attr": "test"}
    attrs_coords = {"attrs_coords": "test"}
    da_not_coarsend_attrs = {"da_not_coarsend_attr": "test"}
    data = np.linspace(10, 15, 100)
    coords = np.linspace(1, 10, 100)
    ds = Dataset(
        data_vars={
            "da": ("coord", data, da_attrs),
            "da_not_coarsend": ("no_coord", data, da_not_coarsend_attrs),
        },
        coords={"coord": ("coord", coords, attrs_coords)},
        attrs=global_attrs,
    )
    # attrs are now kept per default
    func = getattr(ds.coarsen(dim={"coord": 5}), funcname)
    result = func(*argument)
    assert result.attrs == global_attrs
    assert result.da.attrs == da_attrs
    assert result.da_not_coarsend.attrs == da_not_coarsend_attrs
    assert result.coord.attrs == attrs_coords
    assert result.da.name == "da"
    assert result.da_not_coarsend.name == "da_not_coarsend"
    # discard attrs
    func = getattr(ds.coarsen(dim={"coord": 5}), funcname)
    result = func(*argument, keep_attrs=False)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_coarsend.attrs == {}
    assert result.coord.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_coarsend.name == "da_not_coarsend"
    # test discard attrs using global option
    func = getattr(ds.coarsen(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=False):
        result = func(*argument)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_coarsend.attrs == {}
    assert result.coord.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_coarsend.name == "da_not_coarsend"
    # keyword takes precedence over global option
    func = getattr(ds.coarsen(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=False):
        result = func(*argument, keep_attrs=True)
    assert result.attrs == global_attrs
    assert result.da.attrs == da_attrs
    assert result.da_not_coarsend.attrs == da_not_coarsend_attrs
    assert result.coord.attrs == attrs_coords
    assert result.da.name == "da"
    assert result.da_not_coarsend.name == "da_not_coarsend"
    func = getattr(ds.coarsen(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=True):
        result = func(*argument, keep_attrs=False)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_coarsend.attrs == {}
    assert result.coord.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_coarsend.name == "da_not_coarsend"
def test_coarsen_keep_attrs_deprecated():
    """Passing keep_attrs to coarsen() itself warns; the reduction kwarg wins."""
    global_attrs = {"units": "test", "long_name": "testing"}
    attrs_da = {"da_attr": "test"}
    data = np.linspace(10, 15, 100)
    coords = np.linspace(1, 10, 100)
    ds = Dataset(
        data_vars={"da": ("coord", data)},
        coords={"coord": coords},
        attrs=global_attrs,
    )
    ds.da.attrs = attrs_da
    # deprecated option
    with pytest.warns(
        FutureWarning, match="Passing ``keep_attrs`` to ``coarsen`` is deprecated"
    ):
        result = ds.coarsen(dim={"coord": 5}, keep_attrs=False).mean()
    assert result.attrs == {}
    assert result.da.attrs == {}
    # the keep_attrs in the reduction function takes precedence
    with pytest.warns(
        FutureWarning, match="Passing ``keep_attrs`` to ``coarsen`` is deprecated"
    ):
        result = ds.coarsen(dim={"coord": 5}, keep_attrs=True).mean(keep_attrs=False)
    assert result.attrs == {}
    assert result.da.attrs == {}
@pytest.mark.slow
@pytest.mark.parametrize("ds", (1, 2), indirect=True)
@pytest.mark.parametrize("window", (1, 2, 3, 4))
@pytest.mark.parametrize("name", ("sum", "mean", "std", "var", "min", "max", "median"))
def test_coarsen_reduce(ds, window, name):
    """coarsen.reduce(np.nan<name>) agrees with the named coarsen method."""
    # Use boundary="trim" to accommodate all window sizes used in tests
    coarsen_obj = ds.coarsen(time=window, boundary="trim")
    # add nan prefix to numpy methods to get similar behavior as bottleneck
    actual = coarsen_obj.reduce(getattr(np, f"nan{name}"))
    expected = getattr(coarsen_obj, name)()
    assert_allclose(actual, expected)
    # make sure the order of data_var are not changed.
    assert list(ds.data_vars.keys()) == list(actual.data_vars.keys())
    # Make sure the dimension order is restored
    for key, src_var in ds.data_vars.items():
        assert src_var.dims == actual[key].dims
@pytest.mark.parametrize(
    "funcname, argument",
    [
        ("reduce", (np.mean,)),
        ("mean", ()),
        ("construct", ("window_dim",)),
        ("count", ()),
    ],
)
def test_rolling_keep_attrs(funcname, argument):
    """keep_attrs on rolling operations: kept by default, keyword beats global option."""
    global_attrs = {"units": "test", "long_name": "testing"}
    da_attrs = {"da_attr": "test"}
    da_not_rolled_attrs = {"da_not_rolled_attr": "test"}
    data = np.linspace(10, 15, 100)
    coords = np.linspace(1, 10, 100)
    ds = Dataset(
        data_vars={"da": ("coord", data), "da_not_rolled": ("no_coord", data)},
        coords={"coord": coords},
        attrs=global_attrs,
    )
    ds.da.attrs = da_attrs
    ds.da_not_rolled.attrs = da_not_rolled_attrs
    # attrs are now kept per default
    func = getattr(ds.rolling(dim={"coord": 5}), funcname)
    result = func(*argument)
    assert result.attrs == global_attrs
    assert result.da.attrs == da_attrs
    assert result.da_not_rolled.attrs == da_not_rolled_attrs
    assert result.da.name == "da"
    assert result.da_not_rolled.name == "da_not_rolled"
    # discard attrs
    func = getattr(ds.rolling(dim={"coord": 5}), funcname)
    result = func(*argument, keep_attrs=False)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_rolled.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_rolled.name == "da_not_rolled"
    # test discard attrs using global option
    func = getattr(ds.rolling(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=False):
        result = func(*argument)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_rolled.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_rolled.name == "da_not_rolled"
    # keyword takes precedence over global option
    func = getattr(ds.rolling(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=False):
        result = func(*argument, keep_attrs=True)
    assert result.attrs == global_attrs
    assert result.da.attrs == da_attrs
    assert result.da_not_rolled.attrs == da_not_rolled_attrs
    assert result.da.name == "da"
    assert result.da_not_rolled.name == "da_not_rolled"
    func = getattr(ds.rolling(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=True):
        result = func(*argument, keep_attrs=False)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_rolled.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_rolled.name == "da_not_rolled"
def test_rolling_keep_attrs_deprecated():
    """Passing keep_attrs to rolling() itself warns; the reduction kwarg wins."""
    global_attrs = {"units": "test", "long_name": "testing"}
    attrs_da = {"da_attr": "test"}
    data = np.linspace(10, 15, 100)
    coords = np.linspace(1, 10, 100)
    ds = Dataset(
        data_vars={"da": ("coord", data)},
        coords={"coord": coords},
        attrs=global_attrs,
    )
    ds.da.attrs = attrs_da
    # deprecated option
    with pytest.warns(
        FutureWarning, match="Passing ``keep_attrs`` to ``rolling`` is deprecated"
    ):
        result = ds.rolling(dim={"coord": 5}, keep_attrs=False).construct("window_dim")
    assert result.attrs == {}
    assert result.da.attrs == {}
    # the keep_attrs in the reduction function takes precedence
    with pytest.warns(
        FutureWarning, match="Passing ``keep_attrs`` to ``rolling`` is deprecated"
    ):
        result = ds.rolling(dim={"coord": 5}, keep_attrs=True).construct(
            "window_dim", keep_attrs=False
        )
    assert result.attrs == {}
    assert result.da.attrs == {}
def test_rolling_properties(ds):
    """Invalid rolling arguments (bad window, min_periods, unknown dim) raise."""
    # catching invalid args
    with pytest.raises(ValueError, match="window must be > 0"):
        ds.rolling(time=-2)
    with pytest.raises(ValueError, match="min_periods must be greater than zero"):
        ds.rolling(time=2, min_periods=0)
    with pytest.raises(KeyError, match="time2"):
        ds.rolling(time2=2)
@pytest.mark.parametrize("name", ("sum", "mean", "std", "var", "min", "max", "median"))
@pytest.mark.parametrize("center", (True, False, None))
@pytest.mark.parametrize("min_periods", (1, None))
@pytest.mark.parametrize("key", ("z1", "z2"))
def test_rolling_wrapped_bottleneck(ds, name, center, min_periods, key):
    """Rolling reductions agree with the corresponding bottleneck move_* function."""
    bn = pytest.importorskip("bottleneck", minversion="1.1")
    # Test all bottleneck functions
    rolling_obj = ds.rolling(time=7, min_periods=min_periods)
    func_name = f"move_{name}"
    actual = getattr(rolling_obj, name)()
    if key == "z1":  # z1 does not depend on 'Time' axis. Stored as it is.
        expected = ds[key]
    elif key == "z2":
        expected = getattr(bn, func_name)(
            ds[key].values, window=7, axis=0, min_count=min_periods
        )
    else:
        raise ValueError
    assert_array_equal(actual[key].values, expected)
    # Test center
    rolling_obj = ds.rolling(time=7, center=center)
    actual = getattr(rolling_obj, name)()["time"]
    assert_equal(actual, ds["time"])
@requires_numbagg
def test_rolling_exp(ds):
    """rolling_exp().mean() on a Dataset returns a Dataset."""
    result = ds.rolling_exp(time=10, window_type="span").mean()
    assert isinstance(result, Dataset)
@requires_numbagg
def test_rolling_exp_keep_attrs(ds):
    """keep_attrs for rolling_exp: default keeps, kwarg beats global option."""
    attrs_global = {"attrs": "global"}
    attrs_z1 = {"attr": "z1"}
    ds.attrs = attrs_global
    ds.z1.attrs = attrs_z1
    # attrs are kept per default
    result = ds.rolling_exp(time=10).mean()
    assert result.attrs == attrs_global
    assert result.z1.attrs == attrs_z1
    # discard attrs
    result = ds.rolling_exp(time=10).mean(keep_attrs=False)
    assert result.attrs == {}
    assert result.z1.attrs == {}
    # test discard attrs using global option
    with set_options(keep_attrs=False):
        result = ds.rolling_exp(time=10).mean()
    assert result.attrs == {}
    assert result.z1.attrs == {}
    # keyword takes precedence over global option
    with set_options(keep_attrs=False):
        result = ds.rolling_exp(time=10).mean(keep_attrs=True)
    assert result.attrs == attrs_global
    assert result.z1.attrs == attrs_z1
    with set_options(keep_attrs=True):
        result = ds.rolling_exp(time=10).mean(keep_attrs=False)
    assert result.attrs == {}
    assert result.z1.attrs == {}
    # keep_attrs on the rolling_exp constructor itself is a no-op and warns
    with pytest.warns(
        UserWarning, match="Passing ``keep_attrs`` to ``rolling_exp`` has no effect."
    ):
        ds.rolling_exp(time=10, keep_attrs=True)
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("min_periods", (None, 1, 2, 3))
@pytest.mark.parametrize("window", (1, 2, 3, 4))
def test_rolling_pandas_compat(center, window, min_periods):
    """Dataset.rolling matches pandas DataFrame.rolling on the same data."""
    df = pd.DataFrame(
        {
            "x": np.random.randn(20),
            "y": np.random.randn(20),
            "time": np.linspace(0, 1, 20),
        }
    )
    ds = Dataset.from_dataframe(df)
    # pandas requires min_periods <= window; clamp to keep the comparison valid
    if min_periods is not None and window < min_periods:
        min_periods = window
    df_rolling = df.rolling(window, center=center, min_periods=min_periods).mean()
    ds_rolling = ds.rolling(index=window, center=center, min_periods=min_periods).mean()
    np.testing.assert_allclose(df_rolling["x"].values, ds_rolling["x"].values)
    np.testing.assert_allclose(df_rolling.index, ds_rolling["index"])
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("window", (1, 2, 3, 4))
def test_rolling_construct(center, window):
    """rolling.construct + mean over the window dim reproduces pandas rolling mean."""
    df = pd.DataFrame(
        {
            "x": np.random.randn(20),
            "y": np.random.randn(20),
            "time": np.linspace(0, 1, 20),
        }
    )
    ds = Dataset.from_dataframe(df)
    df_rolling = df.rolling(window, center=center, min_periods=1).mean()
    ds_rolling = ds.rolling(index=window, center=center)
    ds_rolling_mean = ds_rolling.construct("window").mean("window")
    np.testing.assert_allclose(df_rolling["x"].values, ds_rolling_mean["x"].values)
    np.testing.assert_allclose(df_rolling.index, ds_rolling_mean["index"])
    # with stride
    ds_rolling_mean = ds_rolling.construct("window", stride=2).mean("window")
    np.testing.assert_allclose(df_rolling["x"][::2].values, ds_rolling_mean["x"].values)
    np.testing.assert_allclose(df_rolling.index[::2], ds_rolling_mean["index"])
    # with fill_value
    ds_rolling_mean = ds_rolling.construct("window", stride=2, fill_value=0.0).mean(
        "window"
    )
    assert (ds_rolling_mean.isnull().sum() == 0).to_array(dim="vars").all()
    assert (ds_rolling_mean["x"] == 0.0).sum() >= 0
@pytest.mark.slow
@pytest.mark.parametrize("ds", (1, 2), indirect=True)
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("min_periods", (None, 1, 2, 3))
@pytest.mark.parametrize("window", (1, 2, 3, 4))
@pytest.mark.parametrize("name", ("sum", "mean", "std", "var", "min", "max", "median"))
def test_rolling_reduce(ds, center, min_periods, window, name):
    """rolling.reduce(np.nan<name>) agrees with the named rolling method."""
    # min_periods must not exceed the window; clamp to keep params valid
    if min_periods is not None and window < min_periods:
        min_periods = window
    if name == "std" and window == 1:
        pytest.skip("std with window == 1 is unstable in bottleneck")
    rolling_obj = ds.rolling(time=window, center=center, min_periods=min_periods)
    # add nan prefix to numpy methods to get similar behavior as bottleneck
    actual = rolling_obj.reduce(getattr(np, f"nan{name}"))
    expected = getattr(rolling_obj, name)()
    assert_allclose(actual, expected)
    assert ds.dims == actual.dims
    # make sure the order of data_var are not changed.
    assert list(ds.data_vars.keys()) == list(actual.data_vars.keys())
    # Make sure the dimension order is restored
    for key, src_var in ds.data_vars.items():
        assert src_var.dims == actual[key].dims
@pytest.mark.parametrize("ds", (2,), indirect=True)
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("min_periods", (None, 1))
@pytest.mark.parametrize("name", ("sum", "max"))
@pytest.mark.parametrize("dask", (True, False))
def test_ndrolling_reduce(ds, center, min_periods, name, dask):
    """N-dim rolling equals applying the two 1-dim rollings in either order."""
    if dask and has_dask:
        ds = ds.chunk({"x": 4})
    rolling_obj = ds.rolling(time=4, x=3, center=center, min_periods=min_periods)
    actual = getattr(rolling_obj, name)()
    expected = getattr(
        getattr(
            ds.rolling(time=4, center=center, min_periods=min_periods), name
        )().rolling(x=3, center=center, min_periods=min_periods),
        name,
    )()
    assert_allclose(actual, expected)
    assert actual.dims == expected.dims
    # Do it in the opposite order
    expected = getattr(
        getattr(
            ds.rolling(x=3, center=center, min_periods=min_periods), name
        )().rolling(time=4, center=center, min_periods=min_periods),
        name,
    )()
    assert_allclose(actual, expected)
    assert actual.dims == expected.dims
@pytest.mark.parametrize("center", (True, False, (True, False)))
@pytest.mark.parametrize("fill_value", (np.nan, 0.0))
@pytest.mark.parametrize("dask", (True, False))
def test_ndrolling_construct(center, fill_value, dask):
    """N-dim rolling construct equals chaining the per-dim constructs."""
    da = DataArray(
        np.arange(5 * 6 * 7).reshape(5, 6, 7).astype(float),
        dims=["x", "y", "z"],
        coords={"x": ["a", "b", "c", "d", "e"], "y": np.arange(6)},
    )
    ds = xr.Dataset({"da": da})
    if dask and has_dask:
        ds = ds.chunk({"x": 4})
    actual = ds.rolling(x=3, z=2, center=center).construct(
        x="x1", z="z1", fill_value=fill_value
    )
    # a scalar center applies to both dims; normalize to a pair
    if not isinstance(center, tuple):
        center = (center, center)
    expected = (
        ds.rolling(x=3, center=center[0])
        .construct(x="x1", fill_value=fill_value)
        .rolling(z=2, center=center[1])
        .construct(z="z1", fill_value=fill_value)
    )
    assert_allclose(actual, expected)
def test_raise_no_warning_for_nan_in_binary_ops():
    """Comparing a Dataset containing NaN against a scalar must emit no warnings."""
    with pytest.warns(None) as record:
        # np.nan instead of the np.NaN alias (removed in NumPy 2.0)
        Dataset(data_vars={"x": ("y", [1, 2, np.nan])}) > 0
    assert len(record) == 0
@pytest.mark.filterwarnings("error")
@pytest.mark.parametrize("ds", (2,), indirect=True)
def test_raise_no_warning_assert_close(ds):
    """assert_allclose on identical datasets must not emit any warning."""
    assert_allclose(ds, ds)
@pytest.mark.xfail(reason="See https://github.com/pydata/xarray/pull/4369 or docstring")
@pytest.mark.filterwarnings("error")
@pytest.mark.parametrize("ds", (2,), indirect=True)
@pytest.mark.parametrize("name", ("mean", "max"))
def test_raise_no_warning_dask_rolling_assert_close(ds, name):
    """
    This is a puzzle — I can't easily find the source of the warning. It
    requires `assert_allclose` to be run, for the `ds` param to be 2, and is
    different for `mean` and `max`. `sum` raises no warning.
    """
    ds = ds.chunk({"x": 4})
    rolling_obj = ds.rolling(time=4, x=3)
    actual = getattr(rolling_obj, name)()
    expected = getattr(getattr(ds.rolling(time=4), name)().rolling(x=3), name)()
    assert_allclose(actual, expected)
@pytest.mark.parametrize("dask", [True, False])
@pytest.mark.parametrize("edge_order", [1, 2])
def test_differentiate(dask, edge_order):
    """differentiate matches np.gradient along each dim; 2-D coords are rejected."""
    rs = np.random.RandomState(42)
    coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]
    da = xr.DataArray(
        rs.randn(8, 6),
        dims=["x", "y"],
        coords={"x": coord, "z": 3, "x2d": (("x", "y"), rs.randn(8, 6))},
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})
    ds = xr.Dataset({"var": da})
    # along x
    actual = da.differentiate("x", edge_order)
    expected_x = xr.DataArray(
        np.gradient(da, da["x"], axis=0, edge_order=edge_order),
        dims=da.dims,
        coords=da.coords,
    )
    assert_equal(expected_x, actual)
    assert_equal(
        ds["var"].differentiate("x", edge_order=edge_order),
        ds.differentiate("x", edge_order=edge_order)["var"],
    )
    # coordinate should not change
    assert_equal(da["x"], actual["x"])
    # along y
    actual = da.differentiate("y", edge_order)
    expected_y = xr.DataArray(
        np.gradient(da, da["y"], axis=1, edge_order=edge_order),
        dims=da.dims,
        coords=da.coords,
    )
    assert_equal(expected_y, actual)
    assert_equal(actual, ds.differentiate("y", edge_order=edge_order)["var"])
    assert_equal(
        ds["var"].differentiate("y", edge_order=edge_order),
        ds.differentiate("y", edge_order=edge_order)["var"],
    )
    # differentiating along a 2-D coordinate is invalid
    with pytest.raises(ValueError):
        da.differentiate("x2d")
@pytest.mark.parametrize("dask", [True, False])
def test_differentiate_datetime(dask):
    """differentiate over a datetime64 coord honors datetime_unit scaling."""
    rs = np.random.RandomState(42)
    coord = np.array(
        [
            "2004-07-13",
            "2006-01-13",
            "2010-08-13",
            "2010-09-13",
            "2010-10-11",
            "2010-12-13",
            "2011-02-13",
            "2012-08-13",
        ],
        dtype="datetime64",
    )
    da = xr.DataArray(
        rs.randn(8, 6),
        dims=["x", "y"],
        coords={"x": coord, "z": 3, "x2d": (("x", "y"), rs.randn(8, 6))},
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})
    # along x
    actual = da.differentiate("x", edge_order=1, datetime_unit="D")
    expected_x = xr.DataArray(
        np.gradient(
            da, da["x"].variable._to_numeric(datetime_unit="D"), axis=0, edge_order=1
        ),
        dims=da.dims,
        coords=da.coords,
    )
    assert_equal(expected_x, actual)
    # hours vs days differ by exactly the 24x unit factor
    actual2 = da.differentiate("x", edge_order=1, datetime_unit="h")
    assert np.allclose(actual, actual2 * 24)
    # for datetime variable
    actual = da["x"].differentiate("x", edge_order=1, datetime_unit="D")
    assert np.allclose(actual, 1.0)
    # with different date unit
    da = xr.DataArray(coord.astype("datetime64[ms]"), dims=["x"], coords={"x": coord})
    actual = da.differentiate("x", edge_order=1)
    assert np.allclose(actual, 1.0)
@pytest.mark.skipif(not has_cftime, reason="Test requires cftime.")
@pytest.mark.parametrize("dask", [True, False])
def test_differentiate_cftime(dask):
    """differentiate over a cftime coord matches np.gradient on numeric times."""
    rs = np.random.RandomState(42)
    coord = xr.cftime_range("2000", periods=8, freq="2M")
    da = xr.DataArray(
        rs.randn(8, 6),
        coords={"time": coord, "z": 3, "t2d": (("time", "y"), rs.randn(8, 6))},
        dims=["time", "y"],
    )
    if dask and has_dask:
        da = da.chunk({"time": 4})
    actual = da.differentiate("time", edge_order=1, datetime_unit="D")
    expected_data = np.gradient(
        da, da["time"].variable._to_numeric(datetime_unit="D"), axis=0, edge_order=1
    )
    expected = xr.DataArray(expected_data, coords=da.coords, dims=da.dims)
    assert_equal(expected, actual)
    # hours vs days differ by exactly the 24x unit factor
    actual2 = da.differentiate("time", edge_order=1, datetime_unit="h")
    assert_allclose(actual, actual2 * 24)
    # Test the differentiation of datetimes themselves
    actual = da["time"].differentiate("time", edge_order=1, datetime_unit="D")
    assert_allclose(actual, xr.ones_like(da["time"]).astype(float))
@pytest.mark.parametrize("dask", [True, False])
def test_integrate(dask):
    """integrate matches np.trapz, drops coords on the integrated dim, keeps laziness."""
    rs = np.random.RandomState(42)
    coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]
    da = xr.DataArray(
        rs.randn(8, 6),
        dims=["x", "y"],
        coords={
            "x": coord,
            "x2": (("x",), rs.randn(8)),
            "z": 3,
            "x2d": (("x", "y"), rs.randn(8, 6)),
        },
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})
    ds = xr.Dataset({"var": da})
    # along x
    actual = da.integrate("x")
    # coordinate that contains x should be dropped.
    expected_x = xr.DataArray(
        np.trapz(da.compute(), da["x"], axis=0),
        dims=["y"],
        coords={k: v for k, v in da.coords.items() if "x" not in v.dims},
    )
    assert_allclose(expected_x, actual.compute())
    assert_equal(ds["var"].integrate("x"), ds.integrate("x")["var"])
    # make sure result is also a dask array (if the source is dask array)
    assert isinstance(actual.data, type(da.data))
    # along y
    actual = da.integrate("y")
    expected_y = xr.DataArray(
        np.trapz(da, da["y"], axis=1),
        dims=["x"],
        coords={k: v for k, v in da.coords.items() if "y" not in v.dims},
    )
    assert_allclose(expected_y, actual.compute())
    assert_equal(actual, ds.integrate("y")["var"])
    assert_equal(ds["var"].integrate("y"), ds.integrate("y")["var"])
    # along x and y
    actual = da.integrate(("y", "x"))
    assert actual.ndim == 0
    # 2-D coordinate is invalid; dim= keyword is deprecated
    with pytest.raises(ValueError):
        da.integrate("x2d")
    with pytest.warns(FutureWarning):
        da.integrate(dim="x")
@requires_scipy
@pytest.mark.parametrize("dask", [True, False])
def test_cumulative_integrate(dask):
    """cumulative_integrate matches scipy cumtrapz and keeps all coords and laziness."""
    rs = np.random.RandomState(43)
    coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]
    da = xr.DataArray(
        rs.randn(8, 6),
        dims=["x", "y"],
        coords={
            "x": coord,
            "x2": (("x",), rs.randn(8)),
            "z": 3,
            "x2d": (("x", "y"), rs.randn(8, 6)),
        },
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})
    ds = xr.Dataset({"var": da})
    # along x
    actual = da.cumulative_integrate("x")
    # From scipy-1.6.0 cumtrapz is renamed to cumulative_trapezoid, but cumtrapz is
    # still provided for backward compatibility
    from scipy.integrate import cumtrapz
    expected_x = xr.DataArray(
        cumtrapz(da.compute(), da["x"], axis=0, initial=0.0),
        dims=["x", "y"],
        coords=da.coords,
    )
    assert_allclose(expected_x, actual.compute())
    assert_equal(
        ds["var"].cumulative_integrate("x"),
        ds.cumulative_integrate("x")["var"],
    )
    # make sure result is also a dask array (if the source is dask array)
    assert isinstance(actual.data, type(da.data))
    # along y
    actual = da.cumulative_integrate("y")
    expected_y = xr.DataArray(
        cumtrapz(da, da["y"], axis=1, initial=0.0),
        dims=["x", "y"],
        coords=da.coords,
    )
    assert_allclose(expected_y, actual.compute())
    assert_equal(actual, ds.cumulative_integrate("y")["var"])
    assert_equal(
        ds["var"].cumulative_integrate("y"),
        ds.cumulative_integrate("y")["var"],
    )
    # along x and y
    actual = da.cumulative_integrate(("y", "x"))
    assert actual.ndim == 2
    # 2-D coordinate is invalid
    with pytest.raises(ValueError):
        da.cumulative_integrate("x2d")
@pytest.mark.parametrize("dask", [True, False])
@pytest.mark.parametrize("which_datetime", ["np", "cftime"])
def test_trapz_datetime(dask, which_datetime):
    """integrate over datetime64/cftime coords matches np.trapz on numeric times."""
    rs = np.random.RandomState(42)
    if which_datetime == "np":
        coord = np.array(
            [
                "2004-07-13",
                "2006-01-13",
                "2010-08-13",
                "2010-09-13",
                "2010-10-11",
                "2010-12-13",
                "2011-02-13",
                "2012-08-13",
            ],
            dtype="datetime64",
        )
    else:
        if not has_cftime:
            pytest.skip("Test requires cftime.")
        coord = xr.cftime_range("2000", periods=8, freq="2D")
    da = xr.DataArray(
        rs.randn(8, 6),
        coords={"time": coord, "z": 3, "t2d": (("time", "y"), rs.randn(8, 6))},
        dims=["time", "y"],
    )
    if dask and has_dask:
        da = da.chunk({"time": 4})
    actual = da.integrate("time", datetime_unit="D")
    expected_data = np.trapz(
        da.data,
        duck_array_ops.datetime_to_numeric(da["time"].data, datetime_unit="D"),
        axis=0,
    )
    expected = xr.DataArray(
        expected_data,
        dims=["y"],
        coords={k: v for k, v in da.coords.items() if "time" not in v.dims},
    )
    assert_allclose(expected, actual.compute())
    # make sure result is also a dask array (if the source is dask array)
    assert isinstance(actual.data, type(da.data))
    # integrating in hours scales the result by the 24x unit factor
    actual2 = da.integrate("time", datetime_unit="h")
    assert_allclose(actual, actual2 / 24.0)
def test_no_dict():
    """Dataset uses __slots__, so instances must not expose a ``__dict__``."""
    dataset = Dataset()
    with pytest.raises(AttributeError):
        getattr(dataset, "__dict__")
def test_subclass_slots():
    """Test that Dataset subclasses must explicitly define ``__slots__``.
    .. note::
        As of 0.13.0, this is actually mitigated into a FutureWarning for any class
        defined outside of the xarray package.
    """
    with pytest.raises(AttributeError) as e:
        class MyDS(Dataset):
            pass
    # the error message must name the offending subclass
    assert str(e.value) == "MyDS must explicitly define __slots__"
def test_weakref():
    """Dataset lists ``__weakref__`` among its slots, so weak references work."""
    import weakref

    dataset = Dataset()
    weak = weakref.ref(dataset)
    assert weak() is dataset
def test_deepcopy_obj_array():
    """deepcopy must clone elements of an object-dtype array, not share them."""
    original = Dataset({"foo": DataArray(np.array([object()]))})
    duplicate = deepcopy(original)
    assert duplicate["foo"].values[0] is not original["foo"].values[0]
def test_clip(ds):
    """Dataset.clip bounds values by scalars and by broadcastable Datasets."""
    lower_only = ds.clip(min=0.5)
    assert lower_only.min(...) >= 0.5

    upper_only = ds.clip(max=0.5)
    assert upper_only.max(...) <= 0.5

    both = ds.clip(min=0.25, max=0.75)
    assert both.min(...) >= 0.25
    assert both.max(...) <= 0.75

    # clipping against a Dataset must broadcast without changing dims
    mean = ds.mean("y")
    pinned = ds.clip(min=mean, max=mean)
    assert pinned.dims == ds.dims
import pickle
import sys
import warnings
from copy import copy, deepcopy
from io import StringIO
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
from pandas.core.computation.ops import UndefinedVariableError
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.tseries.frequencies import to_offset
import xarray as xr
from xarray import (
DataArray,
Dataset,
IndexVariable,
MergeError,
Variable,
align,
backends,
broadcast,
open_dataset,
set_options,
)
from xarray.coding.cftimeindex import CFTimeIndex
from xarray.core import dtypes, indexing, utils
from xarray.core.common import duck_array_ops, full_like
from xarray.core.indexes import Index
from xarray.core.pycompat import integer_types
from xarray.core.utils import is_scalar
from . import (
InaccessibleArray,
UnexpectedDataAccess,
assert_allclose,
assert_array_equal,
assert_equal,
assert_identical,
has_cftime,
has_dask,
requires_bottleneck,
requires_cftime,
requires_dask,
requires_numbagg,
requires_numexpr,
requires_scipy,
requires_sparse,
source_ndarray,
)
try:
import dask.array as da
except ImportError:
pass
pytestmark = [
pytest.mark.filterwarnings("error:Mean of empty slice"),
pytest.mark.filterwarnings("error:All-NaN (slice|axis) encountered"),
]
def create_test_data(seed=None, add_attrs=True):
    """Build the standard Dataset fixture used throughout these tests.

    Three random float variables over dims dim1(8)/dim2(9)/dim3(10), plus
    dim2/dim3/time index coordinates and a non-index "numbers" coordinate.
    """
    rs = np.random.RandomState(seed)
    _vars = {
        "var1": ["dim1", "dim2"],
        "var2": ["dim1", "dim2"],
        "var3": ["dim3", "dim1"],
    }
    _dims = {"dim1": 8, "dim2": 9, "dim3": 10}
    obj = Dataset()
    obj["dim2"] = ("dim2", 0.5 * np.arange(_dims["dim2"]))
    obj["dim3"] = ("dim3", list("abcdefghij"))
    obj["time"] = ("time", pd.date_range("2000-01-01", periods=20))
    for v, dims in sorted(_vars.items()):
        data = rs.normal(size=tuple(_dims[d] for d in dims))
        obj[v] = (dims, data)
        if add_attrs:
            obj[v].attrs = {"foo": "variable"}
    obj.coords["numbers"] = (
        "dim3",
        np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3], dtype="int64"),
    )
    obj.encoding = {"foo": "bar"}
    # NOTE(review): the comprehension variable shadows the outer `obj`
    assert all(obj.data.flags.writeable for obj in obj.variables.values())
    return obj
def create_append_test_data(seed=None):
    """Return ``(ds, ds_to_append, ds_with_new_var)`` fixtures for append tests.

    The first two datasets share the same variables over disjoint time ranges;
    the third introduces a new variable spanning the combined time range.
    """
    rs = np.random.RandomState(seed)
    lat = [2, 1, 0]
    lon = [0, 1, 2]
    nt1 = 3
    nt2 = 2
    time1 = pd.date_range("2000-01-01", periods=nt1)
    time2 = pd.date_range("2000-02-01", periods=nt2)
    string_var = np.array(["ae", "bc", "df"], dtype=object)
    string_var_to_append = np.array(["asdf", "asdfg"], dtype=object)
    unicode_var = ["áó", "áó", "áó"]
    datetime_var = np.array(
        ["2019-01-01", "2019-01-02", "2019-01-03"], dtype="datetime64[s]"
    )
    datetime_var_to_append = np.array(
        ["2019-01-04", "2019-01-05"], dtype="datetime64[s]"
    )
    bool_var = np.array([True, False, True], dtype=bool)
    bool_var_to_append = np.array([False, True], dtype=bool)
    ds = xr.Dataset(
        data_vars={
            "da": xr.DataArray(
                rs.rand(3, 3, nt1),
                coords=[lat, lon, time1],
                dims=["lat", "lon", "time"],
            ),
            "string_var": xr.DataArray(string_var, coords=[time1], dims=["time"]),
            "unicode_var": xr.DataArray(
                unicode_var, coords=[time1], dims=["time"]
            ).astype(np.unicode_),
            "datetime_var": xr.DataArray(datetime_var, coords=[time1], dims=["time"]),
            "bool_var": xr.DataArray(bool_var, coords=[time1], dims=["time"]),
        }
    )
    ds_to_append = xr.Dataset(
        data_vars={
            "da": xr.DataArray(
                rs.rand(3, 3, nt2),
                coords=[lat, lon, time2],
                dims=["lat", "lon", "time"],
            ),
            "string_var": xr.DataArray(
                string_var_to_append, coords=[time2], dims=["time"]
            ),
            "unicode_var": xr.DataArray(
                unicode_var[:nt2], coords=[time2], dims=["time"]
            ).astype(np.unicode_),
            "datetime_var": xr.DataArray(
                datetime_var_to_append, coords=[time2], dims=["time"]
            ),
            "bool_var": xr.DataArray(bool_var_to_append, coords=[time2], dims=["time"]),
        }
    )
    ds_with_new_var = xr.Dataset(
        data_vars={
            "new_var": xr.DataArray(
                rs.rand(3, 3, nt1 + nt2),
                coords=[lat, lon, time1.append(time2)],
                dims=["lat", "lon", "time"],
            )
        }
    )
    # source arrays must remain writeable; appending should never lock them
    assert all(objp.data.flags.writeable for objp in ds.variables.values())
    assert all(objp.data.flags.writeable for objp in ds_to_append.variables.values())
    return ds, ds_to_append, ds_with_new_var
def create_test_multiindex():
    """Return an empty Dataset whose "x" coordinate wraps a 2-level MultiIndex."""
    levels = [["a", "b"], [1, 2]]
    midx = pd.MultiIndex.from_product(levels, names=("level_1", "level_2"))
    return Dataset({}, {"x": midx})
def create_test_stacked_array():
    """Return two broadcast DataArrays, x*y and x*y**2, over 10x20 index coords."""
    xs = DataArray(pd.Index(np.r_[:10], name="x"))
    ys = DataArray(pd.Index(np.r_[:20], name="y"))
    product = xs * ys
    return product, product * ys
class InaccessibleVariableDataStore(backends.InMemoryDataStore):
    """In-memory store whose non-index variable data raises on access.

    Used to verify that dataset operations stay lazy and never load data.
    """
    def __init__(self):
        super().__init__()
        self._indexvars = set()
    def store(self, variables, *args, **kwargs):
        """Store variables, remembering which names are index variables."""
        super().store(variables, *args, **kwargs)
        for k, v in variables.items():
            if isinstance(v, IndexVariable):
                self._indexvars.add(k)
    def get_variables(self):
        """Return variables; non-index data is wrapped in InaccessibleArray."""
        def lazy_inaccessible(k, v):
            # index variables must stay readable so alignment works
            if k in self._indexvars:
                return v
            data = indexing.LazilyIndexedArray(InaccessibleArray(v.values))
            return Variable(v.dims, data, v.attrs)
        return {k: lazy_inaccessible(k, v) for k, v in self._variables.items()}
class TestDataset:
    def test_repr(self):
        """Exact repr of a populated, an empty, and a scalar-only Dataset."""
        data = create_test_data(seed=123)
        data.attrs["foo"] = "bar"
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (dim2: 9, dim3: 10, time: 20, dim1: 8)
            Coordinates:
            * dim2 (dim2) float64 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0
            * dim3 (dim3) %s 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
            * time (time) datetime64[ns] 2000-01-01 2000-01-02 ... 2000-01-20
            numbers (dim3) int64 0 1 2 0 0 1 1 2 2 3
            Dimensions without coordinates: dim1
            Data variables:
            var1 (dim1, dim2) float64 -1.086 0.9973 0.283 ... 0.1995 0.4684 -0.8312
            var2 (dim1, dim2) float64 1.162 -1.097 -2.123 ... 0.1302 1.267 0.3328
            var3 (dim3, dim1) float64 0.5565 -0.2121 0.4563 ... -0.2452 -0.3616
            Attributes:
            foo: bar"""
            % data["dim3"].dtype
        )
        actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
        print(actual)
        assert expected == actual
        with set_options(display_width=100):
            max_len = max(map(len, repr(data).split("\n")))
            assert 90 < max_len < 100
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: ()
            Data variables:
            *empty*"""
        )
        actual = "\n".join(x.rstrip() for x in repr(Dataset()).split("\n"))
        print(actual)
        assert expected == actual
        data = Dataset({"foo": ("x", np.ones(10))}).mean()
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: ()
            Data variables:
            foo float64 1.0"""
        )
        actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
        print(actual)
        assert expected == actual
        # verify long attributes are truncated
        data = Dataset(attrs={"foo": "bar" * 1000})
        assert len(repr(data)) < 1000
    def test_repr_multiindex(self):
        """Repr of a MultiIndex coordinate, including long level names."""
        data = create_test_multiindex()
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (x: 4)
            Coordinates:
            * x (x) MultiIndex
            - level_1 (x) object 'a' 'a' 'b' 'b'
            - level_2 (x) int64 1 2 1 2
            Data variables:
            *empty*"""
        )
        actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
        print(actual)
        assert expected == actual
        # verify that long level names are not truncated
        mindex = pd.MultiIndex.from_product(
            [["a", "b"], [1, 2]], names=("a_quite_long_level_name", "level_2")
        )
        data = Dataset({}, {"x": mindex})
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (x: 4)
            Coordinates:
            * x (x) MultiIndex
            - a_quite_long_level_name (x) object 'a' 'a' 'b' 'b'
            - level_2 (x) int64 1 2 1 2
            Data variables:
            *empty*"""
        )
        actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
        print(actual)
        assert expected == actual
def test_repr_period_index(self):
data = create_test_data(seed=456)
data.coords["time"] = pd.period_range("2000-01-01", periods=20, freq="B")
# check that creating the repr doesn't raise an error
repr(data)
    def test_unicode_data(self):
        """str/repr of datasets with non-ASCII names, values, and attributes."""
        data = Dataset({"foø": ["ba®"]}, attrs={"å": "∑"})
        repr(data)
        # the dtype string encodes the native byte order
        byteorder = "<" if sys.byteorder == "little" else ">"
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (foø: 1)
            Coordinates:
            * foø (foø) %cU3 %r
            Data variables:
            *empty*
            Attributes:
            å: ∑"""
            % (byteorder, "ba®")
        )
        actual = str(data)
        assert expected == actual
    def test_repr_nep18(self):
        """Repr of a duck array implementing only the NEP-18 protocol."""
        class Array:
            def __init__(self):
                self.shape = (2,)
                self.dtype = np.dtype(np.float64)
            def __array_function__(self, *args, **kwargs):
                pass
            def __repr__(self):
                return "Custom\nArray"
        dataset = Dataset({"foo": ("x", Array())})
        expected = dedent(
            """\
            <xarray.Dataset>
            Dimensions: (x: 2)
            Dimensions without coordinates: x
            Data variables:
            foo (x) float64 Custom Array"""
        )
        assert expected == repr(dataset)
    def test_info(self):
        """Dataset.info writes a netCDF-style ncdump summary to the buffer."""
        ds = create_test_data(seed=123)
        ds = ds.drop_vars("dim3")
        ds.attrs["unicode_attr"] = "ba®"
        ds.attrs["string_attr"] = "bar"
        buf = StringIO()
        ds.info(buf=buf)
        expected = dedent(
            """\
            xarray.Dataset {
            dimensions:
            \tdim2 = 9 ;
            \ttime = 20 ;
            \tdim1 = 8 ;
            \tdim3 = 10 ;
            variables:
            \tfloat64 dim2(dim2) ;
            \tdatetime64[ns] time(time) ;
            \tfloat64 var1(dim1, dim2) ;
            \t\tvar1:foo = variable ;
            \tfloat64 var2(dim1, dim2) ;
            \t\tvar2:foo = variable ;
            \tfloat64 var3(dim3, dim1) ;
            \t\tvar3:foo = variable ;
            \tint64 numbers(dim3) ;
            // global attributes:
            \t:unicode_attr = ba® ;
            \t:string_attr = bar ;
            }"""
        )
        actual = buf.getvalue()
        assert expected == actual
        buf.close()
    def test_constructor(self):
        """Basic constructor validation: conflicting sizes, bad tuples, scalars."""
        x1 = ("x", 2 * np.arange(100))
        x2 = ("x", np.arange(1000))
        z = (["x", "y"], np.arange(1000).reshape(100, 10))
        with pytest.raises(ValueError, match=r"conflicting sizes"):
            Dataset({"a": x1, "b": x2})
        with pytest.raises(ValueError, match=r"disallows such variables"):
            Dataset({"a": x1, "x": z})
        with pytest.raises(TypeError, match=r"tuple of form"):
            Dataset({"x": (1, 2, 3, 4, 5, 6, 7)})
        with pytest.raises(ValueError, match=r"already exists as a scalar"):
            Dataset({"x": 0, "y": ("x", [1, 2, 3])})
        # verify handling of DataArrays: coordinates come along for the ride
        expected = Dataset({"x": x1, "z": z})
        actual = Dataset({"z": expected["z"]})
        assert_identical(expected, actual)
def test_constructor_invalid_dims(self):
with pytest.raises(MergeError):
Dataset(
data_vars=dict(v=("y", [1, 2, 3, 4])),
coords=dict(y=DataArray([0.1, 0.2, 0.3, 0.4], dims="x")),
)
def test_constructor_1d(self):
expected = Dataset({"x": (["x"], 5.0 + np.arange(5))})
actual = Dataset({"x": 5.0 + np.arange(5)})
assert_identical(expected, actual)
actual = Dataset({"x": [5, 6, 7, 8, 9]})
assert_identical(expected, actual)
    def test_constructor_0d(self):
        """Scalars of many types all become 0-d variables."""
        expected = Dataset({"x": ([], 1)})
        for arg in [1, np.array(1), expected["x"]]:
            actual = Dataset({"x": arg})
            assert_identical(expected, actual)
        class Arbitrary:
            pass
        d = pd.Timestamp("2000-01-01T12")
        # scalar-like inputs, including arbitrary objects, wrap to 0-d
        args = [
            True,
            None,
            3.4,
            np.nan,
            "hello",
            b"raw",
            np.datetime64("2000-01-01"),
            d,
            d.to_pydatetime(),
            Arbitrary(),
        ]
        for arg in args:
            print(arg)
            expected = Dataset({"x": ([], arg)})
            actual = Dataset({"x": arg})
            assert_identical(expected, actual)
def test_constructor_deprecated(self):
with pytest.raises(ValueError, match=r"DataArray dimensions"):
DataArray([1, 2, 3], coords={"x": [0, 1, 2]})
    def test_constructor_auto_align(self):
        """The constructor outer-aligns DataArray inputs along shared indexes."""
        a = DataArray([1, 2], [("x", [0, 1])])
        b = DataArray([3, 4], [("x", [1, 2])])
        # overlapping indexes produce NaN where a variable has no value
        expected = Dataset(
            {"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]}
        )
        actual = Dataset({"a": a, "b": b})
        assert_identical(expected, actual)
        assert isinstance(actual.variables["x"], IndexVariable)
        # length-2 "y" does not conflict with the aligned x of length 3
        c = ("y", [3, 4])
        expected2 = expected.merge({"c": c})
        actual = Dataset({"a": a, "b": b, "c": c})
        assert_identical(expected2, actual)
        # raw tuples on "x" must match the aligned length exactly
        d = ("x", [3, 2, 1])
        expected3 = expected.merge({"d": d})
        actual = Dataset({"a": a, "b": b, "d": d})
        assert_identical(expected3, actual)
        e = ("x", [0, 0])
        with pytest.raises(ValueError, match=r"conflicting sizes"):
            Dataset({"a": a, "b": b, "e": e})
    def test_constructor_pandas_sequence(self):
        """Round-trip through pandas objects, including a reindexed one."""
        ds = self.make_example_math_dataset()
        pandas_objs = {
            var_name: ds[var_name].to_pandas() for var_name in ["foo", "bar"]
        }
        ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)
        del ds_based_on_pandas["x"]
        assert_equal(ds, ds_based_on_pandas)
        # reindex pandas obj, check align works — order must not matter
        rearranged_index = reversed(pandas_objs["foo"].index)
        pandas_objs["foo"] = pandas_objs["foo"].reindex(rearranged_index)
        ds_based_on_pandas = Dataset(pandas_objs, ds.coords, attrs=ds.attrs)
        del ds_based_on_pandas["x"]
        assert_equal(ds, ds_based_on_pandas)
def test_constructor_pandas_single(self):
das = [
DataArray(np.random.rand(4), dims=["a"]),
DataArray(np.random.rand(4, 3), dims=["a", "b"]),
]
for a in das:
pandas_obj = a.to_pandas()
ds_based_on_pandas = Dataset(pandas_obj)
for dim in ds_based_on_pandas.data_vars:
assert_array_equal(ds_based_on_pandas[dim], pandas_obj[dim])
    def test_constructor_compat(self):
        """Compatible duplicate coords merge; incompatible ones raise MergeError."""
        data = {"x": DataArray(0, coords={"y": 1}), "y": ("z", [1, 1, 1])}
        expected = Dataset({"x": 0}, {"y": ("z", [1, 1, 1])})
        actual = Dataset(data)
        assert_identical(expected, actual)
        # insertion order must not matter
        data = {"y": ("z", [1, 1, 1]), "x": DataArray(0, coords={"y": 1})}
        actual = Dataset(data)
        assert_identical(expected, actual)
        original = Dataset(
            {"a": (("x", "y"), np.ones((2, 3)))},
            {"c": (("x", "y"), np.zeros((2, 3))), "x": [0, 1]},
        )
        expected = Dataset(
            {"a": ("x", np.ones(2)), "b": ("y", np.ones(3))},
            {"c": (("x", "y"), np.zeros((2, 3))), "x": [0, 1]},
        )
        actual = Dataset(
            {"a": original["a"][:, 0], "b": original["a"][0].drop_vars("x")}
        )
        assert_identical(expected, actual)
        # conflicting scalar coordinate values cannot be merged
        data = {"x": DataArray(0, coords={"y": 3}), "y": ("z", [1, 1, 1])}
        with pytest.raises(MergeError):
            Dataset(data)
        # extra coordinates are silently dropped when an explicit "y" is given
        data = {"x": DataArray(0, coords={"y": 1}), "y": [1, 1]}
        actual = Dataset(data)
        expected = Dataset({"x": 0}, {"y": [1, 1]})
        assert_identical(expected, actual)
    def test_constructor_with_coords(self):
        """coords must not duplicate data_vars; MultiIndex conflicts are rejected."""
        with pytest.raises(ValueError, match=r"found in both data_vars and"):
            Dataset({"a": ("x", [1])}, {"a": ("x", [1])})
        ds = Dataset({}, {"a": ("x", [1])})
        assert not ds.data_vars
        assert list(ds.coords.keys()) == ["a"]
        mindex = pd.MultiIndex.from_product(
            [["a", "b"], [1, 2]], names=("level_1", "level_2")
        )
        with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
            Dataset({}, {"x": mindex, "y": mindex})
        # a level name alone does not conflict with the MultiIndex itself
        Dataset({}, {"x": mindex, "level_1": range(4)})
    def test_properties(self):
        """Smoke-test dims/sizes, data_vars, indexes, coords, and nbytes."""
        ds = create_test_data()
        ds.dims
        assert ds.dims == {"dim1": 8, "dim2": 9, "dim3": 10, "time": 20}
        assert ds.sizes == ds.dims
        # dims is an immutable Frozen wrapper around a plain dict
        assert isinstance(ds.dims, utils.Frozen)
        assert isinstance(ds.dims.mapping, dict)
        assert type(ds.dims.mapping) is dict
        assert list(ds) == list(ds.data_vars)
        assert list(ds.keys()) == list(ds.data_vars)
        assert "aasldfjalskdfj" not in ds.variables
        assert "dim1" in repr(ds.variables)
        assert len(ds) == 3
        assert bool(ds)
        assert list(ds.data_vars) == ["var1", "var2", "var3"]
        assert list(ds.data_vars.keys()) == ["var1", "var2", "var3"]
        assert "var1" in ds.data_vars
        assert "dim1" not in ds.data_vars
        assert "numbers" not in ds.data_vars
        assert len(ds.data_vars) == 3
        assert set(ds.xindexes) == {"dim2", "dim3", "time"}
        assert len(ds.xindexes) == 3
        assert "dim2" in repr(ds.xindexes)
        assert all([isinstance(idx, Index) for idx in ds.xindexes.values()])
        assert set(ds.indexes) == {"dim2", "dim3", "time"}
        assert len(ds.indexes) == 3
        assert "dim2" in repr(ds.indexes)
        assert all([isinstance(idx, pd.Index) for idx in ds.indexes.values()])
        assert list(ds.coords) == ["dim2", "dim3", "time", "numbers"]
        assert "dim2" in ds.coords
        assert "numbers" in ds.coords
        assert "var1" not in ds.coords
        assert "dim1" not in ds.coords
        assert len(ds.coords) == 4
        # one int64 scalar + two float32 values = 8 + 2*4 bytes
        assert Dataset({"x": np.int64(1), "y": np.float32([1, 2])}).nbytes == 16
def test_asarray(self):
ds = Dataset({"x": 0})
with pytest.raises(TypeError, match=r"cannot directly convert"):
np.asarray(ds)
def test_get_index(self):
ds = Dataset({"foo": (("x", "y"), np.zeros((2, 3)))}, coords={"x": ["a", "b"]})
assert ds.get_index("x").equals(pd.Index(["a", "b"]))
assert ds.get_index("y").equals(pd.Index([0, 1, 2]))
with pytest.raises(KeyError):
ds.get_index("z")
    def test_attr_access(self):
        """Attribute-style access reaches variables and attrs; dir() lists them."""
        ds = Dataset(
            {"tmin": ("x", [42], {"units": "Celcius"})}, attrs={"title": "My test data"}
        )
        assert_identical(ds.tmin, ds["tmin"])
        assert_identical(ds.tmin.x, ds.x)
        assert ds.title == ds.attrs["title"]
        assert ds.tmin.units == ds["tmin"].attrs["units"]
        assert {"tmin", "title"} <= set(dir(ds))
        assert "units" in set(dir(ds.tmin))
        # attrs with the same name as a variable do not shadow the variable
        ds.attrs["tmin"] = -999
        assert ds.attrs["tmin"] == -999
        assert_identical(ds.tmin, ds["tmin"])
    def test_variable(self):
        """Tuple assignment creates variables; mismatched shapes raise."""
        a = Dataset()
        d = np.random.random((10, 3))
        a["foo"] = (("time", "x"), d)
        assert "foo" in a.variables
        assert "foo" in a
        a["bar"] = (("time", "x"), d)
        # insertion order is preserved
        assert list(a.variables) == ["foo", "bar"]
        assert_array_equal(a["foo"].values, d)
        # transposed data no longer matches the declared dims
        with pytest.raises(ValueError):
            a["qux"] = (("time", "x"), d.T)
    def test_modify_inplace(self):
        """In-place variable assignment: coords, redefinition, size conflicts."""
        a = Dataset()
        vec = np.random.random((10,))
        attributes = {"foo": "bar"}
        a["x"] = ("x", vec, attributes)
        assert "x" in a.coords
        assert isinstance(a.coords["x"].to_index(), pd.Index)
        assert_identical(a.coords["x"].variable, a.variables["x"])
        b = Dataset()
        b["x"] = ("x", vec, attributes)
        assert_identical(a["x"], b["x"])
        assert a.dims == b.dims
        # this should work
        a["x"] = ("x", vec[:5])
        a["z"] = ("x", np.arange(5))
        with pytest.raises(ValueError):
            # now it shouldn't, since there is a conflicting length
            a["x"] = ("x", vec[:4])
        arr = np.random.random((10, 1))
        scal = np.array(0)
        with pytest.raises(ValueError):
            a["y"] = ("y", arr)
        with pytest.raises(ValueError):
            a["y"] = ("y", scal)
        # failed assignments must not leave a partial dimension behind
        assert "y" not in a.dims
    def test_coords_properties(self):
        """Membership, lookup, repr, and dims of the .coords mapping."""
        data = Dataset(
            {
                "x": ("x", np.array([-1, -2], "int64")),
                "y": ("y", np.array([0, 1, 2], "int64")),
                "foo": (["x", "y"], np.random.randn(2, 3)),
            },
            {"a": ("x", np.array([4, 5], "int64")), "b": np.int64(-10)},
        )
        assert 4 == len(data.coords)
        assert ["x", "y", "a", "b"] == list(data.coords)
        assert_identical(data.coords["x"].variable, data["x"].variable)
        assert_identical(data.coords["y"].variable, data["y"].variable)
        assert "x" in data.coords
        assert "a" in data.coords
        assert 0 not in data.coords
        assert "foo" not in data.coords
        with pytest.raises(KeyError):
            data.coords["foo"]
        with pytest.raises(KeyError):
            data.coords[0]
        expected = dedent(
            """\
            Coordinates:
            * x (x) int64 -1 -2
            * y (y) int64 0 1 2
            a (x) int64 4 5
            b int64 -10"""
        )
        actual = repr(data.coords)
        assert expected == actual
        # coords.dims excludes dims belonging only to data variables
        assert {"x": 2, "y": 3} == data.coords.dims
    def test_coords_modify(self):
        """Mutating .coords: assignment, deletion, update, and size conflicts."""
        data = Dataset(
            {
                "x": ("x", [-1, -2]),
                "y": ("y", [0, 1, 2]),
                "foo": (["x", "y"], np.random.randn(2, 3)),
            },
            {"a": ("x", [4, 5]), "b": -10},
        )
        actual = data.copy(deep=True)
        actual.coords["x"] = ("x", ["a", "b"])
        assert_array_equal(actual["x"], ["a", "b"])
        actual = data.copy(deep=True)
        actual.coords["z"] = ("z", ["a", "b"])
        assert_array_equal(actual["z"], ["a", "b"])
        actual = data.copy(deep=True)
        with pytest.raises(ValueError, match=r"conflicting sizes"):
            actual.coords["x"] = ("x", [-1])
        # a failed assignment must leave the dataset untouched
        assert_identical(actual, data)
        actual = data.copy()
        del actual.coords["b"]
        expected = data.reset_coords("b", drop=True)
        assert_identical(expected, actual)
        with pytest.raises(KeyError):
            del data.coords["not_found"]
        with pytest.raises(KeyError):
            del data.coords["foo"]
        actual = data.copy(deep=True)
        actual.coords.update({"c": 11})
        expected = data.merge({"c": 11}).set_coords("c")
        assert_identical(expected, actual)
        # regression test for GH3746: deleting a coord also drops its index
        del actual.coords["x"]
        assert "x" not in actual.xindexes
def test_update_index(self):
actual = Dataset(coords={"x": [1, 2, 3]})
actual["x"] = ["a", "b", "c"]
assert actual.xindexes["x"].equals(pd.Index(["a", "b", "c"]))
def test_coords_setitem_with_new_dimension(self):
actual = Dataset()
actual.coords["foo"] = ("x", [1, 2, 3])
expected = Dataset(coords={"foo": ("x", [1, 2, 3])})
assert_identical(expected, actual)
def test_coords_setitem_multiindex(self):
data = create_test_multiindex()
with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
data.coords["level_1"] = range(4)
    def test_coords_set(self):
        """set_coords / reset_coords round trips between vars and coords."""
        one_coord = Dataset({"x": ("x", [0]), "yy": ("x", [1]), "zzz": ("x", [2])})
        two_coords = Dataset({"zzz": ("x", [2])}, {"x": ("x", [0]), "yy": ("x", [1])})
        all_coords = Dataset(
            coords={"x": ("x", [0]), "yy": ("x", [1]), "zzz": ("x", [2])}
        )
        actual = one_coord.set_coords("x")
        assert_identical(one_coord, actual)
        actual = one_coord.set_coords(["x"])
        assert_identical(one_coord, actual)
        actual = one_coord.set_coords("yy")
        assert_identical(two_coords, actual)
        actual = one_coord.set_coords(["yy", "zzz"])
        assert_identical(all_coords, actual)
        actual = one_coord.reset_coords()
        assert_identical(one_coord, actual)
        actual = two_coords.reset_coords()
        assert_identical(one_coord, actual)
        actual = all_coords.reset_coords()
        assert_identical(one_coord, actual)
        actual = all_coords.reset_coords(["yy", "zzz"])
        assert_identical(one_coord, actual)
        actual = all_coords.reset_coords("zzz")
        assert_identical(two_coords, actual)
        # index coordinates cannot be demoted to data variables
        with pytest.raises(ValueError, match=r"cannot remove index"):
            one_coord.reset_coords("x")
        actual = all_coords.reset_coords("zzz", drop=True)
        expected = all_coords.drop_vars("zzz")
        assert_identical(expected, actual)
        expected = two_coords.drop_vars("zzz")
        assert_identical(expected, actual)
def test_coords_to_dataset(self):
orig = Dataset({"foo": ("y", [-1, 0, 1])}, {"x": 10, "y": [2, 3, 4]})
expected = Dataset(coords={"x": 10, "y": [2, 3, 4]})
actual = orig.coords.to_dataset()
assert_identical(expected, actual)
    def test_coords_merge(self):
        """Coordinates.merge: symmetric union, index conflicts, NaN handling."""
        orig_coords = Dataset(coords={"a": ("x", [1, 2]), "x": [0, 1]}).coords
        other_coords = Dataset(coords={"b": ("x", ["a", "b"]), "x": [0, 1]}).coords
        expected = Dataset(
            coords={"a": ("x", [1, 2]), "b": ("x", ["a", "b"]), "x": [0, 1]}
        )
        actual = orig_coords.merge(other_coords)
        assert_identical(expected, actual)
        # merge is symmetric when there are no conflicts
        actual = other_coords.merge(orig_coords)
        assert_identical(expected, actual)
        # conflicting index coordinates always raise, regardless of length
        other_coords = Dataset(coords={"x": ("x", ["a"])}).coords
        with pytest.raises(MergeError):
            orig_coords.merge(other_coords)
        other_coords = Dataset(coords={"x": ("x", ["a", "b"])}).coords
        with pytest.raises(MergeError):
            orig_coords.merge(other_coords)
        other_coords = Dataset(coords={"x": ("x", ["a", "b", "c"])}).coords
        with pytest.raises(MergeError):
            orig_coords.merge(other_coords)
        # conflicting non-index coordinates are dropped from the result
        other_coords = Dataset(coords={"a": ("x", [8, 9])}).coords
        expected = Dataset(coords={"x": range(2)})
        actual = orig_coords.merge(other_coords)
        assert_identical(expected, actual)
        actual = other_coords.merge(orig_coords)
        assert_identical(expected, actual)
        # a NaN scalar coordinate merges away harmlessly
        other_coords = Dataset(coords={"x": np.nan}).coords
        actual = orig_coords.merge(other_coords)
        assert_identical(orig_coords.to_dataset(), actual)
        actual = other_coords.merge(orig_coords)
        assert_identical(orig_coords.to_dataset(), actual)
    def test_coords_merge_mismatched_shape(self):
        """Merging coords of different shapes broadcasts or keeps the original."""
        orig_coords = Dataset(coords={"a": ("x", [1, 1])}).coords
        other_coords = Dataset(coords={"a": 1}).coords
        expected = orig_coords.to_dataset()
        actual = orig_coords.merge(other_coords)
        assert_identical(expected, actual)
        # coords on different dims broadcast against each other
        other_coords = Dataset(coords={"a": ("y", [1])}).coords
        expected = Dataset(coords={"a": (["x", "y"], [[1], [1]])})
        actual = orig_coords.merge(other_coords)
        assert_identical(expected, actual)
        actual = other_coords.merge(orig_coords)
        assert_identical(expected.transpose(), actual)
        # NaN values compare equal for merging purposes
        orig_coords = Dataset(coords={"a": ("x", [np.nan])}).coords
        other_coords = Dataset(coords={"a": np.nan}).coords
        expected = orig_coords.to_dataset()
        actual = orig_coords.merge(other_coords)
        assert_identical(expected, actual)
    def test_data_vars_properties(self):
        """Membership, lookup, and repr of the .data_vars mapping."""
        ds = Dataset()
        ds["foo"] = (("x",), [1.0])
        ds["bar"] = 2.0
        assert set(ds.data_vars) == {"foo", "bar"}
        assert "foo" in ds.data_vars
        assert "x" not in ds.data_vars
        assert_identical(ds["foo"], ds.data_vars["foo"])
        expected = dedent(
            """\
            Data variables:
            foo (x) float64 1.0
            bar float64 2.0"""
        )
        actual = repr(ds.data_vars)
        assert expected == actual
    def test_equals_and_identical(self):
        """equals ignores attrs; identical does not; missing coords break both."""
        data = create_test_data(seed=42)
        assert data.equals(data)
        assert data.identical(data)
        data2 = create_test_data(seed=42)
        data2.attrs["foobar"] = "baz"
        # attrs differ: still equal, no longer identical
        assert data.equals(data2)
        assert not data.identical(data2)
        del data2["time"]
        assert not data.equals(data2)
        # a variable renamed to a non-string name still compares to itself
        data = create_test_data(seed=42).rename({"var1": None})
        assert data.equals(data)
        assert data.identical(data)
        data2 = data.reset_coords()
        assert not data2.equals(data)
        assert not data2.identical(data)
def test_equals_failures(self):
data = create_test_data()
assert not data.equals("foo")
assert not data.identical(123)
assert not data.broadcast_equals({1: 2})
def test_broadcast_equals(self):
data1 = Dataset(coords={"x": 0})
data2 = Dataset(coords={"x": [0]})
assert data1.broadcast_equals(data2)
assert not data1.equals(data2)
assert not data1.identical(data2)
def test_attrs(self):
data = create_test_data(seed=42)
data.attrs = {"foobar": "baz"}
assert data.attrs["foobar"], "baz"
assert isinstance(data.attrs, dict)
    @requires_dask
    def test_chunk(self):
        """chunk() converts to dask arrays; rechunking changes dask names."""
        data = create_test_data()
        for v in data.variables.values():
            assert isinstance(v.data, np.ndarray)
        assert data.chunks == {}
        reblocked = data.chunk()
        for k, v in reblocked.variables.items():
            if k in reblocked.dims:
                # index variables are kept as numpy arrays
                assert isinstance(v.data, np.ndarray)
            else:
                assert isinstance(v.data, da.Array)
        expected_chunks = {"dim1": (8,), "dim2": (9,), "dim3": (10,)}
        assert reblocked.chunks == expected_chunks
        def get_dask_names(ds):
            return {k: v.data.name for k, v in ds.items()}
        orig_dask_names = get_dask_names(reblocked)
        reblocked = data.chunk({"time": 5, "dim1": 5, "dim2": 5, "dim3": 5})
        expected_chunks = {"dim1": (5, 3), "dim2": (5, 4), "dim3": (5, 5)}
        assert reblocked.chunks == expected_chunks
        # make sure dask names change when rechunking by different amounts
        # regression test for GH3350
        new_dask_names = get_dask_names(reblocked)
        for k, v in new_dask_names.items():
            assert v != orig_dask_names[k]
        reblocked = data.chunk(expected_chunks)
        assert reblocked.chunks == expected_chunks
        # reblock on already blocked data
        orig_dask_names = get_dask_names(reblocked)
        reblocked = reblocked.chunk(expected_chunks)
        new_dask_names = get_dask_names(reblocked)
        assert reblocked.chunks == expected_chunks
        assert_identical(reblocked, data)
        # rechunking with same chunk sizes should not change names
        for k, v in new_dask_names.items():
            assert v == orig_dask_names[k]
        with pytest.raises(ValueError, match=r"some chunks"):
            data.chunk({"foo": 10})
    @requires_dask
    def test_dask_is_lazy(self):
        """Operations on a chunked dataset must not trigger data access."""
        store = InaccessibleVariableDataStore()
        create_test_data().dump_to_store(store)
        ds = open_dataset(store).chunk()
        with pytest.raises(UnexpectedDataAccess):
            ds.load()
        with pytest.raises(UnexpectedDataAccess):
            ds["var1"].values
        # these should not raise UnexpectedDataAccess:
        ds.var1.data
        ds.isel(time=10)
        ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
        ds.transpose()
        ds.mean()
        ds.fillna(0)
        ds.rename({"dim1": "foobar"})
        ds.set_coords("var1")
        ds.drop_vars("var1")
    def test_isel(self):
        """Positional indexing: slices, scalars, and missing-dims handling."""
        data = create_test_data()
        slicers = {"dim1": slice(None, None, 2), "dim2": slice(0, 2)}
        ret = data.isel(**slicers)
        # Verify that only the specified dimension was altered
        assert list(data.dims) == list(ret.dims)
        for d in data.dims:
            if d in slicers:
                assert ret.dims[d] == np.arange(data.dims[d])[slicers[d]].size
            else:
                assert data.dims[d] == ret.dims[d]
        # Verify that the data is what we expect
        for v in data.variables:
            assert data[v].dims == ret[v].dims
            assert data[v].attrs == ret[v].attrs
            slice_list = [slice(None)] * data[v].values.ndim
            for d, s in slicers.items():
                if d in data[v].dims:
                    inds = np.nonzero(np.array(data[v].dims) == d)[0]
                    for ind in inds:
                        slice_list[ind] = s
            expected = data[v].values[tuple(slice_list)]
            actual = ret[v].values
            np.testing.assert_array_equal(expected, actual)
        with pytest.raises(ValueError):
            data.isel(not_a_dim=slice(0, 2))
        with pytest.raises(
            ValueError,
            match=r"Dimensions {'not_a_dim'} do not exist. Expected "
            r"one or more of "
            r"[\w\W]*'dim\d'[\w\W]*'dim\d'[\w\W]*'time'[\w\W]*'dim\d'[\w\W]*",
        ):
            data.isel(not_a_dim=slice(0, 2))
        with pytest.warns(
            UserWarning,
            match=r"Dimensions {'not_a_dim'} do not exist. "
            r"Expected one or more of "
            r"[\w\W]*'dim\d'[\w\W]*'dim\d'[\w\W]*'time'[\w\W]*'dim\d'[\w\W]*",
        ):
            data.isel(not_a_dim=slice(0, 2), missing_dims="warn")
        assert_identical(data, data.isel(not_a_dim=slice(0, 2), missing_dims="ignore"))
        ret = data.isel(dim1=0)
        assert {"time": 20, "dim2": 9, "dim3": 10} == ret.dims
        assert set(data.data_vars) == set(ret.data_vars)
        assert set(data.coords) == set(ret.coords)
        assert set(data.xindexes) == set(ret.xindexes)
        ret = data.isel(time=slice(2), dim1=0, dim2=slice(5))
        assert {"time": 2, "dim2": 5, "dim3": 10} == ret.dims
        assert set(data.data_vars) == set(ret.data_vars)
        assert set(data.coords) == set(ret.coords)
        assert set(data.xindexes) == set(ret.xindexes)
        # scalar selection drops the "time" index
        ret = data.isel(time=0, dim1=0, dim2=slice(5))
        assert {"dim2": 5, "dim3": 10} == ret.dims
        assert set(data.data_vars) == set(ret.data_vars)
        assert set(data.coords) == set(ret.coords)
        assert set(data.xindexes) == set(list(ret.xindexes) + ["time"])
    def test_isel_fancy(self):
        """Vectorized (pointwise) indexing with tuples, DataArrays, and Datasets."""
        # isel with fancy indexing.
        data = create_test_data()
        pdim1 = [1, 2, 3]
        pdim2 = [4, 5, 1]
        pdim3 = [1, 2, 3]
        actual = data.isel(
            dim1=(("test_coord",), pdim1),
            dim2=(("test_coord",), pdim2),
            dim3=(("test_coord",), pdim3),
        )
        assert "test_coord" in actual.dims
        assert actual.coords["test_coord"].shape == (len(pdim1),)
        # Should work with DataArray
        actual = data.isel(
            dim1=DataArray(pdim1, dims="test_coord"),
            dim2=(("test_coord",), pdim2),
            dim3=(("test_coord",), pdim3),
        )
        assert "test_coord" in actual.dims
        assert actual.coords["test_coord"].shape == (len(pdim1),)
        expected = data.isel(
            dim1=(("test_coord",), pdim1),
            dim2=(("test_coord",), pdim2),
            dim3=(("test_coord",), pdim3),
        )
        assert_identical(actual, expected)
        # DataArray with coordinate
        idx1 = DataArray(pdim1, dims=["a"], coords={"a": np.random.randn(3)})
        idx2 = DataArray(pdim2, dims=["b"], coords={"b": np.random.randn(3)})
        idx3 = DataArray(pdim3, dims=["c"], coords={"c": np.random.randn(3)})
        # Should work with DataArray
        actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3)
        assert "a" in actual.dims
        assert "b" in actual.dims
        assert "c" in actual.dims
        assert "time" in actual.coords
        assert "dim2" in actual.coords
        assert "dim3" in actual.coords
        expected = data.isel(
            dim1=(("a",), pdim1), dim2=(("b",), pdim2), dim3=(("c",), pdim3)
        )
        expected = expected.assign_coords(a=idx1["a"], b=idx2["b"], c=idx3["c"])
        assert_identical(actual, expected)
        idx1 = DataArray(pdim1, dims=["a"], coords={"a": np.random.randn(3)})
        idx2 = DataArray(pdim2, dims=["a"])
        idx3 = DataArray(pdim3, dims=["a"])
        # Should work with DataArray
        actual = data.isel(dim1=idx1, dim2=idx2, dim3=idx3)
        assert "a" in actual.dims
        assert "time" in actual.coords
        assert "dim2" in actual.coords
        assert "dim3" in actual.coords
        expected = data.isel(
            dim1=(("a",), pdim1), dim2=(("a",), pdim2), dim3=(("a",), pdim3)
        )
        expected = expected.assign_coords(a=idx1["a"])
        assert_identical(actual, expected)
        actual = data.isel(dim1=(("points",), pdim1), dim2=(("points",), pdim2))
        assert "points" in actual.dims
        assert "dim3" in actual.dims
        assert "dim3" not in actual.data_vars
        np.testing.assert_array_equal(data["dim2"][pdim2], actual["dim2"])
        # test that the order of the indexers doesn't matter
        assert_identical(
            data.isel(dim1=(("points",), pdim1), dim2=(("points",), pdim2)),
            data.isel(dim2=(("points",), pdim2), dim1=(("points",), pdim1)),
        )
        # make sure we're raising errors in the right places
        with pytest.raises(IndexError, match=r"Dimensions of indexers mismatch"):
            data.isel(dim1=(("points",), [1, 2]), dim2=(("points",), [1, 2, 3]))
        with pytest.raises(TypeError, match=r"cannot use a Dataset"):
            data.isel(dim1=Dataset({"points": [1, 2]}))
        # test to be sure we keep around variables that were not indexed
        ds = Dataset({"x": [1, 2, 3, 4], "y": 0})
        actual = ds.isel(x=(("points",), [0, 1, 2]))
        assert_identical(ds["y"], actual["y"])
        # tests using index or DataArray as indexers
        stations = Dataset()
        stations["station"] = (("station",), ["A", "B", "C"])
        stations["dim1s"] = (("station",), [1, 2, 3])
        stations["dim2s"] = (("station",), [4, 5, 1])
        actual = data.isel(dim1=stations["dim1s"], dim2=stations["dim2s"])
        assert "station" in actual.coords
        assert "station" in actual.dims
        assert_identical(actual["station"].drop_vars(["dim2"]), stations["station"])
        with pytest.raises(ValueError, match=r"conflicting values for "):
            data.isel(
                dim1=DataArray(
                    [0, 1, 2], dims="station", coords={"station": [0, 1, 2]}
                ),
                dim2=DataArray(
                    [0, 1, 2], dims="station", coords={"station": [0, 1, 3]}
                ),
            )
        # multi-dimensional selection
        stations = Dataset()
        stations["a"] = (("a",), ["A", "B", "C"])
        stations["b"] = (("b",), [0, 1])
        stations["dim1s"] = (("a", "b"), [[1, 2], [2, 3], [3, 4]])
        stations["dim2s"] = (("a",), [4, 5, 1])
        actual = data.isel(dim1=stations["dim1s"], dim2=stations["dim2s"])
        assert "a" in actual.coords
        assert "a" in actual.dims
        assert "b" in actual.coords
        assert "b" in actual.dims
        assert "dim2" in actual.coords
        assert "a" in actual["dim2"].dims
        assert_identical(actual["a"].drop_vars(["dim2"]), stations["a"])
        assert_identical(actual["b"], stations["b"])
        expected_var1 = data["var1"].variable[
            stations["dim1s"].variable, stations["dim2s"].variable
        ]
        expected_var2 = data["var2"].variable[
            stations["dim1s"].variable, stations["dim2s"].variable
        ]
        expected_var3 = data["var3"].variable[slice(None), stations["dim1s"].variable]
        assert_equal(actual["a"].drop_vars("dim2"), stations["a"])
        assert_array_equal(actual["var1"], expected_var1)
        assert_array_equal(actual["var2"], expected_var2)
        assert_array_equal(actual["var3"], expected_var3)
    def test_isel_dataarray(self):
        """Positional indexing (``isel``) with DataArray indexers: coordinate
        propagation, conflicts on dimension coordinates, and boolean masks."""
        data = create_test_data()
        # indexing with DataArray with same-name coordinates.
        indexing_da = DataArray(
            np.arange(1, 4), dims=["dim1"], coords={"dim1": np.random.randn(3)}
        )
        actual = data.isel(dim1=indexing_da)
        assert_identical(indexing_da["dim1"], actual["dim1"])
        assert_identical(data["dim2"], actual["dim2"])
        # Conflict in the dimension coordinate: the indexer's 'dim2' coordinate
        # disagrees with the dataset's own 'dim2', so isel must raise.
        indexing_da = DataArray(
            np.arange(1, 4), dims=["dim2"], coords={"dim2": np.random.randn(3)}
        )
        with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
            actual = data.isel(dim2=indexing_da)
        # Also the case for DataArray
        with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
            actual = data["var2"].isel(dim2=indexing_da)
        with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
            data["dim2"].isel(dim2=indexing_da)
        # same name coordinate which does not conflict
        indexing_da = DataArray(
            np.arange(1, 4), dims=["dim2"], coords={"dim2": data["dim2"].values[1:4]}
        )
        actual = data.isel(dim2=indexing_da)
        assert_identical(actual["dim2"], indexing_da["dim2"])
        # Silently drop conflicted (non-dimensional) coordinate of indexer
        indexing_da = DataArray(
            np.arange(1, 4),
            dims=["dim2"],
            coords={
                "dim2": data["dim2"].values[1:4],
                "numbers": ("dim2", np.arange(2, 5)),
            },
        )
        actual = data.isel(dim2=indexing_da)
        assert_identical(actual["numbers"], data["numbers"])
        # boolean data array with coordinate with the same name
        indexing_da = DataArray(
            np.arange(1, 10), dims=["dim2"], coords={"dim2": data["dim2"].values}
        )
        indexing_da = indexing_da < 3  # boolean mask: True for the first two values
        actual = data.isel(dim2=indexing_da)
        assert_identical(actual["dim2"], data["dim2"][:2])
        # boolean data array with non-dimension coordinate
        indexing_da = DataArray(
            np.arange(1, 10),
            dims=["dim2"],
            coords={
                "dim2": data["dim2"].values,
                "non_dim": (("dim2",), np.random.randn(9)),
                "non_dim2": 0,
            },
        )
        indexing_da = indexing_da < 3
        actual = data.isel(dim2=indexing_da)
        assert_identical(
            actual["dim2"].drop_vars("non_dim").drop_vars("non_dim2"), data["dim2"][:2]
        )
        assert_identical(actual["non_dim"], indexing_da["non_dim"][:2])
        assert_identical(actual["non_dim2"], indexing_da["non_dim2"])
        # non-dimension coordinate will be also attached
        indexing_da = DataArray(
            np.arange(1, 4),
            dims=["dim2"],
            coords={"non_dim": (("dim2",), np.random.randn(3))},
        )
        actual = data.isel(dim2=indexing_da)
        assert "non_dim" in actual
        assert "non_dim" in actual.coords
        # Index by a scalar DataArray: its scalar coords are carried over
        indexing_da = DataArray(3, dims=[], coords={"station": 2})
        actual = data.isel(dim2=indexing_da)
        assert "station" in actual
        actual = data.isel(dim2=indexing_da["station"])
        assert "station" in actual
        # indexer generated from coordinates
        indexing_ds = Dataset({}, coords={"dim2": [0, 1, 2]})
        with pytest.raises(IndexError, match=r"dimension coordinate 'dim2'"):
            actual = data.isel(dim2=indexing_ds["dim2"])
    def test_sel(self):
        """Label-based selection (``sel``) agrees with the equivalent positional
        ``isel`` for slices, datetime strings/ranges, and timedeltas."""
        data = create_test_data()
        int_slicers = {"dim1": slice(None, None, 2), "dim2": slice(2), "dim3": slice(3)}
        loc_slicers = {
            "dim1": slice(None, None, 2),
            "dim2": slice(0, 0.5),
            "dim3": slice("a", "c"),
        }
        assert_equal(data.isel(**int_slicers), data.sel(**loc_slicers))
        # datetime labels: scalar string, string slices, and partial-year slices
        data["time"] = ("time", pd.date_range("2000-01-01", periods=20))
        assert_equal(data.isel(time=0), data.sel(time="2000-01-01"))
        assert_equal(
            data.isel(time=slice(10)), data.sel(time=slice("2000-01-01", "2000-01-10"))
        )
        assert_equal(data, data.sel(time=slice("1999", "2005")))
        times = pd.date_range("2000-01-01", periods=3)
        assert_equal(data.isel(time=slice(3)), data.sel(time=times))
        assert_equal(
            data.isel(time=slice(3)), data.sel(time=(data["time.dayofyear"] <= 3))
        )
        # timedelta labels: arrays, string slices, and pd.Timedelta scalars
        td = pd.to_timedelta(np.arange(3), unit="days")
        data = Dataset({"x": ("td", np.arange(3)), "td": td})
        assert_equal(data, data.sel(td=td))
        assert_equal(data, data.sel(td=slice("3 days")))
        assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta("0 days")))
        assert_equal(data.isel(td=0), data.sel(td=pd.Timedelta("0h")))
        assert_equal(data.isel(td=slice(1, 3)), data.sel(td=slice("1 days", "2 days")))
def test_sel_dataarray(self):
data = create_test_data()
ind = DataArray([0.0, 0.5, 1.0], dims=["dim2"])
actual = data.sel(dim2=ind)
assert_equal(actual, data.isel(dim2=[0, 1, 2]))
# with different dimension
ind = DataArray([0.0, 0.5, 1.0], dims=["new_dim"])
actual = data.sel(dim2=ind)
expected = data.isel(dim2=Variable("new_dim", [0, 1, 2]))
assert "new_dim" in actual.dims
assert_equal(actual, expected)
# Multi-dimensional
ind = DataArray([[0.0], [0.5], [1.0]], dims=["new_dim", "new_dim2"])
actual = data.sel(dim2=ind)
expected = data.isel(dim2=Variable(("new_dim", "new_dim2"), [[0], [1], [2]]))
assert "new_dim" in actual.dims
assert "new_dim2" in actual.dims
assert_equal(actual, expected)
# with coordinate
ind = DataArray(
[0.0, 0.5, 1.0], dims=["new_dim"], coords={"new_dim": ["a", "b", "c"]}
)
actual = data.sel(dim2=ind)
expected = data.isel(dim2=[0, 1, 2]).rename({"dim2": "new_dim"})
assert "new_dim" in actual.dims
assert "new_dim" in actual.coords
assert_equal(
actual.drop_vars("new_dim").drop_vars("dim2"), expected.drop_vars("new_dim")
)
assert_equal(actual["new_dim"].drop_vars("dim2"), ind["new_dim"])
# with conflicted coordinate (silently ignored)
ind = DataArray(
[0.0, 0.5, 1.0], dims=["dim2"], coords={"dim2": ["a", "b", "c"]}
)
actual = data.sel(dim2=ind)
expected = data.isel(dim2=[0, 1, 2])
assert_equal(actual, expected)
# with conflicted coordinate (silently ignored)
ind = DataArray(
[0.0, 0.5, 1.0],
dims=["new_dim"],
coords={"new_dim": ["a", "b", "c"], "dim2": 3},
)
actual = data.sel(dim2=ind)
assert_equal(
actual["new_dim"].drop_vars("dim2"), ind["new_dim"].drop_vars("dim2")
)
expected = data.isel(dim2=[0, 1, 2])
expected["dim2"] = (("new_dim"), expected["dim2"].values)
assert_equal(actual["dim2"].drop_vars("new_dim"), expected["dim2"])
assert actual["var1"].dims == ("dim1", "new_dim")
# with non-dimensional coordinate
ind = DataArray(
[0.0, 0.5, 1.0],
dims=["dim2"],
coords={
"dim2": ["a", "b", "c"],
"numbers": ("dim2", [0, 1, 2]),
"new_dim": ("dim2", [1.1, 1.2, 1.3]),
},
)
actual = data.sel(dim2=ind)
expected = data.isel(dim2=[0, 1, 2])
assert_equal(actual.drop_vars("new_dim"), expected)
assert np.allclose(actual["new_dim"].values, ind["new_dim"].values)
    def test_sel_dataarray_mindex(self):
        """``sel`` with DataArray/Variable indexers over a pandas MultiIndex:
        dimension naming and the vectorized-selection error paths."""
        midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two"))
        mds = xr.Dataset(
            {"var": (("x", "y"), np.random.rand(6, 3))},
            coords={"x": midx, "y": range(3)},
        )
        # indexer along the same dimension keeps the 'x' dim name
        actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims="x"))
        actual_sel = mds.sel(x=DataArray(midx[:3], dims="x"))
        assert actual_isel["x"].dims == ("x",)
        assert actual_sel["x"].dims == ("x",)
        assert_identical(actual_isel, actual_sel)
        # indexer along a new dimension renames the result dimension to 'z'
        actual_isel = mds.isel(x=xr.DataArray(np.arange(3), dims="z"))
        actual_sel = mds.sel(x=Variable("z", midx[:3]))
        assert actual_isel["x"].dims == ("z",)
        assert actual_sel["x"].dims == ("z",)
        assert_identical(actual_isel, actual_sel)
        # with coordinate
        actual_isel = mds.isel(
            x=xr.DataArray(np.arange(3), dims="z", coords={"z": [0, 1, 2]})
        )
        actual_sel = mds.sel(
            x=xr.DataArray(midx[:3], dims="z", coords={"z": [0, 1, 2]})
        )
        assert actual_isel["x"].dims == ("z",)
        assert actual_sel["x"].dims == ("z",)
        assert_identical(actual_isel, actual_sel)
        # Vectorized indexing with level-variables raises an error
        with pytest.raises(ValueError, match=r"Vectorized selection is "):
            mds.sel(one=["a", "b"])
        with pytest.raises(
            ValueError,
            match=r"Vectorized selection is not available along MultiIndex variable: x",
        ):
            mds.sel(
                x=xr.DataArray(
                    [np.array(midx[:2]), np.array(midx[-2:])], dims=["a", "b"]
                )
            )
def test_sel_categorical(self):
ind = pd.Series(["foo", "bar"], dtype="category")
df = pd.DataFrame({"ind": ind, "values": [1, 2]})
ds = df.set_index("ind").to_xarray()
actual = ds.sel(ind="bar")
expected = ds.isel(ind=1)
assert_identical(expected, actual)
def test_sel_categorical_error(self):
ind = pd.Series(["foo", "bar"], dtype="category")
df = pd.DataFrame({"ind": ind, "values": [1, 2]})
ds = df.set_index("ind").to_xarray()
with pytest.raises(ValueError):
ds.sel(ind="bar", method="nearest")
with pytest.raises(ValueError):
ds.sel(ind="bar", tolerance="nearest")
    def test_categorical_index(self):
        """Selection, array conversion, and unstacking on a CategoricalIndex."""
        cat = pd.CategoricalIndex(
            ["foo", "bar", "foo"],
            categories=["foo", "bar", "baz", "qux", "quux", "corge"],
        )
        ds = xr.Dataset(
            {"var": ("cat", np.arange(3))},
            coords={"cat": ("cat", cat), "c": ("cat", [0, 1, 1])},
        )
        # test slice: selecting a repeated label returns both matches
        actual = ds.sel(cat="foo")
        expected = ds.isel(cat=[0, 2])
        assert_identical(expected, actual)
        # make sure the conversion to the array works
        actual = ds.sel(cat="foo")["cat"].values
        assert (actual == np.array(["foo", "foo"])).all()
        # unstacking a MultiIndex built from the categorical works too
        ds = ds.set_index(index=["cat", "c"])
        actual = ds.unstack("index")
        assert actual["var"].shape == (2, 2)
def test_categorical_reindex(self):
cat = pd.CategoricalIndex(
["foo", "bar", "baz"],
categories=["foo", "bar", "baz", "qux", "quux", "corge"],
)
ds = xr.Dataset(
{"var": ("cat", np.arange(3))},
coords={"cat": ("cat", cat), "c": ("cat", [0, 1, 2])},
)
actual = ds.reindex(cat=["foo"])["cat"].values
assert (actual == np.array(["foo"])).all()
def test_categorical_multiindex(self):
i1 = pd.Series([0, 0])
cat = pd.CategoricalDtype(categories=["foo", "baz", "bar"])
i2 = pd.Series(["baz", "bar"], dtype=cat)
df = pd.DataFrame({"i1": i1, "i2": i2, "values": [1, 2]}).set_index(
["i1", "i2"]
)
actual = df.to_xarray()
assert actual["values"].shape == (1, 2)
def test_sel_drop(self):
data = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]})
expected = Dataset({"foo": 1})
selected = data.sel(x=0, drop=True)
assert_identical(expected, selected)
expected = Dataset({"foo": 1}, {"x": 0})
selected = data.sel(x=0, drop=False)
assert_identical(expected, selected)
data = Dataset({"foo": ("x", [1, 2, 3])})
expected = Dataset({"foo": 1})
selected = data.sel(x=0, drop=True)
assert_identical(expected, selected)
def test_isel_drop(self):
data = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]})
expected = Dataset({"foo": 1})
selected = data.isel(x=0, drop=True)
assert_identical(expected, selected)
expected = Dataset({"foo": 1}, {"x": 0})
selected = data.isel(x=0, drop=False)
assert_identical(expected, selected)
    def test_head(self):
        """``Dataset.head`` takes the first N elements per dimension and
        validates its arguments (dict-like or single int; non-negative ints)."""
        data = create_test_data()
        expected = data.isel(time=slice(5), dim2=slice(6))
        actual = data.head(time=5, dim2=6)
        assert_equal(expected, actual)
        # zero-length head is allowed
        expected = data.isel(time=slice(0))
        actual = data.head(time=0)
        assert_equal(expected, actual)
        # a single int applies to every dimension
        expected = data.isel({dim: slice(6) for dim in data.dims})
        actual = data.head(6)
        assert_equal(expected, actual)
        # default is 5 elements per dimension
        expected = data.isel({dim: slice(5) for dim in data.dims})
        actual = data.head()
        assert_equal(expected, actual)
        with pytest.raises(TypeError, match=r"either dict-like or a single int"):
            data.head([3])
        with pytest.raises(TypeError, match=r"expected integer type"):
            data.head(dim2=3.1)
        with pytest.raises(ValueError, match=r"expected positive int"):
            data.head(time=-3)
    def test_tail(self):
        """``Dataset.tail`` takes the last N elements per dimension and
        validates its arguments (dict-like or single int; non-negative ints)."""
        data = create_test_data()
        expected = data.isel(time=slice(-5, None), dim2=slice(-6, None))
        actual = data.tail(time=5, dim2=6)
        assert_equal(expected, actual)
        # zero-length tail is allowed
        expected = data.isel(dim1=slice(0))
        actual = data.tail(dim1=0)
        assert_equal(expected, actual)
        # a single int applies to every dimension
        expected = data.isel({dim: slice(-6, None) for dim in data.dims})
        actual = data.tail(6)
        assert_equal(expected, actual)
        # default is 5 elements per dimension
        expected = data.isel({dim: slice(-5, None) for dim in data.dims})
        actual = data.tail()
        assert_equal(expected, actual)
        with pytest.raises(TypeError, match=r"either dict-like or a single int"):
            data.tail([3])
        with pytest.raises(TypeError, match=r"expected integer type"):
            data.tail(dim2=3.1)
        with pytest.raises(ValueError, match=r"expected positive int"):
            data.tail(time=-3)
    def test_thin(self):
        """``Dataset.thin`` subsamples every Nth element per dimension and
        validates its arguments (dict-like or single int; strictly positive)."""
        data = create_test_data()
        expected = data.isel(time=slice(None, None, 5), dim2=slice(None, None, 6))
        actual = data.thin(time=5, dim2=6)
        assert_equal(expected, actual)
        # a single int applies to every dimension
        expected = data.isel({dim: slice(None, None, 6) for dim in data.dims})
        actual = data.thin(6)
        assert_equal(expected, actual)
        with pytest.raises(TypeError, match=r"either dict-like or a single int"):
            data.thin([3])
        with pytest.raises(TypeError, match=r"expected integer type"):
            data.thin(dim2=3.1)
        # unlike head/tail, a step of zero is meaningless
        with pytest.raises(ValueError, match=r"cannot be zero"):
            data.thin(time=0)
        with pytest.raises(ValueError, match=r"expected positive int"):
            data.thin(time=-3)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_sel_fancy(self):
data = create_test_data()
# add in a range() index
data["dim1"] = data.dim1
pdim1 = [1, 2, 3]
pdim2 = [4, 5, 1]
pdim3 = [1, 2, 3]
expected = data.isel(
dim1=Variable(("test_coord",), pdim1),
dim2=Variable(("test_coord",), pdim2),
dim3=Variable(("test_coord"), pdim3),
)
actual = data.sel(
dim1=Variable(("test_coord",), data.dim1[pdim1]),
dim2=Variable(("test_coord",), data.dim2[pdim2]),
dim3=Variable(("test_coord",), data.dim3[pdim3]),
)
assert_identical(expected, actual)
# DataArray Indexer
idx_t = DataArray(
data["time"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
idx_2 = DataArray(
data["dim2"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
idx_3 = DataArray(
data["dim3"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3)
expected = data.isel(
time=Variable(("a",), [3, 2, 1]),
dim2=Variable(("a",), [3, 2, 1]),
dim3=Variable(("a",), [3, 2, 1]),
)
expected = expected.assign_coords(a=idx_t["a"])
assert_identical(expected, actual)
idx_t = DataArray(
data["time"][[3, 2, 1]].values, dims=["a"], coords={"a": ["a", "b", "c"]}
)
idx_2 = DataArray(
data["dim2"][[2, 1, 3]].values, dims=["b"], coords={"b": [0, 1, 2]}
)
idx_3 = DataArray(
data["dim3"][[1, 2, 1]].values, dims=["c"], coords={"c": [0.0, 1.1, 2.2]}
)
actual = data.sel(time=idx_t, dim2=idx_2, dim3=idx_3)
expected = data.isel(
time=Variable(("a",), [3, 2, 1]),
dim2=Variable(("b",), [2, 1, 3]),
dim3=Variable(("c",), [1, 2, 1]),
)
expected = expected.assign_coords(a=idx_t["a"], b=idx_2["b"], c=idx_3["c"])
assert_identical(expected, actual)
# test from sel_points
data = Dataset({"foo": (("x", "y"), np.arange(9).reshape(3, 3))})
data.coords.update({"x": [0, 1, 2], "y": [0, 1, 2]})
expected = Dataset(
{"foo": ("points", [0, 4, 8])},
coords={
"x": Variable(("points",), [0, 1, 2]),
"y": Variable(("points",), [0, 1, 2]),
},
)
actual = data.sel(
x=Variable(("points",), [0, 1, 2]), y=Variable(("points",), [0, 1, 2])
)
assert_identical(expected, actual)
expected.coords.update({"x": ("points", [0, 1, 2]), "y": ("points", [0, 1, 2])})
actual = data.sel(
x=Variable(("points",), [0.1, 1.1, 2.5]),
y=Variable(("points",), [0, 1.2, 2.0]),
method="pad",
)
assert_identical(expected, actual)
idx_x = DataArray([0, 1, 2], dims=["a"], coords={"a": ["a", "b", "c"]})
idx_y = DataArray([0, 2, 1], dims=["b"], coords={"b": [0, 3, 6]})
expected_ary = data["foo"][[0, 1, 2], [0, 2, 1]]
actual = data.sel(x=idx_x, y=idx_y)
assert_array_equal(expected_ary, actual["foo"])
assert_identical(actual["a"].drop_vars("x"), idx_x["a"])
assert_identical(actual["b"].drop_vars("y"), idx_y["b"])
with pytest.raises(KeyError):
data.sel(x=[2.5], y=[2.0], method="pad", tolerance=1e-3)
    def test_sel_method(self):
        """Inexact selection with ``method`` ('nearest', 'backfill', 'ffill')
        and ``tolerance``, plus the invalid-argument error paths."""
        data = create_test_data()
        expected = data.sel(dim2=1)
        actual = data.sel(dim2=0.95, method="nearest")
        assert_identical(expected, actual)
        actual = data.sel(dim2=0.95, method="nearest", tolerance=1)
        assert_identical(expected, actual)
        # tolerance=0 with an inexact label must fail
        with pytest.raises(KeyError):
            actual = data.sel(dim2=np.pi, method="nearest", tolerance=0)
        expected = data.sel(dim2=[1.5])
        actual = data.sel(dim2=[1.45], method="backfill")
        assert_identical(expected, actual)
        with pytest.raises(NotImplementedError, match=r"slice objects"):
            data.sel(dim2=slice(1, 3), method="ffill")
        with pytest.raises(TypeError, match=r"``method``"):
            # this should not pass silently
            data.sel(method=data)
        # cannot pass method if there is no associated coordinate
        with pytest.raises(ValueError, match=r"cannot supply"):
            data.sel(dim1=0, method="nearest")
def test_loc(self):
data = create_test_data()
expected = data.sel(dim3="a")
actual = data.loc[dict(dim3="a")]
assert_identical(expected, actual)
with pytest.raises(TypeError, match=r"can only lookup dict"):
data.loc["a"]
    def test_selection_multiindex(self):
        """Label selection on a 3-level MultiIndex: full tuples, partial tuples,
        dicts of level values, slices, and ``.loc`` equivalence."""
        mindex = pd.MultiIndex.from_product(
            [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three")
        )
        mdata = Dataset(data_vars={"var": ("x", range(8))}, coords={"x": mindex})

        def test_sel(lab_indexer, pos_indexer, replaced_idx=False, renamed_dim=None):
            # Compare label-based selection against the equivalent positional
            # selection.  When a partial selection replaces the index
            # (replaced_idx=True), the values match but the index objects differ;
            # renamed_dim is the leftover level that becomes the new dim name.
            ds = mdata.sel(x=lab_indexer)
            expected_ds = mdata.isel(x=pos_indexer)
            if not replaced_idx:
                assert_identical(ds, expected_ds)
            else:
                if renamed_dim:
                    assert ds["var"].dims[0] == renamed_dim
                    ds = ds.rename({renamed_dim: "x"})
                assert_identical(ds["var"].variable, expected_ds["var"].variable)
                assert not ds["x"].equals(expected_ds["x"])

        test_sel(("a", 1, -1), 0)
        test_sel(("b", 2, -2), -1)
        test_sel(("a", 1), [0, 1], replaced_idx=True, renamed_dim="three")
        test_sel(("a",), range(4), replaced_idx=True)
        test_sel("a", range(4), replaced_idx=True)
        test_sel([("a", 1, -1), ("b", 2, -2)], [0, 7])
        test_sel(slice("a", "b"), range(8))
        test_sel(slice(("a", 1), ("b", 1)), range(6))
        test_sel({"one": "a", "two": 1, "three": -1}, 0)
        test_sel({"one": "a", "two": 1}, [0, 1], replaced_idx=True, renamed_dim="three")
        test_sel({"one": "a"}, range(4), replaced_idx=True)
        # .loc with a dict routes through sel
        assert_identical(mdata.loc[{"x": {"one": "a"}}], mdata.sel(x={"one": "a"}))
        assert_identical(mdata.loc[{"x": "a"}], mdata.sel(x="a"))
        assert_identical(mdata.loc[{"x": ("a", 1)}], mdata.sel(x=("a", 1)))
        assert_identical(mdata.loc[{"x": ("a", 1, -1)}], mdata.sel(x=("a", 1, -1)))
        # selecting by level keyword args is equivalent to a dict indexer
        assert_identical(mdata.sel(x={"one": "a", "two": 1}), mdata.sel(one="a", two=1))
def test_broadcast_like(self):
original1 = DataArray(
np.random.randn(5), [("x", range(5))], name="a"
).to_dataset()
original2 = DataArray(np.random.randn(6), [("y", range(6))], name="b")
expected1, expected2 = broadcast(original1, original2)
assert_identical(
original1.broadcast_like(original2), expected1.transpose("y", "x")
)
assert_identical(original2.broadcast_like(original1), expected2)
    def test_to_pandas(self):
        """``Dataset.to_pandas``: 0-D -> Series, 1-D -> DataFrame, 2-D -> error."""
        # 0D -> series
        actual = Dataset({"a": 1, "b": 2}).to_pandas()
        expected = pd.Series([1, 2], ["a", "b"])
        assert_array_equal(actual, expected)
        # 1D -> dataframe
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
        actual = ds.to_pandas()
        expected = ds.to_dataframe()
        assert expected.equals(actual), (expected, actual)
        # 2D -> error
        x2d = np.random.randn(10, 10)
        y2d = np.random.randn(10, 10)
        with pytest.raises(ValueError, match=r"cannot convert Datasets"):
            Dataset({"a": (["t", "r"], x2d), "b": (["t", "r"], y2d)}).to_pandas()
    def test_reindex_like(self):
        """``reindex_like``: subsetting and shifting against another dataset,
        including NaN-filling and dtype promotion of int/object variables."""
        data = create_test_data()
        data["letters"] = ("dim3", 10 * ["a"])
        expected = data.isel(dim1=slice(10), time=slice(13))
        actual = data.reindex_like(expected)
        assert_identical(actual, expected)
        # shift dim3 by two labels: the last two positions have no match and
        # must be filled with NaN (promoting int/object dtypes as needed)
        expected = data.copy(deep=True)
        expected["dim3"] = ("dim3", list("cdefghijkl"))
        expected["var3"][:-2] = expected["var3"][2:].values
        expected["var3"][-2:] = np.nan
        expected["letters"] = expected["letters"].astype(object)
        expected["letters"][-2:] = np.nan
        expected["numbers"] = expected["numbers"].astype(float)
        expected["numbers"][:-2] = expected["numbers"][2:].values
        expected["numbers"][-2:] = np.nan
        actual = data.reindex_like(expected)
        assert_identical(actual, expected)
    def test_reindex(self):
        """``Dataset.reindex``: identity, new indexes, argument validation,
        reordering, and per-variable fill values."""
        data = create_test_data()
        assert_identical(data, data.reindex())
        # reindexing on a dimension without an index just attaches the coord
        expected = data.assign_coords(dim1=data["dim1"])
        actual = data.reindex(dim1=data["dim1"])
        assert_identical(actual, expected)
        actual = data.reindex(dim1=data["dim1"].values)
        assert_identical(actual, expected)
        actual = data.reindex(dim1=data["dim1"].to_index())
        assert_identical(actual, expected)
        # a shorter index on a dimension without an index cannot be aligned
        with pytest.raises(
            ValueError, match=r"cannot reindex or align along dimension"
        ):
            data.reindex(dim1=data["dim1"][:5])
        expected = data.isel(dim2=slice(5))
        actual = data.reindex(dim2=data["dim2"][:5])
        assert_identical(actual, expected)
        # test dict-like argument
        actual = data.reindex({"dim2": data["dim2"]})
        expected = data
        assert_identical(actual, expected)
        with pytest.raises(ValueError, match=r"cannot specify both"):
            data.reindex({"x": 0}, x=0)
        with pytest.raises(ValueError, match=r"dictionary"):
            data.reindex("foo")
        # invalid dimension
        with pytest.raises(ValueError, match=r"invalid reindex dim"):
            data.reindex(invalid=0)
        # out of order
        expected = data.sel(dim2=data["dim2"][:5:-1])
        actual = data.reindex(dim2=data["dim2"][:5:-1])
        assert_identical(actual, expected)
        # multiple fill values
        expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(
            var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),
            var2=lambda ds: ds.var2.copy(data=[[-20, -20, -20, -20]] * len(ds.dim1)),
        )
        actual = data.reindex(
            dim2=[0.1, 2.1, 3.1, 4.1], fill_value={"var1": -10, "var2": -20}
        )
        assert_identical(actual, expected)
        # use the default value: variables missing from the dict get NaN
        expected = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1]).assign(
            var1=lambda ds: ds.var1.copy(data=[[-10, -10, -10, -10]] * len(ds.dim1)),
            var2=lambda ds: ds.var2.copy(
                data=[[np.nan, np.nan, np.nan, np.nan]] * len(ds.dim1)
            ),
        )
        actual = data.reindex(dim2=[0.1, 2.1, 3.1, 4.1], fill_value={"var1": -10})
        assert_identical(actual, expected)
        # regression test for #279
        expected = Dataset({"x": ("time", np.random.randn(5))}, {"time": range(5)})
        time2 = DataArray(np.arange(5), dims="time2")
        with pytest.raises(ValueError):
            actual = expected.reindex(time=time2)
        # another regression test
        ds = Dataset(
            {"foo": (["x", "y"], np.zeros((3, 4)))}, {"x": range(3), "y": range(4)}
        )
        expected = Dataset(
            {"foo": (["x", "y"], np.zeros((3, 2)))}, {"x": [0, 1, 3], "y": [0, 1]}
        )
        expected["foo"][-1] = np.nan
        actual = ds.reindex(x=[0, 1, 3], y=[0, 1])
        assert_identical(expected, actual)
    def test_reindex_warning(self):
        """Reindexing with a DataArray indexer: wrong dimension raises, matching
        dimension emits no warning."""
        data = create_test_data()
        with pytest.raises(ValueError):
            # DataArray with a different dimension name raises ValueError
            # (the old FutureWarning behavior was hardened into an error)
            ind = xr.DataArray([0.0, 1.0], dims=["new_dim"], name="ind")
            data.reindex(dim2=ind)
        # Should not warn
        # NOTE(review): pytest.warns(None) is deprecated in pytest>=7 and
        # removed in pytest 8 — confirm the pinned pytest version.
        ind = xr.DataArray([0.0, 1.0], dims=["dim2"], name="ind")
        with pytest.warns(None) as ws:
            data.reindex(dim2=ind)
            assert len(ws) == 0
def test_reindex_variables_copied(self):
data = create_test_data()
reindexed_data = data.reindex(copy=False)
for k in data.variables:
assert reindexed_data.variables[k] is not data.variables[k]
    def test_reindex_method(self):
        """``reindex`` with fill methods ('backfill', 'pad') and tolerance,
        plus equivalence with ``reindex_like``."""
        ds = Dataset({"x": ("y", [10, 20]), "y": [0, 1]})
        y = [-0.5, 0.5, 1.5]
        actual = ds.reindex(y=y, method="backfill")
        expected = Dataset({"x": ("y", [10, 20, np.nan]), "y": y})
        assert_identical(expected, actual)
        # tolerance too tight: nothing matches, everything is NaN
        actual = ds.reindex(y=y, method="backfill", tolerance=0.1)
        expected = Dataset({"x": ("y", 3 * [np.nan]), "y": y})
        assert_identical(expected, actual)
        actual = ds.reindex(y=y, method="pad")
        expected = Dataset({"x": ("y", [np.nan, 10, 20]), "y": y})
        assert_identical(expected, actual)
        # reindex_like with the same method gives the same result
        alt = Dataset({"y": y})
        actual = ds.reindex_like(alt, method="pad")
        assert_identical(expected, actual)
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"x": 2, "z": 1}])
    def test_reindex_fill_value(self, fill_value):
        """``reindex`` honors scalar and per-variable dict ``fill_value``."""
        ds = Dataset({"x": ("y", [10, 20]), "z": ("y", [-20, -10]), "y": [0, 1]})
        y = [0, 1, 2]
        actual = ds.reindex(y=y, fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_x = fill_value_z = np.nan
        elif isinstance(fill_value, dict):
            fill_value_x = fill_value["x"]
            fill_value_z = fill_value["z"]
        else:
            # a scalar fill value applies to every variable
            fill_value_x = fill_value_z = fill_value
        expected = Dataset(
            {
                "x": ("y", [10, 20, fill_value_x]),
                "z": ("y", [-20, -10, fill_value_z]),
                "y": y,
            }
        )
        assert_identical(expected, actual)
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"x": 2, "z": 1}])
    def test_reindex_like_fill_value(self, fill_value):
        """``reindex_like`` honors scalar and per-variable dict ``fill_value``."""
        ds = Dataset({"x": ("y", [10, 20]), "z": ("y", [-20, -10]), "y": [0, 1]})
        y = [0, 1, 2]
        alt = Dataset({"y": y})
        actual = ds.reindex_like(alt, fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_x = fill_value_z = np.nan
        elif isinstance(fill_value, dict):
            fill_value_x = fill_value["x"]
            fill_value_z = fill_value["z"]
        else:
            # a scalar fill value applies to every variable
            fill_value_x = fill_value_z = fill_value
        expected = Dataset(
            {
                "x": ("y", [10, 20, fill_value_x]),
                "z": ("y", [-20, -10, fill_value_z]),
                "y": y,
            }
        )
        assert_identical(expected, actual)
@pytest.mark.parametrize("dtype", [str, bytes])
def test_reindex_str_dtype(self, dtype):
data = Dataset({"data": ("x", [1, 2]), "x": np.array(["a", "b"], dtype=dtype)})
actual = data.reindex(x=data.x)
expected = data
assert_identical(expected, actual)
assert actual.x.dtype == expected.x.dtype
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": 2, "bar": 1}])
    def test_align_fill_value(self, fill_value):
        """``align(join='outer')`` honors scalar and per-variable ``fill_value``."""
        x = Dataset({"foo": DataArray([1, 2], dims=["x"], coords={"x": [1, 2]})})
        y = Dataset({"bar": DataArray([1, 2], dims=["x"], coords={"x": [1, 3]})})
        x2, y2 = align(x, y, join="outer", fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_foo = fill_value_bar = np.nan
        elif isinstance(fill_value, dict):
            fill_value_foo = fill_value["foo"]
            fill_value_bar = fill_value["bar"]
        else:
            # a scalar fill value applies to every variable
            fill_value_foo = fill_value_bar = fill_value
        expected_x2 = Dataset(
            {
                "foo": DataArray(
                    [1, 2, fill_value_foo], dims=["x"], coords={"x": [1, 2, 3]}
                )
            }
        )
        expected_y2 = Dataset(
            {
                "bar": DataArray(
                    [1, fill_value_bar, 2], dims=["x"], coords={"x": [1, 2, 3]}
                )
            }
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_align(self):
        """``align`` with join='inner'/'outer'/'left'/'right' over partially
        overlapping indexes, plus argument validation."""
        left = create_test_data()
        # right: same data with dim3 shifted by two labels
        right = left.copy(deep=True)
        right["dim3"] = ("dim3", list("cdefghijkl"))
        right["var3"][:-2] = right["var3"][2:].values
        right["var3"][-2:] = np.random.randn(*right["var3"][-2:].shape)
        right["numbers"][:-2] = right["numbers"][2:].values
        right["numbers"][-2:] = -10
        intersection = list("cdefghij")
        union = list("abcdefghijkl")
        # inner join keeps only the shared labels; both sides become identical
        left2, right2 = align(left, right, join="inner")
        assert_array_equal(left2["dim3"], intersection)
        assert_identical(left2, right2)
        # outer join keeps the union, NaN-filling the non-overlapping ends
        left2, right2 = align(left, right, join="outer")
        assert_array_equal(left2["dim3"], union)
        assert_equal(left2["dim3"].variable, right2["dim3"].variable)
        assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))
        assert np.isnan(left2["var3"][-2:]).all()
        assert np.isnan(right2["var3"][:2]).all()
        # left join keeps left's index; right is NaN-filled where unmatched
        left2, right2 = align(left, right, join="left")
        assert_equal(left2["dim3"].variable, right2["dim3"].variable)
        assert_equal(left2["dim3"].variable, left["dim3"].variable)
        assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))
        assert np.isnan(right2["var3"][:2]).all()
        # right join keeps right's index; left is NaN-filled where unmatched
        left2, right2 = align(left, right, join="right")
        assert_equal(left2["dim3"].variable, right2["dim3"].variable)
        assert_equal(left2["dim3"].variable, right["dim3"].variable)
        assert_identical(left2.sel(dim3=intersection), right2.sel(dim3=intersection))
        assert np.isnan(left2["var3"][-2:]).all()
        with pytest.raises(ValueError, match=r"invalid value for join"):
            align(left, right, join="foobar")
        with pytest.raises(TypeError):
            align(left, right, foo="bar")
def test_align_exact(self):
left = xr.Dataset(coords={"x": [0, 1]})
right = xr.Dataset(coords={"x": [1, 2]})
left1, left2 = xr.align(left, left, join="exact")
assert_identical(left1, left)
assert_identical(left2, left)
with pytest.raises(ValueError, match=r"indexes .* not equal"):
xr.align(left, right, join="exact")
    def test_align_override(self):
        """join='override' replaces other objects' indexes with the first's,
        honors ``exclude``, and rejects mismatched dimension sizes."""
        left = xr.Dataset(coords={"x": [0, 1, 2]})
        right = xr.Dataset(coords={"x": [0.1, 1.1, 2.1], "y": [1, 2, 3]})
        expected_right = xr.Dataset(coords={"x": [0, 1, 2], "y": [1, 2, 3]})
        new_left, new_right = xr.align(left, right, join="override")
        assert_identical(left, new_left)
        assert_identical(new_right, expected_right)
        # excluding 'x' leaves both datasets untouched
        new_left, new_right = xr.align(left, right, exclude="x", join="override")
        assert_identical(left, new_left)
        assert_identical(right, new_right)
        new_left, new_right = xr.align(
            left.isel(x=0, drop=True), right, exclude="x", join="override"
        )
        assert_identical(left.isel(x=0, drop=True), new_left)
        assert_identical(right, new_right)
        # override requires equal sizes along the overridden dimension
        with pytest.raises(ValueError, match=r"Indexes along dimension 'x' don't have"):
            xr.align(left.isel(x=0).expand_dims("x"), right, join="override")
    def test_align_exclude(self):
        """``align(..., exclude=['y'])`` aligns 'x' with an outer join while
        leaving the mismatched 'y' indexes alone."""
        x = Dataset(
            {
                "foo": DataArray(
                    [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4]}
                )
            }
        )
        y = Dataset(
            {
                "bar": DataArray(
                    [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 3], "y": [5, 6]}
                )
            }
        )
        x2, y2 = align(x, y, exclude=["y"], join="outer")
        # 'x' becomes the union [1, 2, 3]; each side keeps its own 'y'
        expected_x2 = Dataset(
            {
                "foo": DataArray(
                    [[1, 2], [3, 4], [np.nan, np.nan]],
                    dims=["x", "y"],
                    coords={"x": [1, 2, 3], "y": [3, 4]},
                )
            }
        )
        expected_y2 = Dataset(
            {
                "bar": DataArray(
                    [[1, 2], [np.nan, np.nan], [3, 4]],
                    dims=["x", "y"],
                    coords={"x": [1, 2, 3], "y": [5, 6]},
                )
            }
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_align_nocopy(self):
        """copy=False shares underlying ndarrays when no reindexing is needed;
        copy=True always duplicates them."""
        x = Dataset({"foo": DataArray([1, 2, 3], coords=[("x", [1, 2, 3])])})
        y = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])})
        expected_x2 = x
        expected_y2 = Dataset(
            {"foo": DataArray([1, 2, np.nan], coords=[("x", [1, 2, 3])])}
        )
        x2, y2 = align(x, y, copy=False, join="outer")
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
        # x needed no reindexing, so its data buffer is shared
        assert source_ndarray(x["foo"].data) is source_ndarray(x2["foo"].data)
        x2, y2 = align(x, y, copy=True, join="outer")
        # with copy=True the buffer must be a fresh copy
        assert source_ndarray(x["foo"].data) is not source_ndarray(x2["foo"].data)
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
def test_align_indexes(self):
x = Dataset({"foo": DataArray([1, 2, 3], dims="x", coords=[("x", [1, 2, 3])])})
(x2,) = align(x, indexes={"x": [2, 3, 1]})
expected_x2 = Dataset(
{"foo": DataArray([2, 3, 1], dims="x", coords={"x": [2, 3, 1]})}
)
assert_identical(expected_x2, x2)
def test_align_non_unique(self):
x = Dataset({"foo": ("x", [3, 4, 5]), "x": [0, 0, 1]})
x1, x2 = align(x, x)
assert_identical(x1, x)
assert_identical(x2, x)
y = Dataset({"bar": ("x", [6, 7]), "x": [0, 1]})
with pytest.raises(ValueError, match=r"cannot reindex or align"):
align(x, y)
def test_align_str_dtype(self):
a = Dataset({"foo": ("x", [0, 1]), "x": ["a", "b"]})
b = Dataset({"foo": ("x", [1, 2]), "x": ["b", "c"]})
expected_a = Dataset({"foo": ("x", [0, 1, np.NaN]), "x": ["a", "b", "c"]})
expected_b = Dataset({"foo": ("x", [np.NaN, 1, 2]), "x": ["a", "b", "c"]})
actual_a, actual_b = xr.align(a, b, join="outer")
assert_identical(expected_a, actual_a)
assert expected_a.x.dtype == actual_a.x.dtype
assert_identical(expected_b, actual_b)
assert expected_b.x.dtype == actual_b.x.dtype
    def test_broadcast(self):
        """Top-level ``broadcast``: one dataset, two datasets, and a mixed
        Dataset/DataArray pair."""
        ds = Dataset(
            {"foo": 0, "bar": ("x", [1]), "baz": ("y", [2, 3])}, {"c": ("x", [4])}
        )
        # broadcasting a single dataset expands every variable to (x, y)
        expected = Dataset(
            {
                "foo": (("x", "y"), [[0, 0]]),
                "bar": (("x", "y"), [[1, 1]]),
                "baz": (("x", "y"), [[2, 3]]),
            },
            {"c": ("x", [4])},
        )
        (actual,) = broadcast(ds)
        assert_identical(expected, actual)
        # two datasets with disjoint dims are broadcast against each other
        ds_x = Dataset({"foo": ("x", [1])})
        ds_y = Dataset({"bar": ("y", [2, 3])})
        expected_x = Dataset({"foo": (("x", "y"), [[1, 1]])})
        expected_y = Dataset({"bar": (("x", "y"), [[2, 3]])})
        actual_x, actual_y = broadcast(ds_x, ds_y)
        assert_identical(expected_x, actual_x)
        assert_identical(expected_y, actual_y)
        # a DataArray operand stays a DataArray in the result
        array_y = ds_y["bar"]
        expected_y = expected_y["bar"]
        actual_x, actual_y = broadcast(ds_x, array_y)
        assert_identical(expected_x, actual_x)
        assert_identical(expected_y, actual_y)
    def test_broadcast_nocopy(self):
        """Broadcasting that needs no expansion shares the underlying ndarray."""
        # Test that data is not copied if not needed
        x = Dataset({"foo": (("x", "y"), [[1, 1]])})
        y = Dataset({"bar": ("y", [2, 3])})
        (actual_x,) = broadcast(x)
        assert_identical(x, actual_x)
        assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data)
        actual_x, actual_y = broadcast(x, y)
        assert_identical(x, actual_x)
        # x already has both dims, so its buffer is reused
        assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data)
    def test_broadcast_exclude(self):
        """Dimensions listed in ``exclude`` keep their own (differing) sizes
        and coordinates while all other dimensions are broadcast."""
        x = Dataset(
            {
                "foo": DataArray(
                    [[1, 2], [3, 4]], dims=["x", "y"], coords={"x": [1, 2], "y": [3, 4]}
                ),
                "bar": DataArray(5),
            }
        )
        y = Dataset(
            {
                "foo": DataArray(
                    [[1, 2]], dims=["z", "y"], coords={"z": [1], "y": [5, 6]}
                )
            }
        )
        x2, y2 = broadcast(x, y, exclude=["y"])
        # "y" differs between x and y but is excluded, so each result keeps
        # its own "y" coordinate; "x" and "z" are broadcast normally.
        expected_x2 = Dataset(
            {
                "foo": DataArray(
                    [[[1, 2]], [[3, 4]]],
                    dims=["x", "z", "y"],
                    coords={"z": [1], "x": [1, 2], "y": [3, 4]},
                ),
                "bar": DataArray(
                    [[5], [5]], dims=["x", "z"], coords={"x": [1, 2], "z": [1]}
                ),
            }
        )
        expected_y2 = Dataset(
            {
                "foo": DataArray(
                    [[[1, 2]], [[1, 2]]],
                    dims=["x", "z", "y"],
                    coords={"z": [1], "x": [1, 2], "y": [5, 6]},
                )
            }
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_broadcast_misaligned(self):
        """Broadcasting datasets with misaligned indexes first outer-aligns
        them (sorting labels, filling missing positions with NaN)."""
        x = Dataset({"foo": DataArray([1, 2, 3], coords=[("x", [-1, -2, -3])])})
        y = Dataset(
            {
                "bar": DataArray(
                    [[1, 2], [3, 4]],
                    dims=["y", "x"],
                    coords={"y": [1, 2], "x": [10, -3]},
                )
            }
        )
        x2, y2 = broadcast(x, y)
        # union of x labels is sorted to [-3, -2, -1, 10]; labels present in
        # only one input become NaN in the other
        expected_x2 = Dataset(
            {
                "foo": DataArray(
                    [[3, 3], [2, 2], [1, 1], [np.nan, np.nan]],
                    dims=["x", "y"],
                    coords={"y": [1, 2], "x": [-3, -2, -1, 10]},
                )
            }
        )
        expected_y2 = Dataset(
            {
                "bar": DataArray(
                    [[2, 4], [np.nan, np.nan], [np.nan, np.nan], [1, 3]],
                    dims=["x", "y"],
                    coords={"y": [1, 2], "x": [-3, -2, -1, 10]},
                )
            }
        )
        assert_identical(expected_x2, x2)
        assert_identical(expected_y2, y2)
    def test_variable_indexing(self):
        """A DataArray can be indexed by arrays, boolean DataArrays, ranges,
        and label-based ``.loc`` — all equivalent to positional slicing."""
        data = create_test_data()
        v = data["var1"]
        d1 = data["dim1"]
        d2 = data["dim2"]
        assert_equal(v, v[d1.values])
        assert_equal(v, v[d1])
        assert_equal(v[:3], v[d1 < 3])
        assert_equal(v[:, 3:], v[:, d2 >= 1.5])
        assert_equal(v[:3, 3:], v[d1 < 3, d2 >= 1.5])
        assert_equal(v[:3, :2], v[range(3), range(2)])
        assert_equal(v[:3, :2], v.loc[d1[:3], d2[:2]])
    def test_drop_variables(self):
        """drop_vars accepts str/list, honors errors="ignore", and the legacy
        ``drop`` alias emits PendingDeprecationWarning."""
        data = create_test_data()
        # dropping nothing is a no-op
        assert_identical(data, data.drop_vars([]))
        expected = Dataset({k: data[k] for k in data.variables if k != "time"})
        actual = data.drop_vars("time")
        assert_identical(expected, actual)
        actual = data.drop_vars(["time"])
        assert_identical(expected, actual)
        with pytest.raises(ValueError, match=r"cannot be found"):
            data.drop_vars("not_found_here")
        # errors="ignore" silently skips missing names
        actual = data.drop_vars("not_found_here", errors="ignore")
        assert_identical(data, actual)
        actual = data.drop_vars(["not_found_here"], errors="ignore")
        assert_identical(data, actual)
        actual = data.drop_vars(["time", "not_found_here"], errors="ignore")
        assert_identical(expected, actual)
        # deprecated Dataset.drop alias still works but warns
        with pytest.warns(PendingDeprecationWarning):
            actual = data.drop("not_found_here", errors="ignore")
        assert_identical(data, actual)
        with pytest.warns(PendingDeprecationWarning):
            actual = data.drop(["not_found_here"], errors="ignore")
        assert_identical(data, actual)
        with pytest.warns(PendingDeprecationWarning):
            actual = data.drop(["time", "not_found_here"], errors="ignore")
        assert_identical(expected, actual)
        with pytest.warns(PendingDeprecationWarning):
            actual = data.drop({"time", "not_found_here"}, errors="ignore")
        assert_identical(expected, actual)
    def test_drop_index_labels(self):
        """Dropping by index label: deprecated ``drop(labels, dim=...)`` and
        its replacement ``drop_sel``; both equivalent to positional isel."""
        data = Dataset({"A": (["x", "y"], np.random.randn(2, 3)), "x": ["a", "b"]})
        with pytest.warns(DeprecationWarning):
            actual = data.drop(["a"], dim="x")
        expected = data.isel(x=[1])
        assert_identical(expected, actual)
        with pytest.warns(DeprecationWarning):
            actual = data.drop(["a", "b"], dim="x")
        expected = data.isel(x=slice(0, 0))
        assert_identical(expected, actual)
        # missing label raises unless errors="ignore"
        with pytest.raises(KeyError):
            with pytest.warns(DeprecationWarning):
                data.drop(["c"], dim="x")
        with pytest.warns(DeprecationWarning):
            actual = data.drop(["c"], dim="x", errors="ignore")
        assert_identical(data, actual)
        with pytest.raises(ValueError):
            with pytest.warns(DeprecationWarning):
                data.drop(["c"], dim="x", errors="wrong_value")
        with pytest.warns(DeprecationWarning):
            actual = data.drop(["a", "b", "c"], "x", errors="ignore")
        expected = data.isel(x=slice(0, 0))
        assert_identical(expected, actual)
        # drop_sel accepts a DataArray of labels
        actual = data.drop_sel(x=DataArray(["a", "b", "c"]), errors="ignore")
        expected = data.isel(x=slice(0, 0))
        assert_identical(expected, actual)
        with pytest.warns(DeprecationWarning):
            data.drop(DataArray(["a", "b", "c"]), dim="x", errors="ignore")
        assert_identical(expected, actual)
        actual = data.drop_sel(y=[1])
        expected = data.isel(y=[0, 2])
        assert_identical(expected, actual)
        # "y" has no index, so label-based dropping fails
        with pytest.raises(KeyError, match=r"not found in axis"):
            data.drop_sel(x=0)
    def test_drop_labels_by_keyword(self):
        """drop_sel keyword form (``x="a"``) and rejection of mixing the
        positional ``labels`` argument with dimension keywords."""
        data = Dataset(
            {"A": (["x", "y"], np.random.randn(2, 6)), "x": ["a", "b"], "y": range(6)}
        )
        # Basic functionality.
        assert len(data.coords["x"]) == 2
        with pytest.warns(DeprecationWarning):
            ds1 = data.drop(["a"], dim="x")
        ds2 = data.drop_sel(x="a")
        ds3 = data.drop_sel(x=["a"])
        ds4 = data.drop_sel(x=["a", "b"])
        ds5 = data.drop_sel(x=["a", "b"], y=range(0, 6, 2))
        # passing a coords/indexes mapping to drop is deprecated
        arr = DataArray(range(3), dims=["c"])
        with pytest.warns(FutureWarning):
            data.drop(arr.coords)
        with pytest.warns(FutureWarning):
            data.drop(arr.xindexes)
        assert_array_equal(ds1.coords["x"], ["b"])
        assert_array_equal(ds2.coords["x"], ["b"])
        assert_array_equal(ds3.coords["x"], ["b"])
        assert ds4.coords["x"].size == 0
        assert ds5.coords["x"].size == 0
        assert_array_equal(ds5.coords["y"], [1, 3, 5])
        # mixing positional labels with dim keywords is an error
        with pytest.raises(ValueError):
            data.drop(labels=["a"], x="a")
        with pytest.raises(ValueError):
            data.drop(labels=["a"], dim="x", x="a")
        # NOTE(review): filterwarnings here mutates global warning state for
        # the rest of the session (no catch_warnings context) — confirm intended
        warnings.filterwarnings("ignore", r"\W*drop")
        with pytest.raises(ValueError):
            data.drop(dim="x", x="a")
    def test_drop_labels_by_position(self):
        """drop_isel (positional) matches drop_sel (label-based) results."""
        data = Dataset(
            {"A": (["x", "y"], np.random.randn(2, 6)), "x": ["a", "b"], "y": range(6)}
        )
        assert len(data.coords["x"]) == 2
        actual = data.drop_isel(x=0)
        expected = data.drop_sel(x="a")
        assert_identical(expected, actual)
        actual = data.drop_isel(x=[0])
        expected = data.drop_sel(x=["a"])
        assert_identical(expected, actual)
        actual = data.drop_isel(x=[0, 1])
        expected = data.drop_sel(x=["a", "b"])
        assert_identical(expected, actual)
        assert actual.coords["x"].size == 0
        actual = data.drop_isel(x=[0, 1], y=range(0, 6, 2))
        expected = data.drop_sel(x=["a", "b"], y=range(0, 6, 2))
        assert_identical(expected, actual)
        assert actual.coords["x"].size == 0
        # unknown dimension raises
        with pytest.raises(KeyError):
            data.drop_isel(z=1)
    def test_drop_dims(self):
        """drop_dims removes every variable that uses the given dimension(s),
        raising for unknown dims unless errors="ignore"."""
        data = xr.Dataset(
            {
                "A": (["x", "y"], np.random.randn(2, 3)),
                "B": ("x", np.random.randn(2)),
                "x": ["a", "b"],
                "z": np.pi,
            }
        )
        actual = data.drop_dims("x")
        expected = data.drop_vars(["A", "B", "x"])
        assert_identical(expected, actual)
        actual = data.drop_dims("y")
        expected = data.drop_vars("A")
        assert_identical(expected, actual)
        actual = data.drop_dims(["x", "y"])
        expected = data.drop_vars(["A", "B", "x"])
        assert_identical(expected, actual)
        # "z" is a scalar variable, not a dimension
        with pytest.raises((ValueError, KeyError)):
            data.drop_dims("z")
        with pytest.raises((ValueError, KeyError)):
            data.drop_dims(None)
        actual = data.drop_dims("z", errors="ignore")
        assert_identical(data, actual)
        actual = data.drop_dims(None, errors="ignore")
        assert_identical(data, actual)
        with pytest.raises(ValueError):
            actual = data.drop_dims("z", errors="wrong_value")
        actual = data.drop_dims(["x", "y", "z"], errors="ignore")
        expected = data.drop_vars(["A", "B", "x"])
        assert_identical(expected, actual)
    def test_copy(self):
        """Shallow copies share ndarrays and attr objects; deep copies share
        neither. New variables/attrs on a copy never leak to the original."""
        data = create_test_data()
        data.attrs["Test"] = [1, 2, 3]
        for copied in [data.copy(deep=False), copy(data)]:
            assert_identical(data, copied)
            assert data.encoding == copied.encoding
            # shallow copy: same underlying arrays
            for k in data.data_vars:
                v0 = data.variables[k]
                v1 = copied.variables[k]
                assert source_ndarray(v0.data) is source_ndarray(v1.data)
            copied["foo"] = ("z", np.arange(5))
            assert "foo" not in data
            copied.attrs["foo"] = "bar"
            assert "foo" not in data.attrs
            # shallow copy shares mutable attr values
            assert data.attrs["Test"] is copied.attrs["Test"]
        for copied in [data.copy(deep=True), deepcopy(data)]:
            assert_identical(data, copied)
            # deep copy: distinct variable objects and attr values
            for k, v0 in data.variables.items():
                v1 = copied.variables[k]
                assert v0 is not v1
            assert data.attrs["Test"] is not copied.attrs["Test"]
    def test_copy_with_data(self):
        """copy(data=...) replaces each variable's values while keeping the
        dataset structure (dims, coords, attrs) intact."""
        orig = create_test_data()
        new_data = {k: np.random.randn(*v.shape) for k, v in orig.data_vars.items()}
        actual = orig.copy(data=new_data)
        expected = orig.copy()
        for k, v in new_data.items():
            expected[k].data = v
        assert_identical(expected, actual)
    # xfail: mutating an IndexVariable's data on a copy currently behaves the
    # same regardless of deep=, so one of the parametrized expectations fails.
    @pytest.mark.xfail(raises=AssertionError)
    @pytest.mark.parametrize(
        "deep, expected_orig",
        [
            [
                True,
                xr.DataArray(
                    xr.IndexVariable("a", np.array([1, 2])),
                    coords={"a": [1, 2]},
                    dims=["a"],
                ),
            ],
            [
                False,
                xr.DataArray(
                    xr.IndexVariable("a", np.array([999, 2])),
                    coords={"a": [999, 2]},
                    dims=["a"],
                ),
            ],
        ],
    )
    def test_copy_coords(self, deep, expected_orig):
        """Mutating a coordinate on a copy: deep copies isolate the original;
        shallow copies (should) propagate the change back."""
        ds = xr.DataArray(
            np.ones([2, 2, 2]),
            coords={"a": [1, 2], "b": ["x", "y"], "c": [0, 1]},
            dims=["a", "b", "c"],
            name="value",
        ).to_dataset()
        ds_cp = ds.copy(deep=deep)
        ds_cp.coords["a"].data[0] = 999
        expected_cp = xr.DataArray(
            xr.IndexVariable("a", np.array([999, 2])),
            coords={"a": [999, 2]},
            dims=["a"],
        )
        assert_identical(ds_cp.coords["a"], expected_cp)
        assert_identical(ds.coords["a"], expected_orig)
    def test_copy_with_data_errors(self):
        """copy(data=...) validates its argument: must be dict-like and must
        cover exactly the original's variables."""
        orig = create_test_data()
        new_var1 = np.arange(orig["var1"].size).reshape(orig["var1"].shape)
        with pytest.raises(ValueError, match=r"Data must be dict-like"):
            orig.copy(data=new_var1)
        with pytest.raises(ValueError, match=r"only contain variables in original"):
            orig.copy(data={"not_in_original": new_var1})
        with pytest.raises(ValueError, match=r"contain all variables in original"):
            orig.copy(data={"var1": new_var1})
    def test_rename(self):
        """rename renames variables and dimensions together, preserves data,
        encoding, variable type, and laziness; invalid renames raise."""
        data = create_test_data()
        newnames = {"var1": "renamed_var1", "dim2": "renamed_dim2"}
        renamed = data.rename(newnames)

        # build the expected variable mapping by hand
        variables = dict(data.variables)
        for k, v in newnames.items():
            variables[v] = variables.pop(k)

        for k, v in variables.items():
            dims = list(v.dims)
            for name, newname in newnames.items():
                if name in dims:
                    dims[dims.index(name)] = newname

            assert_equal(
                Variable(dims, v.values, v.attrs),
                renamed[k].variable.to_base_variable(),
            )
            assert v.encoding == renamed[k].encoding
            assert type(v) is type(renamed.variables[k])

        assert "var1" not in renamed
        assert "dim2" not in renamed

        with pytest.raises(ValueError, match=r"cannot rename 'not_a_var'"):
            data.rename({"not_a_var": "nada"})

        with pytest.raises(ValueError, match=r"'var1' conflicts"):
            data.rename({"var2": "var1"})

        # rename must be lazy: the inaccessible array's values stay untouched
        var1 = data["var1"]
        data["var1"] = (var1.dims, InaccessibleArray(var1.values))
        renamed = data.rename(newnames)
        with pytest.raises(UnexpectedDataAccess):
            renamed["renamed_var1"].values

        # keyword form is equivalent to the mapping form
        renamed_kwargs = data.rename(**newnames)
        assert_identical(renamed, renamed_kwargs)
    def test_rename_old_name(self):
        """Renaming two variables to the same name raises; swapping two
        variable names in one call is allowed."""
        data = create_test_data()
        with pytest.raises(ValueError, match=r"'samecol' conflicts"):
            data.rename({"var1": "samecol", "var2": "samecol"})
        # swapping names simultaneously should not raise
        data.rename({"var1": "var2", "var2": "var1"})
    def test_rename_same_name(self):
        """Renaming names to themselves is a no-op."""
        data = create_test_data()
        newnames = {"var1": "var1", "dim2": "dim2"}
        renamed = data.rename(newnames)
        assert_identical(renamed, data)
    def test_rename_dims(self):
        """rename_dims renames only dimensions (variable names unchanged);
        unknown dims or collisions with existing names raise."""
        original = Dataset({"x": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42})
        expected = Dataset(
            {"x": ("x_new", [0, 1, 2]), "y": ("x_new", [10, 11, 12]), "z": 42}
        )
        # "x" loses its dimension so it becomes a non-index coordinate
        expected = expected.set_coords("x")
        dims_dict = {"x": "x_new"}
        actual = original.rename_dims(dims_dict)
        assert_identical(expected, actual)
        actual_2 = original.rename_dims(**dims_dict)
        assert_identical(expected, actual_2)

        # Test to raise ValueError
        dims_dict_bad = {"x_bad": "x_new"}
        with pytest.raises(ValueError):
            original.rename_dims(dims_dict_bad)

        with pytest.raises(ValueError):
            original.rename_dims({"x": "z"})
    def test_rename_vars(self):
        """rename_vars renames only variables (dimension names unchanged);
        unknown variable names raise."""
        original = Dataset({"x": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42})
        expected = Dataset(
            {"x_new": ("x", [0, 1, 2]), "y": ("x", [10, 11, 12]), "z": 42}
        )
        # "x_new" no longer matches its dimension, so it is a plain coordinate
        expected = expected.set_coords("x_new")
        name_dict = {"x": "x_new"}
        actual = original.rename_vars(name_dict)
        assert_identical(expected, actual)
        actual_2 = original.rename_vars(**name_dict)
        assert_identical(expected, actual_2)

        # Test to raise ValueError
        names_dict_bad = {"x_bad": "x_new"}
        with pytest.raises(ValueError):
            original.rename_vars(names_dict_bad)
    def test_rename_multiindex(self):
        """Renaming a MultiIndex coordinate to one of its own level names
        raises a conflict error."""
        mindex = pd.MultiIndex.from_tuples(
            [([1, 2]), ([3, 4])], names=["level0", "level1"]
        )
        data = Dataset({}, {"x": mindex})
        with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
            data.rename({"x": "level0"})
    @requires_cftime
    def test_rename_does_not_change_CFTimeIndex_type(self):
        # make sure CFTimeIndex is not converted to DatetimeIndex #3522

        time = xr.cftime_range(start="2000", periods=6, freq="2MS", calendar="noleap")
        orig = Dataset(coords={"time": time})

        renamed = orig.rename(time="time_new")
        assert "time_new" in renamed.xindexes
        # TODO: benbovy - flexible indexes: update when CFTimeIndex
        # inherits from xarray.Index
        assert isinstance(renamed.xindexes["time_new"].to_pandas_index(), CFTimeIndex)
        assert renamed.xindexes["time_new"].to_pandas_index().name == "time_new"

        # check original has not changed
        assert "time" in orig.xindexes
        assert isinstance(orig.xindexes["time"].to_pandas_index(), CFTimeIndex)
        assert orig.xindexes["time"].to_pandas_index().name == "time"

        # note: rename_dims(time="time_new") drops "ds.indexes"
        renamed = orig.rename_dims()
        assert isinstance(renamed.xindexes["time"].to_pandas_index(), CFTimeIndex)

        renamed = orig.rename_vars()
        assert isinstance(renamed.xindexes["time"].to_pandas_index(), CFTimeIndex)
    def test_rename_does_not_change_DatetimeIndex_type(self):
        # make sure DatetimeIndex is conserved on rename

        time = pd.date_range(start="2000", periods=6, freq="2MS")
        orig = Dataset(coords={"time": time})

        renamed = orig.rename(time="time_new")
        assert "time_new" in renamed.xindexes
        # TODO: benbovy - flexible indexes: update when DatetimeIndex
        # inherits from xarray.Index?
        assert isinstance(renamed.xindexes["time_new"].to_pandas_index(), DatetimeIndex)
        assert renamed.xindexes["time_new"].to_pandas_index().name == "time_new"

        # check original has not changed
        assert "time" in orig.xindexes
        assert isinstance(orig.xindexes["time"].to_pandas_index(), DatetimeIndex)
        assert orig.xindexes["time"].to_pandas_index().name == "time"

        # note: rename_dims(time="time_new") drops "ds.indexes"
        renamed = orig.rename_dims()
        assert isinstance(renamed.xindexes["time"].to_pandas_index(), DatetimeIndex)

        renamed = orig.rename_vars()
        assert isinstance(renamed.xindexes["time"].to_pandas_index(), DatetimeIndex)
    def test_swap_dims(self):
        """swap_dims exchanges a dimension for a coordinate (which becomes the
        new index), round-trips, validates arguments, and handles MultiIndex."""
        original = Dataset({"x": [1, 2, 3], "y": ("x", list("abc")), "z": 42})
        expected = Dataset({"z": 42}, {"x": ("y", [1, 2, 3]), "y": list("abc")})
        actual = original.swap_dims({"x": "y"})
        assert_identical(expected, actual)
        # "y" becomes the index variable, "x" a plain variable
        assert isinstance(actual.variables["y"], IndexVariable)
        assert isinstance(actual.variables["x"], Variable)
        pd.testing.assert_index_equal(
            actual.xindexes["y"].to_pandas_index(),
            expected.xindexes["y"].to_pandas_index(),
        )

        roundtripped = actual.swap_dims({"y": "x"})
        assert_identical(original.set_coords("y"), roundtripped)

        with pytest.raises(ValueError, match=r"cannot swap"):
            original.swap_dims({"y": "x"})
        with pytest.raises(ValueError, match=r"replacement dimension"):
            original.swap_dims({"x": "z"})

        # swapping to a name that is not an existing variable creates a
        # dimension without an index
        expected = Dataset(
            {"y": ("u", list("abc")), "z": 42}, coords={"x": ("u", [1, 2, 3])}
        )
        actual = original.swap_dims({"x": "u"})
        assert_identical(expected, actual)

        # as kwargs
        expected = Dataset(
            {"y": ("u", list("abc")), "z": 42}, coords={"x": ("u", [1, 2, 3])}
        )
        actual = original.swap_dims(x="u")
        assert_identical(expected, actual)

        # handle multiindex case
        idx = pd.MultiIndex.from_arrays([list("aab"), list("yzz")], names=["y1", "y2"])
        original = Dataset({"x": [1, 2, 3], "y": ("x", idx), "z": 42})
        expected = Dataset({"z": 42}, {"x": ("y", [1, 2, 3]), "y": idx})
        actual = original.swap_dims({"x": "y"})
        assert_identical(expected, actual)
        assert isinstance(actual.variables["y"], IndexVariable)
        assert isinstance(actual.variables["x"], Variable)
        pd.testing.assert_index_equal(
            actual.xindexes["y"].to_pandas_index(),
            expected.xindexes["y"].to_pandas_index(),
        )
    def test_expand_dims_error(self):
        """expand_dims rejects existing dimensions/coordinates, non-iterable
        scalar sizes, and mixing dict and keyword arguments."""
        original = Dataset(
            {
                "x": ("a", np.random.randn(3)),
                "y": (["b", "a"], np.random.randn(4, 3)),
                "z": ("a", np.random.randn(3)),
            },
            coords={
                "a": np.linspace(0, 1, 3),
                "b": np.linspace(0, 1, 4),
                "c": np.linspace(0, 1, 5),
            },
            attrs={"key": "entry"},
        )

        with pytest.raises(ValueError, match=r"already exists"):
            original.expand_dims(dim=["x"])

        # Make sure it raises true error also for non-dimensional coordinates
        # which has dimension.
        original = original.set_coords("z")
        with pytest.raises(ValueError, match=r"already exists"):
            original.expand_dims(dim=["z"])

        original = Dataset(
            {
                "x": ("a", np.random.randn(3)),
                "y": (["b", "a"], np.random.randn(4, 3)),
                "z": ("a", np.random.randn(3)),
            },
            coords={
                "a": np.linspace(0, 1, 3),
                "b": np.linspace(0, 1, 4),
                "c": np.linspace(0, 1, 5),
            },
            attrs={"key": "entry"},
        )
        # a float is neither an int size nor an iterable of coordinate values
        with pytest.raises(TypeError, match=r"value of new dimension"):
            original.expand_dims({"d": 3.2})

        with pytest.raises(ValueError, match=r"both keyword and positional"):
            original.expand_dims({"d": 4}, e=4)
    def test_expand_dims_int(self):
        """expand_dims with an explicit axis position (positive and negative)
        inserts a size-1 dimension that squeeze() can undo."""
        original = Dataset(
            {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))},
            coords={
                "a": np.linspace(0, 1, 3),
                "b": np.linspace(0, 1, 4),
                "c": np.linspace(0, 1, 5),
            },
            attrs={"key": "entry"},
        )

        actual = original.expand_dims(["z"], [1])
        expected = Dataset(
            {
                "x": original["x"].expand_dims("z", 1),
                "y": original["y"].expand_dims("z", 1),
            },
            coords={
                "a": np.linspace(0, 1, 3),
                "b": np.linspace(0, 1, 4),
                "c": np.linspace(0, 1, 5),
            },
            attrs={"key": "entry"},
        )
        assert_identical(expected, actual)
        # make sure squeeze restores the original data set.
        roundtripped = actual.squeeze("z")
        assert_identical(original, roundtripped)

        # another test with a negative axis
        actual = original.expand_dims(["z"], [-1])
        expected = Dataset(
            {
                "x": original["x"].expand_dims("z", -1),
                "y": original["y"].expand_dims("z", -1),
            },
            coords={
                "a": np.linspace(0, 1, 3),
                "b": np.linspace(0, 1, 4),
                "c": np.linspace(0, 1, 5),
            },
            attrs={"key": "entry"},
        )
        assert_identical(expected, actual)
        # make sure squeeze restores the original data set.
        roundtripped = actual.squeeze("z")
        assert_identical(original, roundtripped)
    def test_expand_dims_coords(self):
        """Expanding with coordinate values tiles the data along the new
        dimension without mutating the original's coord names."""
        original = Dataset({"x": ("a", np.array([1, 2, 3]))})
        expected = Dataset(
            {"x": (("b", "a"), np.array([[1, 2, 3], [1, 2, 3]]))}, coords={"b": [1, 2]}
        )
        actual = original.expand_dims(dict(b=[1, 2]))
        assert_identical(expected, actual)
        assert "b" not in original._coord_names
    def test_expand_dims_existing_scalar_coord(self):
        """A scalar coordinate can be promoted to a length-1 dimension."""
        original = Dataset({"x": 1}, {"a": 2})
        expected = Dataset({"x": (("a",), [1])}, {"a": [2]})
        actual = original.expand_dims("a")
        assert_identical(expected, actual)
    def test_isel_expand_dims_roundtrip(self):
        """isel to a scalar followed by expand_dims restores the original."""
        original = Dataset({"x": (("a",), [1])}, {"a": [2]})
        actual = original.isel(a=0).expand_dims("a")
        assert_identical(actual, original)
    def test_expand_dims_mixed_int_and_coords(self):
        # Test expanding one dimension to have size > 1 that doesn't have
        # coordinates, and one that does (given as coordinate values).
        original = Dataset(
            {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))},
            coords={
                "a": np.linspace(0, 1, 3),
                "b": np.linspace(0, 1, 4),
                "c": np.linspace(0, 1, 5),
            },
        )

        actual = original.expand_dims({"d": 4, "e": ["l", "m", "n"]})

        # "d" was given as a bare int, so it gets no coordinate (drop_vars("d"))
        expected = Dataset(
            {
                "x": xr.DataArray(
                    original["x"].values * np.ones([4, 3, 3]),
                    coords=dict(d=range(4), e=["l", "m", "n"], a=np.linspace(0, 1, 3)),
                    dims=["d", "e", "a"],
                ).drop_vars("d"),
                "y": xr.DataArray(
                    original["y"].values * np.ones([4, 3, 4, 3]),
                    coords=dict(
                        d=range(4),
                        e=["l", "m", "n"],
                        b=np.linspace(0, 1, 4),
                        a=np.linspace(0, 1, 3),
                    ),
                    dims=["d", "e", "b", "a"],
                ).drop_vars("d"),
            },
            coords={"c": np.linspace(0, 1, 5)},
        )
        assert_identical(actual, expected)
    def test_expand_dims_kwargs_python36plus(self):
        """expand_dims accepts new-dimension coordinates as keyword arguments
        (relies on ordered kwargs, Python 3.6+)."""
        original = Dataset(
            {"x": ("a", np.random.randn(3)), "y": (["b", "a"], np.random.randn(4, 3))},
            coords={
                "a": np.linspace(0, 1, 3),
                "b": np.linspace(0, 1, 4),
                "c": np.linspace(0, 1, 5),
            },
            attrs={"key": "entry"},
        )
        other_way = original.expand_dims(e=["l", "m", "n"])
        other_way_expected = Dataset(
            {
                "x": xr.DataArray(
                    original["x"].values * np.ones([3, 3]),
                    coords=dict(e=["l", "m", "n"], a=np.linspace(0, 1, 3)),
                    dims=["e", "a"],
                ),
                "y": xr.DataArray(
                    original["y"].values * np.ones([3, 4, 3]),
                    coords=dict(
                        e=["l", "m", "n"],
                        b=np.linspace(0, 1, 4),
                        a=np.linspace(0, 1, 3),
                    ),
                    dims=["e", "b", "a"],
                ),
            },
            coords={"c": np.linspace(0, 1, 5)},
            attrs={"key": "entry"},
        )
        assert_identical(other_way_expected, other_way)
    def test_set_index(self):
        """set_index builds a (Multi)Index from existing variables and raises
        a clear error for unknown variable names."""
        expected = create_test_multiindex()
        mindex = expected["x"].to_index()
        indexes = [mindex.get_level_values(n) for n in mindex.names]
        coords = {idx.name: ("x", idx) for idx in indexes}
        ds = Dataset({}, coords=coords)

        obj = ds.set_index(x=mindex.names)
        assert_identical(obj, expected)

        # setting a single variable as index promotes it to a coordinate
        ds = Dataset(data_vars={"x_var": ("x", [0, 1, 2])})
        expected = Dataset(coords={"x": [0, 1, 2]})
        assert_identical(ds.set_index(x="x_var"), expected)

        # Issue 3176: Ensure clear error message on key error.
        with pytest.raises(ValueError) as excinfo:
            ds.set_index(foo="bar")
        assert str(excinfo.value) == "bar is not the name of an existing variable."
    def test_reset_index(self):
        """reset_index dismantles a MultiIndex back into per-level coords."""
        ds = create_test_multiindex()
        mindex = ds["x"].to_index()
        indexes = [mindex.get_level_values(n) for n in mindex.names]
        coords = {idx.name: ("x", idx) for idx in indexes}
        expected = Dataset({}, coords=coords)

        obj = ds.reset_index("x")
        assert_identical(obj, expected)
    def test_reset_index_keep_attrs(self):
        """reset_index keeps coordinate attrs; the demoted coordinate is
        renamed with a trailing underscore."""
        coord_1 = DataArray([1, 2], dims=["coord_1"], attrs={"attrs": True})
        ds = Dataset({}, {"coord_1": coord_1})
        expected = Dataset({}, {"coord_1_": coord_1})
        obj = ds.reset_index("coord_1")
        assert_identical(expected, obj)
    def test_reorder_levels(self):
        """reorder_levels permutes MultiIndex levels and raises when the
        coordinate has no MultiIndex."""
        ds = create_test_multiindex()
        mindex = ds["x"].to_index()
        midx = mindex.reorder_levels(["level_2", "level_1"])
        expected = Dataset({}, coords={"x": midx})

        reindexed = ds.reorder_levels(x=["level_2", "level_1"])
        assert_identical(reindexed, expected)

        ds = Dataset({}, coords={"x": [1, 2]})
        with pytest.raises(ValueError, match=r"has no MultiIndex"):
            ds.reorder_levels(x=["level_1", "level_2"])
    def test_stack(self):
        """stack collapses dims into a MultiIndex; ellipsis expands to all
        remaining dims, and the dim order controls the index level order."""
        ds = Dataset(
            {"a": ("x", [0, 1]), "b": (("x", "y"), [[0, 1], [2, 3]]), "y": ["a", "b"]}
        )

        exp_index = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["x", "y"])
        expected = Dataset(
            {"a": ("z", [0, 0, 1, 1]), "b": ("z", [0, 1, 2, 3]), "z": exp_index}
        )
        actual = ds.stack(z=["x", "y"])
        assert_identical(expected, actual)

        actual = ds.stack(z=[...])
        assert_identical(expected, actual)

        # non list dims with ellipsis
        actual = ds.stack(z=(...,))
        assert_identical(expected, actual)

        # ellipsis with given dim
        actual = ds.stack(z=[..., "y"])
        assert_identical(expected, actual)

        # reversed dim order => reversed level order and data layout
        exp_index = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["y", "x"])
        expected = Dataset(
            {"a": ("z", [0, 1, 0, 1]), "b": ("z", [0, 2, 1, 3]), "z": exp_index}
        )
        actual = ds.stack(z=["y", "x"])
        assert_identical(expected, actual)
    def test_unstack(self):
        """unstack expands a MultiIndex dim; accepts a str, list, or None
        (meaning all stacked dims)."""
        index = pd.MultiIndex.from_product([[0, 1], ["a", "b"]], names=["x", "y"])
        ds = Dataset({"b": ("z", [0, 1, 2, 3]), "z": index})
        expected = Dataset(
            {"b": (("x", "y"), [[0, 1], [2, 3]]), "x": [0, 1], "y": ["a", "b"]}
        )
        for dim in ["z", ["z"], None]:
            actual = ds.unstack(dim)
            assert_identical(actual, expected)
    def test_unstack_errors(self):
        """unstack raises for unknown dims and for dims without a MultiIndex."""
        ds = Dataset({"x": [1, 2, 3]})
        with pytest.raises(ValueError, match=r"does not contain the dimensions"):
            ds.unstack("foo")
        with pytest.raises(ValueError, match=r"do not have a MultiIndex"):
            ds.unstack("x")
    def test_unstack_fill_value(self):
        """fill_value (scalar or per-variable dict) fills positions missing
        from the stacked index, preserving the integer dtype."""
        ds = xr.Dataset(
            {"var": (("x",), np.arange(6)), "other_var": (("x",), np.arange(3, 9))},
            coords={"x": [0, 1, 2] * 2, "y": (("x",), ["a"] * 3 + ["b"] * 3)},
        )
        # make ds incomplete
        ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=["x", "y"])
        # test fill_value
        actual = ds.unstack("index", fill_value=-1)
        expected = ds.unstack("index").fillna(-1).astype(int)
        assert actual["var"].dtype == int
        assert_equal(actual, expected)

        actual = ds["var"].unstack("index", fill_value=-1)
        expected = ds["var"].unstack("index").fillna(-1).astype(int)
        assert_equal(actual, expected)

        actual = ds.unstack("index", fill_value={"var": -1, "other_var": 1})
        expected = ds.unstack("index").fillna({"var": -1, "other_var": 1}).astype(int)
        assert_equal(actual, expected)
    @requires_sparse
    def test_unstack_sparse(self):
        """unstack(sparse=True) yields sparse arrays (density < 1) that
        densify back to the dense unstack result."""
        ds = xr.Dataset(
            {"var": (("x",), np.arange(6))},
            coords={"x": [0, 1, 2] * 2, "y": (("x",), ["a"] * 3 + ["b"] * 3)},
        )
        # make ds incomplete
        ds = ds.isel(x=[0, 2, 3, 4]).set_index(index=["x", "y"])
        # test fill_value
        actual = ds.unstack("index", sparse=True)
        expected = ds.unstack("index")
        assert actual["var"].variable._to_dense().equals(expected["var"].variable)
        assert actual["var"].data.density < 1.0

        actual = ds["var"].unstack("index", sparse=True)
        expected = ds["var"].unstack("index")
        assert actual.variable._to_dense().equals(expected.variable)
        assert actual.data.density < 1.0
    def test_stack_unstack_fast(self):
        """stack followed immediately by unstack round-trips (fast path with
        a complete, in-order index)."""
        ds = Dataset(
            {
                "a": ("x", [0, 1]),
                "b": (("x", "y"), [[0, 1], [2, 3]]),
                "x": [0, 1],
                "y": ["a", "b"],
            }
        )
        actual = ds.stack(z=["x", "y"]).unstack("z")
        assert actual.broadcast_equals(ds)

        actual = ds[["b"]].stack(z=["x", "y"]).unstack("z")
        assert actual.identical(ds[["b"]])
    def test_stack_unstack_slow(self):
        """Round-trip through a reversed stacked index (slow path: index not
        in its natural order)."""
        ds = Dataset(
            {
                "a": ("x", [0, 1]),
                "b": (("x", "y"), [[0, 1], [2, 3]]),
                "x": [0, 1],
                "y": ["a", "b"],
            }
        )
        stacked = ds.stack(z=["x", "y"])
        actual = stacked.isel(z=slice(None, None, -1)).unstack("z")
        assert actual.broadcast_equals(ds)

        stacked = ds[["b"]].stack(z=["x", "y"])
        actual = stacked.isel(z=slice(None, None, -1)).unstack("z")
        assert actual.identical(ds[["b"]])
    def test_to_stacked_array_invalid_sample_dims(self):
        """sample_dims must be shared by every variable; "y" is missing from
        variable "b", so this raises."""
        data = xr.Dataset(
            data_vars={"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])},
            coords={"y": ["u", "v", "w"]},
        )
        with pytest.raises(ValueError):
            data.to_stacked_array("features", sample_dims=["y"])
    def test_to_stacked_array_name(self):
        """The ``name`` argument sets the resulting DataArray's name."""
        name = "adf9d"

        # make a two dimensional dataset
        a, b = create_test_stacked_array()
        D = xr.Dataset({"a": a, "b": b})
        sample_dims = ["x"]

        y = D.to_stacked_array("features", sample_dims, name=name)
        assert y.name == name
    def test_to_stacked_array_dtype_dims(self):
        """The stacked "features" MultiIndex keeps the source coordinate dtype
        and the result has (sample_dims, new_dim) dimensions."""
        # make a two dimensional dataset
        a, b = create_test_stacked_array()
        D = xr.Dataset({"a": a, "b": b})
        sample_dims = ["x"]
        y = D.to_stacked_array("features", sample_dims)
        # TODO: benbovy - flexible indexes: update when MultiIndex has its own class
        # inherited from xarray.Index
        assert y.xindexes["features"].to_pandas_index().levels[1].dtype == D.y.dtype
        assert y.dims == ("x", "features")
    def test_to_stacked_array_to_unstacked_dataset(self):
        """to_stacked_array and to_unstacked_dataset are inverses, including
        for a single selected sample."""
        # single dimension: regression test for GH4049
        arr = xr.DataArray(np.arange(3), coords=[("x", [0, 1, 2])])
        data = xr.Dataset({"a": arr, "b": arr})
        stacked = data.to_stacked_array("y", sample_dims=["x"])
        unstacked = stacked.to_unstacked_dataset("y")
        assert_identical(unstacked, data)

        # make a two dimensional dataset
        a, b = create_test_stacked_array()
        D = xr.Dataset({"a": a, "b": b})
        sample_dims = ["x"]
        y = D.to_stacked_array("features", sample_dims).transpose("x", "features")

        x = y.to_unstacked_dataset("features")
        assert_identical(D, x)

        # test on just one sample
        x0 = y[0].to_unstacked_dataset("features")
        d0 = D.isel(x=0)
        assert_identical(d0, x0)
    def test_to_stacked_array_to_unstacked_dataset_different_dimension(self):
        # test when variables have different dimensionality
        a, b = create_test_stacked_array()
        sample_dims = ["x"]
        # "b" is reduced to 1-D so the two variables differ in rank
        D = xr.Dataset({"a": a, "b": b.isel(y=0)})

        y = D.to_stacked_array("features", sample_dims)
        x = y.to_unstacked_dataset("features")
        assert_identical(D, x)
    def test_update(self):
        """update adds/overwrites variables in place, returns the same object,
        and does NOT merge attrs from the other dataset."""
        data = create_test_data(seed=0)
        expected = data.copy()
        var2 = Variable("dim1", np.arange(8))
        actual = data.update({"var2": var2})
        expected["var2"] = var2
        assert_identical(expected, actual)

        actual = data.copy()
        actual_result = actual.update(data)
        # update is in place and returns self
        assert actual_result is actual
        assert_identical(expected, actual)

        # attrs of the other dataset are ignored
        other = Dataset(attrs={"new": "attr"})
        actual = data.copy()
        actual.update(other)
        assert_identical(expected, actual)
    def test_update_overwrite_coords(self):
        """Coordinates from a Dataset argument overwrite existing coords, but
        coords attached to a DataArray value do not."""
        data = Dataset({"a": ("x", [1, 2])}, {"b": 3})
        data.update(Dataset(coords={"b": 4}))
        expected = Dataset({"a": ("x", [1, 2])}, {"b": 4})
        assert_identical(data, expected)

        data = Dataset({"a": ("x", [1, 2])}, {"b": 3})
        data.update(Dataset({"c": 5}, coords={"b": 4}))
        expected = Dataset({"a": ("x", [1, 2]), "c": 5}, {"b": 4})
        assert_identical(data, expected)

        # DataArray's own coords don't clobber the dataset's ("b" stays 3)
        data = Dataset({"a": ("x", [1, 2])}, {"b": 3})
        data.update({"c": DataArray(5, coords={"b": 4})})
        expected = Dataset({"a": ("x", [1, 2]), "c": 5}, {"b": 3})
        assert_identical(data, expected)
    def test_update_auto_align(self):
        """update aligns Dataset arguments to the caller's index (filling with
        NaN), but raw dicts are not aligned and raise on size mismatch."""
        ds = Dataset({"x": ("t", [3, 4])}, {"t": [0, 1]})

        expected = Dataset({"x": ("t", [3, 4]), "y": ("t", [np.nan, 5])}, {"t": [0, 1]})
        actual = ds.copy()
        other = {"y": ("t", [5]), "t": [1]}
        # update with a dict does not align => conflicting sizes
        with pytest.raises(ValueError, match=r"conflicting sizes"):
            actual.update(other)
        # wrapping in a Dataset enables automatic alignment
        actual.update(Dataset(other))
        assert_identical(expected, actual)

        actual = ds.copy()
        other = Dataset({"y": ("t", [5]), "t": [100]})
        actual.update(other)
        # label 100 is not in ds's index, so "y" is all-NaN after alignment
        expected = Dataset(
            {"x": ("t", [3, 4]), "y": ("t", [np.nan] * 2)}, {"t": [0, 1]}
        )
        assert_identical(expected, actual)
    def test_getitem(self):
        """__getitem__ supports variable names (DataArray), name lists
        (sub-Dataset), and dim-indexer dicts (equivalent to isel)."""
        data = create_test_data()
        assert isinstance(data["var1"], DataArray)
        assert_equal(data["var1"].variable, data.variables["var1"])
        with pytest.raises(KeyError):
            data["notfound"]
        with pytest.raises(KeyError):
            data[["var1", "notfound"]]

        # list of names selects a sub-dataset
        actual = data[["var1", "var2"]]
        expected = Dataset({"var1": data["var1"], "var2": data["var2"]})
        assert_equal(expected, actual)

        # selecting a coordinate yields a DataArray carrying its coords
        actual = data["numbers"]
        expected = DataArray(
            data["numbers"].variable,
            {"dim3": data["dim3"], "numbers": data["numbers"]},
            dims="dim3",
            name="numbers",
        )
        assert_identical(expected, actual)

        # dict of dim indexers behaves like isel
        actual = data[dict(dim1=0)]
        expected = data.isel(dim1=0)
        assert_identical(expected, actual)
    def test_getitem_hashable(self):
        """Any hashable (here a tuple) works as a variable key, but a tuple of
        existing names is not treated as a list selection."""
        data = create_test_data()
        data[(3, 4)] = data["var1"] + 1
        expected = data["var1"] + 1
        expected.name = (3, 4)
        assert_identical(expected, data[(3, 4)])
        with pytest.raises(KeyError, match=r"('var1', 'var2')"):
            data[("var1", "var2")]
    def test_virtual_variables_default_coords(self):
        """Accessing an unset dimension name yields a default range index."""
        dataset = Dataset({"foo": ("x", range(10))})
        expected = DataArray(range(10), dims="x", name="x")
        actual = dataset["x"]
        assert_identical(expected, actual)
        assert isinstance(actual.variable, IndexVariable)

        actual = dataset[["x", "foo"]]
        expected = dataset.assign_coords(x=range(10))
        assert_identical(expected, actual)
    def test_virtual_variables_time(self):
        """Datetime components ("time.month" etc.) are accessible as virtual
        variables, usable in math, and become coordinates on selection."""
        # access virtual variables
        data = create_test_data()
        expected = DataArray(
            1 + np.arange(20), coords=[data["time"]], dims="time", name="dayofyear"
        )

        assert_array_equal(
            data["time.month"].values, data.variables["time"].to_index().month
        )
        assert_array_equal(data["time.season"].values, "DJF")
        # test virtual variable math
        assert_array_equal(data["time.dayofyear"] + 1, 2 + np.arange(20))
        assert_array_equal(np.sin(data["time.dayofyear"]), np.sin(1 + np.arange(20)))
        # ensure they become coordinates
        expected = Dataset({}, {"dayofyear": data["time.dayofyear"]})
        actual = data[["time.dayofyear"]]
        assert_equal(expected, actual)
        # non-coordinate variables
        ds = Dataset({"t": ("x", pd.date_range("2000-01-01", periods=3))})
        assert (ds["t.year"] == 2000).all()
    def test_virtual_variable_same_name(self):
        # regression test for GH367
        times = pd.date_range("2000-01-01", freq="H", periods=5)
        data = Dataset({"time": times})
        # "time.time" extracts the time-of-day component of the "time" coord
        actual = data["time.time"]
        expected = DataArray(times.time, [("time", times)], name="time")
        assert_identical(actual, expected)
    def test_virtual_variable_multiindex(self):
        """MultiIndex levels act as virtual variables, compose with datetime
        accessors, and support attribute-style access."""
        # access multi-index levels as virtual variables
        data = create_test_multiindex()
        expected = DataArray(
            ["a", "a", "b", "b"],
            name="level_1",
            coords=[data["x"].to_index()],
            dims="x",
        )
        assert_identical(expected, data["level_1"])

        # combine multi-index level and datetime
        dr_index = pd.date_range("1/1/2011", periods=4, freq="H")
        mindex = pd.MultiIndex.from_arrays(
            [["a", "a", "b", "b"], dr_index], names=("level_str", "level_date")
        )
        data = Dataset({}, {"x": mindex})
        expected = DataArray(
            mindex.get_level_values("level_date").hour,
            name="hour",
            coords=[mindex],
            dims="x",
        )
        assert_identical(expected, data["level_date.hour"])

        # attribute style access
        assert_identical(data.level_str, data["level_str"])
def test_time_season(self):
ds = Dataset({"t": pd.date_range("2000-01-01", periods=12, freq="M")})
seas = ["DJF"] * 2 + ["MAM"] * 3 + ["JJA"] * 3 + ["SON"] * 3 + ["DJF"]
assert_array_equal(seas, ds["t.season"])
    def test_slice_virtual_variable(self):
        """Virtual variables support slicing and scalar indexing like real ones."""
        data = create_test_data()
        assert_equal(
            data["time.dayofyear"][:10].variable, Variable(["time"], 1 + np.arange(10))
        )
        # scalar selection yields a 0-d variable
        assert_equal(data["time.dayofyear"][0].variable, Variable([], 1))
    def test_setitem(self):
        """Exercise Dataset.__setitem__ for variables, arrays, scalars and
        dictionary-style (indexer) assignment, including the error paths.

        NOTE: statement order matters throughout — data1..data5 are mutated
        in sequence and later assertions depend on the earlier assignments.
        """
        # assign a variable
        var = Variable(["dim1"], np.random.randn(8))
        data1 = create_test_data()
        data1["A"] = var
        data2 = data1.copy()
        data2["A"] = var
        assert_identical(data1, data2)
        # assign a dataset array
        dv = 2 * data2["A"]
        data1["B"] = dv.variable
        data2["B"] = dv
        assert_identical(data1, data2)
        # can't assign an ND array without dimensions
        with pytest.raises(ValueError, match=r"without explicit dimension names"):
            data2["C"] = var.values.reshape(2, 4)
        # 1-D arrays get the new key as their dimension name
        data1["C"] = var.values
        data2["C"] = ("C", var.values)
        assert_identical(data1, data2)
        data1["scalar"] = 0
        data2["scalar"] = ([], 0)
        assert_identical(data1, data2)
        # can't use a scalar-dimension name for a 1-D variable
        with pytest.raises(ValueError, match=r"already exists as a scalar"):
            data1["newvar"] = ("scalar", [3, 4, 5])
        # can't resize a used dimension
        with pytest.raises(ValueError, match=r"arguments without labels"):
            data1["dim1"] = data1["dim1"][:5]
        # override an existing value
        data1["A"] = 3 * data2["A"]
        assert_equal(data1["A"], 3 * data2["A"])
        # test assignment with positional and label-based indexing
        data3 = data1[["var1", "var2"]]
        data3["var3"] = data3.var1.isel(dim1=0)
        data4 = data3.copy()
        err_msg = (
            "can only set locations defined by dictionaries from Dataset.loc. Got: a"
        )
        with pytest.raises(TypeError, match=err_msg):
            data1.loc["a"] = 0
        err_msg = r"Variables \['A', 'B', 'scalar'\] in new values not available in original dataset:"
        with pytest.raises(ValueError, match=err_msg):
            data4[{"dim2": 1}] = data1[{"dim2": 2}]
        err_msg = "Variable 'var3': indexer {'dim2': 0} not available"
        with pytest.raises(ValueError, match=err_msg):
            data1[{"dim2": 0}] = 0.0
        err_msg = "Variable 'var1': indexer {'dim2': 10} not available"
        with pytest.raises(ValueError, match=err_msg):
            data4[{"dim2": 10}] = data3[{"dim2": 2}]
        err_msg = "Variable 'var1': dimension 'dim2' appears in new values"
        with pytest.raises(KeyError, match=err_msg):
            data4[{"dim2": 2}] = data3[{"dim2": [2]}]
        err_msg = (
            "Variable 'var2': dimension order differs between original and new data"
        )
        # transpose var2 to trigger the dimension-order mismatch, then restore it
        data3["var2"] = data3["var2"].T
        with pytest.raises(ValueError, match=err_msg):
            data4[{"dim2": [2, 3]}] = data3[{"dim2": [2, 3]}]
        data3["var2"] = data3["var2"].T
        err_msg = "indexes along dimension 'dim2' are not equal"
        with pytest.raises(ValueError, match=err_msg):
            data4[{"dim2": [2, 3]}] = data3[{"dim2": [2, 3, 4]}]
        err_msg = "Dataset assignment only accepts DataArrays, Datasets, and scalars."
        with pytest.raises(TypeError, match=err_msg):
            data4[{"dim2": [2, 3]}] = data3["var1"][{"dim2": [3, 4]}].values
        data5 = data4.astype(str)
        data5["var4"] = data4["var1"]
        err_msg = "could not convert string to float: 'a'"
        with pytest.raises(ValueError, match=err_msg):
            data5[{"dim2": 1}] = "a"
        # successful in-place assignments, both positional and label-based
        data4[{"dim2": 0}] = 0.0
        data4[{"dim2": 1}] = data3[{"dim2": 2}]
        data4.loc[{"dim2": 1.5}] = 1.0
        data4.loc[{"dim2": 2.0}] = data3.loc[{"dim2": 2.5}]
        for v, dat3 in data3.items():
            dat4 = data4[v]
            assert_array_equal(dat4[{"dim2": 0}], 0.0)
            assert_array_equal(dat4[{"dim2": 1}], dat3[{"dim2": 2}])
            assert_array_equal(dat4.loc[{"dim2": 1.5}], 1.0)
            assert_array_equal(dat4.loc[{"dim2": 2.0}], dat3.loc[{"dim2": 2.5}])
            # labels not targeted by any assignment stay untouched
            unchanged = [1.0, 2.5, 3.0, 3.5, 4.0]
            assert_identical(
                dat4.loc[{"dim2": unchanged}], dat3.loc[{"dim2": unchanged}]
            )
    def test_setitem_pandas(self):
        """Assigning a pandas object round-trips to an equal Dataset entry."""
        ds = self.make_example_math_dataset()
        ds["x"] = np.arange(3)
        ds_copy = ds.copy()
        ds_copy["bar"] = ds["bar"].to_pandas()
        assert_equal(ds, ds_copy)
    def test_setitem_auto_align(self):
        """__setitem__ aligns new values to existing indexes (filling with NaN,
        dropping out-of-index labels)."""
        ds = Dataset()
        ds["x"] = ("y", range(3))
        ds["y"] = 1 + np.arange(3)
        expected = Dataset({"x": ("y", range(3)), "y": 1 + np.arange(3)})
        assert_identical(ds, expected)
        ds["y"] = DataArray(range(3), dims="y")
        expected = Dataset({"x": ("y", range(3))}, {"y": range(3)})
        assert_identical(ds, expected)
        # partial overlap: missing labels become NaN
        ds["x"] = DataArray([1, 2], coords=[("y", [0, 1])])
        expected = Dataset({"x": ("y", [1, 2, np.nan])}, {"y": range(3)})
        assert_identical(ds, expected)
        ds["x"] = 42
        expected = Dataset({"x": 42, "y": range(3)})
        assert_identical(ds, expected)
        # superset of labels: extras are dropped on alignment
        ds["x"] = DataArray([4, 5, 6, 7], coords=[("y", [0, 1, 2, 3])])
        expected = Dataset({"x": ("y", [4, 5, 6])}, {"y": range(3)})
        assert_identical(ds, expected)
    def test_setitem_dimension_override(self):
        """Re-assigning a dimension coordinate may shrink the dimension."""
        ds = xr.Dataset({"x": [0, 1, 2]})
        ds["x"] = ds["x"][:2]
        expected = Dataset({"x": [0, 1]})
        assert_identical(ds, expected)
        # same result from a plain numpy array ...
        ds = xr.Dataset({"x": [0, 1, 2]})
        ds["x"] = np.array([0, 1])
        assert_identical(ds, expected)
        # ... and from assignment through .coords
        ds = xr.Dataset({"x": [0, 1, 2]})
        ds.coords["x"] = [0, 1]
        assert_identical(ds, expected)
    def test_setitem_with_coords(self):
        """Assigning a DataArray keeps/drops its coords correctly and never
        mutates the assigned object."""
        # Regression: Dataset.__setitem__ coerces a DataArray without harmful
        # modification of its coordinates.
        ds = create_test_data()
        other = DataArray(
            np.arange(10), dims="dim3", coords={"numbers": ("dim3", np.arange(10))}
        )
        # conflicting coord "numbers" is dropped from the stored variable
        expected = ds.copy()
        expected["var3"] = other.drop_vars("numbers")
        actual = ds.copy()
        actual["var3"] = other
        assert_identical(expected, actual)
        # the source DataArray keeps its own coords
        assert "numbers" in other.coords
        # with alignment
        other = ds["var3"].isel(dim3=slice(1, -1))
        other["numbers"] = ("dim3", np.arange(8))
        actual = ds.copy()
        actual["var3"] = other
        assert "numbers" in other.coords
        expected = ds.copy()
        expected["var3"] = ds["var3"].isel(dim3=slice(1, -1))
        assert_identical(expected, actual)
        # with non-duplicate coords: a brand-new coord propagates to the Dataset
        other = ds["var3"].isel(dim3=slice(1, -1))
        other["numbers"] = ("dim3", np.arange(8))
        other["position"] = ("dim3", np.arange(8))
        actual = ds.copy()
        actual["var3"] = other
        assert "position" in actual
        assert "position" in other.coords
        # assigning a coordinate-typed DataArray back updates the coordinate
        actual = ds.copy()
        other = actual["numbers"]
        other[0] = 10
        actual["numbers"] = other
        assert actual["numbers"][0] == 10
        # GH: assigning a var with the same auxiliary coords must not fail
        ds = Dataset(
            {"var": ("x", [1, 2, 3])},
            coords={"x": [0, 1, 2], "z1": ("x", [1, 2, 3]), "z2": ("x", [1, 2, 3])},
        )
        ds["var"] = ds["var"] * 2
        assert np.allclose(ds["var"], [2, 4, 6])
    def test_setitem_align_new_indexes(self):
        """New variables with their own index are aligned to the existing index."""
        ds = Dataset({"foo": ("x", [1, 2, 3])}, {"x": [0, 1, 2]})
        ds["bar"] = DataArray([2, 3, 4], [("x", [1, 2, 3])])
        # label 0 missing in new data -> NaN; label 3 dropped
        expected = Dataset(
            {"foo": ("x", [1, 2, 3]), "bar": ("x", [np.nan, 2, 3])}, {"x": [0, 1, 2]}
        )
        assert_identical(ds, expected)
    @pytest.mark.parametrize("dtype", [str, bytes])
    def test_setitem_str_dtype(self, dtype):
        """Adding a data variable must not change a str/bytes coordinate dtype."""
        ds = xr.Dataset(coords={"x": np.array(["x", "y"], dtype=dtype)})
        ds["foo"] = xr.DataArray(np.array([0, 0]), dims=["x"])
        assert np.issubdtype(ds.x.dtype, dtype)
    def test_setitem_using_list(self):
        """A list of names can be assigned from a matching list of values."""
        # assign a list of variables
        var1 = Variable(["dim1"], np.random.randn(8))
        var2 = Variable(["dim1"], np.random.randn(8))
        actual = create_test_data()
        expected = actual.copy()
        expected["A"] = var1
        expected["B"] = var2
        actual[["A", "B"]] = [var1, var2]
        assert_identical(actual, expected)
        # assign a list of dataset arrays
        dv = 2 * expected[["A", "B"]]
        actual[["C", "D"]] = [d.variable for d in dv.data_vars.values()]
        expected[["C", "D"]] = dv
        assert_identical(actual, expected)
    @pytest.mark.parametrize(
        "var_list, data, error_regex",
        [
            (
                ["A", "B"],
                [Variable(["dim1"], np.random.randn(8))],
                r"Different lengths",
            ),
            ([], [Variable(["dim1"], np.random.randn(8))], r"Empty list of variables"),
            (["A", "B"], xr.DataArray([1, 2]), r"assign single DataArray"),
        ],
    )
    def test_setitem_using_list_errors(self, var_list, data, error_regex):
        """Invalid list-of-names assignments raise ValueError with clear messages."""
        actual = create_test_data()
        with pytest.raises(ValueError, match=error_regex):
            actual[var_list] = data
    def test_assign(self):
        """Dataset.assign returns a new object; callables see the updated dataset;
        assign also works through groupby."""
        ds = Dataset()
        actual = ds.assign(x=[0, 1, 2], y=2)
        expected = Dataset({"x": [0, 1, 2], "y": 2})
        assert_identical(actual, expected)
        assert list(actual.variables) == ["x", "y"]
        # original dataset is untouched
        assert_identical(ds, Dataset())
        # callables receive the dataset being assigned to
        actual = actual.assign(y=lambda ds: ds.x ** 2)
        expected = Dataset({"y": ("x", [0, 1, 4]), "x": [0, 1, 2]})
        assert_identical(actual, expected)
        actual = actual.assign_coords(z=2)
        expected = Dataset({"y": ("x", [0, 1, 4])}, {"z": 2, "x": [0, 1, 2]})
        assert_identical(actual, expected)
        # assign on a groupby applies per group
        ds = Dataset({"a": ("x", range(3))}, {"b": ("x", ["A"] * 2 + ["B"])})
        actual = ds.groupby("b").assign(c=lambda ds: 2 * ds.a)
        expected = ds.merge({"c": ("x", [0, 2, 4])})
        assert_identical(actual, expected)
        actual = ds.groupby("b").assign(c=lambda ds: ds.a.sum())
        expected = ds.merge({"c": ("x", [1, 1, 2])})
        assert_identical(actual, expected)
        actual = ds.groupby("b").assign_coords(c=lambda ds: ds.a.sum())
        expected = expected.set_coords("c")
        assert_identical(actual, expected)
    def test_assign_coords(self):
        """assign_coords accepts keyword args and a dict, replacing coord values."""
        ds = Dataset()
        actual = ds.assign(x=[0, 1, 2], y=2)
        actual = actual.assign_coords(x=list("abc"))
        expected = Dataset({"x": list("abc"), "y": 2})
        assert_identical(actual, expected)
        actual = ds.assign(x=[0, 1, 2], y=[2, 3])
        actual = actual.assign_coords({"y": [2.0, 3.0]})
        expected = ds.assign(x=[0, 1, 2], y=[2.0, 3.0])
        assert_identical(actual, expected)
    def test_assign_attrs(self):
        """assign_attrs returns a new object and leaves the original attrs alone."""
        expected = Dataset(attrs=dict(a=1, b=2))
        new = Dataset()
        actual = new.assign_attrs(a=1, b=2)
        assert_identical(actual, expected)
        assert new.attrs == {}
        expected.attrs["c"] = 3
        new_actual = actual.assign_attrs({"c": 3})
        assert_identical(new_actual, expected)
        assert actual.attrs == dict(a=1, b=2)
    def test_assign_multiindex_level(self):
        """Assigning over an existing MultiIndex level name is rejected."""
        data = create_test_multiindex()
        with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
            data.assign(level_1=range(4))
            data.assign_coords(level_1=range(4))
        # raise an Error when any level name is used as dimension GH:2299
        with pytest.raises(ValueError):
            data["y"] = ("level_1", [0, 1])
    def test_merge_multiindex_level(self):
        """Merging data that reuses a MultiIndex level name raises ValueError."""
        data = create_test_multiindex()
        # level name used as a dimension
        other = Dataset({"z": ("level_1", [0, 1])})
        with pytest.raises(ValueError):
            data.merge(other)
        # level name used as a variable name
        other = Dataset({"level_1": ("x", [0, 1])})
        with pytest.raises(ValueError):
            data.merge(other)
    def test_setitem_original_non_unique_index(self):
        """A non-unique index may be replaced wholesale through any setter path."""
        # regression test for GH943
        original = Dataset({"data": ("x", np.arange(5))}, coords={"x": [0, 1, 2, 0, 1]})
        expected = Dataset({"data": ("x", np.arange(5))}, {"x": range(5)})
        actual = original.copy()
        actual["x"] = list(range(5))
        assert_identical(actual, expected)
        actual = original.copy()
        actual["x"] = ("x", list(range(5)))
        assert_identical(actual, expected)
        actual = original.copy()
        actual.coords["x"] = list(range(5))
        assert_identical(actual, expected)
    def test_setitem_both_non_unique_index(self):
        """Assignment works when source and target share the same non-unique index."""
        # regression test for GH956
        names = ["joaquin", "manolo", "joaquin"]
        values = np.random.randint(0, 256, (3, 4, 4))
        array = DataArray(
            values, dims=["name", "row", "column"], coords=[names, range(4), range(4)]
        )
        expected = Dataset({"first": array, "second": array})
        actual = array.rename("first").to_dataset()
        actual["second"] = array
        assert_identical(expected, actual)
    def test_setitem_multiindex_level(self):
        """Directly setting a MultiIndex level variable is rejected."""
        data = create_test_multiindex()
        with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
            data["level_1"] = range(4)
    def test_delitem(self):
        """del removes data variables and coordinates, and can empty a dataset."""
        data = create_test_data()
        all_items = set(data.variables)
        assert set(data.variables) == all_items
        del data["var1"]
        assert set(data.variables) == all_items - {"var1"}
        # deleting a coordinate removes it from .coords too
        del data["numbers"]
        assert set(data.variables) == all_items - {"var1", "numbers"}
        assert "numbers" not in data.coords
        expected = Dataset()
        actual = Dataset({"y": ("x", [1, 2])})
        del actual["y"]
        assert_identical(expected, actual)
    def test_squeeze(self):
        """squeeze drops length-1 dimensions, per-variable, for all dim subsets."""
        data = Dataset({"foo": (["x", "y", "z"], [[[1], [2]]])})
        for args in [[], [["x"]], [["x", "z"]]]:
            def get_args(v):
                # restrict the requested dims to those present on each variable
                return [set(args[0]) & set(v.dims)] if args else []
            expected = Dataset(
                {k: v.squeeze(*get_args(v)) for k, v in data.variables.items()}
            )
            expected = expected.set_coords(data.coords)
            assert_identical(expected, data.squeeze(*args))
        # invalid squeeze: "y" has length 2
        with pytest.raises(ValueError, match=r"cannot select a dimension"):
            data.squeeze("y")
    def test_squeeze_drop(self):
        """squeeze(drop=...) controls whether squeezed scalar coords are kept."""
        data = Dataset({"foo": ("x", [1])}, {"x": [0]})
        expected = Dataset({"foo": 1})
        selected = data.squeeze(drop=True)
        assert_identical(expected, selected)
        expected = Dataset({"foo": 1}, {"x": 0})
        selected = data.squeeze(drop=False)
        assert_identical(expected, selected)
        data = Dataset({"foo": (("x", "y"), [[1]])}, {"x": [0], "y": [0]})
        expected = Dataset({"foo": 1})
        selected = data.squeeze(drop=True)
        assert_identical(expected, selected)
        expected = Dataset({"foo": ("x", [1])}, {"x": [0]})
        selected = data.squeeze(dim="y", drop=True)
        assert_identical(expected, selected)
        # empty dimension: nothing to squeeze, dataset unchanged
        data = Dataset({"foo": (("x",), [])}, {"x": []})
        selected = data.squeeze(drop=True)
        assert_identical(data, selected)
    def test_groupby(self):
        """Basic groupby: length, .groups mapping, iteration order, and that
        mapping the identity reproduces the dataset for any grouping key."""
        data = Dataset(
            {"z": (["x", "y"], np.random.randn(3, 5))},
            {"x": ("x", list("abc")), "c": ("x", [0, 1, 0]), "y": range(5)},
        )
        groupby = data.groupby("x")
        assert len(groupby) == 3
        expected_groups = {"a": 0, "b": 1, "c": 2}
        assert groupby.groups == expected_groups
        expected_items = [
            ("a", data.isel(x=0)),
            ("b", data.isel(x=1)),
            ("c", data.isel(x=2)),
        ]
        for actual, expected in zip(groupby, expected_items):
            assert actual[0] == expected[0]
            assert_equal(actual[1], expected[1])
        def identity(x):
            return x
        for k in ["x", "c", "y"]:
            actual = data.groupby(k, squeeze=False).map(identity)
            assert_equal(data, actual)
    def test_groupby_returns_new_type(self):
        """groupby(...).map may change the result type (Dataset <-> DataArray)."""
        data = Dataset({"z": (["x", "y"], np.random.randn(3, 5))})
        actual = data.groupby("x").map(lambda ds: ds["z"])
        expected = data["z"]
        assert_identical(expected, actual)
        actual = data["z"].groupby("x").map(lambda x: x.to_dataset())
        expected = data
        assert_identical(expected, actual)
    def test_groupby_iter(self):
        """Iterating a groupby yields (label, sub-dataset) pairs in index order."""
        data = create_test_data()
        for n, (t, sub) in enumerate(list(data.groupby("dim1"))[:3]):
            assert data["dim1"][n] == t
            assert_equal(data["var1"][n], sub["var1"])
            assert_equal(data["var2"][n], sub["var2"])
            assert_equal(data["var3"][:, n], sub["var3"])
    def test_groupby_errors(self):
        """Invalid group arguments raise TypeError/ValueError as documented."""
        data = create_test_data()
        with pytest.raises(TypeError, match=r"`group` must be"):
            data.groupby(np.arange(10))
        with pytest.raises(ValueError, match=r"length does not match"):
            data.groupby(data["dim1"][:3])
        with pytest.raises(TypeError, match=r"`group` must be"):
            data.groupby(data.coords["dim1"].to_index())
    def test_groupby_reduce(self):
        """groupby(...).mean reduces over group dims and broadcasts variables
        that lack the grouped dimension."""
        data = Dataset(
            {
                "xy": (["x", "y"], np.random.randn(3, 4)),
                "xonly": ("x", np.random.randn(3)),
                "yonly": ("y", np.random.randn(4)),
                "letters": ("y", ["a", "a", "b", "b"]),
            }
        )
        expected = data.mean("y")
        # "yonly" has no "x" dim, so it is broadcast to match the groups
        expected["yonly"] = expected["yonly"].variable.set_dims({"x": 3})
        actual = data.groupby("x").mean(...)
        assert_allclose(expected, actual)
        actual = data.groupby("x").mean("y")
        assert_allclose(expected, actual)
        letters = data["letters"]
        expected = Dataset(
            {
                "xy": data["xy"].groupby(letters).mean(...),
                "xonly": (data["xonly"].mean().variable.set_dims({"letters": 2})),
                "yonly": data["yonly"].groupby(letters).mean(),
            }
        )
        actual = data.groupby("letters").mean(...)
        assert_allclose(expected, actual)
    def test_groupby_math(self):
        """Binary arithmetic between a grouped dataset and arrays/datasets,
        plus the error cases for incompatible or unsupported operands."""
        def reorder_dims(x):
            # normalize dim order so results can be compared with assert_identical
            return x.transpose("dim1", "dim2", "dim3", "time")
        ds = create_test_data()
        ds["dim1"] = ds["dim1"]
        for squeeze in [True, False]:
            grouped = ds.groupby("dim1", squeeze=squeeze)
            expected = reorder_dims(ds + ds.coords["dim1"])
            actual = grouped + ds.coords["dim1"]
            assert_identical(expected, reorder_dims(actual))
            actual = ds.coords["dim1"] + grouped
            assert_identical(expected, reorder_dims(actual))
            ds2 = 2 * ds
            expected = reorder_dims(ds + ds2)
            actual = grouped + ds2
            assert_identical(expected, reorder_dims(actual))
            actual = ds2 + grouped
            assert_identical(expected, reorder_dims(actual))
        grouped = ds.groupby("numbers")
        zeros = DataArray([0, 0, 0, 0], [("numbers", range(4))])
        expected = (ds + Variable("dim3", np.zeros(10))).transpose(
            "dim3", "dim1", "dim2", "time"
        )
        actual = grouped + zeros
        assert_equal(expected, actual)
        actual = zeros + grouped
        assert_equal(expected, actual)
        with pytest.raises(ValueError, match=r"incompat.* grouped binary"):
            grouped + ds
        with pytest.raises(ValueError, match=r"incompat.* grouped binary"):
            ds + grouped
        with pytest.raises(TypeError, match=r"only support binary ops"):
            grouped + 1
        with pytest.raises(TypeError, match=r"only support binary ops"):
            grouped + grouped
        with pytest.raises(TypeError, match=r"in-place operations"):
            ds += grouped
        ds = Dataset(
            {
                "x": ("time", np.arange(100)),
                "time": pd.date_range("2000-01-01", periods=100),
            }
        )
        with pytest.raises(ValueError, match=r"incompat.* grouped binary"):
            ds + ds.groupby("time.month")
    def test_groupby_math_virtual(self):
        """Grouped arithmetic works when grouping by a virtual variable ("t.day")."""
        ds = Dataset(
            {"x": ("t", [1, 2, 3])}, {"t": pd.date_range("20100101", periods=3)}
        )
        grouped = ds.groupby("t.day")
        # subtracting each group's mean of a singleton group yields zeros
        actual = grouped - grouped.mean(...)
        expected = Dataset({"x": ("t", [0, 0, 0])}, ds[["t", "t.day"]])
        assert_identical(actual, expected)
    def test_groupby_nan(self):
        """NaN group labels are dropped when reducing (matches pandas behavior)."""
        # nan should be excluded from groupby
        ds = Dataset({"foo": ("x", [1, 2, 3, 4])}, {"bar": ("x", [1, 1, 2, np.nan])})
        actual = ds.groupby("bar").mean(...)
        expected = Dataset({"foo": ("bar", [1.5, 3]), "bar": [1, 2]})
        assert_identical(actual, expected)
    def test_groupby_order(self):
        """Reducing over a groupby preserves the order of data variables."""
        # groupby should preserve variables order (GH1654)
        ds = Dataset()
        for vn in ["a", "b", "c"]:
            ds[vn] = DataArray(np.arange(10), dims=["t"])
        data_vars_ref = list(ds.data_vars.keys())
        ds = ds.groupby("t").mean(...)
        data_vars = list(ds.data_vars.keys())
        assert data_vars == data_vars_ref
    def test_resample_and_first(self):
        """Downsampling with first() selects leading entries; upsampling followed
        by any reduction reproduces a reindex onto the finer frequency."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        actual = ds.resample(time="1D").first(keep_attrs=True)
        expected = ds.isel(time=[0, 4, 8])
        assert_identical(expected, actual)
        # upsampling
        expected_time = pd.date_range("2000-01-01", freq="3H", periods=19)
        expected = ds.reindex(time=expected_time)
        actual = ds.resample(time="3H")
        for how in ["mean", "sum", "first", "last"]:
            method = getattr(actual, how)
            result = method()
            assert_equal(expected, result)
        for method in [np.mean]:
            result = actual.reduce(method)
            assert_equal(expected, result)
    def test_resample_min_count(self):
        """resample(...).sum(min_count=...) matches a per-bin sum with the same
        min_count."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        # inject nan values so min_count actually matters
        ds["foo"] = xr.where(ds["foo"] > 2.0, np.nan, ds["foo"])
        actual = ds.resample(time="1D").sum(min_count=1)
        # each daily bin covers 4 six-hourly steps
        expected = xr.concat(
            [
                ds.isel(time=slice(i * 4, (i + 1) * 4)).sum("time", min_count=1)
                for i in range(3)
            ],
            dim=actual["time"],
        )
        assert_equal(expected, actual)
    def test_resample_by_mean_with_keep_attrs(self):
        """mean(keep_attrs=True) preserves variable and dataset attrs; passing
        keep_attrs to resample() itself only warns."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        ds.attrs["dsmeta"] = "dsdata"
        resampled_ds = ds.resample(time="1D").mean(keep_attrs=True)
        actual = resampled_ds["bar"].attrs
        expected = ds["bar"].attrs
        assert expected == actual
        actual = resampled_ds.attrs
        expected = ds.attrs
        assert expected == actual
        with pytest.warns(
            UserWarning, match="Passing ``keep_attrs`` to ``resample`` has no effect."
        ):
            ds.resample(time="1D", keep_attrs=True)
    def test_resample_loffset(self):
        """loffset shifts the resampled index like pandas' loffset argument."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        ds.attrs["dsmeta"] = "dsdata"
        # ref https://github.com/pydata/xarray/pull/4537
        actual = ds.resample(time="24H", loffset="-12H").mean().bar
        expected_ = ds.bar.to_series().resample("24H").mean()
        expected_.index += to_offset("-12H")
        expected = DataArray.from_series(expected_)
        assert_identical(actual, expected)
    def test_resample_by_mean_discarding_attrs(self):
        """mean(keep_attrs=False) drops both variable and dataset attrs."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        ds.attrs["dsmeta"] = "dsdata"
        resampled_ds = ds.resample(time="1D").mean(keep_attrs=False)
        assert resampled_ds["bar"].attrs == {}
        assert resampled_ds.attrs == {}
    def test_resample_by_last_discarding_attrs(self):
        """last(keep_attrs=False) drops both variable and dataset attrs."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        ds.attrs["dsmeta"] = "dsdata"
        resampled_ds = ds.resample(time="1D").last(keep_attrs=False)
        assert resampled_ds["bar"].attrs == {}
        assert resampled_ds.attrs == {}
    @requires_scipy
    def test_resample_drop_nondim_coords(self):
        """Non-dimension coords along the resampled dim are dropped by
        downsampling, ffill upsampling, and interpolation upsampling alike."""
        xs = np.arange(6)
        ys = np.arange(3)
        times = pd.date_range("2000-01-01", freq="6H", periods=5)
        data = np.tile(np.arange(5), (6, 3, 1))
        xx, yy = np.meshgrid(xs * 5, ys * 2.5)
        tt = np.arange(len(times), dtype=int)
        array = DataArray(data, {"time": times, "x": xs, "y": ys}, ("x", "y", "time"))
        xcoord = DataArray(xx.T, {"x": xs, "y": ys}, ("x", "y"))
        ycoord = DataArray(yy.T, {"x": xs, "y": ys}, ("x", "y"))
        tcoord = DataArray(tt, {"time": times}, ("time",))
        ds = Dataset({"data": array, "xc": xcoord, "yc": ycoord, "tc": tcoord})
        ds = ds.set_coords(["xc", "yc", "tc"])
        # Re-sample
        actual = ds.resample(time="12H").mean("time")
        assert "tc" not in actual.coords
        # Up-sample - filling
        actual = ds.resample(time="1H").ffill()
        assert "tc" not in actual.coords
        # Up-sample - interpolation
        actual = ds.resample(time="1H").interpolate("linear")
        assert "tc" not in actual.coords
    def test_resample_old_api(self):
        """The removed positional resample(freq, dim, how=...) API raises TypeError."""
        times = pd.date_range("2000-01-01", freq="6H", periods=10)
        ds = Dataset(
            {
                "foo": (["time", "x", "y"], np.random.randn(10, 5, 3)),
                "bar": ("time", np.random.randn(10), {"meta": "data"}),
                "time": times,
            }
        )
        with pytest.raises(TypeError, match=r"resample\(\) no longer supports"):
            ds.resample("1D", "time")
        with pytest.raises(TypeError, match=r"resample\(\) no longer supports"):
            ds.resample("1D", dim="time", how="mean")
        with pytest.raises(TypeError, match=r"resample\(\) no longer supports"):
            ds.resample("1D", dim="time")
    def test_resample_ds_da_are_the_same(self):
        """Resampling a Dataset variable equals resampling the DataArray directly."""
        time = pd.date_range("2000-01-01", freq="6H", periods=365 * 4)
        ds = xr.Dataset(
            {
                "foo": (("time", "x"), np.random.randn(365 * 4, 5)),
                "time": time,
                "x": np.arange(5),
            }
        )
        assert_identical(
            ds.resample(time="M").mean()["foo"], ds.foo.resample(time="M").mean()
        )
    def test_ds_resample_apply_func_args(self):
        """resample(...).map forwards positional args and kwargs to the function."""
        def func(arg1, arg2, arg3=0.0):
            return arg1.mean("time") + arg2 + arg3
        times = pd.date_range("2000", freq="D", periods=3)
        ds = xr.Dataset({"foo": ("time", [1.0, 1.0, 1.0]), "time": times})
        expected = xr.Dataset({"foo": ("time", [3.0, 3.0, 3.0]), "time": times})
        actual = ds.resample(time="D").map(func, args=(1.0,), arg3=1.0)
        assert_identical(expected, actual)
    def test_to_array(self):
        """to_array stacks data variables along a new dim, broadcasting scalars,
        and supports custom dim/name arguments."""
        ds = Dataset(
            {"a": 1, "b": ("x", [1, 2, 3])},
            coords={"c": 42},
            attrs={"Conventions": "None"},
        )
        data = [[1, 1, 1], [1, 2, 3]]
        coords = {"c": 42, "variable": ["a", "b"]}
        dims = ("variable", "x")
        expected = DataArray(data, coords, dims, attrs=ds.attrs)
        actual = ds.to_array()
        assert_identical(expected, actual)
        actual = ds.to_array("abc", name="foo")
        expected = expected.rename({"variable": "abc"}).rename("foo")
        assert_identical(expected, actual)
    def test_to_and_from_dataframe(self):
        """Round-trip Dataset <-> pandas.DataFrame, including MultiIndex datasets,
        dim_order control, and several pathological DataFrame shapes."""
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
        expected = pd.DataFrame(
            np.array([x, y]).T, columns=["a", "b"], index=pd.Index(t, name="t")
        )
        actual = ds.to_dataframe()
        # use the .equals method to check all DataFrame metadata
        assert expected.equals(actual), (expected, actual)
        # verify coords are included
        actual = ds.set_coords("b").to_dataframe()
        assert expected.equals(actual), (expected, actual)
        # check roundtrip
        assert_identical(ds, Dataset.from_dataframe(actual))
        # test a case with a MultiIndex
        w = np.random.randn(2, 3)
        ds = Dataset({"w": (("x", "y"), w)})
        ds["y"] = ("y", list("abc"))
        exp_index = pd.MultiIndex.from_arrays(
            [[0, 0, 0, 1, 1, 1], ["a", "b", "c", "a", "b", "c"]], names=["x", "y"]
        )
        expected = pd.DataFrame(w.reshape(-1), columns=["w"], index=exp_index)
        actual = ds.to_dataframe()
        assert expected.equals(actual)
        # check roundtrip
        assert_identical(ds.assign_coords(x=[0, 1]), Dataset.from_dataframe(actual))
        # Check multiindex reordering
        new_order = ["x", "y"]
        actual = ds.to_dataframe(dim_order=new_order)
        assert expected.equals(actual)
        new_order = ["y", "x"]
        exp_index = pd.MultiIndex.from_arrays(
            [["a", "a", "b", "b", "c", "c"], [0, 1, 0, 1, 0, 1]], names=["y", "x"]
        )
        expected = pd.DataFrame(
            w.transpose().reshape(-1), columns=["w"], index=exp_index
        )
        actual = ds.to_dataframe(dim_order=new_order)
        assert expected.equals(actual)
        # dim_order must cover exactly the dataset's dimensions
        invalid_order = ["x"]
        with pytest.raises(
            ValueError, match="does not match the set of dimensions of this"
        ):
            ds.to_dataframe(dim_order=invalid_order)
        invalid_order = ["x", "z"]
        with pytest.raises(
            ValueError, match="does not match the set of dimensions of this"
        ):
            ds.to_dataframe(dim_order=invalid_order)
        # check pathological cases
        df = pd.DataFrame([1])
        actual = Dataset.from_dataframe(df)
        expected = Dataset({0: ("index", [1])}, {"index": [0]})
        assert_identical(expected, actual)
        df = pd.DataFrame()
        actual = Dataset.from_dataframe(df)
        expected = Dataset(coords={"index": []})
        assert_identical(expected, actual)
        # GH697
        df = pd.DataFrame({"A": []})
        actual = Dataset.from_dataframe(df)
        expected = Dataset({"A": DataArray([], dims=("index",))}, {"index": []})
        assert_identical(expected, actual)
        # regression test for GH278
        # use int64 to ensure consistent results for the pandas .equals method
        # on windows (which requires the same dtype)
        ds = Dataset({"x": pd.Index(["bar"]), "a": ("y", np.array([1], "int64"))}).isel(
            x=0
        )
        # use .loc to ensure consistent results on Python 3
        actual = ds.to_dataframe().loc[:, ["a", "x"]]
        expected = pd.DataFrame(
            [[1, "bar"]], index=pd.Index([0], name="y"), columns=["a", "x"]
        )
        assert expected.equals(actual), (expected, actual)
        ds = Dataset({"x": np.array([0], "int64"), "y": np.array([1], "int64")})
        actual = ds.to_dataframe()
        idx = pd.MultiIndex.from_arrays([[0], [1]], names=["x", "y"])
        expected = pd.DataFrame([[]], index=idx)
        assert expected.equals(actual), (expected, actual)
    def test_from_dataframe_categorical(self):
        """Categorical indexes convert using observed categories, not the full dtype."""
        cat = pd.CategoricalDtype(
            categories=["foo", "bar", "baz", "qux", "quux", "corge"]
        )
        i1 = pd.Series(["foo", "bar", "foo"], dtype=cat)
        i2 = pd.Series(["bar", "bar", "baz"], dtype=cat)
        df = pd.DataFrame({"i1": i1, "i2": i2, "values": [1, 2, 3]})
        # single categorical index keeps all rows
        ds = df.set_index("i1").to_xarray()
        assert len(ds["i1"]) == 3
        # categorical MultiIndex levels keep only observed values
        ds = df.set_index(["i1", "i2"]).to_xarray()
        assert len(ds["i1"]) == 2
        assert len(ds["i2"]) == 2
    @requires_sparse
    def test_from_dataframe_sparse(self):
        """from_dataframe(sparse=True) produces sparse.COO-backed variables equal
        (after densifying) to the dense conversion."""
        import sparse
        df_base = pd.DataFrame(
            {"x": range(10), "y": list("abcdefghij"), "z": np.arange(0, 100, 10)}
        )
        ds_sparse = Dataset.from_dataframe(df_base.set_index("x"), sparse=True)
        ds_dense = Dataset.from_dataframe(df_base.set_index("x"), sparse=False)
        assert isinstance(ds_sparse["y"].data, sparse.COO)
        assert isinstance(ds_sparse["z"].data, sparse.COO)
        ds_sparse["y"].data = ds_sparse["y"].data.todense()
        ds_sparse["z"].data = ds_sparse["z"].data.todense()
        assert_identical(ds_dense, ds_sparse)
        # same check through a MultiIndex
        ds_sparse = Dataset.from_dataframe(df_base.set_index(["x", "y"]), sparse=True)
        ds_dense = Dataset.from_dataframe(df_base.set_index(["x", "y"]), sparse=False)
        assert isinstance(ds_sparse["z"].data, sparse.COO)
        ds_sparse["z"].data = ds_sparse["z"].data.todense()
        assert_identical(ds_dense, ds_sparse)
    def test_to_and_from_empty_dataframe(self):
        """Empty DataFrames survive the round trip through Dataset."""
        # GH697
        expected = pd.DataFrame({"foo": []})
        ds = Dataset.from_dataframe(expected)
        assert len(ds["foo"]) == 0
        actual = ds.to_dataframe()
        assert len(actual) == 0
        assert expected.equals(actual)
    def test_from_dataframe_multiindex(self):
        """MultiIndex DataFrames unstack into dims; order-insensitive, missing
        combinations become NaN, duplicates raise."""
        index = pd.MultiIndex.from_product([["a", "b"], [1, 2, 3]], names=["x", "y"])
        df = pd.DataFrame({"z": np.arange(6)}, index=index)
        expected = Dataset(
            {"z": (("x", "y"), [[0, 1, 2], [3, 4, 5]])},
            coords={"x": ["a", "b"], "y": [1, 2, 3]},
        )
        actual = Dataset.from_dataframe(df)
        assert_identical(actual, expected)
        # row order does not matter
        df2 = df.iloc[[3, 2, 1, 0, 4, 5], :]
        actual = Dataset.from_dataframe(df2)
        assert_identical(actual, expected)
        # missing (x, y) combinations fill with NaN
        df3 = df.iloc[:4, :]
        expected3 = Dataset(
            {"z": (("x", "y"), [[0, 1, 2], [3, np.nan, np.nan]])},
            coords={"x": ["a", "b"], "y": [1, 2, 3]},
        )
        actual = Dataset.from_dataframe(df3)
        assert_identical(actual, expected3)
        df_nonunique = df.iloc[[0, 0], :]
        with pytest.raises(ValueError, match=r"non-unique MultiIndex"):
            Dataset.from_dataframe(df_nonunique)
    def test_from_dataframe_unsorted_levels(self):
        """Unsorted MultiIndex levels convert without being silently re-sorted."""
        # regression test for GH-4186
        index = pd.MultiIndex(
            levels=[["b", "a"], ["foo"]], codes=[[0, 1], [0, 0]], names=["lev1", "lev2"]
        )
        df = pd.DataFrame({"c1": [0, 2], "c2": [1, 3]}, index=index)
        expected = Dataset(
            {
                "c1": (("lev1", "lev2"), [[0], [2]]),
                "c2": (("lev1", "lev2"), [[1], [3]]),
            },
            coords={"lev1": ["b", "a"], "lev2": ["foo"]},
        )
        actual = Dataset.from_dataframe(df)
        assert_identical(actual, expected)
    def test_from_dataframe_non_unique_columns(self):
        """Duplicate column names are rejected with a clear ValueError."""
        # regression test for GH449
        df = pd.DataFrame(np.zeros((2, 2)))
        df.columns = ["foo", "foo"]
        with pytest.raises(ValueError, match=r"non-unique columns"):
            Dataset.from_dataframe(df)
    def test_convert_dataframe_with_many_types_and_multiindex(self):
        """Mixed-dtype MultiIndex DataFrames round-trip at least as faithfully as
        converting every column through np.asarray."""
        # regression test for GH737
        df = pd.DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
            }
        )
        df.index = pd.MultiIndex.from_product([["a"], range(3)], names=["one", "two"])
        roundtripped = Dataset.from_dataframe(df).to_dataframe()
        # we can't do perfectly, but we should be at least as faithful as
        # np.asarray
        expected = df.apply(np.asarray)
        assert roundtripped.equals(expected)
    def test_to_and_from_dict(self):
        """Round-trip Dataset <-> dict, including data=False metadata-only output
        and the abbreviated dict forms accepted by from_dict."""
        x = np.random.randn(10)
        y = np.random.randn(10)
        t = list("abcdefghij")
        ds = Dataset({"a": ("t", x), "b": ("t", y), "t": ("t", t)})
        expected = {
            "coords": {"t": {"dims": ("t",), "data": t, "attrs": {}}},
            "attrs": {},
            "dims": {"t": 10},
            "data_vars": {
                "a": {"dims": ("t",), "data": x.tolist(), "attrs": {}},
                "b": {"dims": ("t",), "data": y.tolist(), "attrs": {}},
            },
        }
        actual = ds.to_dict()
        # check that they are identical
        assert expected == actual
        # check roundtrip
        assert_identical(ds, Dataset.from_dict(actual))
        # check the data=False case: data replaced by dtype/shape metadata
        expected_no_data = expected.copy()
        del expected_no_data["coords"]["t"]["data"]
        del expected_no_data["data_vars"]["a"]["data"]
        del expected_no_data["data_vars"]["b"]["data"]
        endiantype = "<U1" if sys.byteorder == "little" else ">U1"
        expected_no_data["coords"]["t"].update({"dtype": endiantype, "shape": (10,)})
        expected_no_data["data_vars"]["a"].update({"dtype": "float64", "shape": (10,)})
        expected_no_data["data_vars"]["b"].update({"dtype": "float64", "shape": (10,)})
        actual_no_data = ds.to_dict(data=False)
        assert expected_no_data == actual_no_data
        # verify coords are included in roundtrip
        expected_ds = ds.set_coords("b")
        actual = Dataset.from_dict(expected_ds.to_dict())
        assert_identical(expected_ds, actual)
        # test some incomplete dicts: from_dict accepts scalar "dims" strings
        d = {
            "coords": {"t": {"dims": "t", "data": t}},
            "dims": "t",
            "data_vars": {"a": {"dims": "t", "data": x}, "b": {"dims": "t", "data": y}},
        }
        assert_identical(ds, Dataset.from_dict(d))
        # flat form without coords/data_vars keys also works
        d = {
            "a": {"dims": "t", "data": x},
            "t": {"data": t, "dims": "t"},
            "b": {"dims": "t", "data": y},
        }
        assert_identical(ds, Dataset.from_dict(d))
        # missing "dims" on a variable is an error
        d = {
            "a": {"data": x},
            "t": {"data": t, "dims": "t"},
            "b": {"dims": "t", "data": y},
        }
        with pytest.raises(
            ValueError, match=r"cannot convert dict without the key 'dims'"
        ):
            Dataset.from_dict(d)
    def test_to_and_from_dict_with_time_dim(self):
        """Datetime coordinates survive the dict round trip."""
        x = np.random.randn(10, 3)
        y = np.random.randn(10, 3)
        t = pd.date_range("20130101", periods=10)
        lat = [77.7, 83.2, 76]
        ds = Dataset(
            {
                "a": (["t", "lat"], x),
                "b": (["t", "lat"], y),
                "t": ("t", t),
                "lat": ("lat", lat),
            }
        )
        roundtripped = Dataset.from_dict(ds.to_dict())
        assert_identical(ds, roundtripped)
    def test_to_and_from_dict_with_nan_nat(self):
        """NaN values and NaT timestamps survive the dict round trip."""
        x = np.random.randn(10, 3)
        y = np.random.randn(10, 3)
        y[2] = np.nan
        t = pd.Series(pd.date_range("20130101", periods=10))
        t[2] = np.nan
        lat = [77.7, 83.2, 76]
        ds = Dataset(
            {
                "a": (["t", "lat"], x),
                "b": (["t", "lat"], y),
                "t": ("t", t),
                "lat": ("lat", lat),
            }
        )
        roundtripped = Dataset.from_dict(ds.to_dict())
        assert_identical(ds, roundtripped)
def test_to_dict_with_numpy_attrs(self):
x = np.random.randn(10)
y = np.random.randn(10)
t = list("abcdefghij")
attrs = {
"created": np.float64(1998),
"coords": np.array([37, -110.1, 100]),
"maintainer": "bar",
}
ds = Dataset({"a": ("t", x, attrs), "b": ("t", y, attrs), "t": ("t", t)})
expected_attrs = {
"created": attrs["created"].item(),
"coords": attrs["coords"].tolist(),
"maintainer": "bar",
}
actual = ds.to_dict()
# check that they are identical
assert expected_attrs == actual["data_vars"]["a"]["attrs"]
def test_pickle(self):
data = create_test_data()
roundtripped = pickle.loads(pickle.dumps(data))
assert_identical(data, roundtripped)
# regression test for #167:
assert data.dims == roundtripped.dims
    def test_lazy_load(self):
        """Opening from a store defers data access until values are requested.

        InaccessibleVariableDataStore raises UnexpectedDataAccess whenever
        variable data is read, so only operations that touch actual values
        should fail; pure indexing should stay lazy.
        """
        store = InaccessibleVariableDataStore()
        create_test_data().dump_to_store(store)
        for decode_cf in [True, False]:
            ds = open_dataset(store, decode_cf=decode_cf)
            with pytest.raises(UnexpectedDataAccess):
                ds.load()
            with pytest.raises(UnexpectedDataAccess):
                ds["var1"].values
            # these should not raise UnexpectedDataAccess:
            ds.isel(time=10)
            ds.isel(time=slice(10), dim1=[0]).isel(dim1=0, dim2=-1)
def test_dropna(self):
x = np.random.randn(4, 4)
x[::2, 0] = np.nan
y = np.random.randn(4)
y[-1] = np.nan
ds = Dataset({"foo": (("a", "b"), x), "bar": (("b", y))})
expected = ds.isel(a=slice(1, None, 2))
actual = ds.dropna("a")
assert_identical(actual, expected)
expected = ds.isel(b=slice(1, 3))
actual = ds.dropna("b")
assert_identical(actual, expected)
actual = ds.dropna("b", subset=["foo", "bar"])
assert_identical(actual, expected)
expected = ds.isel(b=slice(1, None))
actual = ds.dropna("b", subset=["foo"])
assert_identical(actual, expected)
expected = ds.isel(b=slice(3))
actual = ds.dropna("b", subset=["bar"])
assert_identical(actual, expected)
actual = ds.dropna("a", subset=[])
assert_identical(actual, ds)
actual = ds.dropna("a", subset=["bar"])
assert_identical(actual, ds)
actual = ds.dropna("a", how="all")
assert_identical(actual, ds)
actual = ds.dropna("b", how="all", subset=["bar"])
expected = ds.isel(b=[0, 1, 2])
assert_identical(actual, expected)
actual = ds.dropna("b", thresh=1, subset=["bar"])
assert_identical(actual, expected)
actual = ds.dropna("b", thresh=2)
assert_identical(actual, ds)
actual = ds.dropna("b", thresh=4)
expected = ds.isel(b=[1, 2, 3])
assert_identical(actual, expected)
actual = ds.dropna("a", thresh=3)
expected = ds.isel(a=[1, 3])
assert_identical(actual, ds)
with pytest.raises(ValueError, match=r"a single dataset dimension"):
ds.dropna("foo")
with pytest.raises(ValueError, match=r"invalid how"):
ds.dropna("a", how="somehow")
with pytest.raises(TypeError, match=r"must specify how or thresh"):
ds.dropna("a", how=None)
    def test_fillna(self):
        """fillna accepts scalars, dicts, Datasets and DataArrays, and works
        through groupby while propagating attrs.

        Note: `ds` is mutated partway through (a `b` variable and attrs are
        added), so the assertions are order-sensitive.
        """
        ds = Dataset({"a": ("x", [np.nan, 1, np.nan, 3])}, {"x": [0, 1, 2, 3]})
        # fill with -1
        actual = ds.fillna(-1)
        expected = Dataset({"a": ("x", [-1, 1, -1, 3])}, {"x": [0, 1, 2, 3]})
        assert_identical(expected, actual)
        actual = ds.fillna({"a": -1})
        assert_identical(expected, actual)
        other = Dataset({"a": -1})
        actual = ds.fillna(other)
        assert_identical(expected, actual)
        actual = ds.fillna({"a": other.a})
        assert_identical(expected, actual)
        # fill with range(4)
        b = DataArray(range(4), coords=[("x", range(4))])
        actual = ds.fillna(b)
        expected = b.rename("a").to_dataset()
        assert_identical(expected, actual)
        actual = ds.fillna(expected)
        assert_identical(expected, actual)
        actual = ds.fillna(range(4))
        assert_identical(expected, actual)
        actual = ds.fillna(b[:3])
        assert_identical(expected, actual)
        # okay to only include some data variables
        ds["b"] = np.nan
        actual = ds.fillna({"a": -1})
        expected = Dataset(
            {"a": ("x", [-1, 1, -1, 3]), "b": np.nan}, {"x": [0, 1, 2, 3]}
        )
        assert_identical(expected, actual)
        # but new data variables is not okay
        with pytest.raises(ValueError, match=r"must be contained"):
            ds.fillna({"x": 0})
        # empty argument should be OK
        result = ds.fillna({})
        assert_identical(ds, result)
        result = ds.fillna(Dataset(coords={"c": 42}))
        expected = ds.assign_coords(c=42)
        assert_identical(expected, result)
        # groupby
        expected = Dataset({"a": ("x", range(4))}, {"x": [0, 1, 2, 3]})
        for target in [ds, expected]:
            target.coords["b"] = ("x", [0, 0, 1, 1])
        actual = ds.groupby("b").fillna(DataArray([0, 2], dims="b"))
        assert_identical(expected, actual)
        actual = ds.groupby("b").fillna(Dataset({"a": ("b", [0, 2])}))
        assert_identical(expected, actual)
        # attrs with groupby
        ds.attrs["attr"] = "ds"
        ds.a.attrs["attr"] = "da"
        actual = ds.groupby("b").fillna(Dataset({"a": ("b", [0, 2])}))
        assert actual.attrs == ds.attrs
        assert actual.a.name == "a"
        assert actual.a.attrs == ds.a.attrs
        da = DataArray(range(5), name="a", attrs={"attr": "da"})
        actual = da.fillna(1)
        assert actual.name == "a"
        assert actual.attrs == da.attrs
        ds = Dataset({"a": da}, attrs={"attr": "ds"})
        actual = ds.fillna({"a": 1})
        assert actual.attrs == ds.attrs
        assert actual.a.name == "a"
        assert actual.a.attrs == ds.a.attrs
@pytest.mark.parametrize(
"func", [lambda x: x.clip(0, 1), lambda x: np.float64(1.0) * x, np.abs, abs]
)
def test_propagate_attrs(self, func):
da = DataArray(range(5), name="a", attrs={"attr": "da"})
ds = Dataset({"a": da}, attrs={"attr": "ds"})
# test defaults
assert func(ds).attrs == ds.attrs
with set_options(keep_attrs=False):
assert func(ds).attrs != ds.attrs
assert func(ds).a.attrs != ds.a.attrs
with set_options(keep_attrs=False):
assert func(ds).attrs != ds.attrs
assert func(ds).a.attrs != ds.a.attrs
with set_options(keep_attrs=True):
assert func(ds).attrs == ds.attrs
assert func(ds).a.attrs == ds.a.attrs
    def test_where(self):
        """where masks elements failing a condition, given as Dataset,
        DataArray, raw ndarray, scalar bool, or callable; attrs propagate
        through plain and groupby forms."""
        ds = Dataset({"a": ("x", range(5))})
        expected = Dataset({"a": ("x", [np.nan, np.nan, 2, 3, 4])})
        actual = ds.where(ds > 1)
        assert_identical(expected, actual)
        actual = ds.where(ds.a > 1)
        assert_identical(expected, actual)
        actual = ds.where(ds.a.values > 1)
        assert_identical(expected, actual)
        actual = ds.where(True)
        assert_identical(ds, actual)
        expected = ds.copy(deep=True)
        expected["a"].values = [np.nan] * 5
        actual = ds.where(False)
        assert_identical(expected, actual)
        # 2d
        ds = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])})
        expected = Dataset({"a": (("x", "y"), [[np.nan, 1], [2, 3]])})
        actual = ds.where(ds > 0)
        assert_identical(expected, actual)
        # groupby
        ds = Dataset({"a": ("x", range(5))}, {"c": ("x", [0, 0, 1, 1, 1])})
        cond = Dataset({"a": ("c", [True, False])})
        expected = ds.copy(deep=True)
        expected["a"].values = [0, 1] + [np.nan] * 3
        actual = ds.groupby("c").where(cond)
        assert_identical(expected, actual)
        # attrs with groupby
        ds.attrs["attr"] = "ds"
        ds.a.attrs["attr"] = "da"
        actual = ds.groupby("c").where(cond)
        assert actual.attrs == ds.attrs
        assert actual.a.name == "a"
        assert actual.a.attrs == ds.a.attrs
        # attrs
        da = DataArray(range(5), name="a", attrs={"attr": "da"})
        actual = da.where(da.values > 1)
        assert actual.name == "a"
        assert actual.attrs == da.attrs
        ds = Dataset({"a": da}, attrs={"attr": "ds"})
        actual = ds.where(ds > 0)
        assert actual.attrs == ds.attrs
        assert actual.a.name == "a"
        assert actual.a.attrs == ds.a.attrs
        # lambda
        ds = Dataset({"a": ("x", range(5))})
        expected = Dataset({"a": ("x", [np.nan, np.nan, 2, 3, 4])})
        actual = ds.where(lambda x: x > 1)
        assert_identical(expected, actual)
    def test_where_other(self):
        """where with an `other` fill value keeps the dtype and validates
        incompatible argument combinations."""
        ds = Dataset({"a": ("x", range(5))}, {"x": range(5)})
        expected = Dataset({"a": ("x", [-1, -1, 2, 3, 4])}, {"x": range(5)})
        actual = ds.where(ds > 1, -1)
        assert_equal(expected, actual)
        # integer fill keeps the array integer-typed
        assert actual.a.dtype == int
        actual = ds.where(lambda x: x > 1, -1)
        assert_equal(expected, actual)
        with pytest.raises(ValueError, match=r"cannot set"):
            ds.where(ds > 1, other=0, drop=True)
        with pytest.raises(ValueError, match=r"indexes .* are not equal"):
            ds.where(ds > 1, ds.isel(x=slice(3)))
        with pytest.raises(ValueError, match=r"exact match required"):
            ds.where(ds > 1, ds.assign(b=2))
    def test_where_drop(self):
        """where(..., drop=True) trims labels that are all-masked, for 1-d,
        2-d, odd-coordinate and multi-variable cases."""
        # if drop=True
        # 1d
        # data array case
        array = DataArray(range(5), coords=[range(5)], dims=["x"])
        expected = DataArray(range(5)[2:], coords=[range(5)[2:]], dims=["x"])
        actual = array.where(array > 1, drop=True)
        assert_identical(expected, actual)
        # dataset case
        ds = Dataset({"a": array})
        expected = Dataset({"a": expected})
        actual = ds.where(ds > 1, drop=True)
        assert_identical(expected, actual)
        actual = ds.where(ds.a > 1, drop=True)
        assert_identical(expected, actual)
        with pytest.raises(TypeError, match=r"must be a"):
            ds.where(np.arange(5) > 1, drop=True)
        # 1d with odd coordinates
        array = DataArray(
            np.array([2, 7, 1, 8, 3]), coords=[np.array([3, 1, 4, 5, 9])], dims=["x"]
        )
        expected = DataArray(
            np.array([7, 8, 3]), coords=[np.array([1, 5, 9])], dims=["x"]
        )
        actual = array.where(array > 2, drop=True)
        assert_identical(expected, actual)
        # 1d multiple variables
        ds = Dataset({"a": (("x"), [0, 1, 2, 3]), "b": (("x"), [4, 5, 6, 7])})
        expected = Dataset(
            {"a": (("x"), [np.nan, 1, 2, 3]), "b": (("x"), [4, 5, 6, np.nan])}
        )
        actual = ds.where((ds > 0) & (ds < 7), drop=True)
        assert_identical(expected, actual)
        # 2d
        ds = Dataset({"a": (("x", "y"), [[0, 1], [2, 3]])})
        expected = Dataset({"a": (("x", "y"), [[np.nan, 1], [2, 3]])})
        actual = ds.where(ds > 0, drop=True)
        assert_identical(expected, actual)
        # 2d with odd coordinates
        ds = Dataset(
            {"a": (("x", "y"), [[0, 1], [2, 3]])},
            coords={
                "x": [4, 3],
                "y": [1, 2],
                "z": (["x", "y"], [[np.e, np.pi], [np.pi * np.e, np.pi * 3]]),
            },
        )
        expected = Dataset(
            {"a": (("x", "y"), [[3]])},
            coords={"x": [3], "y": [2], "z": (["x", "y"], [[np.pi * 3]])},
        )
        actual = ds.where(ds > 2, drop=True)
        assert_identical(expected, actual)
        # 2d multiple variables
        ds = Dataset(
            {"a": (("x", "y"), [[0, 1], [2, 3]]), "b": (("x", "y"), [[4, 5], [6, 7]])}
        )
        expected = Dataset(
            {
                "a": (("x", "y"), [[np.nan, 1], [2, 3]]),
                "b": (("x", "y"), [[4, 5], [6, 7]]),
            }
        )
        actual = ds.where(ds > 0, drop=True)
        assert_identical(expected, actual)
def test_where_drop_empty(self):
# regression test for GH1341
array = DataArray(np.random.rand(100, 10), dims=["nCells", "nVertLevels"])
mask = DataArray(np.zeros((100,), dtype="bool"), dims="nCells")
actual = array.where(mask, drop=True)
expected = DataArray(np.zeros((0, 10)), dims=["nCells", "nVertLevels"])
assert_identical(expected, actual)
def test_where_drop_no_indexes(self):
ds = Dataset({"foo": ("x", [0.0, 1.0])})
expected = Dataset({"foo": ("x", [1.0])})
actual = ds.where(ds == 1, drop=True)
assert_identical(expected, actual)
    def test_reduce(self):
        """Whole-dataset reductions drop reduced coords, accept str/list/tuple
        dims, and reject numpy-style axis arguments."""
        data = create_test_data()
        assert len(data.mean().coords) == 0
        actual = data.max()
        expected = Dataset({k: v.max() for k, v in data.data_vars.items()})
        assert_equal(expected, actual)
        assert_equal(data.min(dim=["dim1"]), data.min(dim="dim1"))
        for reduct, expected in [
            ("dim2", ["dim3", "time", "dim1"]),
            (["dim2", "time"], ["dim3", "dim1"]),
            (("dim2", "time"), ["dim3", "dim1"]),
            ((), ["dim2", "dim3", "time", "dim1"]),
        ]:
            actual = list(data.min(dim=reduct).dims)
            assert actual == expected
        # reducing over no dims is a no-op
        assert_equal(data.mean(dim=[]), data)
        with pytest.raises(ValueError):
            data.mean(axis=0)
def test_reduce_coords(self):
# regression test for GH1470
data = xr.Dataset({"a": ("x", [1, 2, 3])}, coords={"b": 4})
expected = xr.Dataset({"a": 2}, coords={"b": 4})
actual = data.mean("x")
assert_identical(actual, expected)
# should be consistent
actual = data["a"].mean("x").to_dataset()
assert_identical(actual, expected)
    def test_mean_uint_dtype(self):
        """mean with skipna=True handles unsigned-int variables alongside
        float variables containing NaN."""
        data = xr.Dataset(
            {
                "a": (("x", "y"), np.arange(6).reshape(3, 2).astype("uint")),
                "b": (("x",), np.array([0.1, 0.2, np.nan])),
            }
        )
        actual = data.mean("x", skipna=True)
        expected = xr.Dataset(
            {"a": data["a"].mean("x"), "b": data["b"].mean("x", skipna=True)}
        )
        assert_identical(actual, expected)
def test_reduce_bad_dim(self):
data = create_test_data()
with pytest.raises(ValueError, match=r"Dataset does not contain"):
data.mean(dim="bad_dim")
    def test_reduce_cumsum(self):
        """cumsum treats NaN as zero and, with no dim, accumulates over all
        dims of each variable."""
        data = xr.Dataset(
            {"a": 1, "b": ("x", [1, 2]), "c": (("x", "y"), [[np.nan, 3], [0, 4]])}
        )
        # cumsum over a dim absent from a variable leaves it unchanged
        assert_identical(data.fillna(0), data.cumsum("y"))
        expected = xr.Dataset(
            {"a": 1, "b": ("x", [1, 3]), "c": (("x", "y"), [[0, 3], [0, 7]])}
        )
        assert_identical(expected, data.cumsum())
    @pytest.mark.parametrize(
        "reduct, expected",
        [
            ("dim1", ["dim2", "dim3", "time", "dim1"]),
            ("dim2", ["dim3", "time", "dim1", "dim2"]),
            ("dim3", ["dim2", "time", "dim1", "dim3"]),
            ("time", ["dim2", "dim3", "dim1"]),
        ],
    )
    @pytest.mark.parametrize("func", ["cumsum", "cumprod"])
    def test_reduce_cumsum_test_dims(self, reduct, expected, func):
        """cumsum/cumprod keep all dims (in the expected order) and reject
        unknown dims."""
        data = create_test_data()
        with pytest.raises(ValueError, match=r"Dataset does not contain"):
            getattr(data, func)(dim="bad_dim")
        # ensure dimensions are correct
        actual = getattr(data, func)(dim=reduct).dims
        assert list(actual) == expected
    def test_reduce_non_numeric(self):
        """Numeric reductions silently drop string variables, so results match
        a dataset that never had them."""
        data1 = create_test_data(seed=44)
        data2 = create_test_data(seed=44)
        add_vars = {"var4": ["dim1", "dim2"]}
        for v, dims in sorted(add_vars.items()):
            size = tuple(data1.dims[d] for d in dims)
            # string-typed variable that mean() cannot reduce
            data = np.random.randint(0, 100, size=size).astype(np.str_)
            data1[v] = (dims, data, {"foo": "variable"})
        assert "var4" not in data1.mean()
        assert_equal(data1.mean(), data2.mean())
        assert_equal(data1.mean(dim="dim1"), data2.mean(dim="dim1"))
    @pytest.mark.filterwarnings(
        "ignore:Once the behaviour of DataArray:DeprecationWarning"
    )
    def test_reduce_strings(self):
        """min/max/argmin/argmax/idxmin/idxmax work on string variables,
        including bytes ('S1') and unicode ('U1') dtypes."""
        expected = Dataset({"x": "a"})
        ds = Dataset({"x": ("y", ["a", "b"])})
        ds.coords["y"] = [-10, 10]
        actual = ds.min()
        assert_identical(expected, actual)
        expected = Dataset({"x": "b"})
        actual = ds.max()
        assert_identical(expected, actual)
        expected = Dataset({"x": 0})
        actual = ds.argmin()
        assert_identical(expected, actual)
        expected = Dataset({"x": 1})
        actual = ds.argmax()
        assert_identical(expected, actual)
        expected = Dataset({"x": -10})
        actual = ds.idxmin()
        assert_identical(expected, actual)
        expected = Dataset({"x": 10})
        actual = ds.idxmax()
        assert_identical(expected, actual)
        expected = Dataset({"x": b"a"})
        ds = Dataset({"x": ("y", np.array(["a", "b"], "S1"))})
        actual = ds.min()
        assert_identical(expected, actual)
        expected = Dataset({"x": "a"})
        ds = Dataset({"x": ("y", np.array(["a", "b"], "U1"))})
        actual = ds.min()
        assert_identical(expected, actual)
    def test_reduce_dtypes(self):
        """Reductions handle bool, unsigned and complex dtypes correctly."""
        # regression test for GH342
        expected = Dataset({"x": 1})
        actual = Dataset({"x": True}).sum()
        assert_identical(expected, actual)
        # regression test for GH505
        expected = Dataset({"x": 3})
        actual = Dataset({"x": ("y", np.array([1, 2], "uint16"))}).sum()
        assert_identical(expected, actual)
        expected = Dataset({"x": 1 + 1j})
        actual = Dataset({"x": ("y", [1, 1j])}).sum()
        assert_identical(expected, actual)
    def test_reduce_keep_attrs(self):
        """mean drops attrs by default and preserves dataset and variable
        attrs with keep_attrs=True."""
        data = create_test_data()
        _attrs = {"attr1": "value1", "attr2": 2929}
        attrs = dict(_attrs)
        data.attrs = attrs
        # Test dropped attrs
        ds = data.mean()
        assert ds.attrs == {}
        for v in ds.data_vars.values():
            assert v.attrs == {}
        # Test kept attrs
        ds = data.mean(keep_attrs=True)
        assert ds.attrs == attrs
        for k, v in ds.data_vars.items():
            assert v.attrs == data[k].attrs
@pytest.mark.filterwarnings(
"ignore:Once the behaviour of DataArray:DeprecationWarning"
)
def test_reduce_argmin(self):
# regression test for #205
ds = Dataset({"a": ("x", [0, 1])})
expected = Dataset({"a": ([], 0)})
actual = ds.argmin()
assert_identical(expected, actual)
actual = ds.argmin("x")
assert_identical(expected, actual)
    def test_reduce_scalars(self):
        """var() reduces scalars and variables on unrelated dims sensibly:
        reducing over 'a' leaves the 'b' variable's dim intact."""
        ds = Dataset({"x": ("a", [2, 2]), "y": 2, "z": ("b", [2])})
        expected = Dataset({"x": 0, "y": 0, "z": 0})
        actual = ds.var()
        assert_identical(expected, actual)
        expected = Dataset({"x": 0, "y": 0, "z": ("b", [0])})
        actual = ds.var("a")
        assert_identical(expected, actual)
    def test_reduce_only_one_axis(self):
        """reduce passes a single integer axis to functions that require one,
        and surfaces a TypeError when no dim/axis can be supplied."""
        def mean_only_one_axis(x, axis):
            # reject tuple axes so reduce must pass a scalar
            if not isinstance(axis, integer_types):
                raise TypeError("non-integer axis")
            return x.mean(axis)
        ds = Dataset({"a": (["x", "y"], [[0, 1, 2, 3, 4]])})
        expected = Dataset({"a": ("x", [2])})
        actual = ds.reduce(mean_only_one_axis, "y")
        assert_identical(expected, actual)
        with pytest.raises(
            TypeError, match=r"missing 1 required positional argument: 'axis'"
        ):
            ds.reduce(mean_only_one_axis)
    def test_reduce_no_axis(self):
        """reduce with an axis-free function works globally but fails when a
        dim is requested (the function takes no axis keyword)."""
        def total_sum(x):
            return np.sum(x.flatten())
        ds = Dataset({"a": (["x", "y"], [[0, 1, 2, 3, 4]])})
        expected = Dataset({"a": ((), 10)})
        actual = ds.reduce(total_sum)
        assert_identical(expected, actual)
        with pytest.raises(TypeError, match=r"unexpected keyword argument 'axis'"):
            ds.reduce(total_sum, dim="x")
    def test_reduce_keepdims(self):
        """mean(keepdims=True) retains size-1 dims like numpy, while dropping
        coordinates that participated in the reduction."""
        ds = Dataset(
            {"a": (["x", "y"], [[0, 1, 2, 3, 4]])},
            coords={
                "y": [0, 1, 2, 3, 4],
                "x": [0],
                "lat": (["x", "y"], [[0, 1, 2, 3, 4]]),
                "c": -999.0,
            },
        )
        # Shape should match behaviour of numpy reductions with keepdims=True
        # Coordinates involved in the reduction should be removed
        actual = ds.mean(keepdims=True)
        expected = Dataset(
            {"a": (["x", "y"], np.mean(ds.a, keepdims=True).data)}, coords={"c": ds.c}
        )
        assert_identical(expected, actual)
        actual = ds.mean("x", keepdims=True)
        expected = Dataset(
            {"a": (["x", "y"], np.mean(ds.a, axis=0, keepdims=True).data)},
            coords={"y": ds.y, "c": ds.c},
        )
        assert_identical(expected, actual)
    @pytest.mark.parametrize("skipna", [True, False])
    @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]])
    def test_quantile(self, q, skipna):
        """Dataset.quantile matches per-variable DataArray.quantile and only
        adds a 'quantile' dim for non-scalar q."""
        ds = create_test_data(seed=123)
        for dim in [None, "dim1", ["dim1"]]:
            ds_quantile = ds.quantile(q, dim=dim, skipna=skipna)
            if is_scalar(q):
                assert "quantile" not in ds_quantile.dims
            else:
                assert "quantile" in ds_quantile.dims
            for var, dar in ds.data_vars.items():
                assert var in ds_quantile
                assert_identical(
                    ds_quantile[var], dar.quantile(q, dim=dim, skipna=skipna)
                )
        # reducing over several dims removes them all but keeps the rest
        dim = ["dim1", "dim2"]
        ds_quantile = ds.quantile(q, dim=dim, skipna=skipna)
        assert "dim3" in ds_quantile.dims
        assert all(d not in ds_quantile.dims for d in dim)
    @pytest.mark.parametrize("skipna", [True, False])
    def test_quantile_skipna(self, skipna):
        """skipna controls whether NaNs poison the quantile result."""
        q = 0.1
        dim = "time"
        ds = Dataset({"a": ([dim], np.arange(0, 11))})
        # mask out the first value so the data contains a NaN
        ds = ds.where(ds >= 1)
        result = ds.quantile(q=q, dim=dim, skipna=skipna)
        value = 1.9 if skipna else np.nan
        expected = Dataset({"a": value}, coords={"quantile": q})
        assert_identical(result, expected)
    @requires_bottleneck
    def test_rank(self):
        """rank applies only to variables containing the given dim, matches
        the DataArray result, and keeps coordinates."""
        ds = create_test_data(seed=1234)
        # only ds.var3 depends on dim3
        z = ds.rank("dim3")
        assert ["var3"] == list(z.data_vars)
        # same as dataarray version
        x = z.var3
        y = ds.var3.rank("dim3")
        assert_equal(x, y)
        # coordinates stick
        assert list(z.coords) == list(ds.coords)
        assert list(x.coords) == list(y.coords)
        # invalid dim
        with pytest.raises(ValueError, match=r"does not contain"):
            x.rank("invalid_dim")
def test_count(self):
ds = Dataset({"x": ("a", [np.nan, 1]), "y": 0, "z": np.nan})
expected = Dataset({"x": 1, "y": 1, "z": 0})
actual = ds.count()
assert_identical(expected, actual)
    def test_map(self):
        """map applies a function to every data variable, forwarding kwargs
        and honoring keep_attrs."""
        data = create_test_data()
        data.attrs["foo"] = "bar"
        assert_identical(data.map(np.mean), data.mean())
        expected = data.mean(keep_attrs=True)
        actual = data.map(lambda x: x.mean(keep_attrs=True), keep_attrs=True)
        assert_identical(expected, actual)
        assert_identical(data.map(lambda x: x, keep_attrs=True), data.drop_vars("time"))
        def scale(x, multiple=1):
            # extra kwargs are forwarded to the mapped function
            return multiple * x
        actual = data.map(scale, multiple=2)
        assert_equal(actual["var1"], 2 * data["var1"])
        assert_identical(actual["numbers"], data["numbers"])
        actual = data.map(np.asarray)
        expected = data.drop_vars("time")  # time is not used on a data var
        assert_equal(expected, actual)
def test_apply_pending_deprecated_map(self):
data = create_test_data()
data.attrs["foo"] = "bar"
with pytest.warns(PendingDeprecationWarning):
assert_identical(data.apply(np.mean), data.mean())
    def make_example_math_dataset(self):
        """Build the shared fixture for the arithmetic tests below: 1-d 'bar',
        2-d float 'foo' with one NaN, and an 'abc' string coordinate."""
        variables = {
            "bar": ("x", np.arange(100, 400, 100)),
            "foo": (("x", "y"), 1.0 * np.arange(12).reshape(3, 4)),
        }
        coords = {"abc": ("x", ["a", "b", "c"]), "y": 10 * np.arange(4)}
        ds = Dataset(variables, coords)
        # inject a NaN so skipna-sensitive behavior is exercised
        ds["foo"][0, 0] = np.nan
        return ds
def test_dataset_number_math(self):
ds = self.make_example_math_dataset()
assert_identical(ds, +ds)
assert_identical(ds, ds + 0)
assert_identical(ds, 0 + ds)
assert_identical(ds, ds + np.array(0))
assert_identical(ds, np.array(0) + ds)
actual = ds.copy(deep=True)
actual += 0
assert_identical(ds, actual)
    def test_unary_ops(self):
        """Unary operations on a Dataset equal mapping them per-variable, and
        ndarray-only methods are not exposed."""
        ds = self.make_example_math_dataset()
        assert_identical(ds.map(abs), abs(ds))
        assert_identical(ds.map(lambda x: x + 4), ds + 4)
        for func in [
            lambda x: x.isnull(),
            lambda x: x.round(),
            lambda x: x.astype(int),
        ]:
            assert_identical(ds.map(func), func(ds))
        assert_identical(ds.isnull(), ~ds.notnull())
        # don't actually patch these methods in
        with pytest.raises(AttributeError):
            ds.item
        with pytest.raises(AttributeError):
            ds.searchsorted
    def test_dataset_array_math(self):
        """Dataset +/- DataArray (or Variable) equals mapping the op across
        all data variables, in both operand orders and in-place."""
        ds = self.make_example_math_dataset()
        expected = ds.map(lambda x: x - ds["foo"])
        assert_identical(expected, ds - ds["foo"])
        assert_identical(expected, -ds["foo"] + ds)
        assert_identical(expected, ds - ds["foo"].variable)
        assert_identical(expected, -ds["foo"].variable + ds)
        actual = ds.copy(deep=True)
        actual -= ds["foo"]
        assert_identical(expected, actual)
        expected = ds.map(lambda x: x + ds["bar"])
        assert_identical(expected, ds + ds["bar"])
        actual = ds.copy(deep=True)
        actual += ds["bar"]
        assert_identical(expected, actual)
        expected = Dataset({"bar": ds["bar"] + np.arange(3)})
        assert_identical(expected, ds[["bar"]] + np.arange(3))
        assert_identical(expected, np.arange(3) + ds[["bar"]])
    def test_dataset_dataset_math(self):
        """Dataset-Dataset arithmetic: identities, dict-like operands,
        in-place preserves object identity, and inner alignment on subsets."""
        ds = self.make_example_math_dataset()
        assert_identical(ds, ds + 0 * ds)
        assert_identical(ds, ds + {"foo": 0, "bar": 0})
        expected = ds.map(lambda x: 2 * x)
        assert_identical(expected, 2 * ds)
        assert_identical(expected, ds + ds)
        assert_identical(expected, ds + ds.data_vars)
        assert_identical(expected, ds + dict(ds.data_vars))
        actual = ds.copy(deep=True)
        expected_id = id(actual)
        actual += ds
        assert_identical(expected, actual)
        # += must mutate in place, not rebind
        assert expected_id == id(actual)
        assert_identical(ds == ds, ds.notnull())
        subsampled = ds.isel(y=slice(2))
        expected = 2 * subsampled
        assert_identical(expected, subsampled + ds)
        assert_identical(expected, ds + subsampled)
    def test_dataset_math_auto_align(self):
        """Binary ops auto-align operands: inner join on indexes and on data
        variables, with empty intersections producing empty results."""
        ds = self.make_example_math_dataset()
        subset = ds.isel(y=[1, 3])
        expected = 2 * subset
        actual = ds + subset
        assert_identical(expected, actual)
        # disjoint indexes align to an empty dim
        actual = ds.isel(y=slice(1)) + ds.isel(y=slice(1, None))
        expected = 2 * ds.drop_sel(y=ds.y)
        assert_equal(actual, expected)
        actual = ds + ds[["bar"]]
        expected = (2 * ds[["bar"]]).merge(ds.coords)
        assert_identical(expected, actual)
        assert_identical(ds + Dataset(), ds.coords.to_dataset())
        assert_identical(Dataset() + Dataset(), Dataset())
        ds2 = Dataset(coords={"bar": 42})
        assert_identical(ds + ds2, ds.coords.merge(ds2))
        assert_identical(Dataset() + 1, Dataset())
        actual = ds.copy(deep=True)
        other = ds.isel(y=slice(2))
        actual += other
        expected = ds + other.reindex_like(ds)
        assert_identical(expected, actual)
    def test_dataset_math_errors(self):
        """Invalid arithmetic combinations raise, and a failing in-place op
        leaves the dataset unchanged."""
        ds = self.make_example_math_dataset()
        with pytest.raises(TypeError):
            ds["foo"] += ds
        with pytest.raises(TypeError):
            ds["foo"].variable += ds
        with pytest.raises(ValueError, match=r"must have the same"):
            ds += ds[["bar"]]
        other = DataArray(np.datetime64("2000-01-01"), coords={"c": 2})
        actual = ds.copy(deep=True)
        with pytest.raises(TypeError):
            actual += other
        # failed += must not partially mutate the target
        assert_identical(actual, ds)
    def test_dataset_transpose(self):
        """transpose reverses (or reorders) each variable's dims, including
        multi-dim coordinates; Ellipsis fills remaining dims; bad dim lists
        raise."""
        ds = Dataset(
            {
                "a": (("x", "y"), np.random.randn(3, 4)),
                "b": (("y", "x"), np.random.randn(4, 3)),
            },
            coords={
                "x": range(3),
                "y": range(4),
                "xy": (("x", "y"), np.random.randn(3, 4)),
            },
        )
        actual = ds.transpose()
        expected = Dataset(
            {"a": (("y", "x"), ds.a.values.T), "b": (("x", "y"), ds.b.values.T)},
            coords={
                "x": ds.x.values,
                "y": ds.y.values,
                "xy": (("y", "x"), ds.xy.values.T),
            },
        )
        assert_identical(expected, actual)
        # transpose(...) alone is a no-op
        actual = ds.transpose(...)
        expected = ds
        assert_identical(expected, actual)
        actual = ds.transpose("x", "y")
        expected = ds.map(lambda x: x.transpose("x", "y", transpose_coords=True))
        assert_identical(expected, actual)
        ds = create_test_data()
        actual = ds.transpose()
        for k in ds.variables:
            assert actual[k].dims[::-1] == ds[k].dims
        new_order = ("dim2", "dim3", "dim1", "time")
        actual = ds.transpose(*new_order)
        for k in ds.variables:
            expected_dims = tuple(d for d in new_order if d in ds[k].dims)
            assert actual[k].dims == expected_dims
        # same as above but with ellipsis
        new_order = ("dim2", "dim3", "dim1", "time")
        actual = ds.transpose("dim2", "dim3", ...)
        for k in ds.variables:
            expected_dims = tuple(d for d in new_order if d in ds[k].dims)
            assert actual[k].dims == expected_dims
        with pytest.raises(ValueError, match=r"permuted"):
            ds.transpose("dim1", "dim2", "dim3")
        with pytest.raises(ValueError, match=r"permuted"):
            ds.transpose("dim1", "dim2", "dim3", "time", "extra_dim")
        # Datasets (unlike DataArrays) deliberately have no .T shortcut
        assert "T" not in dir(ds)
def test_dataset_ellipsis_transpose_different_ordered_vars(self):
# https://github.com/pydata/xarray/issues/1081#issuecomment-544350457
ds = Dataset(
dict(
a=(("w", "x", "y", "z"), np.ones((2, 3, 4, 5))),
b=(("x", "w", "y", "z"), np.zeros((3, 2, 4, 5))),
)
)
result = ds.transpose(..., "z", "y")
assert list(result["a"].dims) == list("wxzy")
assert list(result["b"].dims) == list("xwzy")
def test_dataset_retains_period_index_on_transpose(self):
ds = create_test_data()
ds["time"] = pd.period_range("2000-01-01", periods=20)
transposed = ds.transpose()
assert isinstance(transposed.time.to_index(), pd.PeriodIndex)
def test_dataset_diff_n1_simple(self):
ds = Dataset({"foo": ("x", [5, 5, 6, 6])})
actual = ds.diff("x")
expected = Dataset({"foo": ("x", [0, 1, 0])})
assert_equal(expected, actual)
def test_dataset_diff_n1_label(self):
ds = Dataset({"foo": ("x", [5, 5, 6, 6])}, {"x": [0, 1, 2, 3]})
actual = ds.diff("x", label="lower")
expected = Dataset({"foo": ("x", [0, 1, 0])}, {"x": [0, 1, 2]})
assert_equal(expected, actual)
actual = ds.diff("x", label="upper")
expected = Dataset({"foo": ("x", [0, 1, 0])}, {"x": [1, 2, 3]})
assert_equal(expected, actual)
    def test_dataset_diff_n1(self):
        """diff('dim2') matches np.diff on variables containing dim2, leaves
        other variables untouched, and keeps coordinates."""
        ds = create_test_data(seed=1)
        actual = ds.diff("dim2")
        expected = {}
        expected["var1"] = DataArray(
            np.diff(ds["var1"].values, axis=1),
            {"dim2": ds["dim2"].values[1:]},
            ["dim1", "dim2"],
        )
        expected["var2"] = DataArray(
            np.diff(ds["var2"].values, axis=1),
            {"dim2": ds["dim2"].values[1:]},
            ["dim1", "dim2"],
        )
        # var3 has no dim2, so it passes through unchanged
        expected["var3"] = ds["var3"]
        expected = Dataset(expected, coords={"time": ds["time"].values})
        expected.coords["numbers"] = ("dim3", ds["numbers"].values)
        assert_equal(expected, actual)
    def test_dataset_diff_n2(self):
        """diff('dim2', n=2) matches np.diff with n=2 on variables containing
        dim2, leaving the others and the coordinates intact."""
        ds = create_test_data(seed=1)
        actual = ds.diff("dim2", n=2)
        expected = {}
        expected["var1"] = DataArray(
            np.diff(ds["var1"].values, axis=1, n=2),
            {"dim2": ds["dim2"].values[2:]},
            ["dim1", "dim2"],
        )
        expected["var2"] = DataArray(
            np.diff(ds["var2"].values, axis=1, n=2),
            {"dim2": ds["dim2"].values[2:]},
            ["dim1", "dim2"],
        )
        # var3 has no dim2, so it passes through unchanged
        expected["var3"] = ds["var3"]
        expected = Dataset(expected, coords={"time": ds["time"].values})
        expected.coords["numbers"] = ("dim3", ds["numbers"].values)
        assert_equal(expected, actual)
def test_dataset_diff_exception_n_neg(self):
ds = create_test_data(seed=1)
with pytest.raises(ValueError, match=r"must be non-negative"):
ds.diff("dim2", n=-1)
def test_dataset_diff_exception_label_str(self):
ds = create_test_data(seed=1)
with pytest.raises(ValueError, match=r"'label' argument has to"):
ds.diff("dim2", label="raise_me")
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": -10}])
    def test_shift(self, fill_value):
        """shift moves data while coordinates stay put, padding with the
        requested fill value (scalar, per-variable dict, or NaN default)."""
        coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
        attrs = {"meta": "data"}
        ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
        actual = ds.shift(x=1, fill_value=fill_value)
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value = np.nan
        elif isinstance(fill_value, dict):
            fill_value = fill_value.get("foo", np.nan)
        expected = Dataset({"foo": ("x", [fill_value, 1, 2])}, coords, attrs)
        assert_identical(expected, actual)
        with pytest.raises(ValueError, match=r"dimensions"):
            ds.shift(foo=123)
    def test_roll_coords(self):
        """roll(roll_coords=True) rotates both data and coordinates."""
        coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
        attrs = {"meta": "data"}
        ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
        actual = ds.roll(x=1, roll_coords=True)
        ex_coords = {"bar": ("x", list("cab")), "x": [2, -4, 3]}
        expected = Dataset({"foo": ("x", [3, 1, 2])}, ex_coords, attrs)
        assert_identical(expected, actual)
        with pytest.raises(ValueError, match=r"dimensions"):
            ds.roll(foo=123, roll_coords=True)
    def test_roll_no_coords(self):
        """roll(roll_coords=False) rotates data but leaves coordinates as-is."""
        coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
        attrs = {"meta": "data"}
        ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
        actual = ds.roll(x=1, roll_coords=False)
        expected = Dataset({"foo": ("x", [3, 1, 2])}, coords, attrs)
        assert_identical(expected, actual)
        with pytest.raises(ValueError, match=r"dimensions"):
            ds.roll(abc=321, roll_coords=False)
    def test_roll_coords_none(self):
        """roll(roll_coords=None) warns about the upcoming default change and
        currently behaves like roll_coords=True."""
        coords = {"bar": ("x", list("abc")), "x": [-4, 3, 2]}
        attrs = {"meta": "data"}
        ds = Dataset({"foo": ("x", [1, 2, 3])}, coords, attrs)
        with pytest.warns(FutureWarning):
            actual = ds.roll(x=1, roll_coords=None)
        ex_coords = {"bar": ("x", list("cab")), "x": [2, -4, 3]}
        expected = Dataset({"foo": ("x", [3, 1, 2])}, ex_coords, attrs)
        assert_identical(expected, actual)
def test_roll_multidim(self):
# regression test for 2445
arr = xr.DataArray(
[[1, 2, 3], [4, 5, 6]],
coords={"x": range(3), "y": range(2)},
dims=("y", "x"),
)
actual = arr.roll(x=1, roll_coords=True)
expected = xr.DataArray(
[[3, 1, 2], [6, 4, 5]], coords=[("y", [0, 1]), ("x", [2, 0, 1])]
)
assert_identical(expected, actual)
def test_real_and_imag(self):
attrs = {"foo": "bar"}
ds = Dataset({"x": ((), 1 + 2j, attrs)}, attrs=attrs)
expected_re = Dataset({"x": ((), 1, attrs)}, attrs=attrs)
assert_identical(ds.real, expected_re)
expected_im = Dataset({"x": ((), 2, attrs)}, attrs=attrs)
assert_identical(ds.imag, expected_im)
def test_setattr_raises(self):
ds = Dataset({}, coords={"scalar": 1}, attrs={"foo": "bar"})
with pytest.raises(AttributeError, match=r"cannot set attr"):
ds.scalar = 2
with pytest.raises(AttributeError, match=r"cannot set attr"):
ds.foo = 2
with pytest.raises(AttributeError, match=r"cannot set attr"):
ds.other = 2
    def test_filter_by_attrs(self):
        """filter_by_attrs selects data variables (and matches coordinates) by
        attribute value or by callable predicate; multiple criteria are
        AND-ed together."""
        precip = dict(standard_name="convective_precipitation_flux")
        temp0 = dict(standard_name="air_potential_temperature", height="0 m")
        temp10 = dict(standard_name="air_potential_temperature", height="10 m")
        ds = Dataset(
            {
                "temperature_0": (["t"], [0], temp0),
                "temperature_10": (["t"], [0], temp10),
                "precipitation": (["t"], [0], precip),
            },
            coords={"time": (["t"], [0], dict(axis="T", long_name="time_in_seconds"))},
        )
        # Test return empty Dataset.
        ds.filter_by_attrs(standard_name="invalid_standard_name")
        new_ds = ds.filter_by_attrs(standard_name="invalid_standard_name")
        assert not bool(new_ds.data_vars)
        # Test return one DataArray.
        new_ds = ds.filter_by_attrs(standard_name="convective_precipitation_flux")
        assert new_ds["precipitation"].standard_name == "convective_precipitation_flux"
        assert_equal(new_ds["precipitation"], ds["precipitation"])
        # Test filter coordinates
        new_ds = ds.filter_by_attrs(long_name="time_in_seconds")
        assert new_ds["time"].long_name == "time_in_seconds"
        assert not bool(new_ds.data_vars)
        # Test return more than one DataArray.
        new_ds = ds.filter_by_attrs(standard_name="air_potential_temperature")
        assert len(new_ds.data_vars) == 2
        for var in new_ds.data_vars:
            assert new_ds[var].standard_name == "air_potential_temperature"
        # Test callable.
        new_ds = ds.filter_by_attrs(height=lambda v: v is not None)
        assert len(new_ds.data_vars) == 2
        for var in new_ds.data_vars:
            assert new_ds[var].standard_name == "air_potential_temperature"
        new_ds = ds.filter_by_attrs(height="10 m")
        assert len(new_ds.data_vars) == 1
        for var in new_ds.data_vars:
            assert new_ds[var].height == "10 m"
        # Test return empty Dataset due to conflicting filters
        new_ds = ds.filter_by_attrs(
            standard_name="convective_precipitation_flux", height="0 m"
        )
        assert not bool(new_ds.data_vars)
        # Test return one DataArray with two filter conditions
        new_ds = ds.filter_by_attrs(
            standard_name="air_potential_temperature", height="0 m"
        )
        for var in new_ds.data_vars:
            assert new_ds[var].standard_name == "air_potential_temperature"
            assert new_ds[var].height == "0 m"
            assert new_ds[var].height != "10 m"
        # Test return empty Dataset due to conflicting callables
        new_ds = ds.filter_by_attrs(
            standard_name=lambda v: False, height=lambda v: True
        )
        assert not bool(new_ds.data_vars)
def test_binary_op_propagate_indexes(self):
    """Binary operations must propagate the operand's index objects as-is."""
    ds = Dataset(
        {"d1": DataArray([1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]})}
    )
    doubled = ds * 2
    # identity (not just equality): the very same index object is reused
    assert ds.xindexes["x"] is doubled.xindexes["x"]
def test_binary_op_join_setting(self):
    """Dataset arithmetic honors the ``arithmetic_join`` option both for
    coordinate alignment and for matching data_vars."""
    # arithmetic_join applies to data array coordinates
    missing_2 = xr.Dataset({"x": [0, 1]})
    missing_0 = xr.Dataset({"x": [1, 2]})
    with xr.set_options(arithmetic_join="outer"):
        actual = missing_2 + missing_0
    expected = xr.Dataset({"x": [0, 1, 2]})
    assert_equal(actual, expected)

    # arithmetic join also applies to data_vars
    ds1 = xr.Dataset({"foo": 1, "bar": 2})
    ds2 = xr.Dataset({"bar": 2, "baz": 3})
    expected = xr.Dataset({"bar": 4})  # default is inner joining
    actual = ds1 + ds2
    assert_equal(actual, expected)

    with xr.set_options(arithmetic_join="outer"):
        expected = xr.Dataset({"foo": np.nan, "bar": 4, "baz": np.nan})
        actual = ds1 + ds2
        assert_equal(actual, expected)

    with xr.set_options(arithmetic_join="left"):
        expected = xr.Dataset({"foo": np.nan, "bar": 4})
        actual = ds1 + ds2
        assert_equal(actual, expected)

    with xr.set_options(arithmetic_join="right"):
        expected = xr.Dataset({"bar": 4, "baz": np.nan})
        actual = ds1 + ds2
        assert_equal(actual, expected)
def test_full_like(self):
    """full_like fills every data variable; fill_value and dtype may each be
    a scalar or a per-variable dict."""
    # For more thorough tests, see test_variable.py
    # Note: testing data_vars with mismatched dtypes
    ds = Dataset(
        {
            "d1": DataArray([1, 2, 3], dims=["x"], coords={"x": [10, 20, 30]}),
            "d2": DataArray([1.1, 2.2, 3.3], dims=["y"]),
        },
        attrs={"foo": "bar"},
    )
    actual = full_like(ds, 2)

    expected = ds.copy(deep=True)
    expected["d1"].values = [2, 2, 2]
    expected["d2"].values = [2.0, 2.0, 2.0]
    # per-variable dtypes are preserved by default
    assert expected["d1"].dtype == int
    assert expected["d2"].dtype == float
    assert_identical(expected, actual)

    # override dtype
    actual = full_like(ds, fill_value=True, dtype=bool)
    expected = ds.copy(deep=True)
    expected["d1"].values = [True, True, True]
    expected["d2"].values = [True, True, True]
    assert expected["d1"].dtype == bool
    assert expected["d2"].dtype == bool
    assert_identical(expected, actual)

    # with multiple fill values
    actual = full_like(ds, {"d1": 1, "d2": 2.3})
    expected = ds.assign(d1=("x", [1, 1, 1]), d2=("y", [2.3, 2.3, 2.3]))
    assert expected["d1"].dtype == int
    assert expected["d2"].dtype == float
    assert_identical(expected, actual)

    # override multiple dtypes
    actual = full_like(ds, fill_value={"d1": 1, "d2": 2.3}, dtype={"d1": bool})
    expected = ds.assign(d1=("x", [True, True, True]), d2=("y", [2.3, 2.3, 2.3]))
    assert expected["d1"].dtype == bool
    assert expected["d2"].dtype == float
    assert_identical(expected, actual)
def test_combine_first(self):
    """combine_first fills gaps from the other dataset and, for disjoint
    variables, behaves exactly like xr.merge."""
    first = DataArray([0, 0], [("x", ["a", "b"])]).to_dataset(name="dsx0")
    second = DataArray([1, 1], [("x", ["b", "c"])]).to_dataset(name="dsx1")

    combined = first.combine_first(second)
    expected = Dataset(
        {"dsx0": ("x", [0, 0, np.nan]), "dsx1": ("x", [np.nan, 1, 1])},
        coords={"x": ["a", "b", "c"]},
    )
    assert_equal(combined, expected)
    # with non-overlapping variable names this is just a merge
    assert_equal(combined, xr.merge([first, second]))

    # works just like xr.merge([self, other])
    other = DataArray([2, 2, 2], [("x", ["b", "c", "d"])]).to_dataset(name="dsy2")
    assert_equal(first.combine_first(other), xr.merge([other, first]))
def test_sortby(self):
    """Exercise Dataset.sortby: by DataArray, by name, descending order,
    alignment of short keys, 1-D lexsort, multi-dim sort, and MultiIndex."""
    ds = Dataset(
        {
            "A": DataArray(
                [[1, 2], [3, 4], [5, 6]], [("x", ["c", "b", "a"]), ("y", [1, 0])]
            ),
            "B": DataArray([[5, 6], [7, 8], [9, 10]], dims=["x", "y"]),
        }
    )
    sorted1d = Dataset(
        {
            "A": DataArray(
                [[5, 6], [3, 4], [1, 2]], [("x", ["a", "b", "c"]), ("y", [1, 0])]
            ),
            "B": DataArray([[9, 10], [7, 8], [5, 6]], dims=["x", "y"]),
        }
    )
    sorted2d = Dataset(
        {
            "A": DataArray(
                [[6, 5], [4, 3], [2, 1]], [("x", ["a", "b", "c"]), ("y", [0, 1])]
            ),
            "B": DataArray([[10, 9], [8, 7], [6, 5]], dims=["x", "y"]),
        }
    )

    expected = sorted1d
    dax = DataArray([100, 99, 98], [("x", ["c", "b", "a"])])
    actual = ds.sortby(dax)
    assert_equal(actual, expected)

    # test descending order sort
    actual = ds.sortby(dax, ascending=False)
    assert_equal(actual, ds)

    # test alignment (fills in nan for 'c')
    dax_short = DataArray([98, 97], [("x", ["b", "a"])])
    actual = ds.sortby(dax_short)
    assert_equal(actual, expected)

    # test 1-D lexsort
    # dax0 is sorted first to give indices of [1, 2, 0]
    # and then dax1 would be used to move index 2 ahead of 1
    dax0 = DataArray([100, 95, 95], [("x", ["c", "b", "a"])])
    dax1 = DataArray([0, 1, 0], [("x", ["c", "b", "a"])])
    actual = ds.sortby([dax0, dax1])  # lexsort underneath gives [2, 1, 0]
    assert_equal(actual, expected)

    expected = sorted2d
    # test multi-dim sort by 1D dataarray values
    day = DataArray([90, 80], [("y", [1, 0])])
    actual = ds.sortby([day, dax])
    assert_equal(actual, expected)

    # test exception-raising
    with pytest.raises(KeyError) as excinfo:
        actual = ds.sortby("z")

    with pytest.raises(ValueError) as excinfo:
        actual = ds.sortby(ds["A"])
    assert "DataArray is not 1-D" in str(excinfo.value)

    expected = sorted1d
    actual = ds.sortby("x")
    assert_equal(actual, expected)

    # test pandas.MultiIndex
    indices = (("b", 1), ("b", 0), ("a", 1), ("a", 0))
    midx = pd.MultiIndex.from_tuples(indices, names=["one", "two"])
    ds_midx = Dataset(
        {
            "A": DataArray(
                [[1, 2], [3, 4], [5, 6], [7, 8]], [("x", midx), ("y", [1, 0])]
            ),
            "B": DataArray([[5, 6], [7, 8], [9, 10], [11, 12]], dims=["x", "y"]),
        }
    )
    actual = ds_midx.sortby("x")
    midx_reversed = pd.MultiIndex.from_tuples(
        tuple(reversed(indices)), names=["one", "two"]
    )
    expected = Dataset(
        {
            "A": DataArray(
                [[7, 8], [5, 6], [3, 4], [1, 2]],
                [("x", midx_reversed), ("y", [1, 0])],
            ),
            "B": DataArray([[11, 12], [9, 10], [7, 8], [5, 6]], dims=["x", "y"]),
        }
    )
    assert_equal(actual, expected)

    # multi-dim sort by coordinate objects
    expected = sorted2d
    actual = ds.sortby(["x", "y"])
    assert_equal(actual, expected)

    # test descending order sort
    actual = ds.sortby(["x", "y"], ascending=False)
    assert_equal(actual, ds)
def test_attribute_access(self):
    """Variables, coords and attrs are reachable as attributes and via dir()."""
    ds = create_test_data(seed=1)

    for name in ("var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers"):
        assert_equal(ds[name], getattr(ds, name))
        assert name in dir(ds)

    for name in ("dim3", "dim1", "numbers"):
        assert_equal(ds["var3"][name], getattr(ds.var3, name))
        assert name in dir(ds["var3"])

    # attribute-style access also reaches into .attrs
    assert ds.var3.foo == ds["var3"].attrs["foo"]
    assert "foo" in dir(ds["var3"])
def test_ipython_key_completion(self):
    """_ipython_key_completions_ lists exactly the indexable keys for
    Dataset, DataArray, coords and data_vars (including stacked dims)."""
    ds = create_test_data(seed=1)
    actual = ds._ipython_key_completions_()
    expected = ["var1", "var2", "var3", "time", "dim1", "dim2", "dim3", "numbers"]
    for item in actual:
        ds[item]  # should not raise
    assert sorted(actual) == sorted(expected)

    # for dataarray
    actual = ds["var3"]._ipython_key_completions_()
    expected = ["dim3", "dim1", "numbers"]
    for item in actual:
        ds["var3"][item]  # should not raise
    assert sorted(actual) == sorted(expected)

    # MultiIndex
    ds_midx = ds.stack(dim12=["dim1", "dim2"])
    actual = ds_midx._ipython_key_completions_()
    expected = [
        "var1",
        "var2",
        "var3",
        "time",
        "dim1",
        "dim2",
        "dim3",
        "numbers",
        "dim12",
    ]
    for item in actual:
        ds_midx[item]  # should not raise
    assert sorted(actual) == sorted(expected)

    # coords
    actual = ds.coords._ipython_key_completions_()
    expected = ["time", "dim1", "dim2", "dim3", "numbers"]
    for item in actual:
        ds.coords[item]  # should not raise
    assert sorted(actual) == sorted(expected)

    actual = ds["var3"].coords._ipython_key_completions_()
    expected = ["dim1", "dim3", "numbers"]
    for item in actual:
        ds["var3"].coords[item]  # should not raise
    assert sorted(actual) == sorted(expected)

    # data_vars
    actual = ds.data_vars._ipython_key_completions_()
    expected = ["var1", "var2", "var3", "dim1"]
    for item in actual:
        ds.data_vars[item]  # should not raise
    assert sorted(actual) == sorted(expected)
def test_polyfit_output(self):
    """polyfit emits per-variable coefficient vars (plus rank with full=True)."""
    ds = create_test_data(seed=1)

    result = ds.polyfit("dim2", 2, full=False)
    assert "var1_polyfit_coefficients" in result

    result = ds.polyfit("dim1", 2, full=True)
    assert "var1_polyfit_coefficients" in result
    assert "dim1_matrix_rank" in result

    # fitting along "time" yields no coefficient variables here
    result = ds.polyfit("time", 2)
    assert len(result.data_vars) == 0
def test_polyfit_warnings(self):
    """A poorly-conditioned fit warns once; full=True suppresses the warning."""
    ds = create_test_data(seed=1)

    with warnings.catch_warnings(record=True) as ws:
        ds.var1.polyfit("dim2", 10, full=False)
        assert len(ws) == 1
        # NOTE(review): np.RankWarning moved to np.exceptions.RankWarning in
        # NumPy 1.25 and the top-level alias was removed in 2.0 — confirm the
        # pinned NumPy version before upgrading.
        assert ws[0].category == np.RankWarning
        # full=True must not add another warning
        ds.var1.polyfit("dim2", 10, full=True)
        assert len(ws) == 1
def test_pad(self):
    """pad() grows the padded dim, fills data with constant_values and the
    dimension coordinate with NaN."""
    ds = create_test_data(seed=1)
    result = ds.pad(dim2=(1, 1), constant_values=42)

    assert result["dim2"].shape == (11,)
    assert result["var1"].shape == (8, 11)
    assert result["var2"].shape == (8, 11)
    assert result["var3"].shape == (10, 8)
    assert dict(result.dims) == {"dim1": 8, "dim2": 11, "dim3": 10, "time": 20}

    # data gets the constant fill; the coordinate gets NaN
    np.testing.assert_equal(result["var1"].isel(dim2=[0, -1]).data, 42)
    np.testing.assert_equal(result["dim2"][[0, -1]].data, np.nan)
def test_astype_attrs(self):
    """astype keeps dataset and variable attrs unless keep_attrs=False."""
    data = create_test_data(seed=123)
    data.attrs["foo"] = "bar"

    converted = data.astype(float)
    assert converted.attrs == data.attrs
    assert converted.var1.attrs == data.var1.attrs

    stripped = data.astype(float, keep_attrs=False)
    assert not stripped.attrs
    assert not stripped.var1.attrs
@pytest.mark.parametrize("parser", ["pandas", "python"])
@pytest.mark.parametrize(
    "engine", ["python", None, pytest.param("numexpr", marks=[requires_numexpr])]
)
@pytest.mark.parametrize(
    "backend", ["numpy", pytest.param("dask", marks=[requires_dask])]
)
def test_query(self, backend, engine, parser):
    """Dataset.query selects along dims via pandas-style expressions, for
    numpy- and dask-backed data and for both expression parsers."""
    # setup test data
    np.random.seed(42)
    a = np.arange(0, 10, 1)
    b = np.random.randint(0, 100, size=10)
    c = np.linspace(0, 1, 20)
    d = np.random.choice(["foo", "bar", "baz"], size=30, replace=True).astype(
        object
    )
    e = np.arange(0, 10 * 20).reshape(10, 20)
    f = np.random.normal(0, 1, size=(10, 20, 30))

    if backend == "numpy":
        ds = Dataset(
            {
                "a": ("x", a),
                "b": ("x", b),
                "c": ("y", c),
                "d": ("z", d),
                "e": (("x", "y"), e),
                "f": (("x", "y", "z"), f),
            }
        )
    elif backend == "dask":
        ds = Dataset(
            {
                "a": ("x", da.from_array(a, chunks=3)),
                "b": ("x", da.from_array(b, chunks=3)),
                "c": ("y", da.from_array(c, chunks=7)),
                "d": ("z", da.from_array(d, chunks=12)),
                "e": (("x", "y"), da.from_array(e, chunks=(3, 7))),
                "f": (("x", "y", "z"), da.from_array(f, chunks=(3, 7, 12))),
            }
        )

    # query single dim, single variable
    actual = ds.query(x="a > 5", engine=engine, parser=parser)
    expect = ds.isel(x=(a > 5))
    assert_identical(expect, actual)

    # query single dim, single variable, via dict
    actual = ds.query(dict(x="a > 5"), engine=engine, parser=parser)
    expect = ds.isel(dict(x=(a > 5)))
    assert_identical(expect, actual)

    # query single dim, single variable
    actual = ds.query(x="b > 50", engine=engine, parser=parser)
    expect = ds.isel(x=(b > 50))
    assert_identical(expect, actual)

    # query single dim, single variable
    actual = ds.query(y="c < .5", engine=engine, parser=parser)
    expect = ds.isel(y=(c < 0.5))
    assert_identical(expect, actual)

    # query single dim, single string variable
    if parser == "pandas":
        # N.B., this query currently only works with the pandas parser
        # xref https://github.com/pandas-dev/pandas/issues/40436
        actual = ds.query(z='d == "bar"', engine=engine, parser=parser)
        expect = ds.isel(z=(d == "bar"))
        assert_identical(expect, actual)

    # query single dim, multiple variables
    actual = ds.query(x="(a > 5) & (b > 50)", engine=engine, parser=parser)
    expect = ds.isel(x=((a > 5) & (b > 50)))
    assert_identical(expect, actual)

    # query single dim, multiple variables with computation
    actual = ds.query(x="(a * b) > 250", engine=engine, parser=parser)
    expect = ds.isel(x=(a * b) > 250)
    assert_identical(expect, actual)

    # check pandas query syntax is supported
    if parser == "pandas":
        actual = ds.query(x="(a > 5) and (b > 50)", engine=engine, parser=parser)
        expect = ds.isel(x=((a > 5) & (b > 50)))
        assert_identical(expect, actual)

    # query multiple dims via kwargs
    actual = ds.query(x="a > 5", y="c < .5", engine=engine, parser=parser)
    expect = ds.isel(x=(a > 5), y=(c < 0.5))
    assert_identical(expect, actual)

    # query multiple dims via kwargs
    if parser == "pandas":
        actual = ds.query(
            x="a > 5", y="c < .5", z="d == 'bar'", engine=engine, parser=parser
        )
        expect = ds.isel(x=(a > 5), y=(c < 0.5), z=(d == "bar"))
        assert_identical(expect, actual)

    # query multiple dims via dict
    actual = ds.query(dict(x="a > 5", y="c < .5"), engine=engine, parser=parser)
    expect = ds.isel(dict(x=(a > 5), y=(c < 0.5)))
    assert_identical(expect, actual)

    # query multiple dims via dict
    if parser == "pandas":
        actual = ds.query(
            dict(x="a > 5", y="c < .5", z="d == 'bar'"),
            engine=engine,
            parser=parser,
        )
        expect = ds.isel(dict(x=(a > 5), y=(c < 0.5), z=(d == "bar")))
        assert_identical(expect, actual)

    # test error handling
    with pytest.raises(ValueError):
        ds.query("a > 5")  # must be dict or kwargs
    with pytest.raises(ValueError):
        ds.query(x=(a > 5))  # must be query string
    with pytest.raises(IndexError):
        ds.query(y="a > 5")  # wrong length dimension
    with pytest.raises(IndexError):
        ds.query(x="c < .5")  # wrong length dimension
    with pytest.raises(IndexError):
        ds.query(x="e > 100")  # wrong number of dimensions
    with pytest.raises(UndefinedVariableError):
        ds.query(x="spam > 50")  # name not present
# pytest-style (function-based) tests
@pytest.fixture(params=[None])
def data_set(request):
    """Fixture: a fresh test Dataset (seed taken from the fixture param)."""
    seed = request.param
    return create_test_data(seed)
@pytest.mark.parametrize("test_elements", ([1, 2], np.array([1, 2]), DataArray([1, 2])))
def test_isin(test_elements):
    """Dataset.isin accepts list, ndarray and DataArray test elements."""
    expected = Dataset(
        data_vars={
            "var1": (("dim1",), [0, 1]),
            "var2": (("dim1",), [1, 1]),
            "var3": (("dim1",), [0, 1]),
        }
    ).astype("bool")

    result = Dataset(
        data_vars={
            "var1": (("dim1",), [0, 1]),
            "var2": (("dim1",), [1, 2]),
            "var3": (("dim1",), [0, 1]),
        }
    ).isin(test_elements)

    assert_equal(result, expected)
@pytest.mark.skipif(not has_dask, reason="requires dask")
@pytest.mark.parametrize("test_elements", ([1, 2], np.array([1, 2]), DataArray([1, 2])))
def test_isin_dask(test_elements):
    """isin on a chunked Dataset matches the eager (numpy) result."""
    expected = Dataset(
        data_vars={
            "var1": (("dim1",), [0, 1]),
            "var2": (("dim1",), [1, 1]),
            "var3": (("dim1",), [0, 1]),
        }
    ).astype("bool")

    result = (
        Dataset(
            data_vars={
                "var1": (("dim1",), [0, 1]),
                "var2": (("dim1",), [1, 2]),
                "var3": (("dim1",), [0, 1]),
            }
        )
        .chunk(1)
        .isin(test_elements)
        .compute()
    )

    assert_equal(result, expected)
def test_isin_dataset():
    """Dataset.isin must reject another Dataset as the test-element argument."""
    dataset = Dataset({"x": [1, 2]})
    with pytest.raises(TypeError):
        dataset.isin(dataset)
@pytest.mark.parametrize(
    "unaligned_coords",
    (
        {"x": [2, 1, 0]},
        {"x": (["x"], np.asarray([2, 1, 0]))},
        {"x": (["x"], np.asarray([1, 2, 0]))},
        {"x": pd.Index([2, 1, 0])},
        {"x": Variable(dims="x", data=[0, 2, 1])},
        {"x": IndexVariable(dims="x", data=[0, 1, 2])},
        {"y": 42},
        {"y": ("x", [2, 1, 0])},
        {"y": ("x", np.asarray([2, 1, 0]))},
        {"y": (["x"], np.asarray([2, 1, 0]))},
    ),
)
@pytest.mark.parametrize("coords", ({"x": ("x", [0, 1, 2])}, {"x": [0, 1, 2]}))
def test_dataset_constructor_aligns_to_explicit_coords(unaligned_coords, coords):
    """The Dataset constructor aligns DataArray inputs to explicit coords,
    matching the result of assigning the array after construction."""
    a = xr.DataArray([1, 2, 3], dims=["x"], coords=unaligned_coords)

    expected = xr.Dataset(coords=coords)
    expected["a"] = a

    result = xr.Dataset({"a": a}, coords=coords)

    assert_equal(expected, result)
def test_error_message_on_set_supplied():
    """Supplying a set as variable data raises TypeError naming the bad type."""
    with pytest.raises(TypeError, match="has invalid type <class 'set'>"):
        xr.Dataset({"date": [1, 2, 3], "sec": {4}})
@pytest.mark.parametrize("unaligned_coords", ({"y": ("b", np.asarray([2, 1, 0]))},))
def test_constructor_raises_with_invalid_coords(unaligned_coords):
    """A coordinate on a dimension the DataArray does not have is rejected."""
    with pytest.raises(ValueError, match="not a subset of the DataArray dimensions"):
        xr.DataArray([1, 2, 3], dims=["x"], coords=unaligned_coords)
def test_dir_expected_attrs(data_set):
    """dir() exposes methods, data variables, dims and coordinate names."""
    some_expected_attrs = {"pipe", "mean", "isnull", "var1", "dim2", "numbers"}
    assert some_expected_attrs.issubset(dir(data_set))
def test_dir_non_string(data_set):
    """dir() must tolerate non-string keys — they are simply omitted."""
    # add a numbered key to ensure this doesn't break dir
    data_set[5] = "foo"
    result = dir(data_set)
    assert 5 not in result

    # smoke check: dir() on a DataArray whose dataset uses the shape tuple as
    # dimension names (i.e. integer dims) must not raise.  A small array is
    # enough to exercise the code path — the previous 2x2000x10000 array
    # allocated ~320 MB of random floats for no extra coverage.
    sample_data = np.random.uniform(size=[2, 4, 5])
    x = xr.Dataset({"sample_data": (sample_data.shape, sample_data)})
    x2 = x["sample_data"]
    dir(x2)
def test_dir_unicode(data_set):
    """A unicode key shows up in dir()."""
    data_set["unicode"] = "uni"
    assert "unicode" in dir(data_set)
@pytest.fixture(params=[1])
def ds(request):
    """Fixture: one of two small random Datasets, selected via indirect
    parametrization (param 1 or 2)."""
    if request.param == 1:
        return Dataset(
            dict(
                z1=(["y", "x"], np.random.randn(2, 8)),
                z2=(["time", "y"], np.random.randn(10, 2)),
            ),
            dict(
                x=("x", np.linspace(0, 1.0, 8)),
                time=("time", np.linspace(0, 1.0, 10)),
                c=("y", ["a", "b"]),
                y=range(2),
            ),
        )

    if request.param == 2:
        return Dataset(
            {
                "z1": (["time", "y"], np.random.randn(10, 2)),
                "z2": (["time"], np.random.randn(10)),
                "z3": (["x", "time"], np.random.randn(8, 10)),
            },
            {
                "x": ("x", np.linspace(0, 1.0, 8)),
                "time": ("time", np.linspace(0, 1.0, 10)),
                "c": ("y", ["a", "b"]),
                "y": range(2),
            },
        )
def test_coarsen_absent_dims_error(ds):
    """Coarsening over a dimension the dataset lacks raises ValueError."""
    with pytest.raises(ValueError, match=r"not found in Dataset."):
        ds.coarsen(foo=2)
@pytest.mark.parametrize("dask", [True, False])
@pytest.mark.parametrize(("boundary", "side"), [("trim", "left"), ("pad", "right")])
def test_coarsen(ds, dask, boundary, side):
    """Dataset.coarsen matches per-variable DataArray coarsening; coordinates
    are reduced with mean by default."""
    if dask and has_dask:
        ds = ds.chunk({"x": 4})

    actual = ds.coarsen(time=2, x=3, boundary=boundary, side=side).max()
    assert_equal(
        actual["z1"], ds["z1"].coarsen(x=3, boundary=boundary, side=side).max()
    )
    # coordinate should be mean by default
    assert_equal(
        actual["time"], ds["time"].coarsen(time=2, boundary=boundary, side=side).mean()
    )
@pytest.mark.parametrize("dask", [True, False])
def test_coarsen_coords(ds, dask):
    """coord_func overrides the coordinate reduction; the default 'exact'
    boundary raises on non-divisible dims; datetime coords coarsen cleanly."""
    if dask and has_dask:
        ds = ds.chunk({"x": 4})

    # check if coord_func works in coarsen
    actual = ds.coarsen(time=2, x=3, boundary="trim", coord_func={"time": "max"}).max()
    assert_equal(actual["z1"], ds["z1"].coarsen(x=3, boundary="trim").max())
    assert_equal(actual["time"], ds["time"].coarsen(time=2, boundary="trim").max())

    # raises if the dim length is not divisible by the window (exact boundary)
    with pytest.raises(ValueError):
        ds.coarsen(x=3).mean()
    # trimming to a divisible length first should not raise
    ds.isel(x=slice(0, 3 * (len(ds["x"]) // 3))).coarsen(x=3).mean()

    # working test with a pandas datetime coordinate
    da = xr.DataArray(
        np.linspace(0, 365, num=364),
        dims="time",
        coords={"time": pd.date_range("15/12/1999", periods=364)},
    )
    actual = da.coarsen(time=2).mean()
@requires_cftime
def test_coarsen_coords_cftime():
    """Coarsening a cftime coordinate averages the time labels."""
    time_index = xr.cftime_range("2000", periods=6)
    series = xr.DataArray(range(6), [("time", time_index)])
    coarse = series.coarsen(time=3).mean()
    np.testing.assert_array_equal(
        coarse.time, xr.cftime_range("2000-01-02", freq="3D", periods=2)
    )
@pytest.mark.parametrize(
    "funcname, argument",
    [
        ("reduce", (np.mean,)),
        ("mean", ()),
    ],
)
def test_coarsen_keep_attrs(funcname, argument):
    """Coarsen reductions keep attrs by default; the keep_attrs keyword and
    the global option can override, with the keyword taking precedence."""
    global_attrs = {"units": "test", "long_name": "testing"}
    da_attrs = {"da_attr": "test"}
    attrs_coords = {"attrs_coords": "test"}
    da_not_coarsend_attrs = {"da_not_coarsend_attr": "test"}

    data = np.linspace(10, 15, 100)
    coords = np.linspace(1, 10, 100)

    ds = Dataset(
        data_vars={
            "da": ("coord", data, da_attrs),
            "da_not_coarsend": ("no_coord", data, da_not_coarsend_attrs),
        },
        coords={"coord": ("coord", coords, attrs_coords)},
        attrs=global_attrs,
    )

    # attrs are kept per default
    func = getattr(ds.coarsen(dim={"coord": 5}), funcname)
    result = func(*argument)
    assert result.attrs == global_attrs
    assert result.da.attrs == da_attrs
    assert result.da_not_coarsend.attrs == da_not_coarsend_attrs
    assert result.coord.attrs == attrs_coords
    assert result.da.name == "da"
    assert result.da_not_coarsend.name == "da_not_coarsend"

    # discard attrs
    func = getattr(ds.coarsen(dim={"coord": 5}), funcname)
    result = func(*argument, keep_attrs=False)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_coarsend.attrs == {}
    assert result.coord.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_coarsend.name == "da_not_coarsend"

    # test discard attrs using global option
    func = getattr(ds.coarsen(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=False):
        result = func(*argument)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_coarsend.attrs == {}
    assert result.coord.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_coarsend.name == "da_not_coarsend"

    # keyword takes precedence over global option
    func = getattr(ds.coarsen(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=False):
        result = func(*argument, keep_attrs=True)
    assert result.attrs == global_attrs
    assert result.da.attrs == da_attrs
    assert result.da_not_coarsend.attrs == da_not_coarsend_attrs
    assert result.coord.attrs == attrs_coords
    assert result.da.name == "da"
    assert result.da_not_coarsend.name == "da_not_coarsend"

    func = getattr(ds.coarsen(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=True):
        result = func(*argument, keep_attrs=False)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_coarsend.attrs == {}
    assert result.coord.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_coarsend.name == "da_not_coarsend"
def test_coarsen_keep_attrs_deprecated():
    """Passing keep_attrs to coarsen() itself is deprecated; the
    reduction-method keyword takes precedence."""
    global_attrs = {"units": "test", "long_name": "testing"}
    attrs_da = {"da_attr": "test"}

    data = np.linspace(10, 15, 100)
    coords = np.linspace(1, 10, 100)

    ds = Dataset(
        data_vars={"da": ("coord", data)},
        coords={"coord": coords},
        attrs=global_attrs,
    )
    ds.da.attrs = attrs_da

    # deprecated option
    with pytest.warns(
        FutureWarning, match="Passing ``keep_attrs`` to ``coarsen`` is deprecated"
    ):
        result = ds.coarsen(dim={"coord": 5}, keep_attrs=False).mean()

    assert result.attrs == {}
    assert result.da.attrs == {}

    # the keep_attrs in the reduction function takes precedence
    with pytest.warns(
        FutureWarning, match="Passing ``keep_attrs`` to ``coarsen`` is deprecated"
    ):
        result = ds.coarsen(dim={"coord": 5}, keep_attrs=True).mean(keep_attrs=False)

    assert result.attrs == {}
    assert result.da.attrs == {}
@pytest.mark.slow
@pytest.mark.parametrize("ds", (1, 2), indirect=True)
@pytest.mark.parametrize("window", (1, 2, 3, 4))
@pytest.mark.parametrize("name", ("sum", "mean", "std", "var", "min", "max", "median"))
def test_coarsen_reduce(ds, window, name):
    """Coarsen.reduce(np.nan<name>) matches the equivalent named method."""
    coarse = ds.coarsen(time=window, boundary="trim")

    via_reduce = coarse.reduce(getattr(np, f"nan{name}"))
    via_method = getattr(coarse, name)()
    assert_allclose(via_reduce, via_method)

    # variable order and per-variable dimension order are preserved
    assert list(ds.data_vars.keys()) == list(via_reduce.data_vars.keys())
    for var_name, source in ds.data_vars.items():
        assert source.dims == via_reduce[var_name].dims
@pytest.mark.parametrize(
    "funcname, argument",
    [
        ("reduce", (np.mean,)),
        ("mean", ()),
        ("construct", ("window_dim",)),
        ("count", ()),
    ],
)
def test_rolling_keep_attrs(funcname, argument):
    """Rolling operations keep attrs by default; the keep_attrs keyword and
    the global option can override, with the keyword taking precedence."""
    global_attrs = {"units": "test", "long_name": "testing"}
    da_attrs = {"da_attr": "test"}
    da_not_rolled_attrs = {"da_not_rolled_attr": "test"}

    data = np.linspace(10, 15, 100)
    coords = np.linspace(1, 10, 100)

    ds = Dataset(
        data_vars={"da": ("coord", data), "da_not_rolled": ("no_coord", data)},
        coords={"coord": coords},
        attrs=global_attrs,
    )
    ds.da.attrs = da_attrs
    ds.da_not_rolled.attrs = da_not_rolled_attrs

    # attrs are kept per default
    func = getattr(ds.rolling(dim={"coord": 5}), funcname)
    result = func(*argument)
    assert result.attrs == global_attrs
    assert result.da.attrs == da_attrs
    assert result.da_not_rolled.attrs == da_not_rolled_attrs
    assert result.da.name == "da"
    assert result.da_not_rolled.name == "da_not_rolled"

    # discard attrs
    func = getattr(ds.rolling(dim={"coord": 5}), funcname)
    result = func(*argument, keep_attrs=False)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_rolled.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_rolled.name == "da_not_rolled"

    # test discard attrs using global option
    func = getattr(ds.rolling(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=False):
        result = func(*argument)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_rolled.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_rolled.name == "da_not_rolled"

    # keyword takes precedence over global option
    func = getattr(ds.rolling(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=False):
        result = func(*argument, keep_attrs=True)
    assert result.attrs == global_attrs
    assert result.da.attrs == da_attrs
    assert result.da_not_rolled.attrs == da_not_rolled_attrs
    assert result.da.name == "da"
    assert result.da_not_rolled.name == "da_not_rolled"

    func = getattr(ds.rolling(dim={"coord": 5}), funcname)
    with set_options(keep_attrs=True):
        result = func(*argument, keep_attrs=False)
    assert result.attrs == {}
    assert result.da.attrs == {}
    assert result.da_not_rolled.attrs == {}
    assert result.da.name == "da"
    assert result.da_not_rolled.name == "da_not_rolled"
def test_rolling_keep_attrs_deprecated():
    """Passing keep_attrs to rolling() itself is deprecated; the
    reduction-method keyword takes precedence."""
    global_attrs = {"units": "test", "long_name": "testing"}
    attrs_da = {"da_attr": "test"}

    data = np.linspace(10, 15, 100)
    coords = np.linspace(1, 10, 100)

    ds = Dataset(
        data_vars={"da": ("coord", data)},
        coords={"coord": coords},
        attrs=global_attrs,
    )
    ds.da.attrs = attrs_da

    # deprecated option
    with pytest.warns(
        FutureWarning, match="Passing ``keep_attrs`` to ``rolling`` is deprecated"
    ):
        result = ds.rolling(dim={"coord": 5}, keep_attrs=False).construct("window_dim")

    assert result.attrs == {}
    assert result.da.attrs == {}

    # the keep_attrs in the reduction function takes precedence
    with pytest.warns(
        FutureWarning, match="Passing ``keep_attrs`` to ``rolling`` is deprecated"
    ):
        result = ds.rolling(dim={"coord": 5}, keep_attrs=True).construct(
            "window_dim", keep_attrs=False
        )

    assert result.attrs == {}
    assert result.da.attrs == {}
def test_rolling_properties(ds):
    """Invalid rolling arguments raise with informative messages."""
    # catching invalid args
    with pytest.raises(ValueError, match="window must be > 0"):
        ds.rolling(time=-2)
    with pytest.raises(ValueError, match="min_periods must be greater than zero"):
        ds.rolling(time=2, min_periods=0)
    with pytest.raises(KeyError, match="time2"):
        ds.rolling(time2=2)
@pytest.mark.parametrize("name", ("sum", "mean", "std", "var", "min", "max", "median"))
@pytest.mark.parametrize("center", (True, False, None))
@pytest.mark.parametrize("min_periods", (1, None))
@pytest.mark.parametrize("key", ("z1", "z2"))
def test_rolling_wrapped_bottleneck(ds, name, center, min_periods, key):
    """Rolling reductions agree with bottleneck's move_* functions."""
    bn = pytest.importorskip("bottleneck", minversion="1.1")

    rolling_obj = ds.rolling(time=7, min_periods=min_periods)

    func_name = f"move_{name}"
    actual = getattr(rolling_obj, name)()
    if key == "z1":
        # z1 has no "time" dim (fixture param 1), so rolling leaves it as-is
        expected = ds[key]
    elif key == "z2":
        expected = getattr(bn, func_name)(
            ds[key].values, window=7, axis=0, min_count=min_periods
        )
    else:
        raise ValueError
    assert_array_equal(actual[key].values, expected)

    # the "time" coordinate itself must be untouched by rolling
    rolling_obj = ds.rolling(time=7, center=center)
    actual = getattr(rolling_obj, name)()["time"]
    assert_equal(actual, ds["time"])
@requires_numbagg
def test_rolling_exp(ds):
    """rolling_exp().mean() returns a Dataset."""
    smoothed = ds.rolling_exp(time=10, window_type="span").mean()
    assert isinstance(smoothed, Dataset)
@requires_numbagg
def test_rolling_exp_keep_attrs(ds):
    """rolling_exp keeps attrs by default; keep_attrs/set_options override,
    and passing keep_attrs to rolling_exp itself only warns."""
    attrs_global = {"attrs": "global"}
    attrs_z1 = {"attr": "z1"}

    ds.attrs = attrs_global
    ds.z1.attrs = attrs_z1

    # attrs are kept per default
    result = ds.rolling_exp(time=10).mean()
    assert result.attrs == attrs_global
    assert result.z1.attrs == attrs_z1

    # discard attrs
    result = ds.rolling_exp(time=10).mean(keep_attrs=False)
    assert result.attrs == {}
    assert result.z1.attrs == {}

    # test discard attrs using global option
    with set_options(keep_attrs=False):
        result = ds.rolling_exp(time=10).mean()
    assert result.attrs == {}
    assert result.z1.attrs == {}

    # keyword takes precedence over global option
    with set_options(keep_attrs=False):
        result = ds.rolling_exp(time=10).mean(keep_attrs=True)
    assert result.attrs == attrs_global
    assert result.z1.attrs == attrs_z1

    with set_options(keep_attrs=True):
        result = ds.rolling_exp(time=10).mean(keep_attrs=False)
    assert result.attrs == {}
    assert result.z1.attrs == {}

    with pytest.warns(
        UserWarning, match="Passing ``keep_attrs`` to ``rolling_exp`` has no effect."
    ):
        ds.rolling_exp(time=10, keep_attrs=True)
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("min_periods", (None, 1, 2, 3))
@pytest.mark.parametrize("window", (1, 2, 3, 4))
def test_rolling_pandas_compat(center, window, min_periods):
    """Dataset.rolling().mean() matches pandas DataFrame.rolling().mean()."""
    df = pd.DataFrame(
        {
            "x": np.random.randn(20),
            "y": np.random.randn(20),
            "time": np.linspace(0, 1, 20),
        }
    )
    ds = Dataset.from_dataframe(df)

    # pandas requires min_periods <= window; clamp to keep the comparison valid
    if min_periods is not None and window < min_periods:
        min_periods = window

    df_rolling = df.rolling(window, center=center, min_periods=min_periods).mean()
    ds_rolling = ds.rolling(index=window, center=center, min_periods=min_periods).mean()

    np.testing.assert_allclose(df_rolling["x"].values, ds_rolling["x"].values)
    np.testing.assert_allclose(df_rolling.index, ds_rolling["index"])
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("window", (1, 2, 3, 4))
def test_rolling_construct(center, window):
    """rolling.construct + mean over the window dim reproduces pandas rolling
    mean, including stride and fill_value handling."""
    df = pd.DataFrame(
        {
            "x": np.random.randn(20),
            "y": np.random.randn(20),
            "time": np.linspace(0, 1, 20),
        }
    )

    ds = Dataset.from_dataframe(df)
    df_rolling = df.rolling(window, center=center, min_periods=1).mean()
    ds_rolling = ds.rolling(index=window, center=center)

    ds_rolling_mean = ds_rolling.construct("window").mean("window")
    np.testing.assert_allclose(df_rolling["x"].values, ds_rolling_mean["x"].values)
    np.testing.assert_allclose(df_rolling.index, ds_rolling_mean["index"])

    # with stride
    ds_rolling_mean = ds_rolling.construct("window", stride=2).mean("window")
    np.testing.assert_allclose(df_rolling["x"][::2].values, ds_rolling_mean["x"].values)
    np.testing.assert_allclose(df_rolling.index[::2], ds_rolling_mean["index"])

    # with fill_value: no NaN should survive
    ds_rolling_mean = ds_rolling.construct("window", stride=2, fill_value=0.0).mean(
        "window"
    )
    assert (ds_rolling_mean.isnull().sum() == 0).to_array(dim="vars").all()
    # NOTE(review): a count is always >= 0, so this assert is vacuous — it only
    # smoke-tests that the comparison evaluates; confirm whether `> 0` was meant.
    assert (ds_rolling_mean["x"] == 0.0).sum() >= 0
@pytest.mark.slow
@pytest.mark.parametrize("ds", (1, 2), indirect=True)
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("min_periods", (None, 1, 2, 3))
@pytest.mark.parametrize("window", (1, 2, 3, 4))
@pytest.mark.parametrize("name", ("sum", "mean", "std", "var", "min", "max", "median"))
def test_rolling_reduce(ds, center, min_periods, window, name):
    """Rolling.reduce(np.nan<name>) must agree with the equivalent named
    method and preserve dims and the order of data_vars."""
    # rolling requires min_periods <= window
    if min_periods is not None and window < min_periods:
        min_periods = window

    if name == "std" and window == 1:
        pytest.skip("std with window == 1 is unstable in bottleneck")

    rolling_obj = ds.rolling(time=window, center=center, min_periods=min_periods)

    # f-string for consistency with test_coarsen_reduce (was old-style "%")
    actual = rolling_obj.reduce(getattr(np, f"nan{name}"))
    expected = getattr(rolling_obj, name)()
    assert_allclose(actual, expected)
    assert ds.dims == actual.dims
    assert list(ds.data_vars.keys()) == list(actual.data_vars.keys())
    for key, src_var in ds.data_vars.items():
        assert src_var.dims == actual[key].dims
@pytest.mark.parametrize("ds", (2,), indirect=True)
@pytest.mark.parametrize("center", (True, False))
@pytest.mark.parametrize("min_periods", (None, 1))
@pytest.mark.parametrize("name", ("sum", "max"))
@pytest.mark.parametrize("dask", (True, False))
def test_ndrolling_reduce(ds, center, min_periods, name, dask):
    """An N-D rolling reduction equals two successive 1-D rolling reductions,
    in either dimension order."""
    if dask and has_dask:
        ds = ds.chunk({"x": 4})

    rolling_obj = ds.rolling(time=4, x=3, center=center, min_periods=min_periods)

    actual = getattr(rolling_obj, name)()
    expected = getattr(
        getattr(
            ds.rolling(time=4, center=center, min_periods=min_periods), name
        )().rolling(x=3, center=center, min_periods=min_periods),
        name,
    )()
    assert_allclose(actual, expected)
    assert actual.dims == expected.dims

    # same result when reducing in the opposite dimension order
    expected = getattr(
        getattr(
            ds.rolling(x=3, center=center, min_periods=min_periods), name
        )().rolling(time=4, center=center, min_periods=min_periods),
        name,
    )()
    assert_allclose(actual, expected)
    assert actual.dims == expected.dims
@pytest.mark.parametrize("center", (True, False, (True, False)))
@pytest.mark.parametrize("fill_value", (np.nan, 0.0))
@pytest.mark.parametrize("dask", (True, False))
def test_ndrolling_construct(center, fill_value, dask):
    """N-D rolling construct equals chaining 1-D constructs per dimension;
    center may be a scalar (applied to all dims) or a per-dim tuple."""
    da = DataArray(
        np.arange(5 * 6 * 7).reshape(5, 6, 7).astype(float),
        dims=["x", "y", "z"],
        coords={"x": ["a", "b", "c", "d", "e"], "y": np.arange(6)},
    )
    ds = xr.Dataset({"da": da})
    if dask and has_dask:
        ds = ds.chunk({"x": 4})

    actual = ds.rolling(x=3, z=2, center=center).construct(
        x="x1", z="z1", fill_value=fill_value
    )
    # normalize a scalar center to one entry per rolled dim
    if not isinstance(center, tuple):
        center = (center, center)
    expected = (
        ds.rolling(x=3, center=center[0])
        .construct(x="x1", fill_value=fill_value)
        .rolling(z=2, center=center[1])
        .construct(z="z1", fill_value=fill_value)
    )
    assert_allclose(actual, expected)
def test_raise_no_warning_for_nan_in_binary_ops():
    """Comparing NaN-containing data in a binary op must emit no warnings."""
    # NOTE(review): pytest.warns(None) is deprecated in pytest >= 7 — confirm
    # the pinned pytest version.
    with pytest.warns(None) as record:
        # np.nan, not the np.NaN alias (removed in NumPy 2.0); also consistent
        # with the rest of this file
        Dataset(data_vars={"x": ("y", [1, 2, np.nan])}) > 0
    assert len(record) == 0
@pytest.mark.filterwarnings("error")
@pytest.mark.parametrize("ds", (2,), indirect=True)
def test_raise_no_warning_assert_close(ds):
    """assert_allclose on identical datasets must not warn (warnings->errors)."""
    assert_allclose(ds, ds)
@pytest.mark.xfail(reason="See https://github.com/pydata/xarray/pull/4369 or docstring")
@pytest.mark.filterwarnings("error")
@pytest.mark.parametrize("ds", (2,), indirect=True)
@pytest.mark.parametrize("name", ("mean", "max"))
def test_raise_no_warning_dask_rolling_assert_close(ds, name):
    """N-D dask rolling should match chained 1-D rolling without warnings
    (currently xfail, see linked PR)."""
    ds = ds.chunk({"x": 4})

    rolling_obj = ds.rolling(time=4, x=3)

    actual = getattr(rolling_obj, name)()
    expected = getattr(getattr(ds.rolling(time=4), name)().rolling(x=3), name)()
    assert_allclose(actual, expected)
@pytest.mark.parametrize("dask", [True, False])
@pytest.mark.parametrize("edge_order", [1, 2])
def test_differentiate(dask, edge_order):
    """``differentiate`` must agree with ``np.gradient`` along either dim,
    for DataArray and Dataset, eagerly and through dask."""
    rs = np.random.RandomState(42)
    # Non-uniform coordinate so the gradient spacing actually matters.
    coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]
    da = xr.DataArray(
        rs.randn(8, 6),
        dims=["x", "y"],
        coords={"x": coord, "z": 3, "x2d": (("x", "y"), rs.randn(8, 6))},
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})
    ds = xr.Dataset({"var": da})
    # along x
    actual = da.differentiate("x", edge_order)
    expected_x = xr.DataArray(
        np.gradient(da, da["x"], axis=0, edge_order=edge_order),
        dims=da.dims,
        coords=da.coords,
    )
    assert_equal(expected_x, actual)
    # DataArray and Dataset paths must be consistent.
    assert_equal(
        ds["var"].differentiate("x", edge_order=edge_order),
        ds.differentiate("x", edge_order=edge_order)["var"],
    )
    # coordinate should not change
    assert_equal(da["x"], actual["x"])
    # along y
    actual = da.differentiate("y", edge_order)
    expected_y = xr.DataArray(
        np.gradient(da, da["y"], axis=1, edge_order=edge_order),
        dims=da.dims,
        coords=da.coords,
    )
    assert_equal(expected_y, actual)
    assert_equal(actual, ds.differentiate("y", edge_order=edge_order)["var"])
    assert_equal(
        ds["var"].differentiate("y", edge_order=edge_order),
        ds.differentiate("y", edge_order=edge_order)["var"],
    )
    # differentiating along a 2-D coordinate is invalid
    with pytest.raises(ValueError):
        da.differentiate("x2d")
@pytest.mark.parametrize("dask", [True, False])
def test_differentiate_datetime(dask):
    """``differentiate`` over a datetime64 coordinate must match
    ``np.gradient`` on the coordinate converted to numeric days."""
    rs = np.random.RandomState(42)
    coord = np.array(
        [
            "2004-07-13",
            "2006-01-13",
            "2010-08-13",
            "2010-09-13",
            "2010-10-11",
            "2010-12-13",
            "2011-02-13",
            "2012-08-13",
        ],
        dtype="datetime64",
    )
    da = xr.DataArray(
        rs.randn(8, 6),
        dims=["x", "y"],
        coords={"x": coord, "z": 3, "x2d": (("x", "y"), rs.randn(8, 6))},
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})
    # along x
    actual = da.differentiate("x", edge_order=1, datetime_unit="D")
    expected_x = xr.DataArray(
        np.gradient(
            da, da["x"].variable._to_numeric(datetime_unit="D"), axis=0, edge_order=1
        ),
        dims=da.dims,
        coords=da.coords,
    )
    assert_equal(expected_x, actual)
    # Using hours should just scale the derivative by 24.
    actual2 = da.differentiate("x", edge_order=1, datetime_unit="h")
    assert np.allclose(actual, actual2 * 24)
    # Differentiating the coordinate with respect to itself gives 1.
    actual = da["x"].differentiate("x", edge_order=1, datetime_unit="D")
    assert np.allclose(actual, 1.0)
    # The result should not depend on the datetime resolution (ms here).
    da = xr.DataArray(coord.astype("datetime64[ms]"), dims=["x"], coords={"x": coord})
    actual = da.differentiate("x", edge_order=1)
    assert np.allclose(actual, 1.0)
@pytest.mark.skipif(not has_cftime, reason="Test requires cftime.")
@pytest.mark.parametrize("dask", [True, False])
def test_differentiate_cftime(dask):
    """Same as the datetime64 case, but over a cftime coordinate."""
    rs = np.random.RandomState(42)
    coord = xr.cftime_range("2000", periods=8, freq="2M")
    da = xr.DataArray(
        rs.randn(8, 6),
        coords={"time": coord, "z": 3, "t2d": (("time", "y"), rs.randn(8, 6))},
        dims=["time", "y"],
    )
    if dask and has_dask:
        da = da.chunk({"time": 4})
    actual = da.differentiate("time", edge_order=1, datetime_unit="D")
    expected_data = np.gradient(
        da, da["time"].variable._to_numeric(datetime_unit="D"), axis=0, edge_order=1
    )
    expected = xr.DataArray(expected_data, coords=da.coords, dims=da.dims)
    assert_equal(expected, actual)
    # Using hours should just scale the derivative by 24.
    actual2 = da.differentiate("time", edge_order=1, datetime_unit="h")
    assert_allclose(actual, actual2 * 24)
    # Differentiating the time coordinate with respect to itself gives 1.
    actual = da["time"].differentiate("time", edge_order=1, datetime_unit="D")
    assert_allclose(actual, xr.ones_like(da["time"]).astype(float))
@pytest.mark.parametrize("dask", [True, False])
def test_integrate(dask):
    """``integrate`` must match ``np.trapz`` and be consistent between
    DataArray and Dataset, eagerly and through dask."""
    rs = np.random.RandomState(42)
    coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]
    da = xr.DataArray(
        rs.randn(8, 6),
        dims=["x", "y"],
        coords={
            "x": coord,
            "x2": (("x",), rs.randn(8)),
            "z": 3,
            "x2d": (("x", "y"), rs.randn(8, 6)),
        },
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})
    ds = xr.Dataset({"var": da})
    # along x
    actual = da.integrate("x")
    # coordinates that depend on the integrated dim drop out of the result
    expected_x = xr.DataArray(
        np.trapz(da.compute(), da["x"], axis=0),
        dims=["y"],
        coords={k: v for k, v in da.coords.items() if "x" not in v.dims},
    )
    assert_allclose(expected_x, actual.compute())
    assert_equal(ds["var"].integrate("x"), ds.integrate("x")["var"])
    # the result should keep the backing array type (dask stays dask)
    assert isinstance(actual.data, type(da.data))
    # along y
    actual = da.integrate("y")
    expected_y = xr.DataArray(
        np.trapz(da, da["y"], axis=1),
        dims=["x"],
        coords={k: v for k, v in da.coords.items() if "y" not in v.dims},
    )
    assert_allclose(expected_y, actual.compute())
    assert_equal(actual, ds.integrate("y")["var"])
    assert_equal(ds["var"].integrate("y"), ds.integrate("y")["var"])
    # integrating over both dims collapses to a scalar
    actual = da.integrate(("y", "x"))
    assert actual.ndim == 0
    with pytest.raises(ValueError):
        da.integrate("x2d")
    # the old ``dim=`` keyword is deprecated
    with pytest.warns(FutureWarning):
        da.integrate(dim="x")
@requires_scipy
@pytest.mark.parametrize("dask", [True, False])
def test_cumulative_integrate(dask):
    """``cumulative_integrate`` must match scipy's cumulative trapezoid rule
    and be consistent between DataArray and Dataset, eagerly and through dask."""
    rs = np.random.RandomState(43)
    coord = [0.2, 0.35, 0.4, 0.6, 0.7, 0.75, 0.76, 0.8]
    da = xr.DataArray(
        rs.randn(8, 6),
        dims=["x", "y"],
        coords={
            "x": coord,
            "x2": (("x",), rs.randn(8)),
            "z": 3,
            "x2d": (("x", "y"), rs.randn(8, 6)),
        },
    )
    if dask and has_dask:
        da = da.chunk({"x": 4})
    ds = xr.Dataset({"var": da})
    # along x
    actual = da.cumulative_integrate("x")
    # ``cumtrapz`` was deprecated in scipy 1.6 and removed in 1.14; fall back
    # to the old name only on older scipy versions.
    try:
        from scipy.integrate import cumulative_trapezoid
    except ImportError:  # scipy < 1.6
        from scipy.integrate import cumtrapz as cumulative_trapezoid
    expected_x = xr.DataArray(
        cumulative_trapezoid(da.compute(), da["x"], axis=0, initial=0.0),
        dims=["x", "y"],
        coords=da.coords,
    )
    assert_allclose(expected_x, actual.compute())
    assert_equal(
        ds["var"].cumulative_integrate("x"),
        ds.cumulative_integrate("x")["var"],
    )
    # the result should keep the backing array type (dask stays dask)
    assert isinstance(actual.data, type(da.data))
    # along y
    actual = da.cumulative_integrate("y")
    expected_y = xr.DataArray(
        cumulative_trapezoid(da, da["y"], axis=1, initial=0.0),
        dims=["x", "y"],
        coords=da.coords,
    )
    assert_allclose(expected_y, actual.compute())
    assert_equal(actual, ds.cumulative_integrate("y")["var"])
    assert_equal(
        ds["var"].cumulative_integrate("y"),
        ds.cumulative_integrate("y")["var"],
    )
    # cumulative integration keeps the dimensionality
    actual = da.cumulative_integrate(("y", "x"))
    assert actual.ndim == 2
    with pytest.raises(ValueError):
        da.cumulative_integrate("x2d")
@pytest.mark.parametrize("dask", [True, False])
@pytest.mark.parametrize("which_datetime", ["np", "cftime"])
def test_trapz_datetime(dask, which_datetime):
    """``integrate`` over a time coordinate must match ``np.trapz`` on the
    coordinate converted to numeric days, for datetime64 and cftime."""
    rs = np.random.RandomState(42)
    if which_datetime == "np":
        coord = np.array(
            [
                "2004-07-13",
                "2006-01-13",
                "2010-08-13",
                "2010-09-13",
                "2010-10-11",
                "2010-12-13",
                "2011-02-13",
                "2012-08-13",
            ],
            dtype="datetime64",
        )
    else:
        if not has_cftime:
            pytest.skip("Test requires cftime.")
        coord = xr.cftime_range("2000", periods=8, freq="2D")
    da = xr.DataArray(
        rs.randn(8, 6),
        coords={"time": coord, "z": 3, "t2d": (("time", "y"), rs.randn(8, 6))},
        dims=["time", "y"],
    )
    if dask and has_dask:
        da = da.chunk({"time": 4})
    actual = da.integrate("time", datetime_unit="D")
    expected_data = np.trapz(
        da.data,
        duck_array_ops.datetime_to_numeric(da["time"].data, datetime_unit="D"),
        axis=0,
    )
    # coordinates depending on ``time`` drop out of the integrated result
    expected = xr.DataArray(
        expected_data,
        dims=["y"],
        coords={k: v for k, v in da.coords.items() if "time" not in v.dims},
    )
    assert_allclose(expected, actual.compute())
    # the result should keep the backing array type (dask stays dask)
    assert isinstance(actual.data, type(da.data))
    # Integrating in hours yields a value 24x larger than in days.
    actual2 = da.integrate("time", datetime_unit="h")
    assert_allclose(actual, actual2 / 24.0)
def test_no_dict():
    """A Dataset instance must not expose ``__dict__`` (slots-only class)."""
    dataset = Dataset()
    with pytest.raises(AttributeError):
        getattr(dataset, "__dict__")
def test_subclass_slots():
    """Subclassing Dataset without declaring ``__slots__`` must fail with a
    clear error message."""
    with pytest.raises(AttributeError) as e:
        class MyDS(Dataset):
            pass
    assert str(e.value) == "MyDS must explicitly define __slots__"
def test_weakref():
    """Datasets must remain weak-referenceable despite using ``__slots__``."""
    import weakref

    dataset = Dataset()
    proxy = weakref.ref(dataset)
    assert proxy() is dataset
def test_deepcopy_obj_array():
    """``deepcopy`` must copy the elements of an object-dtype variable, not
    just the container."""
    x0 = Dataset(dict(foo=DataArray(np.array([object()]))))
    x1 = deepcopy(x0)
    # the copied element must be a distinct object
    assert x0["foo"].values[0] is not x1["foo"].values[0]
def test_clip(ds):
    """``clip`` must honor scalar min/max bounds and broadcastable bounds."""
    result = ds.clip(min=0.5)
    assert result.min(...) >= 0.5
    result = ds.clip(max=0.5)
    assert result.max(...) <= 0.5
    result = ds.clip(min=0.25, max=0.75)
    assert result.min(...) >= 0.25
    assert result.max(...) <= 0.75
    # clipping against a reduced dataset must broadcast and keep the dims
    result = ds.clip(min=ds.mean("y"), max=ds.mean("y"))
    assert result.dims == ds.dims
| true | true |
1c32367f3d8a1f38a3cb8556d0680446dbad020c | 6,747 | py | Python | numpy/_pytesttester.py | lgeiger/numpy | be8ab91f789c3b688d707940016b4c2d262913e9 | [
"BSD-3-Clause"
] | 1 | 2020-07-01T03:50:43.000Z | 2020-07-01T03:50:43.000Z | numpy/_pytesttester.py | lgeiger/numpy | be8ab91f789c3b688d707940016b4c2d262913e9 | [
"BSD-3-Clause"
] | 24 | 2021-05-03T11:31:55.000Z | 2021-08-02T11:23:24.000Z | numpy/_pytesttester.py | lgeiger/numpy | be8ab91f789c3b688d707940016b4c2d262913e9 | [
"BSD-3-Clause"
] | 2 | 2021-08-16T05:10:04.000Z | 2022-01-15T09:10:09.000Z | """
Pytest test running.
This module implements the ``test()`` function for NumPy modules. The usual
boiler plate for doing that is to put the following in the module
``__init__.py`` file::
from numpy._pytesttester import PytestTester
test = PytestTester(__name__).test
del PytestTester
Warnings filtering and other runtime settings should be dealt with in the
``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
whether or not that file is found as follows:
* ``pytest.ini`` is present (develop mode)
All warnings except those explicitly filtered out are raised as error.
* ``pytest.ini`` is absent (release mode)
DeprecationWarnings and PendingDeprecationWarnings are ignored, other
warnings are passed through.
In practice, tests run from the numpy repo are run in develop mode. That
includes the standard ``python runtests.py`` invocation.
This module is imported by every numpy subpackage, so lies at the top level to
simplify circular import issues. For the same reason, it contains no numpy
imports at module scope, instead importing numpy within function calls.
"""
import sys
import os
__all__ = ['PytestTester']
def _show_numpy_info():
    """Print NumPy version, relaxed-strides mode, and CPU feature status."""
    import numpy as np
    from numpy.core._multiarray_umath import (
        __cpu_features__, __cpu_baseline__, __cpu_dispatch__
    )
    print("NumPy version %s" % np.__version__)
    relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous
    print("NumPy relaxed strides checking option:", relaxed_strides)
    if not __cpu_baseline__ and not __cpu_dispatch__:
        enabled_features = "nothing enabled"
    else:
        # Baseline features first, then each dispatched feature marked with
        # "*" (supported by this CPU) or "?" (not supported).
        parts = [' '.join(__cpu_baseline__)]
        for feature in __cpu_dispatch__:
            marker = "*" if __cpu_features__[feature] else "?"
            parts.append("%s%s" % (feature, marker))
        enabled_features = " ".join(parts)
    print("NumPy CPU features:", enabled_features)
class PytestTester:
    """
    Pytest test runner.

    A test function is typically added to a package's __init__.py like so::

      from numpy._pytesttester import PytestTester
      test = PytestTester(__name__).test
      del PytestTester

    Calling this test function finds and runs all tests associated with the
    module and all its sub-modules.

    Attributes
    ----------
    module_name : str
        Full path to the package to test.

    Parameters
    ----------
    module_name : module name
        The name of the module to test.

    Notes
    -----
    Unlike the previous ``nose``-based implementation, this class is not
    publicly exposed as it performs some ``numpy``-specific warning
    suppression.
    """
    def __init__(self, module_name):
        # Dotted module name; resolved through sys.modules at call time.
        self.module_name = module_name
    def __call__(self, label='fast', verbose=1, extra_argv=None,
                 doctests=False, coverage=False, durations=-1, tests=None):
        """
        Run tests for module using pytest.

        Parameters
        ----------
        label : {'fast', 'full'}, optional
            Identifies the tests to run. When set to 'fast', tests decorated
            with `pytest.mark.slow` are skipped, when 'full', the slow marker
            is ignored.
        verbose : int, optional
            Verbosity value for test outputs, in the range 1-3. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to pytests.
        doctests : bool, optional
            .. note:: Not supported
        coverage : bool, optional
            If True, report coverage of NumPy code. Default is False.
            Requires installation of (pip) pytest-cov.
        durations : int, optional
            If < 0, do nothing, If 0, report time of all tests, if > 0,
            report the time of the slowest `timer` tests. Default is -1.
        tests : test or list of tests
            Tests to be executed with pytest '--pyargs'

        Returns
        -------
        result : bool
            Return True on success, false otherwise.

        Notes
        -----
        Each NumPy module exposes `test` in its namespace to run all tests for
        it. For example, to run all tests for numpy.lib:

        >>> np.lib.test() #doctest: +SKIP

        Examples
        --------
        >>> result = np.lib.test() #doctest: +SKIP
        ...
        1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
        >>> result
        True
        """
        import pytest
        import warnings
        # Resolve the already-imported package and its on-disk location.
        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])
        # setup the pytest arguments
        pytest_args = ["-l"]
        # offset verbosity. The "-q" cancels a "-v".
        pytest_args += ["-q"]
        # Filter out distutils cpu warnings (could be localized to
        # distutils tests). ASV has problems with top level import,
        # so fetch module for suppression here.
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            from numpy.distutils import cpuinfo
        # Filter out annoying import messages. Want these in both develop and
        # release mode.
        pytest_args += [
            "-W ignore:Not importing directory",
            "-W ignore:numpy.dtype size changed",
            "-W ignore:numpy.ufunc size changed",
            "-W ignore::UserWarning:cpuinfo",
            ]
        # When testing matrices, ignore their PendingDeprecationWarnings
        pytest_args += [
            "-W ignore:the matrix subclass is not",
            "-W ignore:Importing from numpy.matlib is",
            ]
        if doctests:
            raise ValueError("Doctests not supported")
        if extra_argv:
            pytest_args += list(extra_argv)
        if verbose > 1:
            # verbose=2 -> "-v", verbose=3 -> "-vv", ...
            pytest_args += ["-" + "v"*(verbose - 1)]
        if coverage:
            pytest_args += ["--cov=" + module_path]
        if label == "fast":
            # not importing at the top level to avoid circular import of module
            from numpy.testing import IS_PYPY
            if IS_PYPY:
                pytest_args += ["-m", "not slow and not slow_pypy"]
            else:
                pytest_args += ["-m", "not slow"]
        elif label != "full":
            # any other label is passed straight through as a pytest marker
            pytest_args += ["-m", label]
        if durations >= 0:
            pytest_args += ["--durations=%s" % durations]
        if tests is None:
            tests = [self.module_name]
        pytest_args += ["--pyargs"] + list(tests)
        # run tests.
        _show_numpy_info()
        try:
            code = pytest.main(pytest_args)
        except SystemExit as exc:
            # pytest.main may raise SystemExit; treat its code as the result
            code = exc.code
        return code == 0
| 31.381395 | 79 | 0.610345 | import sys
import os
__all__ = ['PytestTester']
def _show_numpy_info():
    """Print NumPy version, relaxed-strides mode, and CPU feature status."""
    from numpy.core._multiarray_umath import (
        __cpu_features__, __cpu_baseline__, __cpu_dispatch__
    )
    import numpy as np
    print("NumPy version %s" % np.__version__)
    relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous
    print("NumPy relaxed strides checking option:", relaxed_strides)
    if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0:
        enabled_features = "nothing enabled"
    else:
        # Baseline features first, then dispatched features marked with
        # "*" (supported by this CPU) or "?" (not supported).
        enabled_features = ' '.join(__cpu_baseline__)
        for feature in __cpu_dispatch__:
            if __cpu_features__[feature]:
                enabled_features += " %s*" % feature
            else:
                enabled_features += " %s?" % feature
    print("NumPy CPU features:", enabled_features)
class PytestTester:
    """Pytest test runner exposed as ``<package>.test`` on NumPy modules."""
    def __init__(self, module_name):
        # Dotted module name; resolved through sys.modules at call time.
        self.module_name = module_name
    def __call__(self, label='fast', verbose=1, extra_argv=None,
                 doctests=False, coverage=False, durations=-1, tests=None):
        """Run the module's test suite with pytest; return True on success."""
        import pytest
        import warnings
        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])
        pytest_args = ["-l"]
        # "-q" offsets verbosity: it cancels one "-v".
        pytest_args += ["-q"]
        # import cpuinfo here only to suppress its warnings below
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            from numpy.distutils import cpuinfo
        # silence known noisy import-time warnings
        pytest_args += [
            "-W ignore:Not importing directory",
            "-W ignore:numpy.dtype size changed",
            "-W ignore:numpy.ufunc size changed",
            "-W ignore::UserWarning:cpuinfo",
            ]
        # ignore PendingDeprecationWarnings from the matrix subclass
        pytest_args += [
            "-W ignore:the matrix subclass is not",
            "-W ignore:Importing from numpy.matlib is",
            ]
        if doctests:
            raise ValueError("Doctests not supported")
        if extra_argv:
            pytest_args += list(extra_argv)
        if verbose > 1:
            # verbose=2 -> "-v", verbose=3 -> "-vv", ...
            pytest_args += ["-" + "v"*(verbose - 1)]
        if coverage:
            pytest_args += ["--cov=" + module_path]
        if label == "fast":
            # local import avoids a circular import at module load time
            from numpy.testing import IS_PYPY
            if IS_PYPY:
                pytest_args += ["-m", "not slow and not slow_pypy"]
            else:
                pytest_args += ["-m", "not slow"]
        elif label != "full":
            # any other label is passed through as a pytest marker
            pytest_args += ["-m", label]
        if durations >= 0:
            pytest_args += ["--durations=%s" % durations]
        if tests is None:
            tests = [self.module_name]
        pytest_args += ["--pyargs"] + list(tests)
        _show_numpy_info()
        try:
            code = pytest.main(pytest_args)
        except SystemExit as exc:
            # pytest.main may raise SystemExit; use its code as the result
            code = exc.code
        return code == 0
| true | true |
1c32368041c5844409ff9001a9d04b7307f13cd4 | 2,791 | py | Python | firebase_admin/firestore.py | AFFOA/firebase-admin-python | bf783c313348f4c48f2b8c9bd5df52d617051989 | [
"Apache-2.0"
] | 4 | 2018-02-25T22:18:33.000Z | 2021-02-03T05:00:43.000Z | firebase_admin/firestore.py | AFFOA/firebase-admin-python | bf783c313348f4c48f2b8c9bd5df52d617051989 | [
"Apache-2.0"
] | 1 | 2018-08-15T01:05:59.000Z | 2018-08-15T01:05:59.000Z | firebase_admin/firestore.py | AFFOA/firebase-admin-python | bf783c313348f4c48f2b8c9bd5df52d617051989 | [
"Apache-2.0"
] | 1 | 2019-04-09T06:18:41.000Z | 2019-04-09T06:18:41.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Firestore module.
This module contains utilities for accessing the Google Cloud Firestore databases associated with
Firebase apps. This requires the ``google-cloud-firestore`` Python module.
"""
# Re-export the public names of google.cloud.firestore from this module so
# users can do ``from firebase_admin import firestore`` and use the same API.
try:
    from google.cloud import firestore  # pylint: disable=import-error,no-name-in-module
    # Snapshot existing names so re-exporting cannot interact with the live
    # globals() view while we add to it.
    existing = set(globals())
    for key, value in firestore.__dict__.items():
        if not key.startswith('_') and key not in existing:
            globals()[key] = value
except ImportError as error:
    # Chain the original error so the real import failure stays visible.
    raise ImportError('Failed to import the Cloud Firestore library for Python. Make sure '
                      'to install the "google-cloud-firestore" module.') from error
from firebase_admin import _utils
_FIRESTORE_ATTRIBUTE = '_firestore'
def client(app=None):
    """Returns a client that can be used to interact with Google Cloud Firestore.

    Args:
      app: An App instance (optional).

    Returns:
      google.cloud.firestore.Firestore: A `Firestore Client`_.

    Raises:
      ValueError: If a project ID is not specified either via options, credentials or
          environment variables, or if the specified project ID is not a valid string.

    .. _Firestore Client: https://googlecloudplatform.github.io/google-cloud-python/latest\
        /firestore/client.html
    """
    firestore_service = _utils.get_app_service(
        app, _FIRESTORE_ATTRIBUTE, _FirestoreClient.from_app)
    return firestore_service.get()
class _FirestoreClient(object):
    """Holds a Google Cloud Firestore client instance."""
    def __init__(self, credentials, project):
        # Single underlying google.cloud.firestore.Client, created eagerly.
        self._client = firestore.Client(credentials=credentials, project=project)
    def get(self):
        """Returns the wrapped google.cloud.firestore.Client instance."""
        return self._client
    @classmethod
    def from_app(cls, app):
        """Creates a new _FirestoreClient for the specified app."""
        credentials = app.credential.get_credential()
        project = app.project_id
        # Firestore cannot infer the project; fail fast with guidance.
        if not project:
            raise ValueError(
                'Project ID is required to access Firestore. Either set the projectId option, '
                'or use service account credentials. Alternatively, set the GCLOUD_PROJECT '
                'environment variable.')
        return _FirestoreClient(credentials, project)
| 36.246753 | 97 | 0.707273 |
# Re-export the public names of google.cloud.firestore from this module so
# users can do ``from firebase_admin import firestore`` and use the same API.
try:
    from google.cloud import firestore
    existing = globals().keys()
    for key, value in firestore.__dict__.items():
        # copy only public names that do not clash with this module's own
        if not key.startswith('_') and key not in existing:
            globals()[key] = value
except ImportError:
    raise ImportError('Failed to import the Cloud Firestore library for Python. Make sure '
                      'to install the "google-cloud-firestore" module.')
from firebase_admin import _utils
_FIRESTORE_ATTRIBUTE = '_firestore'
def client(app=None):
    """Returns a Firestore client for the given app (default app if None)."""
    fs_client = _utils.get_app_service(app, _FIRESTORE_ATTRIBUTE, _FirestoreClient.from_app)
    return fs_client.get()
class _FirestoreClient(object):
    """Holds a Google Cloud Firestore client instance."""
    def __init__(self, credentials, project):
        # Single underlying google.cloud.firestore.Client, created eagerly.
        self._client = firestore.Client(credentials=credentials, project=project)
    def get(self):
        """Returns the wrapped google.cloud.firestore.Client instance."""
        return self._client
    @classmethod
    def from_app(cls, app):
        """Creates a new _FirestoreClient for the specified app."""
        credentials = app.credential.get_credential()
        project = app.project_id
        # Firestore cannot infer the project; fail fast with guidance.
        if not project:
            raise ValueError(
                'Project ID is required to access Firestore. Either set the projectId option, '
                'or use service account credentials. Alternatively, set the GCLOUD_PROJECT '
                'environment variable.')
        return _FirestoreClient(credentials, project)
| true | true |
1c32372afbcc3b4faf98b67746dbab2cc0047a83 | 11,057 | py | Python | evolution/evaluator.py | vfcosta/coegan-trained | 44174e68909d9c03bf2e4b7e4c7a48237a560183 | [
"MIT"
] | null | null | null | evolution/evaluator.py | vfcosta/coegan-trained | 44174e68909d9c03bf2e4b7e4c7a48237a560183 | [
"MIT"
] | null | null | null | evolution/evaluator.py | vfcosta/coegan-trained | 44174e68909d9c03bf2e4b7e4c7a48237a560183 | [
"MIT"
] | 1 | 2021-06-11T16:52:55.000Z | 2021-06-11T16:52:55.000Z | import util.tools as tools
from util import config
import torch
import logging
import numpy as np
from evolution.population import Population
logger = logging.getLogger(__name__)
class Evaluator:
    """Coevolutionary trainer/evaluator for generator and discriminator
    populations, pairing individuals according to the configured strategy."""

    def __init__(self, train_loader, validation_loader):
        self.train_loader = train_loader
        self.validation_loader = validation_loader
        # Hall of fame: best individuals carried across generations.
        self.best_discriminators = []
        self.best_generators = []
        self.initial = True  # True until the first evaluation has run
        # Cached batches so every pairing sees the same data when
        # config.evolution.evaluation.same_batches is enabled.
        self.batches = []
        self.eval_batches = []

    def init_generation(self, generation):
        """Reset the per-generation batch caches."""
        self.batches = []
        self.eval_batches = []

    def train_evaluate(self, G, D, batches_limit):
        """Adversarially train the pair (G, D) for up to ``batches_limit``
        batches, then update win rates and skill ratings."""
        logger.debug(f"train: G({G.genome.gan_type}) x D({D.genome.gan_type}), batches: {batches_limit}")
        if config.evolution.evaluation.reset_optimizer:
            D.reset_optimizer_state()
            G.reset_optimizer_state()
        if G.invalid or D.invalid:  # do not evaluate if G or D are invalid
            logger.warning("invalid D or G")
            return
        torch.cuda.empty_cache()
        n = 0
        G, D = tools.cuda(G), tools.cuda(D)  # move everything to the GPU
        G.train()
        D.train()
        G.win_rate, D.win_rate = 0, 0
        while n < batches_limit:
            # Replay the cached batches when same_batches is on and the
            # cache was already filled by an earlier pairing.
            image_loader = self.batches if config.evolution.evaluation.same_batches and self.batches else self.train_loader
            for images, _ in image_loader:
                if config.evolution.evaluation.same_batches and image_loader != self.batches:
                    self.batches.append((images, _))
                n += 1
                images = tools.cuda(images)
                # Interleave G/D updates at the configured ratios.
                if n % config.gan.generator_iterations == 0:
                    D.do_train(G, images)
                if n % config.gan.critic_iterations == 0:
                    G.do_train(D, images)
                if n >= config.gan.batches_limit:
                    break
        D.win_rate /= n
        G.win_rate = 1 - D.win_rate
        D.calc_skill_rating(G)
        G.calc_skill_rating(D)
        G.cpu(), D.cpu()  # move variables back from gpu to cpu
        torch.cuda.empty_cache()

    def evaluate_population(self, generators, discriminators, batches_limit=None, evaluation_type=None, calc_fid=True):
        """Evaluate the populations using the configured pairing strategy,
        then compute global metrics and finish skill-rating updates."""
        batches_limit = batches_limit or config.gan.batches_limit
        evaluation_type = evaluation_type or config.evolution.evaluation.type
        for i in range(config.evolution.evaluation.iterations):
            if evaluation_type == "random":
                # each individual trains against two random adversaries
                for D in discriminators:
                    for g in np.random.choice(generators, 2, replace=False):
                        self.train_evaluate(g, D, batches_limit)
                for G in generators:
                    for d in np.random.choice(discriminators, 2, replace=False):
                        self.train_evaluate(G, d, batches_limit)
            elif evaluation_type == "spatial":
                rows = 3
                cols = len(discriminators)//rows
                pairs = []
                for center in range(len(discriminators)):
                    pairs.append([(center, n) for n in tools.get_neighbors(center, rows, cols)])
                # reorder pairs to avoid sequential training
                pairs = np.transpose(np.array(pairs), (1, 0, 2)).reshape(-1, 2)
                for g, d in pairs:
                    self.train_evaluate(generators[g], discriminators[d], batches_limit)
            elif evaluation_type == "spatial2":
                rows = 3
                cols = len(discriminators)//rows
                for center in range(len(discriminators)):
                    # train each center against clones of its grid neighbors
                    for n in tools.get_neighbors(center, rows, cols):
                        self.train_evaluate(generators[center], discriminators[n].clone(), batches_limit)
                        self.train_evaluate(generators[n].clone(), discriminators[center], batches_limit)
            elif evaluation_type == "all-vs-all" and config.evolution.evaluation.clone_adversarial:
                # train all-vs-all in a non-sequential order, each side
                # facing clones of the adversaries' pre-training state
                pairs = tools.permutations(generators, discriminators)
                original_generators = [g.clone() for g in generators]
                original_discriminators = [d.clone() for d in discriminators]
                for g, d in pairs:
                    self.train_evaluate(generators[g], original_discriminators[d].clone(), batches_limit)
                    self.train_evaluate(original_generators[g].clone(), discriminators[d], batches_limit)
            elif evaluation_type == "all-vs-all":
                # train all-vs-all in a non-sequential order
                pairs = tools.permutations(generators, discriminators)
                for g, d in pairs:
                    self.train_evaluate(generators[g], discriminators[d], batches_limit)
            elif evaluation_type in ["all-vs-best", "all-vs-species-best", "all-vs-kbest", "all-vs-kbest-previous"]:
                if config.evolution.evaluation.initialize_all and self.initial:
                    self.initial = False
                    # as there is no way to determine the best G and D yet,
                    # rely on all-vs-all for the first evaluation
                    return self.evaluate_population(generators, discriminators, batches_limit,
                                                    evaluation_type="all-vs-all")
                # every discriminator trains against the remembered best generators
                pairs = tools.permutations(discriminators, self.best_generators)
                for d, g in pairs:
                    adversarial = self.best_generators[g]
                    if config.evolution.evaluation.clone_adversarial:
                        adversarial = adversarial.clone()
                    self.train_evaluate(adversarial, discriminators[d], batches_limit)
                # every generator trains against the remembered best discriminators
                pairs = tools.permutations(generators, self.best_discriminators)
                for g, d in pairs:
                    adversarial = self.best_discriminators[d]
                    if config.evolution.evaluation.clone_adversarial:
                        adversarial = adversarial.clone()
                    self.train_evaluate(generators[g], adversarial, batches_limit)
        # reset FID so it is recomputed for this generation
        for G in generators:
            G.fid_score = None
        # assemble a fixed validation sample for the global metrics
        images, n = None, 0
        for batch, _ in self.validation_loader:
            if images is None:
                images = batch
            else:
                images = torch.cat((images, batch))
            n += 1
            if n >= config.evolution.fitness.evaluation_batches:
                break
        images = tools.cuda(images)
        if len(generators) > 0:
            for p in discriminators:
                p = tools.cuda(p)
                p.calc_global_metrics(self.best_generators or [Population(generators).best()], images)
                p.cpu()
        if len(discriminators) > 0:
            for p in generators:
                p = tools.cuda(p)
                p.calc_global_metrics(self.best_discriminators or [Population(discriminators).best()], images)
                p.cpu()
        # update the skill rating for the next generation
        for p in discriminators + generators + self.best_discriminators + self.best_generators:
            p.finish_calc_skill_rating()
        for p in discriminators + generators:
            p.finish_generation(calc_fid=calc_fid)

    def evaluate_all_validation(self, generators, discriminators):
        """Evaluate every individual on the validation set against the
        current populations and the remembered best adversaries."""
        logger.info(f"best G: {len(self.best_generators)}, best D: {len(self.best_discriminators)}")
        for D in discriminators:
            for G in self.best_generators + generators:
                with torch.no_grad():
                    self.evaluate_validation(G, D)
        for G in generators:
            for D in self.best_discriminators:
                with torch.no_grad():
                    self.evaluate_validation(G, D)
        # update the skill rating for the next generation
        for p in discriminators + generators + self.best_discriminators + self.best_generators:
            p.finish_calc_skill_rating()

    def update_bests(self, generators_population, discriminators_population):
        """Store the generation's best individuals in the coevolution memory."""
        self.best_discriminators = self.get_bests(discriminators_population, self.best_discriminators)
        self.best_generators = self.get_bests(generators_population, self.best_generators)

    def evaluate_validation(self, G, D, eval_generator=True, eval_discriminator=True):
        """Run the pair (G, D) over validation batches in eval mode and
        update win rates and skill ratings without training."""
        if G.invalid or D.invalid:  # do not evaluate if G or D are invalid
            logger.warning("invalid D or G")
            return
        torch.cuda.empty_cache()
        G, D = tools.cuda(G), tools.cuda(D)
        G.eval(), D.eval()
        G.win_rate, D.win_rate = 0, 0
        n = 0
        while n < config.evolution.fitness.evaluation_batches:
            image_loader = self.eval_batches if config.evolution.evaluation.same_batches and self.eval_batches else self.validation_loader
            for images, _ in image_loader:
                if config.evolution.evaluation.same_batches and image_loader != self.eval_batches:
                    self.eval_batches.append((images, _))
                n += 1
                images = tools.cuda(images)
                if eval_discriminator:
                    D.do_eval(G, images)  # FIXME always eval D when skill rating is enabled
                if eval_generator:
                    G.do_eval(D, images)
                G.win_rate = 1 - D.win_rate
                if n >= config.evolution.fitness.evaluation_batches:
                    break
        D.win_rate /= n
        G.win_rate = 1 - D.win_rate
        if eval_discriminator:
            D.calc_skill_rating(G)
        if eval_generator:
            G.calc_skill_rating(D)
        logger.debug(f"eval GLICKO G: {G.skill_rating} {G.win_rate}, D: {D.skill_rating} {D.win_rate}")
        G, D = G.cpu(), D.cpu()  # move variables back from gpu to cpu
        torch.cuda.empty_cache()

    def get_bests(self, population, previous_best=None):
        """Return the hall-of-fame memory for the configured evaluation type.

        ``previous_best`` defaults to an empty list (a ``None`` default is
        used to avoid the mutable-default-argument pitfall).
        """
        previous_best = previous_best if previous_best is not None else []
        if config.evolution.evaluation.type == "all-vs-species-best":
            return [species.best() for species in population.species_list]
        if config.evolution.evaluation.type == "all-vs-kbest":
            return population.bests(config.evolution.evaluation.best_size)
        # "all-vs-best", "all-vs-kbest-previous" and every other type keep
        # the current best plus the previous memory, truncated to best_size.
        return (population.bests(1) + previous_best)[:config.evolution.evaluation.best_size]
| 49.142222 | 138 | 0.600344 | import util.tools as tools
from util import config
import torch
import logging
import numpy as np
from evolution.population import Population
logger = logging.getLogger(__name__)
class Evaluator:
    """Runs adversarial training/evaluation between populations of generators (G)
    and discriminators (D) under several pairing strategies ("random", "spatial",
    "all-vs-all", "all-vs-best", ...) selected by ``config.evolution.evaluation.type``.

    Relies on module-level helpers: ``tools`` (cuda/permutations/neighbors),
    ``config``, ``logger``, ``np``, ``torch`` and ``Population``.
    """

    def __init__(self, train_loader, validation_loader):
        self.train_loader = train_loader
        self.validation_loader = validation_loader
        self.best_discriminators = []
        self.best_generators = []
        self.initial = True          # True until the first evaluation round ran
        self.batches = []            # cached training batches (same_batches mode)
        self.eval_batches = []       # cached validation batches (same_batches mode)

    def init_generation(self, generation):
        """Drop cached batches at the start of each generation."""
        self.batches = []
        self.eval_batches = []

    def train_evaluate(self, G, D, batches_limit):
        """Train a (G, D) pair for up to ``batches_limit`` batches.

        Updates ``win_rate`` and skill ratings on both individuals; moves the
        models to GPU for the duration and back to CPU afterwards.
        """
        logger.debug(f"train: G({G.genome.gan_type}) x D({D.genome.gan_type}), batches: {batches_limit}")
        if config.evolution.evaluation.reset_optimizer:
            D.reset_optimizer_state()
            G.reset_optimizer_state()
        if G.invalid or D.invalid:
            logger.warning("invalid D or G")
            return
        torch.cuda.empty_cache()
        n = 0
        G, D = tools.cuda(G), tools.cuda(D)
        G.train()
        D.train()
        G.win_rate, D.win_rate = 0, 0
        while n < batches_limit:
            # In same_batches mode reuse cached batches so every pair sees identical data.
            image_loader = self.batches if config.evolution.evaluation.same_batches and self.batches else self.train_loader
            for images, _ in image_loader:
                if config.evolution.evaluation.same_batches and image_loader != self.batches:
                    self.batches.append((images, _))
                n += 1
                images = tools.cuda(images)
                # Alternate G/D updates according to the configured iteration ratios.
                if n % config.gan.generator_iterations == 0:
                    D.do_train(G, images)
                if n % config.gan.critic_iterations == 0:
                    G.do_train(D, images)
                if n >= config.gan.batches_limit:
                    break
        D.win_rate /= n
        G.win_rate = 1 - D.win_rate
        D.calc_skill_rating(G)
        G.calc_skill_rating(D)
        G.cpu(), D.cpu()
        torch.cuda.empty_cache()

    def evaluate_population(self, generators, discriminators, batches_limit=None, evaluation_type=None, calc_fid=True):
        """Pair and train both populations per ``evaluation_type``, then compute
        global metrics (FID, skill ratings) against held-out validation images."""
        batches_limit = batches_limit or config.gan.batches_limit
        evaluation_type = evaluation_type or config.evolution.evaluation.type
        for i in range(config.evolution.evaluation.iterations):
            if evaluation_type == "random":
                # Each individual meets two random adversaries.
                for D in discriminators:
                    for g in np.random.choice(generators, 2, replace=False):
                        self.train_evaluate(g, D, batches_limit)
                for G in generators:
                    for d in np.random.choice(discriminators, 2, replace=False):
                        self.train_evaluate(G, d, batches_limit)
            elif evaluation_type == "spatial":
                # Grid neighborhood pairing; interleave by neighbor index so all
                # first-neighbors train before any second-neighbor does.
                rows = 3
                cols = len(discriminators)//rows
                pairs = []
                for center in range(len(discriminators)):
                    pairs.append([(center, n) for n in tools.get_neighbors(center, rows, cols)])
                pairs = np.transpose(np.array(pairs), (1, 0, 2)).reshape(-1, 2)
                for g, d in pairs:
                    self.train_evaluate(generators[g], discriminators[d], batches_limit)
            elif evaluation_type == "spatial2":
                # Like "spatial", but only the center individual accumulates
                # updates; its neighbors participate as throwaway clones.
                rows = 3
                cols = len(discriminators)//rows
                for center in range(len(discriminators)):
                    neighbors = tools.get_neighbors(center, rows, cols)
                    for n in neighbors:
                        self.train_evaluate(generators[center], discriminators[n].clone(), batches_limit)
                        self.train_evaluate(generators[n].clone(), discriminators[center], batches_limit)
            elif evaluation_type == "all-vs-all" and config.evolution.evaluation.clone_adversarial:
                # Snapshot both populations first so every pairing faces the
                # pre-round adversary, not one already updated this round.
                pairs = tools.permutations(generators, discriminators)
                original_generators = [g.clone() for g in generators]
                original_discriminators = [d.clone() for d in discriminators]
                for g, d in pairs:
                    self.train_evaluate(generators[g], original_discriminators[d].clone(), batches_limit)
                    self.train_evaluate(original_generators[g].clone(), discriminators[d], batches_limit)
            elif evaluation_type == "all-vs-all":
                pairs = tools.permutations(generators, discriminators)
                for g, d in pairs:
                    self.train_evaluate(generators[g], discriminators[d], batches_limit)
            elif evaluation_type in ["all-vs-best", "all-vs-species-best", "all-vs-kbest", "all-vs-kbest-previous"]:
                # First generation has no "bests" yet; bootstrap with all-vs-all.
                if config.evolution.evaluation.initialize_all and self.initial:
                    self.initial = False
                    return self.evaluate_population(generators, discriminators, batches_limit,
                                                    evaluation_type="all-vs-all")
                pairs = tools.permutations(discriminators, self.best_generators)
                for d, g in pairs:
                    adversarial = self.best_generators[g]
                    if config.evolution.evaluation.clone_adversarial:
                        adversarial = adversarial.clone()
                    self.train_evaluate(adversarial, discriminators[d], batches_limit)
                pairs = tools.permutations(generators, self.best_discriminators)
                for g, d in pairs:
                    adversarial = self.best_discriminators[d]
                    if config.evolution.evaluation.clone_adversarial:
                        adversarial = adversarial.clone()
                    self.train_evaluate(generators[g], adversarial, batches_limit)
        # Collect a fixed number of validation batches for the global metrics.
        for G in generators:
            G.fid_score = None
        images, n = None, 0
        for batch, _ in self.validation_loader:
            if images is None:
                images = batch
            else:
                images = torch.cat((images, batch))
            n += 1
            if n >= config.evolution.fitness.evaluation_batches:
                break
        images = tools.cuda(images)
        if len(generators) > 0:
            for p in discriminators:
                p = tools.cuda(p)
                p.calc_global_metrics(self.best_generators or [Population(generators).best()], images)
                p.cpu()
        if len(discriminators) > 0:
            for p in generators:
                p = tools.cuda(p)
                p.calc_global_metrics(self.best_discriminators or [Population(discriminators).best()], images)
                p.cpu()
        # NOTE(review): this line was garbled in the source ("elf.best_...");
        # reconstructed as iterating every rated individual — confirm upstream.
        for p in discriminators + generators + self.best_discriminators + self.best_generators:
            p.finish_calc_skill_rating()
        for p in discriminators + generators:
            p.finish_generation(calc_fid=calc_fid)

    def evaluate_all_validation(self, generators, discriminators):
        """Score every individual against the current and best adversaries on
        validation data only (no weight updates)."""
        logger.info(f"best G: {len(self.best_generators)}, best D: {len(self.best_discriminators)}")
        for D in discriminators:
            for G in self.best_generators + generators:
                with torch.no_grad():
                    self.evaluate_validation(G, D)
        for G in generators:
            for D in self.best_discriminators:
                with torch.no_grad():
                    self.evaluate_validation(G, D)
        # NOTE(review): reconstructed from a garbled source line ("elf.best_...").
        for p in generators + discriminators + self.best_discriminators + self.best_generators:
            p.finish_calc_skill_rating()

    def update_bests(self, generators_population, discriminators_population):
        """Refresh the cached best individuals used as fixed adversaries."""
        self.best_discriminators = self.get_bests(discriminators_population, self.best_discriminators)
        self.best_generators = self.get_bests(generators_population, self.best_generators)

    def evaluate_validation(self, G, D, eval_generator=True, eval_discriminator=True):
        """Evaluate a (G, D) pair on validation batches and update win rates
        and skill ratings, without training either model."""
        if G.invalid or D.invalid:
            logger.warning("invalid D or G")
            return
        torch.cuda.empty_cache()
        G, D = tools.cuda(G), tools.cuda(D)
        G.eval(), D.eval()
        G.win_rate, D.win_rate = 0, 0
        n = 0
        while n < config.evolution.fitness.evaluation_batches:
            image_loader = self.eval_batches if config.evolution.evaluation.same_batches and self.eval_batches else self.validation_loader
            for images, _ in image_loader:
                if config.evolution.evaluation.same_batches and image_loader != self.eval_batches:
                    self.eval_batches.append((images, _))
                n += 1
                images = tools.cuda(images)
                if eval_discriminator:
                    D.do_eval(G, images)
                if eval_generator:
                    G.do_eval(D, images)
                G.win_rate = 1 - D.win_rate
                if n >= config.evolution.fitness.evaluation_batches:
                    break
        D.win_rate /= n
        G.win_rate = 1 - D.win_rate
        if eval_discriminator:
            D.calc_skill_rating(G)
        if eval_generator:
            G.calc_skill_rating(D)
        logger.debug(f"eval GLICKO G: {G.skill_rating} {G.win_rate}, D: {D.skill_rating} {D.win_rate}")
        G, D = G.cpu(), D.cpu()
        torch.cuda.empty_cache()

    def get_bests(self, population, previous_best=None):
        """Return the adversaries for the next round per the configured strategy.

        ``previous_best`` defaults to an empty list (None sentinel avoids the
        shared-mutable-default pitfall of the original ``previous_best=[]``).
        """
        previous_best = [] if previous_best is None else previous_best
        if config.evolution.evaluation.type == "all-vs-species-best":
            return [species.best() for species in population.species_list]
        elif config.evolution.evaluation.type == "all-vs-best":
            return (population.bests(1) + previous_best)[:config.evolution.evaluation.best_size]
        elif config.evolution.evaluation.type == "all-vs-kbest":
            return population.bests(config.evolution.evaluation.best_size)
        elif config.evolution.evaluation.type == "all-vs-kbest-previous":
            return (population.bests(1) + previous_best)[:config.evolution.evaluation.best_size]
        return (population.bests(1) + previous_best)[:config.evolution.evaluation.best_size]
| true | true |
1c3237e26b246d2c6c4ee775f1496398da1f6b77 | 1,643 | py | Python | ooobuild/lo/ucb/x_content_provider_supplier.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/ucb/x_content_provider_supplier.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/ucb/x_content_provider_supplier.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.ucb
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from .x_content_provider import XContentProvider as XContentProvider_d4150cc0
class XContentProviderSupplier(XInterface_8f010a43):
    """Supplier for a content provider.

    Auto-generated (Cheetah) Python wrapper for the UNO interface
    ``com.sun.star.ucb.XContentProviderSupplier`` (LibreOffice 7.3).

    See Also:
        `API XContentProviderSupplier <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1ucb_1_1XContentProviderSupplier.html>`_
    """
    # UNO type metadata consumed by the ooobuild/pyuno runtime.
    __ooo_ns__: str = 'com.sun.star.ucb'
    __ooo_full_ns__: str = 'com.sun.star.ucb.XContentProviderSupplier'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.ucb.XContentProviderSupplier'
    @abstractmethod
    def getContentProvider(self) -> 'XContentProvider_d4150cc0':
        """Return the content provider supplied by this object."""
__all__ = ['XContentProviderSupplier']
| 34.957447 | 150 | 0.752891 |
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from .x_content_provider import XContentProvider as XContentProvider_d4150cc0
class XContentProviderSupplier(XInterface_8f010a43):
__ooo_ns__: str = 'com.sun.star.ucb'
__ooo_full_ns__: str = 'com.sun.star.ucb.XContentProviderSupplier'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.ucb.XContentProviderSupplier'
@abstractmethod
def getContentProvider(self) -> 'XContentProvider_d4150cc0':
__all__ = ['XContentProviderSupplier']
| true | true |
1c323835a274c50d8c2e101b6752aae91a3e1698 | 988 | py | Python | tools/mturk/aws_credentials.py | brendel-group/causal-understanding-via-visualizations | 3cd1a30f4305c48b1a715da5f62a552d803fb933 | [
"MIT"
] | 6 | 2021-06-30T11:10:48.000Z | 2022-01-14T11:36:14.000Z | tools/mturk/aws_credentials.py | brendel-group/causal-understanding-via-visualizations | 3cd1a30f4305c48b1a715da5f62a552d803fb933 | [
"MIT"
] | null | null | null | tools/mturk/aws_credentials.py | brendel-group/causal-understanding-via-visualizations | 3cd1a30f4305c48b1a715da5f62a552d803fb933 | [
"MIT"
] | null | null | null | """Loads the credentials needed to access AWS"""
import os
class AWSCredentials:
    """Lazy, cached access to the AWS key pair stored in ``aws.key``.

    The key file lives next to this module and contains two ``name=value``
    lines: the access key id first, the secret access key second. It is read
    once on first access and cached on the class.
    """

    __loaded = False

    @staticmethod
    def aws_access_key_id():
        """Return the AWS access key id, loading the key file on first use."""
        AWSCredentials._load_data()
        return AWSCredentials.__aws_access_key_id

    @staticmethod
    def aws_secret_access_key():
        """Return the AWS secret access key, loading the key file on first use."""
        AWSCredentials._load_data()
        return AWSCredentials.__aws_secret_access_key

    @staticmethod
    def _load_data():
        """Parse ``aws.key`` once and cache both values on the class."""
        if AWSCredentials.__loaded:
            return
        key_file = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "aws.key"
        )
        assert os.path.exists(key_file), "AWS key file not found."
        with open(key_file) as handle:
            entries = handle.readlines()
        # Each line is "name=value"; keep only the value part.
        AWSCredentials.__aws_access_key_id = entries[0].split("=")[1].strip()
        AWSCredentials.__aws_secret_access_key = entries[1].split("=")[1].strip()
        AWSCredentials.__loaded = True
| 25.333333 | 83 | 0.634615 |
import os
class AWSCredentials:
__loaded = False
@staticmethod
def aws_secret_access_key():
AWSCredentials._load_data()
return AWSCredentials.__aws_secret_access_key
@staticmethod
def aws_access_key_id():
AWSCredentials._load_data()
return AWSCredentials.__aws_access_key_id
@staticmethod
def _load_data():
if AWSCredentials.__loaded:
return
aws_key_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "aws.key"
)
assert os.path.exists(aws_key_path), "AWS key file not found."
with open(aws_key_path) as f:
lines = f.readlines()
AWSCredentials.__aws_access_key_id = lines[0].split("=")[1].strip()
AWSCredentials.__aws_secret_access_key = lines[1].split("=")[1].strip()
AWSCredentials.__loaded = True
| true | true |
1c3238a93189fc6466b61699e891244a53b23a8e | 484 | py | Python | src/database/addingData.py | inpritamkundu/python | 27e404493cd436106cdf493a81e4f074f5d5ab96 | [
"MIT"
] | null | null | null | src/database/addingData.py | inpritamkundu/python | 27e404493cd436106cdf493a81e4f074f5d5ab96 | [
"MIT"
] | null | null | null | src/database/addingData.py | inpritamkundu/python | 27e404493cd436106cdf493a81e4f074f5d5ab96 | [
"MIT"
] | null | null | null | import psycopg2
import psycopg2

# NOTE(review): credentials are hard-coded; move them to environment
# variables or a config file before sharing/deploying this script.
connection = psycopg2.connect(dbname="postgres", user="postgres",
                              password="mithukundu60@", host="localhost", port="5432")
print("connection established")
cursor = connection.cursor()
try:
    cursor.execute(
        '''INSERT INTO student(NAME,ADDRESS,AGE) VALUES('santosh','garwa',23);''')
    print("Data inserted")
    # Commit the transaction so the row is persisted.
    connection.commit()
finally:
    # Bug fix: the original ended with `connection.close` (attribute access,
    # never called), so the connection was leaked. Also close the cursor.
    cursor.close()
    connection.close()
connection = psycopg2.connect(dbname="postgres", user="postgres",
password="mithukundu60@", host="localhost", port="5432")
print("connection established")
cursor = connection.cursor()
cursor.execute(
'''INSERT INTO student(NAME,ADDRESS,AGE) VALUES('santosh','garwa',23);''')
print("Data inserted")
connection.commit()
connection.close
| true | true |
1c3238feee54598fe31e2521fe3f2e69ed774933 | 11,664 | py | Python | HadoopConnect/bin/clusters_handler.py | mshensg/hadoop-connect-for-splunk | 14afd1eb83753d6e1f95dcae49a27e3002070bbe | [
"Apache-2.0"
] | 1 | 2021-11-09T07:33:42.000Z | 2021-11-09T07:33:42.000Z | HadoopConnect/bin/clusters_handler.py | mshensg/hadoop-connect-for-splunk | 14afd1eb83753d6e1f95dcae49a27e3002070bbe | [
"Apache-2.0"
] | 2 | 2021-09-02T23:48:40.000Z | 2021-11-09T07:35:27.000Z | HadoopConnect/bin/clusters_handler.py | mshensg/hadoop-connect-for-splunk | 14afd1eb83753d6e1f95dcae49a27e3002070bbe | [
"Apache-2.0"
] | 2 | 2021-03-15T20:34:58.000Z | 2022-03-27T09:49:26.000Z | import os
import splunk.admin as admin
from clusters import *
from delegating_handler import DelegatingRestHandler
import constants
from errors import *
import util
required_args = ['uri'] # required on create
optional_args = ['namenode_http_port', 'hadoop_home', 'java_home', 'kerberos_principal', 'kerberos_service_principal', 'ha', 'hdfs_site', 'auth_to_local']
ENDPOINT = 'configs/conf-clusters'
class ClustersHandler(DelegatingRestHandler):
    """Splunk REST handler for HadoopConnect cluster configuration.

    Delegates CRUD on ``configs/conf-clusters`` stanzas and keeps the
    per-cluster working directory / core-site.xml in sync. URIs starting with
    ``file://`` denote local (non-HDFS) clusters and skip Hadoop validation.
    """

    def setup(self):
        """Declare the arguments supported by the requested REST action."""
        self.appName = constants.APP_NAME
        self.userName = 'nobody'
        if self.requestedAction == admin.ACTION_LIST:
            self.supportedArgs.addOptArg('add_versions')
        elif self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:
            req_args = required_args
            opt_args = optional_args
            for arg in opt_args:
                self.supportedArgs.addOptArg(arg)
            for arg in req_args:
                # On edit the required create-args become optional.
                if self.requestedAction == admin.ACTION_EDIT:
                    self.supportedArgs.addOptArg(arg)
                else:
                    self.supportedArgs.addReqArg(arg)

    def _isLocalURI(self, uri):
        """Return True for a local (file://) cluster URI."""
        return uri.startswith('file://')

    def handleList(self, confInfo):
        """List stanzas, enriching remote clusters with runtime information."""
        self.delegate(ENDPOINT, confInfo, method='GET')
        # add some runtime information to the items
        for name, obj in confInfo.items():
            if self._isLocalURI(obj.get('uri', '')):
                continue
            cluster = Cluster(name, obj)
            obj['hdfs_site'] = cluster.getHdfsSite()
            obj['cluster_dir'] = cluster.getClusterDir()
            obj['cluster_cli'] = cluster.hadoop_cli
            obj['uri'] = cluster.getURI().rstrip('/')
            obj['authentication_mode'] = 'simple' if obj.get('kerberos_principal', '') == '' else 'kerberos'
            obj['authorization_mode'] = '0' if obj.get('kerberos_principal', '') == '' else '1'
            if self.callerArgs.get('add_versions', [''])[0] == '1':
                # Version probes are best-effort; report 'unknown' on failure.
                local = 'unknown'
                remote = 'unknown'
                try:
                    local = cluster.getLocalHadoopVersion()
                except Exception:
                    pass
                try:
                    remote = cluster.getRemoteHadoopVersion()
                except Exception:
                    pass
                obj['local_hadoop_version'] = local
                obj['remote_hadoop_version'] = remote

    def handleCreate(self, confInfo):
        """Create a cluster stanza; roll back files/stanza on failure."""
        if self.getEntities('admin/conf-clusters', 'name='+self.callerArgs.id):
            raise HcException(HCERR2007, {'stanza':str(self.callerArgs.id)})
        conf = self.getEntity('admin/conf-clusters', '_new')
        if self._isLocalURI(self.callerArgs.get('uri', [''])[0]):
            return self.handleLocalCreateOrEdit(confInfo, conf)
        cluster = self.validateHdfsClusterArgs(conf)
        self.callerArgs.id = cluster.name
        try:
            self.handleHdfsCreateOrEdit(cluster, confInfo)
        except Exception:
            logger.exception("Failed to handleCreate:")
            # remove created dir and xml file
            cluster.remove()
            # delete new stanza if it was created
            if self.getEntities('admin/conf-clusters', self.callerArgs.id):
                self.deleteEntity('admin/conf-clusters', self.callerArgs.id)
            raise  # bare re-raise preserves the original traceback

    def handleEdit(self, confInfo):
        """Edit a cluster stanza; restore the previous config on failure."""
        conf = self.getEntity('admin/conf-clusters', self.callerArgs.id)
        uri = util.getProperty(self.callerArgs, 'uri', conf)
        if self._isLocalURI(uri):
            return self.handleLocalCreateOrEdit(confInfo, conf)
        cluster = self.validateHdfsClusterArgs(conf)
        hdfs_site_old = cluster.getHdfsSite()
        try:
            self.handleHdfsCreateOrEdit(cluster, confInfo)
        except Exception:
            logger.exception("Failed to handleEdit")
            # NOTE(review): condition kept as in the original; it restores
            # hdfs_site only when the old value was empty — confirm intent.
            if not hdfs_site_old:
                cluster.props['hdfs_site'] = hdfs_site_old
            # roll back to the previous core-site.xml settings
            cluster.props['kerberos_service_principal'] = conf.get('kerberos_service_principal')
            cluster.props['kerberos_principal'] = conf.get('kerberos_principal')
            if cluster.props['kerberos_service_principal'] is not None and cluster.props['kerberos_service_principal'].strip() != '':
                cluster.props['authentication_mode'] = 'kerberos'
                cluster.props['authorization_mode'] = '1'
            else:
                cluster.props['authentication_mode'] = 'simple'
                cluster.props['authorization_mode'] = '0'
            cluster.saveXml()
            # roll back to the previous stanza values
            for k, v in self.callerArgs.items():
                self.callerArgs[k] = conf[k]
            self.delegate(ENDPOINT, confInfo)
            raise

    def _ensureRequiredCreateArgs(self, args):
        """On create, ensure each arg is present and non-empty."""
        if self.requestedAction == admin.ACTION_CREATE:
            for arg in args:
                values = self.callerArgs.get(arg)
                if not values or values[0] is None or values[0].strip() == '':
                    raise HcException(HCERR0501, {'argument': arg})

    def validateLocalClusterArgs(self, conf):
        """Validate a file:// cluster: path exists and is not already used."""
        import util
        uri = util.getProperty(self.callerArgs, 'uri', conf)
        if not self._isLocalURI(uri):
            raise HcException(HCERR0503, {'name':'uri', 'value':uri, 'accepted_values':'file://<path>'})
        import os.path
        if not os.path.isdir(uri[7:]):  # strip the 'file://' prefix
            raise HcException(HCERR0502, {'name':'uri', 'value':uri, 'error':'path does not exist'})
        import splunk.entity as en
        clusters = en.getEntities('admin/conf-clusters', search='uri=file://*', namespace=self.appName, owner=self.userName, sessionKey=self.getSessionKey())
        for name, obj in clusters.items():
            if name != self.callerArgs.id and obj['uri'].rstrip('/') == uri.rstrip('/'):
                raise HcException(HCERR1515, {'path':uri, 'cluster':name})

    def validateHdfsClusterArgs(self, conf):
        """Validate remote-cluster args and build the corresponding Cluster.

        In HA mode the nameservice id, active namenode and http port come from
        the supplied hdfs-site.xml; otherwise the port/paths are explicit args.
        """
        ha_nameservice = None
        namenode_http_port = None
        active_namenode = None
        hdfs_site = None
        ha = util.getProperty(self.callerArgs, 'ha', conf, '')
        import splunk.util
        if len(ha) > 0 and splunk.util.normalizeBoolean(ha):
            # On edit without an explicit hdfs_site, reuse the stored one.
            if self.requestedAction == admin.ACTION_EDIT and 'hdfs_site' not in self.callerArgs:
                cluster = Cluster(self.callerArgs.id, None)
                self.callerArgs['hdfs_site'] = cluster.getHdfsSite()
            if 'hdfs_site' not in self.callerArgs or not self.callerArgs['hdfs_site'][0] or self.callerArgs['hdfs_site'][0].strip()=='':
                raise HcException(HCERR0501, {'argument': 'hdfs_site'})
            hdfs_site = util.getProperty(self.callerArgs, 'hdfs_site', conf)
            # Wrap in a <configuration> root if the caller omitted it.
            if not hdfs_site.strip().startswith('<configuration>'):
                hdfs_site = '<configuration>'+hdfs_site.strip()
            if not hdfs_site.strip().endswith('</configuration>'):
                hdfs_site = hdfs_site.strip()+'</configuration>'
            try:
                ha_nameservice, active_namenode, namenode_http_port = parseHdfsSiteXml(hdfs_site)
            except HcException:
                logger.exception('Failed to parse hdfs_site')
                raise
            except Exception:
                logger.exception('Failed to parse hdfs_site')
                raise HcException(HCERR2009, {'error':'please make sure xml is normalized'})
            if ha_nameservice != self.callerArgs.id:
                raise HcException(HCERR0502, {'name': 'id', 'value': self.callerArgs.id, 'error': 'clusters stanza name must be same as HA nameservice id'})
        else:
            self._ensureRequiredCreateArgs(['namenode_http_port', 'hadoop_home', 'java_home'])
            namenode_http_port = int(util.getProperty(self.callerArgs, 'namenode_http_port', conf))
        hadoop_home = util.getProperty(self.callerArgs, 'hadoop_home', conf)
        java_home = util.getProperty(self.callerArgs, 'java_home', conf)
        authentication_mode = 'simple'
        authorization_mode = '0'
        principal = util.getProperty(self.callerArgs, 'kerberos_principal', conf)
        kerberos_service_principal = util.getProperty(self.callerArgs, 'kerberos_service_principal', conf)
        if kerberos_service_principal is not None and kerberos_service_principal.strip() != '':
            authentication_mode = 'kerberos'
            authorization_mode = '1'
        auth_to_local = util.getProperty(self.callerArgs, 'auth_to_local', conf, '')
        props = {'namenode_http_port': namenode_http_port,
                 'hadoop_home': hadoop_home,
                 'java_home': java_home,
                 'authentication_mode': authentication_mode,
                 'authorization_mode': authorization_mode,
                 'principal': principal,
                 'kerberos_service_principal': kerberos_service_principal}
        if active_namenode is not None:
            props['active_namenode'] = active_namenode
        if hdfs_site is not None:
            props['hdfs_site'] = hdfs_site
        if auth_to_local != '':
            props['auth_to_local'] = auth_to_local
        cluster = Cluster(self.callerArgs.id, props)
        return cluster

    def handleLocalCreateOrEdit(self, confInfo, conf):
        """Validate and persist a local (file://) cluster stanza."""
        self.validateLocalClusterArgs(conf)
        self.delegate(ENDPOINT, confInfo)

    def handleHdfsCreateOrEdit(self, cluster, confInfo):
        """Persist a remote cluster: write files, save the stanza, verify access."""
        # 1) create local/clusters/<host_port> directory if not exists
        # 2) verify hadoop version  3) create/update core-site.xml
        cluster.save()
        # remove fields we don't want to save in the conf file
        fields = ['name', 'uri', 'namenode_http_port', 'kerberos_principal', 'kerberos_service_principal', 'hadoop_home', 'java_home', 'ha', 'auth_to_local']
        # Bug fix: iterate over a snapshot — deleting while iterating a live
        # keys() view raises RuntimeError on Python 3.
        for k in list(self.callerArgs.keys()):
            if k not in fields:
                del self.callerArgs[k]
        # create/edit the conf stanza
        self.delegate(ENDPOINT, confInfo)
        principal = cluster.props['principal'] if cluster.props['authentication_mode'] == 'kerberos' else None
        import hadooputils as hu
        # verify kerberos_principal, keytab, kerberos_service_principal and that ls works
        hu.validateConnectionToHadoop(self.getSessionKey(), principal, 'hdfs://'+self.callerArgs.id+'/')

    def handleRemove(self, confInfo):
        """Delete the stanza and, for remote clusters, the local working dir."""
        # delegate remove to /servicesNS/<user>/<app>/admin/conf-clusters
        conf = self.getEntity('admin/conf-clusters', self.callerArgs.id)
        self.delegate(ENDPOINT, confInfo, method='DELETE')
        # fix for SPL-61583: fall back to the stanza name when no uri is stored
        uri = conf.get('uri') if 'uri' in conf else None
        if uri is None or uri.strip() == '':
            uri = 'hdfs://' + self.callerArgs.id
        if not self._isLocalURI(uri):  # remote cluster
            cluster = Cluster(self.callerArgs.id)
            cluster.remove()

    def handleCustom(self, confInfo):
        """Forward custom actions to the delegated endpoint."""
        method = 'GET' if self.requestedAction == admin.ACTION_LIST else 'POST'
        self.delegate(ENDPOINT, confInfo, method=method, customAction=self.customAction)
admin.init(ClustersHandler, admin.CONTEXT_APP_ONLY)
| 46.285714 | 164 | 0.61154 | import os
import splunk.admin as admin
from clusters import *
from delegating_handler import DelegatingRestHandler
import constants
from errors import *
import util
required_args = ['uri']
optional_args = ['namenode_http_port', 'hadoop_home', 'java_home', 'kerberos_principal', 'kerberos_service_principal', 'ha', 'hdfs_site', 'auth_to_local']
ENDPOINT = 'configs/conf-clusters'
class ClustersHandler(DelegatingRestHandler):
def setup(self):
self.appName = constants.APP_NAME
self.userName = 'nobody'
if self.requestedAction == admin.ACTION_LIST:
self.supportedArgs.addOptArg('add_versions')
elif self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:
req_args = required_args
opt_args = optional_args
for arg in opt_args:
self.supportedArgs.addOptArg(arg)
for arg in req_args:
if self.requestedAction == admin.ACTION_EDIT:
self.supportedArgs.addOptArg(arg)
else:
self.supportedArgs.addReqArg(arg)
def _isLocalURI(self, uri):
return uri.startswith('file://')
def handleList(self, confInfo):
self.delegate(ENDPOINT, confInfo, method='GET')
for name, obj in confInfo.items():
if self._isLocalURI(obj.get('uri', '')):
continue
cluster = Cluster(name, obj)
obj['hdfs_site'] = cluster.getHdfsSite()
obj['cluster_dir'] = cluster.getClusterDir()
obj['cluster_cli'] = cluster.hadoop_cli
obj['uri'] = cluster.getURI().rstrip('/')
obj['authentication_mode'] = 'simple' if obj.get('kerberos_principal', '') == '' else 'kerberos'
obj['authorization_mode'] = '0' if obj.get('kerberos_principal', '') == '' else '1'
if self.callerArgs.get('add_versions', [''])[0] == '1':
local = 'unknown'
remote = 'unknown'
try:
local = cluster.getLocalHadoopVersion()
except: pass
try:
remote = cluster.getRemoteHadoopVersion()
except: pass
obj['local_hadoop_version'] = local
obj['remote_hadoop_version'] = remote
def handleCreate(self, confInfo):
if self.getEntities('admin/conf-clusters', 'name='+self.callerArgs.id):
raise HcException(HCERR2007, {'stanza':str(self.callerArgs.id)})
conf = self.getEntity('admin/conf-clusters', '_new')
if self._isLocalURI(self.callerArgs.get('uri', [''])[0]):
return self.handleLocalCreateOrEdit(confInfo, conf)
cluster = self.validateHdfsClusterArgs(conf)
self.callerArgs.id = cluster.name
try:
self.handleHdfsCreateOrEdit(cluster, confInfo)
except Exception as e:
logger.exception("Failed to handleCreate:")
cluster.remove()
if self.getEntities('admin/conf-clusters', self.callerArgs.id):
self.deleteEntity('admin/conf-clusters', self.callerArgs.id)
raise e
def handleEdit(self, confInfo):
conf = self.getEntity('admin/conf-clusters', self.callerArgs.id)
uri = util.getProperty(self.callerArgs, 'uri', conf)
if self._isLocalURI(uri):
return self.handleLocalCreateOrEdit(confInfo, conf)
cluster = self.validateHdfsClusterArgs(conf)
hdfs_site_old = cluster.getHdfsSite()
try:
self.handleHdfsCreateOrEdit(cluster, confInfo)
except Exception as e:
logger.exception("Failed to handleEdit")
if not hdfs_site_old:
cluster.props['hdfs_site'] = hdfs_site_old
cluster.props['kerberos_service_principal'] = conf['kerberos_service_principal'] if 'kerberos_service_principal' in conf else None
cluster.props['kerberos_principal'] = conf['kerberos_principal'] if 'kerberos_principal' in conf else None
if cluster.props['kerberos_service_principal'] != None and cluster.props['kerberos_service_principal'].strip() != '':
cluster.props['authentication_mode'] = 'kerberos'
cluster.props['authorization_mode'] = '1'
else:
cluster.props['authentication_mode'] = 'simple'
cluster.props['authorization_mode'] = '0'
cluster.saveXml()
for k,v in self.callerArgs.items():
self.callerArgs[k] = conf[k]
self.delegate(ENDPOINT, confInfo)
raise e
def _ensureRequiredCreateArgs(self, args):
if self.requestedAction == admin.ACTION_CREATE:
for arg in args:
if arg not in self.callerArgs or self.callerArgs.get(arg) == None or len(self.callerArgs.get(arg)) == 0 or self.callerArgs.get(arg)[0].strip() == '':
raise HcException(HCERR0501, {'argument': arg})
def validateLocalClusterArgs(self, conf):
import util
uri = util.getProperty(self.callerArgs, 'uri', conf)
if not self._isLocalURI(uri):
raise HcException(HCERR0503, {'name':'uri', 'value':uri, 'accepted_values':'file://<path>'})
import os.path
if not os.path.isdir(uri[7:]):
raise HcException(HCERR0502, {'name':'uri', 'value':uri, 'error':'path does not exist'})
import splunk.entity as en
clusters = en.getEntities('admin/conf-clusters', search='uri=file://*', namespace=self.appName, owner=self.userName, sessionKey=self.getSessionKey());
for name, obj in clusters.items():
if name != self.callerArgs.id and obj['uri'].rstrip('/') == uri.rstrip('/'):
raise HcException(HCERR1515, {'path':uri, 'cluster':name})
def validateHdfsClusterArgs(self, conf):
ha_nameservice = None
namenode_http_port = None
active_namenode = None
hdfs_site = None
ha = util.getProperty(self.callerArgs, 'ha', conf, '')
import splunk.util
if len(ha) > 0 and splunk.util.normalizeBoolean(ha):
if self.requestedAction == admin.ACTION_EDIT and 'hdfs_site' not in self.callerArgs:
cluster = Cluster(self.callerArgs.id, None)
self.callerArgs['hdfs_site'] = cluster.getHdfsSite()
if 'hdfs_site' not in self.callerArgs or not self.callerArgs['hdfs_site'][0] or self.callerArgs['hdfs_site'][0].strip()=='':
raise HcException(HCERR0501, {'argument': 'hdfs_site'})
hdfs_site = util.getProperty(self.callerArgs, 'hdfs_site', conf)
if not hdfs_site.strip().startswith('<configuration>'):
hdfs_site = '<configuration>'+hdfs_site.strip()
if not hdfs_site.strip().endswith('</configuration>'):
hdfs_site = hdfs_site.strip()+'</configuration>'
try:
ha_nameservice, active_namenode, namenode_http_port = parseHdfsSiteXml(hdfs_site)
except HcException:
logger.exception('Failed to parse hdfs_site')
raise
except Exception:
logger.exception('Failed to parse hdfs_site')
raise HcException(HCERR2009, {'error':'please make sure xml is normalized'})
if ha_nameservice != self.callerArgs.id:
raise HcException(HCERR0502, {'name': 'id', 'value': self.callerArgs.id, 'error': 'clusters stanza name must be same as HA nameservice id'})
else:
self._ensureRequiredCreateArgs(['namenode_http_port', 'hadoop_home', 'java_home'])
namenode_http_port = int(util.getProperty(self.callerArgs, 'namenode_http_port', conf))
hadoop_home = util.getProperty(self.callerArgs, 'hadoop_home', conf)
java_home = util.getProperty(self.callerArgs, 'java_home', conf)
authentication_mode = 'simple'
authorization_mode = '0'
principal = util.getProperty(self.callerArgs, 'kerberos_principal', conf)
kerberos_service_principal = util.getProperty(self.callerArgs, 'kerberos_service_principal', conf)
if kerberos_service_principal != None and kerberos_service_principal.strip() != '':
authentication_mode = 'kerberos'
authorization_mode = '1'
auth_to_local = util.getProperty(self.callerArgs, 'auth_to_local', conf, '')
props = {'namenode_http_port':namenode_http_port,
'hadoop_home': hadoop_home,
'java_home': java_home,
'authentication_mode':authentication_mode,
'authorization_mode':authorization_mode,
'principal':principal,
'kerberos_service_principal':kerberos_service_principal}
if active_namenode != None:
props['active_namenode'] = active_namenode
if hdfs_site != None:
props['hdfs_site'] = hdfs_site
if auth_to_local != '':
props['auth_to_local'] = auth_to_local
cluster = Cluster(self.callerArgs.id, props)
return cluster
def handleLocalCreateOrEdit(self, confInfo, conf):
self.validateLocalClusterArgs(conf)
self.delegate(ENDPOINT, confInfo)
def handleHdfsCreateOrEdit(self, cluster, confInfo):
cluster.save()
fields = ['name', 'uri', 'namenode_http_port', 'kerberos_principal', 'kerberos_service_principal', 'hadoop_home', 'java_home', 'ha', 'auth_to_local']
for k in self.callerArgs.keys():
if not k in fields:
del self.callerArgs[k]
# create/edit conf stanza
self.delegate(ENDPOINT, confInfo)
principal = cluster.props['principal'] if cluster.props['authentication_mode'] == 'kerberos' else None
import hadooputils as hu
# verify kerberos_principal, keytab and kerberos_service_principal and ls works
hu.validateConnectionToHadoop(self.getSessionKey(), principal, 'hdfs://'+self.callerArgs.id+'/')
def handleRemove(self, confInfo):
# delegate remove to /servicesNS/<user>/<app>/admin/conf-clusters
conf = self.getEntity('admin/conf-clusters', self.callerArgs.id)
self.delegate(ENDPOINT, confInfo, method='DELETE')
# fix for SPL-61583
uri = None
if 'uri' in conf:
uri = conf.get('uri')
if(uri == None or uri.strip() == ''):
uri = 'hdfs://'+self.callerArgs.id;
if not self._isLocalURI(uri): #remote cluster
cluster = Cluster(self.callerArgs.id)
cluster.remove()
def handleCustom(self, confInfo):
method = 'GET' if self.requestedAction == admin.ACTION_LIST else 'POST'
self.delegate(ENDPOINT, confInfo, method=method, customAction=self.customAction)
admin.init(ClustersHandler, admin.CONTEXT_APP_ONLY)
| true | true |
1c32398e7095398e8602f7d3c4d6512bd57639ff | 6,977 | py | Python | 21_autoencoder.py | juniwang/tensorflow-hw | 212223b2ea4ffe4f91e7ce2b55ba4f9c6797f767 | [
"MIT"
] | null | null | null | 21_autoencoder.py | juniwang/tensorflow-hw | 212223b2ea4ffe4f91e7ce2b55ba4f9c6797f767 | [
"MIT"
] | null | null | null | 21_autoencoder.py | juniwang/tensorflow-hw | 212223b2ea4ffe4f91e7ce2b55ba4f9c6797f767 | [
"MIT"
] | null | null | null | from __future__ import division, print_function, absolute_import
# disable warnings which are just informing you if you build TensorFlow from source it can be faster on your machine.
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
# Visualize decoder setting
# Parameters
# Training hyperparameters for the decoder-visualisation run.
learning_rate = 0.01
training_epochs = 5
batch_size = 256
display_step = 1   # print the loss every `display_step` epochs
examples_to_show = 10  # number of test images to reconstruct and plot
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures; the autoencoder needs no labels)
X = tf.placeholder("float", [None, n_input])
# hidden layer settings
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
# Weight matrices for the two encoder layers and the mirrored decoder
# layers; shapes shrink 784 -> 256 -> 128 then expand back.
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
# One bias vector per layer, sized to that layer's output width.
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
    """Compress a batch of flattened images to the n_hidden_2-dim code.

    Two fully connected layers, each followed by a sigmoid, using the
    module-level ``weights``/``biases`` variables.
    """
    hidden = tf.nn.sigmoid(tf.matmul(x, weights['encoder_h1']) + biases['encoder_b1'])
    code = tf.nn.sigmoid(tf.matmul(hidden, weights['encoder_h2']) + biases['encoder_b2'])
    return code
# Building the decoder
def decoder(x):
    """Reconstruct a batch of flattened images from the latent code.

    Mirror of ``encoder``: two sigmoid-activated fully connected layers
    mapping the code back up to ``n_input`` values per example.
    """
    hidden = tf.nn.sigmoid(tf.matmul(x, weights['decoder_h1']) + biases['decoder_b1'])
    reconstruction = tf.nn.sigmoid(tf.matmul(hidden, weights['decoder_h2']) + biases['decoder_b2'])
    return reconstruction
"""
# Visualize encoder setting
# Parameters
learning_rate = 0.01 # 0.01 this learning rate will be better! Tested
training_epochs = 10
batch_size = 256
display_step = 1
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
# hidden layer settings
n_hidden_1 = 128
n_hidden_2 = 64
n_hidden_3 = 10
n_hidden_4 = 2
weights = {
'encoder_h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1],)),
'encoder_h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2],)),
'encoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3],)),
'encoder_h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4],)),
'decoder_h1': tf.Variable(tf.truncated_normal([n_hidden_4, n_hidden_3],)),
'decoder_h2': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_2],)),
'decoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_1],)),
'decoder_h4': tf.Variable(tf.truncated_normal([n_hidden_1, n_input],)),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])),
'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b4': tf.Variable(tf.random_normal([n_input])),
}
def encoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']),
biases['encoder_b3']))
layer_4 = tf.add(tf.matmul(layer_3, weights['encoder_h4']),
biases['encoder_b4'])
return layer_4
def decoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']),
biases['decoder_b3']))
layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']),
biases['decoder_b4']))
return layer_4
"""
# Construct model: encode the input, then decode the code back to pixels.
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction is the reconstruction.
y_pred = decoder_op
# Targets (Labels) are the input data itself (autoencoder objective).
y_true = X
# Define loss and optimizer; minimize the mean squared reconstruction error.
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
    # tf.initialize_all_variables() is no longer valid from
    # 2017-03-02 if using tensorflow >= 0.12; pick the initializer by version.
    if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
        init = tf.initialize_all_variables()
    else:
        init = tf.global_variables_initializer()
    sess.run(init)
    total_batch = int(mnist.train.num_examples/batch_size)
    # Training cycle
    for epoch in range(training_epochs):
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size) # max(x) = 1, min(x) = 0
            # Run optimization op (backprop) and cost op (to get loss value);
            # batch_ys is unused — the autoencoder trains on the images alone.
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
        # Display logs per epoch step (c is the last batch's loss).
        if epoch % display_step == 0:
            print("Epoch:", '%04d' % (epoch+1),
                  "cost=", "{:.9f}".format(c))
    print("Optimization Finished!")
    # # Applying encode and decode over test set
    encode_decode = sess.run(
        y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
    # Compare original images (top row) with their reconstructions (bottom row).
    f, a = plt.subplots(2, 10, figsize=(10, 2))
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
    plt.show()
    # encoder_result = sess.run(encoder_op, feed_dict={X: mnist.test.images})
    # plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=mnist.test.labels)
    # plt.colorbar()
    # plt.show()
| 36.721053 | 117 | 0.656443 | from __future__ import division, print_function, absolute_import
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
learning_rate = 0.01
training_epochs = 5
batch_size = 256
display_step = 1
examples_to_show = 10
n_input = 784
X = tf.placeholder("float", [None, n_input])
n_hidden_1 = 256
n_hidden_2 = 128
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
def encoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
return layer_2
def decoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
return layer_2
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
y_pred = decoder_op
y_true = X
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
with tf.Session() as sess:
if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:
init = tf.initialize_all_variables()
else:
init = tf.global_variables_initializer()
sess.run(init)
total_batch = int(mnist.train.num_examples/batch_size)
for epoch in range(training_epochs):
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
print("Optimization Finished!")
red, feed_dict={X: mnist.test.images[:examples_to_show]})
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(examples_to_show):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
plt.show()
| true | true |
1c3239fb6450e7070cf99ff189993812fcfcd3d4 | 7,664 | py | Python | sqlcouch/models.py | dimagi/sqlcouch | ff264740d223185d80e0bf8ea11f6f3c8f3d7e7b | [
"BSD-3-Clause"
] | 1 | 2016-03-01T10:20:20.000Z | 2016-03-01T10:20:20.000Z | sqlcouch/models.py | dimagi/sqlcouch | ff264740d223185d80e0bf8ea11f6f3c8f3d7e7b | [
"BSD-3-Clause"
] | null | null | null | sqlcouch/models.py | dimagi/sqlcouch | ff264740d223185d80e0bf8ea11f6f3c8f3d7e7b | [
"BSD-3-Clause"
] | null | null | null | import base64
from couchdbkit import Database, ResourceNotFound, resource
from couchdbkit.ext.django.schema import Document
from django.db import models, transaction
import json
import uuid
from couchdbkit.exceptions import ResourceConflict
class SQLDocDB(Database):
    """couchdbkit ``Database`` that reads through a SQL shadow store.

    Documents may live in ``SQLDocModel`` rows before being synced to
    CouchDB; reads prefer the SQL copy, deletes remove both, and direct
    couch-side writes are disabled (``NotImplementedError``).
    """

    def delete_doc(self, doc, **params):
        # Delete the SQL shadow row first (if any), then the couch doc.
        found = False
        try:
            sqldoc = SQLDocModel.objects.get(doc_id=doc)
        except SQLDocModel.DoesNotExist:
            pass
        else:
            found = True
            sqldoc.delete()
        try:
            return super(SQLDocDB, self).delete_doc(doc)
        except ResourceNotFound:
            # The doc may exist only in SQL (never synced); treat that as
            # a successful delete rather than a couch 404.
            if found:
                return {'id': doc, 'ok': True, 'rev': None}
            else:
                raise

    def copy_doc(self, doc, dest=None, headers=None):
        # Flush all pending SQL docs to couch so the copy sees fresh data.
        from . import sync
        sync.sync_all()
        return super(SQLDocDB, self).copy_doc(doc, dest, headers)

    def save_doc(self, doc, encode_attachments=True, force_update=False,
                 **params):
        # Writes must go through SQLDoc.save(), not the couch API.
        raise NotImplementedError()

    def save_docs(self, docs, use_uuids=True, all_or_nothing=False,
                  **params):
        # Bulk writes must go through SQLDoc.save(), not the couch API.
        raise NotImplementedError()

    def delete_docs(self, docs, all_or_nothing=False, empty_on_delete=False,
                    **params):
        # Bulk deletes are unsupported; delete docs one at a time.
        raise NotImplementedError()

    def _get(self, docid, **params):
        """Return the raw doc dict, preferring the SQL shadow copy."""
        try:
            doc_model = SQLDocModel.objects.get(doc_id=docid)
        except SQLDocModel.DoesNotExist:
            # Not shadowed in SQL; fall through to the couch HTTP resource.
            docid = resource.escape_docid(docid)
            return self.res.get(docid, **params).json_body
        else:
            doc = doc_model.doc
            # Sanity: the denormalized columns must agree with the JSON.
            assert doc['doc_type'] == doc_model.doc_type
            assert doc['_id'] == doc_model.doc_id
            doc['_rev'] = doc_model.sql_rev
            # Attachment stubs only; payloads are deferred (not loaded).
            doc['_attachments'] = dict(
                att.format_stub() for att in
                doc_model.sqldocattachment_set.defer('payload')
            )
            return doc

    def open_doc(self, docid, **params):
        # This whole function is copied from Database.open_doc...
        wrapper = None
        if "wrapper" in params:
            wrapper = params.pop("wrapper")
        elif "schema" in params:
            schema = params.pop("schema")
            if not hasattr(schema, "wrap"):
                raise TypeError("invalid schema")
            wrapper = schema.wrap
        # ...except for this line, which is changed to prefer the SQL copy
        doc = self._get(docid)
        if wrapper is not None:
            if not callable(wrapper):
                raise TypeError("wrapper isn't a callable")
            return wrapper(doc)
        return doc

    def view(self, *args, **kwargs):
        # Views run in couch, so flush pending SQL docs first.
        from . import sync
        sync.sync_all()
        return super(SQLDocDB, self).view(*args, **kwargs)

    # Aliases matching the couchdbkit Database API surface.
    bulk_save = save_docs
    bulk_delete = delete_docs
    get = open_doc
class SQLDoc(Document):
    """couchdbkit ``Document`` whose writes land in SQL first.

    ``save``/``put_attachment`` write to ``SQLDocModel``/``SQLDocAttachment``
    under a row lock and bump a SQL-side revision (``sql_rev``); a separate
    sync step pushes rows to CouchDB later.
    """

    @classmethod
    def get_db(cls):
        # Re-class the database object so reads go through SQLDocDB.
        db = super(SQLDoc, cls).get_db()
        db.__class__ = SQLDocDB
        return db

    def save(self):
        """Persist this doc to the SQL shadow store (not directly to couch)."""
        with transaction.commit_on_success():
            doc_model = self._get_and_lock_sqldoc()
            doc_model.doc_type = self.doc_type
            doc_model.sql_rev = self._new_sql_rev(doc_model.rev)
            doc_model.doc = self.to_json()
            doc_model.save()
        self._rev = doc_model.sql_rev

    def fetch_attachment(self, name):
        """Return attachment content, preferring the SQL copy over couch."""
        try:
            attachment = SQLDocAttachment.objects.get(doc=self._id, name=name)
        except SQLDocAttachment.DoesNotExist:
            return super(SQLDoc, self).fetch_attachment(name)
        else:
            content = attachment.content
            # Text attachments come back as unicode; binary stays as bytes.
            try:
                return content.decode('utf-8')
            except UnicodeDecodeError:
                return content

    def put_attachment(self, content, name=None, content_type=None,
                       content_length=None):
        """Create or replace an attachment in SQL and bump the sql_rev.

        ``content`` may be a file-like object (read fully) or a str/unicode;
        unicode is stored UTF-8 encoded.
        """
        with transaction.commit_on_success():
            doc_model = self._get_and_lock_sqldoc()
            try:
                # Lock the existing attachment row if there is one.
                attachment = (SQLDocAttachment.objects.select_for_update()
                              .only('doc', 'name')
                              .get(doc=self._id, name=name))
            except SQLDocAttachment.DoesNotExist:
                attachment = SQLDocAttachment(
                    doc=doc_model,
                    name=name
                )
            if hasattr(content, 'read'):
                content = content.read()
            if isinstance(content, unicode):
                content = content.encode('utf-8')
            attachment.content = content
            attachment.content_type = content_type
            attachment.length = content_length or len(content)
            doc_model.sql_rev = self._new_sql_rev(doc_model.rev)
            attachment.save()
            doc_model.save()
        self._rev = doc_model.sql_rev
        # Keep the in-memory stub map in line with what was just stored.
        if self._attachments is None:
            self._attachments = {}
        self._attachments.__setitem__(*attachment.format_stub())

    def delete_attachment(self, name):
        # Not supported through the SQL shadow path.
        raise NotImplementedError()

    def _get_and_lock_sqldoc(self):
        """Fetch (or create) and row-lock this doc's SQLDocModel.

        This should be done inside a transaction.  Raises ResourceConflict
        when the in-memory ``_rev`` is stale relative to the stored sql_rev.
        """
        if not self._id:
            self._id = self.get_db().server.next_uuid()
        try:
            doc_model = (SQLDocModel.objects.select_for_update()
                         .only('rev', 'sql_rev', 'doc_id').get(pk=self._id))
        except SQLDocModel.DoesNotExist:
            doc_model = SQLDocModel(doc_id=self._id, rev=self._rev)
        else:
            if doc_model.sql_rev != self._rev:
                raise ResourceConflict(
                    '[sqlcouch] (sql)_rev {0} of doc {1} '
                    'does not match the one stored in sql: {2}'
                    .format(self._rev, self._id, doc_model.sql_rev)
                )
        return doc_model

    def _new_sql_rev(self, rev):
        # Mimic couch's "<parent>-<suffix>" rev shape with a random suffix.
        return (rev or '') + '-' + uuid.uuid4().hex
class SQLDocModel(models.Model):
    """SQL shadow row for one couch document (JSON stored as text)."""

    # Couch document _id / _rev as last synced to couch (rev may be NULL
    # for docs that have never reached couch).
    doc_id = models.CharField(max_length=256, primary_key=True)
    rev = models.CharField(max_length=256, null=True)
    # docs stored in postgres will need their own rev scheme
    # that mimics couchdb's, because docs may be saved a number of times
    # in postgres before it's synced to couchdb
    # if couchdb is up to date, sql_rev and rev will be equal
    sql_rev = models.CharField(max_length=256)
    doc_type = models.CharField(max_length=20)
    # Full document body, JSON-serialized; accessed via the `doc` property.
    doc_json = models.TextField()
    # True once this row has been pushed to couch; indexed for sync scans.
    synced = models.BooleanField(default=False, db_index=True)

    def get_doc(self):
        # Deserialize the stored JSON into a dict.
        return json.loads(self.doc_json)

    def set_doc(self, doc):
        # Serialize the dict into the JSON text column.
        self.doc_json = json.dumps(doc)
    doc = property(get_doc, set_doc)

    def __unicode__(self):
        return ('doc_id={0} rev={1} sql_rev={2} synced={3}'
                .format(self.doc_id, self.rev, self.sql_rev, self.synced))
class SQLDocAttachment(models.Model):
    """SQL shadow row for one couch attachment (payload base64-encoded)."""

    doc = models.ForeignKey(SQLDocModel)
    name = models.CharField(max_length=256, db_index=True)
    content_type = models.CharField(max_length=256)
    # Declared content length in bytes (of the decoded payload).
    length = models.IntegerField()
    # Raw bytes, base64-encoded; accessed via the `content` property.
    payload = models.TextField()
    # True once this attachment has been pushed to couch.
    synced = models.BooleanField(default=False, db_index=True)

    class Meta:
        # One attachment per (document, name) pair, as in couch.
        unique_together = ('doc', 'name')

    def get_content(self):
        # Decode the base64 column back to raw bytes.
        return base64.b64decode(self.payload)

    def set_content(self, content):
        # Store raw bytes base64-encoded so the text column stays ASCII-safe.
        self.payload = base64.b64encode(content)
    content = property(get_content, set_content)

    def format_stub(self):
        """Return a couch-style (name, attachment-stub) pair for _attachments."""
        return (self.name, {
            'content_type': self.content_type,
            'length': self.length,
            'stub': True,
        })
| 34.214286 | 78 | 0.594207 | import base64
from couchdbkit import Database, ResourceNotFound, resource
from couchdbkit.ext.django.schema import Document
from django.db import models, transaction
import json
import uuid
from couchdbkit.exceptions import ResourceConflict
class SQLDocDB(Database):
def delete_doc(self, doc, **params):
found = False
try:
sqldoc = SQLDocModel.objects.get(doc_id=doc)
except SQLDocModel.DoesNotExist:
pass
else:
found = True
sqldoc.delete()
try:
return super(SQLDocDB, self).delete_doc(doc)
except ResourceNotFound:
if found:
return {'id': doc, 'ok': True, 'rev': None}
else:
raise
def copy_doc(self, doc, dest=None, headers=None):
from . import sync
sync.sync_all()
return super(SQLDocDB, self).copy_doc(doc, dest, headers)
def save_doc(self, doc, encode_attachments=True, force_update=False,
**params):
raise NotImplementedError()
def save_docs(self, docs, use_uuids=True, all_or_nothing=False,
**params):
raise NotImplementedError()
def delete_docs(self, docs, all_or_nothing=False, empty_on_delete=False,
**params):
raise NotImplementedError()
def _get(self, docid, **params):
try:
doc_model = SQLDocModel.objects.get(doc_id=docid)
except SQLDocModel.DoesNotExist:
docid = resource.escape_docid(docid)
return self.res.get(docid, **params).json_body
else:
doc = doc_model.doc
assert doc['doc_type'] == doc_model.doc_type
assert doc['_id'] == doc_model.doc_id
doc['_rev'] = doc_model.sql_rev
doc['_attachments'] = dict(
att.format_stub() for att in
doc_model.sqldocattachment_set.defer('payload')
)
return doc
def open_doc(self, docid, **params):
wrapper = None
if "wrapper" in params:
wrapper = params.pop("wrapper")
elif "schema" in params:
schema = params.pop("schema")
if not hasattr(schema, "wrap"):
raise TypeError("invalid schema")
wrapper = schema.wrap
doc = self._get(docid)
if wrapper is not None:
if not callable(wrapper):
raise TypeError("wrapper isn't a callable")
return wrapper(doc)
return doc
def view(self, *args, **kwargs):
from . import sync
sync.sync_all()
return super(SQLDocDB, self).view(*args, **kwargs)
bulk_save = save_docs
bulk_delete = delete_docs
get = open_doc
class SQLDoc(Document):
@classmethod
def get_db(cls):
db = super(SQLDoc, cls).get_db()
db.__class__ = SQLDocDB
return db
def save(self):
with transaction.commit_on_success():
doc_model = self._get_and_lock_sqldoc()
doc_model.doc_type = self.doc_type
doc_model.sql_rev = self._new_sql_rev(doc_model.rev)
doc_model.doc = self.to_json()
doc_model.save()
self._rev = doc_model.sql_rev
def fetch_attachment(self, name):
try:
attachment = SQLDocAttachment.objects.get(doc=self._id, name=name)
except SQLDocAttachment.DoesNotExist:
return super(SQLDoc, self).fetch_attachment(name)
else:
content = attachment.content
try:
return content.decode('utf-8')
except UnicodeDecodeError:
return content
def put_attachment(self, content, name=None, content_type=None,
content_length=None):
with transaction.commit_on_success():
doc_model = self._get_and_lock_sqldoc()
try:
attachment = (SQLDocAttachment.objects.select_for_update()
.only('doc', 'name')
.get(doc=self._id, name=name))
except SQLDocAttachment.DoesNotExist:
attachment = SQLDocAttachment(
doc=doc_model,
name=name
)
if hasattr(content, 'read'):
content = content.read()
if isinstance(content, unicode):
content = content.encode('utf-8')
attachment.content = content
attachment.content_type = content_type
attachment.length = content_length or len(content)
doc_model.sql_rev = self._new_sql_rev(doc_model.rev)
attachment.save()
doc_model.save()
self._rev = doc_model.sql_rev
if self._attachments is None:
self._attachments = {}
self._attachments.__setitem__(*attachment.format_stub())
def delete_attachment(self, name):
raise NotImplementedError()
def _get_and_lock_sqldoc(self):
if not self._id:
self._id = self.get_db().server.next_uuid()
try:
doc_model = (SQLDocModel.objects.select_for_update()
.only('rev', 'sql_rev', 'doc_id').get(pk=self._id))
except SQLDocModel.DoesNotExist:
doc_model = SQLDocModel(doc_id=self._id, rev=self._rev)
else:
if doc_model.sql_rev != self._rev:
raise ResourceConflict(
'[sqlcouch] (sql)_rev {0} of doc {1} '
'does not match the one stored in sql: {2}'
.format(self._rev, self._id, doc_model.sql_rev)
)
return doc_model
def _new_sql_rev(self, rev):
return (rev or '') + '-' + uuid.uuid4().hex
class SQLDocModel(models.Model):
doc_id = models.CharField(max_length=256, primary_key=True)
rev = models.CharField(max_length=256, null=True)
# docs stored in postgres will need their own rev scheme
# that mimics couchdb's, because docs may be saved a number of times
# if couchdb is up to date, sql_rev and rev will be equal
sql_rev = models.CharField(max_length=256)
doc_type = models.CharField(max_length=20)
doc_json = models.TextField()
synced = models.BooleanField(default=False, db_index=True)
def get_doc(self):
return json.loads(self.doc_json)
def set_doc(self, doc):
self.doc_json = json.dumps(doc)
doc = property(get_doc, set_doc)
def __unicode__(self):
return ('doc_id={0} rev={1} sql_rev={2} synced={3}'
.format(self.doc_id, self.rev, self.sql_rev, self.synced))
class SQLDocAttachment(models.Model):
doc = models.ForeignKey(SQLDocModel)
name = models.CharField(max_length=256, db_index=True)
content_type = models.CharField(max_length=256)
length = models.IntegerField()
payload = models.TextField()
synced = models.BooleanField(default=False, db_index=True)
class Meta:
unique_together = ('doc', 'name')
def get_content(self):
return base64.b64decode(self.payload)
def set_content(self, content):
self.payload = base64.b64encode(content)
content = property(get_content, set_content)
def format_stub(self):
return (self.name, {
'content_type': self.content_type,
'length': self.length,
'stub': True,
})
| true | true |
1c323ad2feb1f7eec6edfed83be15809e4df3fed | 970 | py | Python | Data Manipulation with Pandas/reindexing_methods.py | zack28/TakenMind-Internship | 7fb7c1c0b255ee233f18fd9ab4fa76a9b2c992d7 | [
"MIT"
] | 4 | 2019-07-05T22:28:21.000Z | 2021-11-08T12:45:15.000Z | Data Manipulation with Pandas/reindexing_methods.py | zack28/TakenMind-Internship | 7fb7c1c0b255ee233f18fd9ab4fa76a9b2c992d7 | [
"MIT"
] | null | null | null | Data Manipulation with Pandas/reindexing_methods.py | zack28/TakenMind-Internship | 7fb7c1c0b255ee233f18fd9ab4fa76a9b2c992d7 | [
"MIT"
] | 5 | 2020-07-23T18:15:33.000Z | 2021-09-14T14:34:40.000Z | import numpy as np
"""Pandas reindexing walkthrough: Series/DataFrame.reindex with fill_value
and method="ffill", plus label-based expansion of both axes."""
import pandas as pd
from pandas import Series, DataFrame
from numpy.random import randn

# Create a new Series.
s1 = Series([1, 2, 3, 4], index=['e', 'f', 'g', 'h'])
print(s1)
# Creating new indexes using reindex; labels absent from s1 become NaN.
s2 = s1.reindex(['e', 'f', 'g', 'h', 'i', 'j'])
print(s2)
# fill_value only applies to labels that are *new* in this reindex ('k');
# 'i' and 'j' already exist (as NaN) and are left untouched.
s2 = s2.reindex(['e', 'f', 'g', 'h', 'i', 'j', 'k'], fill_value=10)
print(s2)
# Using ffill: forward-fill each gap from the previous known label.
cars = Series(['Audi', 'BMW', 'Honda'], index=[0, 4, 8])
print(cars)
ranger = range(13)
print(ranger)
cars = cars.reindex(ranger, method="ffill")
print(cars)
# Create a new DataFrame using random data and reindex its rows.
df1 = DataFrame(randn(25).reshape(5, 5), index=['a', 'b', 'c', 'd', 'e'],
                columns=['c1', 'c2', 'c3', 'c4', 'c5'])
print(df1)
df2 = df1.reindex(['a', 'b', 'c', 'd', 'e', 'f'])
print(df2)
# Reindex cols.
df3 = df2.reindex(columns=['c1', 'c2', 'c3', 'c4', 'c5', 'c6'])
print(df3)
# Reindex both axes in one call.  (The old .ix/.loc trick with missing
# labels raises KeyError from pandas 1.0 on, so use reindex instead.)
df4 = df1.reindex(index=['a', 'b', 'c', 'd', 'e', 'f'],
                  columns=['c1', 'c2', 'c3', 'c4', 'c5', 'c6'])
print(df4)
import pandas as pd
from pandas import Series,DataFrame
from numpy.random import randn
s1=Series([1,2,3,4],index=['e','f','g','h'])
print(s1)
s2=s1.reindex(['e','f','g','h','i','j'])
print(s2)
s2=s2.reindex(['e','f','g','h','i','j','k'],fill_value=10)
print(s2)
cars=Series(['Audi','BMW','Honda'],index=[0,4,8])
print(cars)
ranger=range(13)
print(ranger)
cars=cars.reindex(ranger,method="ffill")
print(cars)
df1=DataFrame(randn(25).reshape(5,5),index=['a','b','c','d','e'],columns=['c1','c2','c3','c4','c5'])
print(df1)
df2=df1.reindex(['a','b','c','d','e','f'])
print(df2)
df3=df2.reindex(columns=['c1','c2','c3','c4','c5','c6'])
print(df3)
df4=df1.loc[['a','b','c','d','e','f'],['c1','c2','c3','c4','c5','c6']]
print(df4) | true | true |
1c323b3ab9ea1bba0d26414302fd4c5c78be2980 | 1,454 | py | Python | src/streamlink/plugins/mrtmk.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | [
"BSD-2-Clause"
] | 5 | 2019-07-26T17:03:26.000Z | 2020-10-17T23:23:43.000Z | src/streamlink/plugins/mrtmk.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | [
"BSD-2-Clause"
] | 9 | 2018-01-14T15:20:23.000Z | 2021-03-08T20:29:51.000Z | src/streamlink/plugins/mrtmk.py | hymer-up/streamlink | f09bf6e04cddc78eceb9ded655f716ef3ee4b84f | [
"BSD-2-Clause"
] | 4 | 2018-01-14T13:27:25.000Z | 2021-11-15T22:28:30.000Z | import logging
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
class MRTmk(Plugin):
    """Streamlink plugin for play.mrt.com.mk (Macedonian Radio Television).

    Scrapes the page body for interspace.com HLS (.m3u8) URLs and exposes
    each variant playlist as a stream.
    """

    # Dots are escaped so the pattern matches only the literal hostname
    # (the unescaped form would also match e.g. "playXmrtYcomZmk").
    url_re = re.compile(r"""https?://play\.mrt\.com\.mk/(live|play)/""")
    file_re = re.compile(r"""(?P<url>https?://vod-[\d\w]+\.interspace\.com[^"',]+\.m3u8[^"',]*)""")
    stream_schema = validate.Schema(
        validate.all(
            validate.transform(file_re.finditer),
            validate.transform(list),
            [validate.get("url")],
            # remove duplicates
            validate.transform(set),
            validate.transform(list),
        ),
    )

    @classmethod
    def can_handle_url(cls, url):
        """Return True when *url* points at a supported MRT live/VOD page."""
        return cls.url_re.match(url) is not None

    def _get_streams(self):
        """Yield (quality, HLSStream) pairs scraped from the page body."""
        res = self.session.http.get(self.url)
        stream_urls = self.stream_schema.validate(res.text)
        log.debug("Found streams: {0}".format(len(stream_urls)))
        if not stream_urls:
            return
        for stream_url in stream_urls:
            try:
                for s in HLSStream.parse_variant_playlist(self.session, stream_url).items():
                    yield s
            except IOError as err:
                # Geo-blocked streams answer 403; report instead of crashing.
                if "403 Client Error" in str(err):
                    log.error("Failed to access stream, may be due to geo-restriction")
                else:
                    raise err
__plugin__ = MRTmk
| 29.673469 | 99 | 0.589409 | import logging
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
class MRTmk(Plugin):
url_re = re.compile(r"""https?://play.mrt.com.mk/(live|play)/""")
file_re = re.compile(r"""(?P<url>https?://vod-[\d\w]+\.interspace\.com[^"',]+\.m3u8[^"',]*)""")
stream_schema = validate.Schema(
validate.all(
validate.transform(file_re.finditer),
validate.transform(list),
[validate.get("url")],
validate.transform(set),
validate.transform(list),
),
)
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
def _get_streams(self):
res = self.session.http.get(self.url)
stream_urls = self.stream_schema.validate(res.text)
log.debug("Found streams: {0}".format(len(stream_urls)))
if not stream_urls:
return
for stream_url in stream_urls:
try:
for s in HLSStream.parse_variant_playlist(self.session, stream_url).items():
yield s
except IOError as err:
if "403 Client Error" in str(err):
log.error("Failed to access stream, may be due to geo-restriction")
else:
raise err
__plugin__ = MRTmk
| true | true |
1c323be4d60185e31f821e471d54bc21e0f55f33 | 3,604 | py | Python | python/pb/envoy/type/matcher/path_pb2.py | adriangb/enterprise-client | 5d50b457425b0c6d08415b0d986fa9151b792151 | [
"Apache-2.0"
] | null | null | null | python/pb/envoy/type/matcher/path_pb2.py | adriangb/enterprise-client | 5d50b457425b0c6d08415b0d986fa9151b792151 | [
"Apache-2.0"
] | null | null | null | python/pb/envoy/type/matcher/path_pb2.py | adriangb/enterprise-client | 5d50b457425b0c6d08415b0d986fa9151b792151 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: envoy/type/matcher/path.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from envoy.type.matcher import string_pb2 as envoy_dot_type_dot_matcher_dot_string__pb2
from udpa.annotations import status_pb2 as udpa_dot_annotations_dot_status__pb2
from validate import validate_pb2 as validate_dot_validate__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='envoy/type/matcher/path.proto',
package='envoy.type.matcher',
syntax='proto3',
serialized_options=b'\n io.envoyproxy.envoy.type.matcherB\tPathProtoP\001\272\200\310\321\006\002\020\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1d\x65nvoy/type/matcher/path.proto\x12\x12\x65nvoy.type.matcher\x1a\x1f\x65nvoy/type/matcher/string.proto\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"W\n\x0bPathMatcher\x12;\n\x04path\x18\x01 \x01(\x0b\x32!.envoy.type.matcher.StringMatcherB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01H\x00\x42\x0b\n\x04rule\x12\x03\xf8\x42\x01\x42\x37\n io.envoyproxy.envoy.type.matcherB\tPathProtoP\x01\xba\x80\xc8\xd1\x06\x02\x10\x01\x62\x06proto3'
,
dependencies=[envoy_dot_type_dot_matcher_dot_string__pb2.DESCRIPTOR,udpa_dot_annotations_dot_status__pb2.DESCRIPTOR,validate_dot_validate__pb2.DESCRIPTOR,])
_PATHMATCHER = _descriptor.Descriptor(
name='PathMatcher',
full_name='envoy.type.matcher.PathMatcher',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='path', full_name='envoy.type.matcher.PathMatcher.path', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='rule', full_name='envoy.type.matcher.PathMatcher.rule',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[], serialized_options=b'\370B\001'),
],
serialized_start=142,
serialized_end=229,
)
_PATHMATCHER.fields_by_name['path'].message_type = envoy_dot_type_dot_matcher_dot_string__pb2._STRINGMATCHER
_PATHMATCHER.oneofs_by_name['rule'].fields.append(
_PATHMATCHER.fields_by_name['path'])
_PATHMATCHER.fields_by_name['path'].containing_oneof = _PATHMATCHER.oneofs_by_name['rule']
DESCRIPTOR.message_types_by_name['PathMatcher'] = _PATHMATCHER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PathMatcher = _reflection.GeneratedProtocolMessageType('PathMatcher', (_message.Message,), {
'DESCRIPTOR' : _PATHMATCHER,
'__module__' : 'envoy.type.matcher.path_pb2'
# @@protoc_insertion_point(class_scope:envoy.type.matcher.PathMatcher)
})
_sym_db.RegisterMessage(PathMatcher)
DESCRIPTOR._options = None
_PATHMATCHER.oneofs_by_name['rule']._options = None
_PATHMATCHER.fields_by_name['path']._options = None
# @@protoc_insertion_point(module_scope)
| 41.425287 | 473 | 0.791065 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from envoy.type.matcher import string_pb2 as envoy_dot_type_dot_matcher_dot_string__pb2
from udpa.annotations import status_pb2 as udpa_dot_annotations_dot_status__pb2
from validate import validate_pb2 as validate_dot_validate__pb2
# ---- Machine-generated protocol buffer code for envoy/type/matcher/path.proto ----
# Produced by the protocol buffer compiler; do not edit by hand.
# File-level descriptor carrying the serialized proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='envoy/type/matcher/path.proto',
  package='envoy.type.matcher',
  syntax='proto3',
  serialized_options=b'\n io.envoyproxy.envoy.type.matcherB\tPathProtoP\001\272\200\310\321\006\002\020\001',
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x1d\x65nvoy/type/matcher/path.proto\x12\x12\x65nvoy.type.matcher\x1a\x1f\x65nvoy/type/matcher/string.proto\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"W\n\x0bPathMatcher\x12;\n\x04path\x18\x01 \x01(\x0b\x32!.envoy.type.matcher.StringMatcherB\x08\xfa\x42\x05\x8a\x01\x02\x10\x01H\x00\x42\x0b\n\x04rule\x12\x03\xf8\x42\x01\x42\x37\n io.envoyproxy.envoy.type.matcherB\tPathProtoP\x01\xba\x80\xc8\xd1\x06\x02\x10\x01\x62\x06proto3'
  ,
  dependencies=[envoy_dot_type_dot_matcher_dot_string__pb2.DESCRIPTOR,udpa_dot_annotations_dot_status__pb2.DESCRIPTOR,validate_dot_validate__pb2.DESCRIPTOR,])
# Message descriptor for PathMatcher: a single `path` StringMatcher field
# wrapped in a required `rule` oneof.
_PATHMATCHER = _descriptor.Descriptor(
  name='PathMatcher',
  full_name='envoy.type.matcher.PathMatcher',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='path', full_name='envoy.type.matcher.PathMatcher.path', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=b'\372B\005\212\001\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='rule', full_name='envoy.type.matcher.PathMatcher.rule',
      index=0, containing_type=None,
      create_key=_descriptor._internal_create_key,
      fields=[], serialized_options=b'\370B\001'),
  ],
  serialized_start=142,
  serialized_end=229,
)
# Link the `path` field to the StringMatcher message and the `rule` oneof,
# then register the file descriptor with the default symbol database.
_PATHMATCHER.fields_by_name['path'].message_type = envoy_dot_type_dot_matcher_dot_string__pb2._STRINGMATCHER
_PATHMATCHER.oneofs_by_name['rule'].fields.append(
  _PATHMATCHER.fields_by_name['path'])
_PATHMATCHER.fields_by_name['path'].containing_oneof = _PATHMATCHER.oneofs_by_name['rule']
DESCRIPTOR.message_types_by_name['PathMatcher'] = _PATHMATCHER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build and register the concrete PathMatcher message class.
PathMatcher = _reflection.GeneratedProtocolMessageType('PathMatcher', (_message.Message,), {
  'DESCRIPTOR' : _PATHMATCHER,
  '__module__' : 'envoy.type.matcher.path_pb2'
  # @@protoc_insertion_point(class_scope:envoy.type.matcher.PathMatcher)
  })
_sym_db.RegisterMessage(PathMatcher)
# Reset cached options so they are lazily re-parsed from the serialized descriptor.
DESCRIPTOR._options = None
_PATHMATCHER.oneofs_by_name['rule']._options = None
_PATHMATCHER.fields_by_name['path']._options = None
| true | true |
1c323c0e31e11baddab9c8e810045324e97e9a2d | 25,490 | py | Python | neurst/tasks/cross_modal_translation.py | ReneeYe/XSTNet | c5e508aed878d13fea790caee71db1ce77619465 | [
"Apache-2.0"
] | 16 | 2021-06-22T02:36:32.000Z | 2022-03-27T23:07:55.000Z | neurst/tasks/cross_modal_translation.py | ReneeYe/XSTNet | c5e508aed878d13fea790caee71db1ce77619465 | [
"Apache-2.0"
] | 1 | 2022-03-12T13:28:23.000Z | 2022-03-12T13:28:23.000Z | neurst/tasks/cross_modal_translation.py | ReneeYe/XSTNet | c5e508aed878d13fea790caee71db1ce77619465 | [
"Apache-2.0"
] | 3 | 2021-08-03T12:49:35.000Z | 2021-09-02T03:58:18.000Z | # Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import numpy as np
import tensorflow as tf
from absl import logging
import neurst.data.dataset_utils as dataset_utils
from neurst.data.data_pipelines import DataPipeline, build_data_pipeline
from neurst.data.data_pipelines.tagged_text_data_pipeline import TaggedTextDataPipeline
from neurst.data.datasets import Dataset
from neurst.layers.metric_layers.token_metric_layers import AudioFramesMetricLayer, SequenceTokenMetricLayer, BatchCountMetricLayer
from neurst.models import build_model
from neurst.metrics import build_metric
from neurst.models.model_utils import deduce_text_length
from neurst.tasks import register_task
from neurst.tasks.task import Task
from neurst.training.training_utils import minimal_multiple
from neurst.utils import compat
from neurst.utils.configurable import deep_merge_dict
from neurst.utils.flags_core import Flag, ModuleFlag
from neurst.tasks.speech2text import create_audio_bucket_boundaries
def get_speech2text_bucket_sizes(args, num_replicas_in_sync):
    """Derives audio bucket boundaries (in frames) and a batch size per bucket.

    Args:
        args: The task argument dict (reads max_audio_src_len,
            batch_bucket_min_audio_src_len, audio_batch_size, batch_size_per_gpu).
        num_replicas_in_sync: The number of replicas the global batch is split over.

    Returns:
        A tuple (boundaries, batch_sizes) of two equal-length lists.
    """
    boundaries = create_audio_bucket_boundaries(args["max_audio_src_len"],
                                                args["batch_bucket_min_audio_src_len"])
    # Round the largest boundary up to a multiple of 8 for friendlier compiled shapes.
    boundaries[-1] = minimal_multiple(boundaries[-1], 8)
    global_batch = dataset_utils.adjust_batch_size(
        args["audio_batch_size"],
        args["batch_size_per_gpu"],
        num_replicas_in_sync=num_replicas_in_sync,
        verbose=False)
    per_replica = global_batch // num_replicas_in_sync
    # Larger buckets (longer audio) get proportionally smaller batches.
    batch_sizes = []
    for bound in boundaries:
        batch_sizes.append(int(per_replica // bound * num_replicas_in_sync))
    return boundaries, batch_sizes
def get_text2text_bucket_sizes(args, num_replicas_in_sync):
    """Derives source-text bucket boundaries and a batch size per bucket.

    Args:
        args: The task argument dict (reads max_text_src_len, text_batch_size,
            batch_size_per_gpu, batch_by_tokens).
        num_replicas_in_sync: The number of replicas the global batch is split over.

    Returns:
        A tuple (boundaries, batch_sizes).
    """
    boundaries = dataset_utils.create_batch_bucket_boundaries(args["max_text_src_len"])
    # Bucket by source-token length only when batching by tokens.
    if args["batch_by_tokens"]:
        bucket_spec = {"src_text": boundaries}
    else:
        bucket_spec = None
    batch_sizes = dataset_utils.adjust_batch_size(
        args["text_batch_size"],
        args["batch_size_per_gpu"],
        bucket_boundaries=bucket_spec,
        boundaries_reduce_to_length_fn=lambda x: max(tf.nest.flatten(x)),
        num_replicas_in_sync=num_replicas_in_sync)
    return boundaries, batch_sizes
def get_speech2text_bucket_size_with_ratio(args,
                                           audio_bucket_boundaries,
                                           bucket_batch_sizes):
    """Pairs each audio bucket with transcript-length bounds via a frame/transcript ratio.

    Args:
        args: The task argument dict (reads experimental_frame_transcript_ratio,
            max_audio_src_len, max_audio_trg_len).
        audio_bucket_boundaries: Bucket boundaries in audio frames.
        bucket_batch_sizes: Batch size per audio bucket.

    Returns:
        A tuple (audio_boundaries, transcript_boundaries, num_buckets) where the
        first two are int32 tensors of shape [num_buckets, 2].
    """
    ratio = args.get("experimental_frame_transcript_ratio", None)
    assert ratio is not None, "define experimental_frame_transcript_ratio, or it will OOM!"
    n = len(audio_bucket_boundaries)
    # Linearly interpolate the ratio from `ratio` up to max_src/max_trg across buckets.
    ratio_delta = args["max_audio_src_len"] / args["max_audio_trg_len"] - ratio
    trans_bounds = []
    for i, frame_bound in enumerate(audio_bucket_boundaries):
        raw = int(frame_bound / (ratio + i * ratio_delta / n))
        # Clamp to the maximum transcript length and round up to a multiple of 8.
        trans_bounds.append(minimal_multiple(min(raw, args["max_audio_trg_len"]), 8))
    num_buckets = len(trans_bounds)
    # Each bucket accepts two transcript bounds: its own and the next bucket's.
    cap = len(bucket_batch_sizes) - 1
    paired_trans_bounds = []
    num_input_shapes = 0
    for idx, (batch, frame_bound, trans_bound) in enumerate(
            zip(bucket_batch_sizes, audio_bucket_boundaries, trans_bounds)):
        pair = [trans_bound, trans_bounds[min(idx + 1, cap)]]
        num_input_shapes += len(set(pair))
        paired_trans_bounds.append(pair)
    logging.info(f"There are {num_input_shapes} input shapes to be compiled:")
    for idx, (batch, frame_bound, trans_pair) in enumerate(
            zip(bucket_batch_sizes, audio_bucket_boundaries, paired_trans_bounds)):
        logging.info(f"  - batch={batch}, maximum-frames={frame_bound}, "
                     f"maximum-transcript-length={set(trans_pair)}")
    trans_bound_tensor = tf.constant(paired_trans_bounds, dtype=tf.int32)
    # Duplicate the audio boundaries to match the [num_buckets, 2] transcript layout.
    audio_bound_tensor = tf.transpose(tf.constant([audio_bucket_boundaries] * 2, dtype=tf.int32))
    return audio_bound_tensor, trans_bound_tensor, num_buckets
@register_task(["xm_translation", "xst_translation", "cross_modal_translation", "XModalPretrain"])
class CrossModalTranslation(Task):
    """ Defines the cross-modal(audio & text) pre-train task.

    Mixes speech2text and text2text examples in one tf.data pipeline; text2text
    examples are marked by an empty `audio` field (audio_length == 0).
    """
    def __init__(self, args):
        """ Initializes the task.
        Args:
            args: A dict of model configurations.
        """
        super(CrossModalTranslation, self).__init__(args)
        # Text pipeline (tokenization + language tags) shared by both modalities.
        text_data_pipeline_cls = args.get("text_data_pipeline.class", TaggedTextDataPipeline)
        text_data_pipeline_params = args.get("text_data_pipeline.params", None) or {}
        self._text_data_pipeline = build_data_pipeline(
            text_data_pipeline_cls, **text_data_pipeline_params)
        # Per-frame feature dimension and channel count of the audio features.
        self._audio_feature_dim = args["audio_feature_dim"]
        self._audio_feature_channels = args["audio_feature_channels"]
    def get_config(self):
        """ Returns a dict of the task configuration for serialization. """
        return {
            "text_data_pipeline.class": self._text_data_pipeline.__class__.__name__,
            "text_data_pipeline.params": self._text_data_pipeline.get_config(),
            "audio_feature_dim": self._audio_feature_dim,
            "audio_feature_channels": self._audio_feature_channels
        }
    @staticmethod
    def class_or_method_args():
        """ Returns the command-line flags accepted by this task. """
        this_args = super(CrossModalTranslation, CrossModalTranslation).class_or_method_args()
        this_args.extend([
            ModuleFlag("text_data_pipeline", DataPipeline.REGISTRY_NAME,
                       default=TaggedTextDataPipeline.__name__,
                       help="The text data pipeline."),
            Flag("audio_feature_dim", dtype=Flag.TYPE.INTEGER, default=1,
                 help="The dimension of audio features."),
            Flag("audio_feature_channels", dtype=Flag.TYPE.INTEGER, default=1,
                 help="The number of channels of audio features."),
            Flag("max_audio_src_len", dtype=Flag.TYPE.INTEGER, default=None,
                 help="The maximum source length of training audio frames."),
            Flag("max_text_src_len", dtype=Flag.TYPE.INTEGER, default=None,
                 help="The maximum source length of training text data."),
            Flag("batch_bucket_min_audio_src_len", dtype=Flag.TYPE.INTEGER, default=1000,
                 help="The minimum source length of the training bucket of audio frames."),
            Flag("batch_bucket_min_text_src_len", dtype=Flag.TYPE.INTEGER, default=120,
                 help="The minimum source length of the training bucket of text data."),
            Flag("max_audio_trg_len", dtype=Flag.TYPE.INTEGER, default=None,
                 help="The maximum target length of training audio data."),
            Flag("max_text_trg_len", dtype=Flag.TYPE.INTEGER, default=None,
                 help="The maximum target length of training text data."),
            Flag("truncate_src", dtype=Flag.TYPE.BOOLEAN, default=None,
                 help="Whether to truncate source to max_audio_src_len or max_text_src_len."),
            Flag("truncate_trg", dtype=Flag.TYPE.BOOLEAN, default=None,
                 help="Whether to truncate target to max_audio_trg_len or max_text_trg_len."),
            Flag("experimental_frame_transcript_ratio", dtype=Flag.TYPE.INTEGER, default=None,
                 help="The ratio of the number of frames and its transcript for training batch bucket."),
            Flag("batch_by_frames", dtype=Flag.TYPE.BOOLEAN, default=True,
                 help="Whether to batch the data by audio frames."),
            Flag("audio_batch_size", dtype=Flag.TYPE.INTEGER, default=None,
                 help="The batch size of audio (frames)."),
            Flag("batch_by_tokens", dtype=Flag.TYPE.BOOLEAN, default=True,
                 help="Whether to batch the data by text tokens."),
            Flag("text_batch_size", dtype=Flag.TYPE.INTEGER, default=None,
                 help="The batch size of text (tokens)."),
        ])
        return this_args
    def inputs_signature(self, mode) -> Tuple[dict, dict]:
        """Returns the input dtypes and signatures (from dataset)."""
        dtypes = {"audio": tf.float32, "audio_length": tf.int64,
                  "src_text": tf.int64,
                  "tgt_text": tf.int64, "tgt_lang": tf.int64}
        signatures = {
            "audio": tf.TensorShape([None, None]),
            "audio_length": tf.TensorShape([None, ]),
            "src_text": tf.TensorShape([None, None]),
            "tgt_text": tf.TensorShape([None, None]),
            "tgt_lang": tf.TensorShape([None, None]),
        }
        return dtypes, signatures
    def build_model(self, args, name=None):
        """ Creates the model, injecting audio feature specs and text vocab meta. """
        model = build_model(args,
                            {"audio_feature_dim": self._audio_feature_dim,
                             "audio_feature_channels": self._audio_feature_channels},
                            self._text_data_pipeline.meta,
                            name=name)
        return model
    def example_to_input(self, batch_of_data: dict, mode) -> dict:
        """ Transform the data examples to model acceptable inputs.
        Args:
            batch_of_data: A dict: name -> tf.keras.layers.Input
            mode: The running mode.
        Returns: The input data for model.
        """
        batch = tf.shape(batch_of_data["audio"])[0]
        input_dict = {
            # Flattened frames -> [batch, time, feature_dim, channels].
            "audio": tf.reshape(batch_of_data["audio"],
                                [batch, -1, self._audio_feature_dim, self._audio_feature_channels]),
            "audio_length": batch_of_data["audio_length"],
            "src_text": batch_of_data["src_text"],
            "src_length": deduce_text_length(batch_of_data["src_text"],
                                             self._text_data_pipeline.meta["pad_id"],
                                             self._text_data_pipeline.meta["padding_mode"]),
            "trg_lang": batch_of_data["tgt_lang"],
        }
        # First target token (the language tag) acts as the decoder BOS; dim=1.
        target_bos = batch_of_data["tgt_text"][:, 0]
        if mode == compat.ModeKeys.INFER:
            input_dict["trg_input"] = target_bos
        else:
            input_dict["trg"] = batch_of_data["tgt_text"]
            input_dict["trg_length"] = deduce_text_length(batch_of_data["tgt_text"],
                                                          self._text_data_pipeline.meta["pad_id"],
                                                          self._text_data_pipeline.meta["padding_mode"])
            # Teacher forcing: shift the target right by one, prepending the BOS token.
            input_dict["trg_input"] = tf.concat([tf.expand_dims(target_bos, axis=-1),
                                                 batch_of_data["tgt_text"][:, :-1]], axis=1)
        return input_dict
    def get_data_postprocess_fn(self, mode):
        """ Returns the detokenizer for inference outputs; no-op for TRAIN/EVAL. """
        if mode == compat.ModeKeys.INFER:
            return self._text_data_pipeline.recover
        raise ValueError("No postprocess for TRAIN/EVAL.")
    def get_data_preprocess_fn(self, mode, ds, args=None) -> dict:
        """ Preprocess data sample according to this task.
        Args:
            args: A dict containing dataset arguments. may contains:
                - args["task"] in ["MT","ASR", "ST"]
            mode: A ModeKeys indicating the running mode.
            ds: neurst.data.datasets.XMMultipleDataset
        Returns: A dict, A callable function to collate (process) a data sample.
            map_func["speech2text"][name] = A callable function to process speech2text data
            map_func["text2text"][name] = A callable function to process text2text data
        """
        if args is None:
            args = self._args
        else:
            args = deep_merge_dict(self._args, args, local_overwrite=False)
        trunc_audio = args.get("truncate_src", None)
        max_audio_len = args.get("max_audio_src_len", None)
        max_text_src_len = args.get("max_text_src_len", None)
        trunc_text_trg = args.get("truncate_trg", None)
        max_text_trg_len = args.get("max_text_trg_len", None)
        def _process_audio(audio):
            # Truncate the flattened frame buffer; length is counted in frames, so
            # scale by feature dim and channel count.
            if trunc_audio and max_audio_len:
                audio = audio[:max_audio_len * self._audio_feature_dim * self._audio_feature_channels]
            return audio
        def _process_text(text, tag):
            # Tokenize raw strings; already-tokenized inputs pass through.
            if isinstance(text, tf.Tensor) and (text.dtype == tf.string):
                # NOTE(review): tf.Tensor has no `.as_string()` method — this branch
                # looks like it would raise AttributeError for string tensors; confirm.
                text = text.as_string().decode('utf-8')
            if isinstance(text, str):
                text = self._text_data_pipeline.process(text, is_processed=False)
            if mode == compat.ModeKeys.TRAIN and trunc_text_trg and max_text_trg_len:
                if tag == "tgt_text":
                    max_text_len = max_text_trg_len
                elif tag == "src_text":
                    max_text_len = max_text_src_len
                else:  # tag in ["src_lang", "tgt_lang"]
                    max_text_len = 10  # only 1 token, set a arbitrary number
                # Truncation keeps the final token (presumably EOS — confirm) intact.
                if isinstance(text, tf.Tensor):
                    text = tf.cond(
                        tf.less_equal(tf.size(text), max_text_len),
                        lambda: text,
                        lambda: tf.concat([text[:(max_text_len - 1)], text[-1:]], axis=0))
                else:
                    if len(text) > max_text_len:
                        text = text[:(max_text_len - 1)] + text[-1:]
            return text
        def _process_lang(lang):
            # Map a language name (with or without angle brackets) to its tag id.
            if not compat.is_tf_tensor(lang) and isinstance(lang, str):
                if not lang.startswith("<"):
                    lang = f"<{lang}>"
                return self._text_data_pipeline.lang2idx(lang)
            return lang
        def _has_lang_tag(text):
            # True when the text already begins with a "<lang>" tag.
            if isinstance(text, tf.Tensor) and (text.dtype == tf.string):
                # NOTE(review): see _process_text — `.as_string()` is not a tf.Tensor method.
                text = text.as_string()
            if isinstance(text, str):
                return text.startswith("<")
            return True
        def _process_speech2text(data):
            audio = _process_audio(data["audio"])
            lang = data.get("tgt_lang", None)
            ret = {"audio": audio,
                   # Length in frames: flattened size / feature dim / channels.
                   "audio_length": tf.cast((tf.shape(audio)[0] if isinstance(audio, tf.Tensor)
                                            else audio.shape[0]) // self._audio_feature_dim // self._audio_feature_channels,
                                           dtype=tf.int64),
                   "src_text": data["src_text"]}
            # Ensure the target starts with a language tag; derive tgt_lang from it.
            if _has_lang_tag(data["tgt_text"]) or (lang is None):
                ret["tgt_lang"] = [_process_text(data["tgt_text"], "tgt_text")[0]]
                ret["tgt_text"] = _process_text(data["tgt_text"], "tgt_text")
            else:
                ret["tgt_lang"] = [_process_lang(lang)]
                ret["tgt_text"] = [_process_lang(lang)] + _process_text(data["tgt_text"], "tgt_text")
            return ret
        def _process_text2text(data):
            # Text2text samples carry an empty audio field (audio_length == 0),
            # which downstream code uses to tell the two modalities apart.
            ret = {"audio": tf.constant([], dtype=tf.float32),
                   "audio_length": tf.cast(0, dtype=tf.int64)}
            if _has_lang_tag(data["tgt_text"]):
                ret["src_text"] = _process_text(data["src_text"], "src_text")
                ret["tgt_text"] = _process_text(data["tgt_text"], "tgt_text")
                ret["tgt_lang"] = [_process_text(data["tgt_text"], "tgt_text")[0]]
            else:
                ret["src_text"] = [_process_lang(data["src_lang"])] + _process_text(data["src_text"], "src_text")
                ret["tgt_text"] = [_process_lang(data["tgt_lang"])] + _process_text(data["tgt_text"], "tgt_text")
                ret["tgt_lang"] = [_process_lang(data["tgt_lang"])]
            return ret
        # One preprocess callable per dataset, keyed by modality then name.
        preprocess_func_dict = {}
        for ds_type in ds.datasets:
            preprocess_func_dict[ds_type] = {}
            if ds_type == "speech2text":
                for ds_name in ds.datasets[ds_type]:
                    preprocess_func_dict[ds_type][ds_name] = _process_speech2text
            elif ds_type == "text2text":
                for ds_name in ds.datasets[ds_type]:
                    preprocess_func_dict[ds_type][ds_name] = _process_text2text
            else:
                logging.warning("dataset type must be `text2text` or `speech2text` ")
        return preprocess_func_dict
    def create_and_batch_tfds(self, ds: Dataset, mode,
                              args=None, num_replicas_in_sync=1) -> tf.data.Dataset:
        """ Creates a dataset according to the `mode`.
        Args:
            args: A dict containing dataset arguments.
            ds: A neurst.data.datasets.Dataset object. neurst.data.datasets.XMMultipleDataset object
            mode: A ModeKeys indicating the running mode.
            num_replicas_in_sync: The number of GPUs or other workers. We will generate global
                batches, and each global batch is equally divisible by number of replicas.
        Returns:
            A tf.data.Dataset or a INFER_DATA tuple.
        """
        if args is None:
            args = self._args
        else:
            args = deep_merge_dict(self._args, args, local_overwrite=False)
        float_zero = tf.constant(0, dtype=tf.float32)
        int_zero = tf.constant(0, dtype=tf.int64)
        eos = tf.constant(self._text_data_pipeline.meta["eos_id"], dtype=tf.int64)
        # Pad audio with 0.0 and every text field with EOS.
        padding_values = {"audio": float_zero,
                          "audio_length": int_zero,
                          "src_text": eos,
                          "tgt_text": eos,
                          "tgt_lang": eos}
        dataset = ds.build(map_func=self.get_data_preprocess_fn(mode, ds, args),
                           map_output_dtypes=self.inputs_signature(mode)[0],
                           auto_shard=(mode == compat.ModeKeys.TRAIN),
                           shuffle=(mode == compat.ModeKeys.TRAIN))
        if mode != compat.ModeKeys.TRAIN:
            # EVAL/INFER: peek at one example — empty audio means a text2text dataset.
            is_s2t = True
            for x in dataset.take(1):
                if tf.size(x["audio"]) == 0:
                    is_s2t = False
            padded_shapes = {"audio_length": [], "tgt_text": [None], "tgt_lang": [None]}
            if is_s2t:
                padded_shapes["audio"] = [None]
                padded_shapes["src_text"] = [tf.constant(1, dtype=tf.int32)]
                return dataset.cache().padded_batch(
                    dataset_utils.adjust_batch_size(args["batch_size"],
                                                    num_replicas_in_sync=num_replicas_in_sync),
                    padded_shapes=padded_shapes,
                    padding_values=padding_values,
                    drop_remainder=False)
            else:
                # NOTE(review): a float32 constant is used as a padded-shape dimension
                # here; padded shapes are normally integer-valued — confirm intent.
                padded_shapes["audio"] = [tf.constant(8000, dtype=tf.float32)]
                padded_shapes["src_text"] = [None]
                return dataset.cache().padded_batch(
                    dataset_utils.adjust_batch_size(args["batch_size"],
                                                    num_replicas_in_sync=num_replicas_in_sync),
                    padded_shapes=padded_shapes,
                    padding_values=padding_values,
                    drop_remainder=False
                )
        # TRAIN: drop over-length examples (-1 disables the check for that field).
        clean_length_dict = {"audio": args["max_audio_src_len"] *
                                      self._audio_feature_dim * self._audio_feature_channels,
                             "audio_length": -1,
                             "src_text": args["max_text_src_len"],
                             "tgt_text": args["max_text_trg_len"],
                             "tgt_lang": -1}
        dataset = dataset.filter(
            lambda data_sample: tf.reduce_all([
                (length == -1) or (length is None) or
                tf.shape(data_sample[k])[0] <= length
                for k, length in clean_length_dict.items()]))
        logging.info("Created training dataset and batchifying...")
        # Speech2text buckets: by audio frames, each paired with transcript bounds.
        audio_bucket_boundaries, s2t_bucket_batch_sizes = get_speech2text_bucket_sizes(args,
                                                                                       num_replicas_in_sync)
        s2t_audio_bucket_boundaries, s2t_trans_bucket_boundries, s2t_buckets_num = \
            get_speech2text_bucket_size_with_ratio(args, audio_bucket_boundaries,
                                                   s2t_bucket_batch_sizes)
        s2t_bucket_batch_sizes = tf.constant(s2t_bucket_batch_sizes, dtype=tf.int64)
        audio_bucket_boundaries = tf.constant(audio_bucket_boundaries, dtype=tf.int32)
        # Text2text buckets: by source-token length.
        text_bucket_boundaries, t2t_bucket_batch_sizes = get_text2text_bucket_sizes(args,
                                                                                    num_replicas_in_sync)
        t2t_bucket_batch_sizes = tf.constant(t2t_bucket_batch_sizes, dtype=tf.int64)
        text_bucket_boundaries = tf.constant(text_bucket_boundaries, dtype=tf.int32)
        t2t_max_trg_len = tf.constant(args["max_text_trg_len"], dtype=tf.int32)
        # make s2t batches; t2t bucket ids come first, s2t ids are offset by this count.
        t2t_bucket_num = tf.constant(len(t2t_bucket_batch_sizes), tf.int64)
        def example_to_bucket_id(examples):
            """Return a tuple bucket_id for the example"""
            # audio_length == 0 marks a text2text example.
            is_text2text = tf.equal(tf.cast(examples["audio_length"], tf.int32),
                                    tf.constant(0, dtype=tf.int32))
            def _to_t2t_bucket_id():
                # Smallest text bucket whose boundary fits the source length.
                seq_length = tf.size(examples["src_text"])
                conditions_c = tf.less_equal(tf.cast(seq_length, tf.int32),
                                             tf.cast(text_bucket_boundaries, tf.int32))
                return tf.reduce_min(tf.where(conditions_c))
            def _to_s2t_bucket_id():
                # First (audio, transcript) bound pair that fits, flattened to a
                # single id and offset past the t2t bucket ids.
                conditions_c = tf.logical_and(
                    tf.less_equal(tf.cast(examples["audio_length"], tf.int32),
                                  s2t_audio_bucket_boundaries),
                    tf.less_equal(tf.size(examples["tgt_text"]),
                                  s2t_trans_bucket_boundries))
                minimum_match = tf.where(conditions_c)[0]
                return (minimum_match[0] * s2t_buckets_num + minimum_match[1]) + t2t_bucket_num
            return tf.cond(is_text2text, _to_t2t_bucket_id, _to_s2t_bucket_id)
        def window_size_fn(bucket_id):
            # Per-bucket batch size: dispatch on whether the id is t2t or s2t.
            def t2t_bucket_size():
                return t2t_bucket_batch_sizes[bucket_id]
            def s2t_bucket_size():
                s2t_bucket_id = bucket_id - t2t_bucket_num
                return s2t_bucket_batch_sizes[s2t_bucket_id // s2t_buckets_num]
            return tf.cond(tf.less(bucket_id, t2t_bucket_num),
                           t2t_bucket_size, s2t_bucket_size)
        def batching_fn(bucket_id, grouped_dataset):
            # Pad each window to its bucket's fixed shapes and batch it.
            bucket_batch_size = window_size_fn(bucket_id)
            def t2t_shapes():
                # NOTE(review): fixed audio shape of 5000 for text2text windows — the
                # audio field is empty there; confirm the constant is intentional.
                ret = {"audio": [tf.constant(5000, dtype=tf.int32)], "audio_length": [],
                       "src_text": [text_bucket_boundaries[bucket_id]],
                       "tgt_text": [t2t_max_trg_len],}
                ret["tgt_lang"] = [1]
                return ret
            def s2t_shapes():
                s2t_bucket_id = bucket_id - t2t_bucket_num
                ret = {"audio": ([audio_bucket_boundaries[s2t_bucket_id // s2t_buckets_num]
                                  * self._audio_feature_dim * self._audio_feature_channels]),
                       "audio_length": [],
                       "src_text": [tf.constant(5, dtype=tf.int32)],
                       "tgt_text": [s2t_trans_bucket_boundries[s2t_bucket_id // s2t_buckets_num][s2t_bucket_id % s2t_buckets_num]],
                       "tgt_lang": [1]}
                return ret
            padded_shapes = tf.cond(tf.less(bucket_id, t2t_bucket_num),
                                    t2t_shapes, s2t_shapes)
            return grouped_dataset.padded_batch(
                bucket_batch_size,
                padded_shapes=padded_shapes,
                padding_values=padding_values,
                drop_remainder=True
            )
        # Group examples into same-bucket windows and batch each window.
        tfds = dataset.apply(tf.data.experimental.group_by_window(
            key_func=example_to_bucket_id, reduce_func=batching_fn,
            window_size=None, window_size_func=window_size_fn))
        return tfds
    def build_metric_layer(self):
        """ Returns layers tracking audio frames, target tokens and batch counts. """
        return [AudioFramesMetricLayer("audio"),
                SequenceTokenMetricLayer("trg"), BatchCountMetricLayer("audio")]
    def get_eval_metric(self, args, name="metric", ds=None):
        """ Returns a neurst.metrics.metric.Metric object for evaluation."""
        return build_metric(args[name + ".class"], language=self._text_data_pipeline.meta["language"],
                            **args[name + ".params"])
| 50.475248 | 131 | 0.60255 |
from typing import Tuple
import numpy as np
import tensorflow as tf
from absl import logging
import neurst.data.dataset_utils as dataset_utils
from neurst.data.data_pipelines import DataPipeline, build_data_pipeline
from neurst.data.data_pipelines.tagged_text_data_pipeline import TaggedTextDataPipeline
from neurst.data.datasets import Dataset
from neurst.layers.metric_layers.token_metric_layers import AudioFramesMetricLayer, SequenceTokenMetricLayer, BatchCountMetricLayer
from neurst.models import build_model
from neurst.metrics import build_metric
from neurst.models.model_utils import deduce_text_length
from neurst.tasks import register_task
from neurst.tasks.task import Task
from neurst.training.training_utils import minimal_multiple
from neurst.utils import compat
from neurst.utils.configurable import deep_merge_dict
from neurst.utils.flags_core import Flag, ModuleFlag
from neurst.tasks.speech2text import create_audio_bucket_boundaries
def get_speech2text_bucket_sizes(args, num_replicas_in_sync):
    """Computes audio bucket boundaries (frame counts) and a batch size per bucket.

    Args:
        args: The task argument dict (reads max_audio_src_len,
            batch_bucket_min_audio_src_len, audio_batch_size, batch_size_per_gpu).
        num_replicas_in_sync: The number of replicas the global batch is split over.

    Returns:
        A tuple (audio_bucket_boundaries, bucket_batch_sizes).
    """
    audio_bucket_boundaries = create_audio_bucket_boundaries(args["max_audio_src_len"],
                                                             args["batch_bucket_min_audio_src_len"])
    # Round the largest boundary up to a multiple of 8 for friendlier compiled shapes.
    audio_bucket_boundaries[-1] = minimal_multiple(audio_bucket_boundaries[-1], 8)
    batch_size = dataset_utils.adjust_batch_size(
        args["audio_batch_size"],
        args["batch_size_per_gpu"],
        num_replicas_in_sync=num_replicas_in_sync,
        verbose=False)
    batch_size_per_gpu = batch_size // num_replicas_in_sync
    # Longer audio buckets get proportionally smaller batches.
    bucket_batch_sizes = [int(batch_size_per_gpu // bound
                              * num_replicas_in_sync) for bound in audio_bucket_boundaries]
    return audio_bucket_boundaries, bucket_batch_sizes
def get_text2text_bucket_sizes(args, num_replicas_in_sync):
    """Computes source-text bucket boundaries and a batch size per bucket.

    Args:
        args: The task argument dict (reads max_text_src_len, text_batch_size,
            batch_size_per_gpu, batch_by_tokens).
        num_replicas_in_sync: The number of replicas the global batch is split over.

    Returns:
        A tuple (src_text_bucket_boundaries, bucket_batch_sizes).
    """
    src_text_bucket_boundaries = dataset_utils.create_batch_bucket_boundaries(args["max_text_src_len"])
    # Bucket by source-token length only when batching by tokens.
    bucket_batch_sizes = dataset_utils.adjust_batch_size(
        args["text_batch_size"],
        args["batch_size_per_gpu"],
        bucket_boundaries={"src_text": src_text_bucket_boundaries}
        if args["batch_by_tokens"] else None,
        boundaries_reduce_to_length_fn=lambda x: max(tf.nest.flatten(x)),
        num_replicas_in_sync=num_replicas_in_sync)
    return src_text_bucket_boundaries, bucket_batch_sizes
def get_speech2text_bucket_size_with_ratio(args,
                                           audio_bucket_boundaries,
                                           bucket_batch_sizes):
    """Pairs each audio bucket with transcript-length bounds via a frame/transcript ratio.

    Args:
        args: The task argument dict (reads experimental_frame_transcript_ratio,
            max_audio_src_len, max_audio_trg_len).
        audio_bucket_boundaries: Bucket boundaries in audio frames.
        bucket_batch_sizes: Batch size per audio bucket.

    Returns:
        A tuple (audio_boundaries, transcript_boundaries, num_buckets) where the
        first two are int32 tensors of shape [num_buckets, 2].
    """
    frame_transcript_ratio = args.get("experimental_frame_transcript_ratio", None)
    assert frame_transcript_ratio is not None, "define experimental_frame_transcript_ratio, or it will OOM!"
    # Linearly interpolate the ratio from `frame_transcript_ratio` up to
    # max_audio_src_len / max_audio_trg_len across the buckets.
    trans_bucket_boundaries = [
        int(bound / (frame_transcript_ratio + i * (
            args["max_audio_src_len"] / args["max_audio_trg_len"] - frame_transcript_ratio) /
                     len(audio_bucket_boundaries)))
        for i, bound in enumerate(audio_bucket_boundaries)]
    # Clamp to the maximum transcript length and round up to a multiple of 8.
    trans_bucket_boundaries = [minimal_multiple(min(i, args["max_audio_trg_len"]), 8) for i in
                               trans_bucket_boundaries]
    num_buckets = len(trans_bucket_boundaries)
    # Each bucket accepts two transcript bounds: its own and the next bucket's.
    true_trans_bucket_boundaries = []
    num_input_shapes = 0
    for idx, (batc, bound, tbound) in enumerate(zip(bucket_batch_sizes, audio_bucket_boundaries,
                                                    trans_bucket_boundaries)):
        max_trans_len = [tbound,
                         trans_bucket_boundaries[min(idx + 1, len(bucket_batch_sizes) - 1)]]
        num_input_shapes += len(set(max_trans_len))
        true_trans_bucket_boundaries.append(max_trans_len)
    logging.info(f"There are {num_input_shapes} input shapes to be compiled:")
    for idx, (batc, bound, tbound) in enumerate(zip(bucket_batch_sizes, audio_bucket_boundaries,
                                                    true_trans_bucket_boundaries)):
        logging.info(f"  - batch={batc}, maximum-frames={bound}, "
                     f"maximum-transcript-length={set(tbound)}")
    true_trans_bucket_boundaries = tf.constant(true_trans_bucket_boundaries, dtype=tf.int32)
    # Duplicate the audio boundaries to match the [num_buckets, 2] transcript layout.
    true_audio_bucket_boundaries = tf.transpose(tf.constant([audio_bucket_boundaries] * 2, dtype=tf.int32))
    return true_audio_bucket_boundaries, true_trans_bucket_boundaries, num_buckets
@register_task(["xm_translation", "xst_translation", "cross_modal_translation", "XModalPretrain"])
class CrossModalTranslation(Task):
def __init__(self, args):
super(CrossModalTranslation, self).__init__(args)
text_data_pipeline_cls = args.get("text_data_pipeline.class", TaggedTextDataPipeline)
text_data_pipeline_params = args.get("text_data_pipeline.params", None) or {}
self._text_data_pipeline = build_data_pipeline(
text_data_pipeline_cls, **text_data_pipeline_params)
self._audio_feature_dim = args["audio_feature_dim"]
self._audio_feature_channels = args["audio_feature_channels"]
def get_config(self):
return {
"text_data_pipeline.class": self._text_data_pipeline.__class__.__name__,
"text_data_pipeline.params": self._text_data_pipeline.get_config(),
"audio_feature_dim": self._audio_feature_dim,
"audio_feature_channels": self._audio_feature_channels
}
@staticmethod
def class_or_method_args():
this_args = super(CrossModalTranslation, CrossModalTranslation).class_or_method_args()
this_args.extend([
ModuleFlag("text_data_pipeline", DataPipeline.REGISTRY_NAME,
default=TaggedTextDataPipeline.__name__,
help="The text data pipeline."),
Flag("audio_feature_dim", dtype=Flag.TYPE.INTEGER, default=1,
help="The dimension of audio features."),
Flag("audio_feature_channels", dtype=Flag.TYPE.INTEGER, default=1,
help="The number of channels of audio features."),
Flag("max_audio_src_len", dtype=Flag.TYPE.INTEGER, default=None,
help="The maximum source length of training audio frames."),
Flag("max_text_src_len", dtype=Flag.TYPE.INTEGER, default=None,
help="The maximum source length of training text data."),
Flag("batch_bucket_min_audio_src_len", dtype=Flag.TYPE.INTEGER, default=1000,
help="The minimum source length of the training bucket of audio frames."),
Flag("batch_bucket_min_text_src_len", dtype=Flag.TYPE.INTEGER, default=120,
help="The minimum source length of the training bucket of text data."),
Flag("max_audio_trg_len", dtype=Flag.TYPE.INTEGER, default=None,
help="The maximum target length of training audio data."),
Flag("max_text_trg_len", dtype=Flag.TYPE.INTEGER, default=None,
help="The maximum target length of training text data."),
Flag("truncate_src", dtype=Flag.TYPE.BOOLEAN, default=None,
help="Whether to truncate source to max_audio_src_len or max_text_src_len."),
Flag("truncate_trg", dtype=Flag.TYPE.BOOLEAN, default=None,
help="Whether to truncate target to max_audio_trg_len or max_text_trg_len."),
Flag("experimental_frame_transcript_ratio", dtype=Flag.TYPE.INTEGER, default=None,
help="The ratio of the number of frames and its transcript for training batch bucket."),
Flag("batch_by_frames", dtype=Flag.TYPE.BOOLEAN, default=True,
help="Whether to batch the data by audio frames."),
Flag("audio_batch_size", dtype=Flag.TYPE.INTEGER, default=None,
help="The batch size of audio (frames)."),
Flag("batch_by_tokens", dtype=Flag.TYPE.BOOLEAN, default=True,
help="Whether to batch the data by text tokens."),
Flag("text_batch_size", dtype=Flag.TYPE.INTEGER, default=None,
help="The batch size of text (tokens)."),
])
return this_args
def inputs_signature(self, mode) -> Tuple[dict, dict]:
dtypes = {"audio": tf.float32, "audio_length": tf.int64,
"src_text": tf.int64,
"tgt_text": tf.int64, "tgt_lang": tf.int64}
signatures = {
"audio": tf.TensorShape([None, None]),
"audio_length": tf.TensorShape([None, ]),
"src_text": tf.TensorShape([None, None]),
"tgt_text": tf.TensorShape([None, None]),
"tgt_lang": tf.TensorShape([None, None]),
}
return dtypes, signatures
def build_model(self, args, name=None):
model = build_model(args,
{"audio_feature_dim": self._audio_feature_dim,
"audio_feature_channels": self._audio_feature_channels},
self._text_data_pipeline.meta,
name=name)
return model
def example_to_input(self, batch_of_data: dict, mode) -> dict:
batch = tf.shape(batch_of_data["audio"])[0]
input_dict = {
"audio": tf.reshape(batch_of_data["audio"],
[batch, -1, self._audio_feature_dim, self._audio_feature_channels]),
"audio_length": batch_of_data["audio_length"],
"src_text": batch_of_data["src_text"],
"src_length": deduce_text_length(batch_of_data["src_text"],
self._text_data_pipeline.meta["pad_id"],
self._text_data_pipeline.meta["padding_mode"]),
"trg_lang": batch_of_data["tgt_lang"],
}
target_bos = batch_of_data["tgt_text"][:, 0]
if mode == compat.ModeKeys.INFER:
input_dict["trg_input"] = target_bos
else:
input_dict["trg"] = batch_of_data["tgt_text"]
input_dict["trg_length"] = deduce_text_length(batch_of_data["tgt_text"],
self._text_data_pipeline.meta["pad_id"],
self._text_data_pipeline.meta["padding_mode"])
input_dict["trg_input"] = tf.concat([tf.expand_dims(target_bos, axis=-1),
batch_of_data["tgt_text"][:, :-1]], axis=1)
return input_dict
def get_data_postprocess_fn(self, mode):
if mode == compat.ModeKeys.INFER:
return self._text_data_pipeline.recover
raise ValueError("No postprocess for TRAIN/EVAL.")
def get_data_preprocess_fn(self, mode, ds, args=None) -> dict:
    """Build per-dataset preprocess functions.

    Returns:
        A dict ``{dataset_type: {dataset_name: fn}}`` mapping every
        dataset in `ds` to either the speech2text or the text2text
        example preprocessor defined below.
    """
    if args is None:
        args = self._args
    else:
        args = deep_merge_dict(self._args, args, local_overwrite=False)
    # Optional truncation settings pulled from the (merged) args.
    trunc_audio = args.get("truncate_src", None)
    max_audio_len = args.get("max_audio_src_len", None)
    max_text_src_len = args.get("max_text_src_len", None)
    trunc_text_trg = args.get("truncate_trg", None)
    max_text_trg_len = args.get("max_text_trg_len", None)

    def _process_audio(audio):
        # Truncate the flattened audio to at most `max_audio_len` frames.
        if trunc_audio and max_audio_len:
            audio = audio[:max_audio_len * self._audio_feature_dim * self._audio_feature_channels]
        return audio

    def _process_text(text, tag):
        # Tokenize raw text and, during TRAIN, truncate to the tag's max
        # length while keeping the final token (presumably EOS) intact.
        if isinstance(text, tf.Tensor) and (text.dtype == tf.string):
            # NOTE(review): tf.Tensor has no `as_string` method — this
            # branch looks broken/unreachable; confirm the intended API.
            text = text.as_string().decode('utf-8')
        if isinstance(text, str):
            text = self._text_data_pipeline.process(text, is_processed=False)
        if mode == compat.ModeKeys.TRAIN and trunc_text_trg and max_text_trg_len:
            if tag == "tgt_text":
                max_text_len = max_text_trg_len
            elif tag == "src_text":
                max_text_len = max_text_src_len
            else:
                # Fallback bound for unknown tags.
                max_text_len = 10
            if isinstance(text, tf.Tensor):
                text = tf.cond(
                    tf.less_equal(tf.size(text), max_text_len),
                    lambda: text,
                    lambda: tf.concat([text[:(max_text_len - 1)], text[-1:]], axis=0))
            else:
                if len(text) > max_text_len:
                    text = text[:(max_text_len - 1)] + text[-1:]
        return text

    def _process_lang(lang):
        # Map a language name/tag ("en" or "<en>") to its vocabulary id.
        if not compat.is_tf_tensor(lang) and isinstance(lang, str):
            if not lang.startswith("<"):
                lang = f"<{lang}>"
            return self._text_data_pipeline.lang2idx(lang)
        return lang

    def _has_lang_tag(text):
        # True when the text already begins with a "<lang>" tag; tensors
        # are assumed to be pre-tagged.
        if isinstance(text, tf.Tensor) and (text.dtype == tf.string):
            text = text.as_string()
        if isinstance(text, str):
            return text.startswith("<")
        return True

    def _process_speech2text(data):
        # Speech example: frame count is derived from the flat audio size.
        audio = _process_audio(data["audio"])
        lang = data.get("tgt_lang", None)
        ret = {"audio": audio,
               "audio_length": tf.cast((tf.shape(audio)[0] if isinstance(audio, tf.Tensor)
                                        else audio.shape[0]) // self._audio_feature_dim // self._audio_feature_channels,
                                       dtype=tf.int64),
               "src_text": data["src_text"]}
        if _has_lang_tag(data["tgt_text"]) or (lang is None):
            # Target already carries the language tag as its first token.
            ret["tgt_lang"] = [_process_text(data["tgt_text"], "tgt_text")[0]]
            ret["tgt_text"] = _process_text(data["tgt_text"], "tgt_text")
        else:
            # Prepend the explicit language id to the target tokens.
            ret["tgt_lang"] = [_process_lang(lang)]
            ret["tgt_text"] = [_process_lang(lang)] + _process_text(data["tgt_text"], "tgt_text")
        return ret

    def _process_text2text(data):
        # Text example: empty audio marks it as text2text downstream.
        ret = {"audio": tf.constant([], dtype=tf.float32),
               "audio_length": tf.cast(0, dtype=tf.int64)}
        if _has_lang_tag(data["tgt_text"]):
            ret["src_text"] = _process_text(data["src_text"], "src_text")
            ret["tgt_text"] = _process_text(data["tgt_text"], "tgt_text")
            ret["tgt_lang"] = [_process_text(data["tgt_text"], "tgt_text")[0]]
        else:
            ret["src_text"] = [_process_lang(data["src_lang"])] + _process_text(data["src_text"], "src_text")
            ret["tgt_text"] = [_process_lang(data["tgt_lang"])] + _process_text(data["tgt_text"], "tgt_text")
            ret["tgt_lang"] = [_process_lang(data["tgt_lang"])]
        return ret

    # Assign one preprocessor per dataset name, keyed by dataset type.
    preprocess_func_dict = {}
    for ds_type in ds.datasets:
        preprocess_func_dict[ds_type] = {}
        if ds_type == "speech2text":
            for ds_name in ds.datasets[ds_type]:
                preprocess_func_dict[ds_type][ds_name] = _process_speech2text
        elif ds_type == "text2text":
            for ds_name in ds.datasets[ds_type]:
                preprocess_func_dict[ds_type][ds_name] = _process_text2text
        else:
            logging.warning("dataset type must be `text2text` or `speech2text` ")
    return preprocess_func_dict
def create_and_batch_tfds(self, ds: Dataset, mode,
                          args=None, num_replicas_in_sync=1) -> tf.data.Dataset:
    """Create, filter, bucket and batch the tf.data pipeline.

    TRAIN: length-filters examples, then buckets by audio/text length
    via ``group_by_window`` with mixed text2text and speech2text
    buckets. EVAL/INFER: a simple cached ``padded_batch``.
    """
    if args is None:
        args = self._args
    else:
        args = deep_merge_dict(self._args, args, local_overwrite=False)
    float_zero = tf.constant(0, dtype=tf.float32)
    int_zero = tf.constant(0, dtype=tf.int64)
    eos = tf.constant(self._text_data_pipeline.meta["eos_id"], dtype=tf.int64)
    # Padding: silence for audio, EOS id for all token sequences.
    padding_values = {"audio": float_zero,
                      "audio_length": int_zero,
                      "src_text": eos,
                      "tgt_text": eos,
                      "tgt_lang": eos}
    dataset = ds.build(map_func=self.get_data_preprocess_fn(mode, ds, args),
                       map_output_dtypes=self.inputs_signature(mode)[0],
                       auto_shard=(mode == compat.ModeKeys.TRAIN),
                       shuffle=(mode == compat.ModeKeys.TRAIN))
    if mode != compat.ModeKeys.TRAIN:
        # Peek one example: empty audio means a text2text eval/infer set.
        is_s2t = True
        for x in dataset.take(1):
            if tf.size(x["audio"]) == 0:
                is_s2t = False
        padded_shapes = {"audio_length": [], "tgt_text": [None], "tgt_lang": [None]}
        if is_s2t:
            padded_shapes["audio"] = [None]
            padded_shapes["src_text"] = [tf.constant(1, dtype=tf.int32)]
            return dataset.cache().padded_batch(
                dataset_utils.adjust_batch_size(args["batch_size"],
                                                num_replicas_in_sync=num_replicas_in_sync),
                padded_shapes=padded_shapes,
                padding_values=padding_values,
                drop_remainder=False)
        else:
            # NOTE(review): a float32 constant as a padded shape is
            # suspicious — shapes are normally ints; confirm intended.
            padded_shapes["audio"] = [tf.constant(8000, dtype=tf.float32)]
            padded_shapes["src_text"] = [None]
            return dataset.cache().padded_batch(
                dataset_utils.adjust_batch_size(args["batch_size"],
                                                num_replicas_in_sync=num_replicas_in_sync),
                padded_shapes=padded_shapes,
                padding_values=padding_values,
                drop_remainder=False
            )
    # TRAIN path: drop over-length examples (-1/None disables a bound).
    clean_length_dict = {"audio": args["max_audio_src_len"] *
                         self._audio_feature_dim * self._audio_feature_channels,
                         "audio_length": -1,
                         "src_text": args["max_text_src_len"],
                         "tgt_text": args["max_text_trg_len"],
                         "tgt_lang": -1}
    dataset = dataset.filter(
        lambda data_sample: tf.reduce_all([
            (length == -1) or (length is None) or
            tf.shape(data_sample[k])[0] <= length
            for k, length in clean_length_dict.items()]))
    logging.info("Created training dataset and batchifying...")
    # Bucket boundaries / batch sizes for speech2text ...
    audio_bucket_boundaries, s2t_bucket_batch_sizes = get_speech2text_bucket_sizes(args,
                                                                                   num_replicas_in_sync)
    s2t_audio_bucket_boundaries, s2t_trans_bucket_boundries, s2t_buckets_num = \
        get_speech2text_bucket_size_with_ratio(args, audio_bucket_boundaries,
                                               s2t_bucket_batch_sizes)
    s2t_bucket_batch_sizes = tf.constant(s2t_bucket_batch_sizes, dtype=tf.int64)
    audio_bucket_boundaries = tf.constant(audio_bucket_boundaries, dtype=tf.int32)
    # ... and for text2text.
    text_bucket_boundaries, t2t_bucket_batch_sizes = get_text2text_bucket_sizes(args,
                                                                                num_replicas_in_sync)
    t2t_bucket_batch_sizes = tf.constant(t2t_bucket_batch_sizes, dtype=tf.int64)
    text_bucket_boundaries = tf.constant(text_bucket_boundaries, dtype=tf.int32)
    t2t_max_trg_len = tf.constant(args["max_text_trg_len"], dtype=tf.int32)
    t2t_bucket_num = tf.constant(len(t2t_bucket_batch_sizes), tf.int64)

    def example_to_bucket_id(examples):
        # text2text buckets occupy ids [0, t2t_bucket_num); speech2text
        # bucket ids are offset by t2t_bucket_num.
        is_text2text = tf.equal(tf.cast(examples["audio_length"], tf.int32),
                                tf.constant(0, dtype=tf.int32))

        def _to_t2t_bucket_id():
            # Smallest text bucket whose boundary fits the source length.
            seq_length = tf.size(examples["src_text"])
            conditions_c = tf.less_equal(tf.cast(seq_length, tf.int32),
                                         tf.cast(text_bucket_boundaries, tf.int32))
            return tf.reduce_min(tf.where(conditions_c))

        def _to_s2t_bucket_id():
            # 2-D bucketing: audio length x transcript length.
            conditions_c = tf.logical_and(
                tf.less_equal(tf.cast(examples["audio_length"], tf.int32),
                              s2t_audio_bucket_boundaries),
                tf.less_equal(tf.size(examples["tgt_text"]),
                              s2t_trans_bucket_boundries))
            minimum_match = tf.where(conditions_c)[0]
            # Flatten the 2-D bucket index, then offset past t2t buckets.
            return (minimum_match[0] * s2t_buckets_num + minimum_match[1]) + t2t_bucket_num

        return tf.cond(is_text2text, _to_t2t_bucket_id, _to_s2t_bucket_id)

    def window_size_fn(bucket_id):
        # Batch size for the given (mixed-type) bucket id.
        def t2t_bucket_size():
            return t2t_bucket_batch_sizes[bucket_id]

        def s2t_bucket_size():
            s2t_bucket_id = bucket_id - t2t_bucket_num
            return s2t_bucket_batch_sizes[s2t_bucket_id // s2t_buckets_num]

        return tf.cond(tf.less(bucket_id, t2t_bucket_num),
                       t2t_bucket_size, s2t_bucket_size)

    def batching_fn(bucket_id, grouped_dataset):
        # Pad every element of one bucket to that bucket's fixed shape.
        bucket_batch_size = window_size_fn(bucket_id)

        def t2t_shapes():
            ret = {"audio": [tf.constant(5000, dtype=tf.int32)], "audio_length": [],
                   "src_text": [text_bucket_boundaries[bucket_id]],
                   "tgt_text": [t2t_max_trg_len],}
            ret["tgt_lang"] = [1]
            return ret

        def s2t_shapes():
            s2t_bucket_id = bucket_id - t2t_bucket_num
            ret = {"audio": ([audio_bucket_boundaries[s2t_bucket_id // s2t_buckets_num]
                              * self._audio_feature_dim * self._audio_feature_channels]),
                   "audio_length": [],
                   "src_text": [tf.constant(5, dtype=tf.int32)],
                   "tgt_text": [s2t_trans_bucket_boundries[s2t_bucket_id // s2t_buckets_num][s2t_bucket_id % s2t_buckets_num]],
                   "tgt_lang": [1]}
            return ret

        padded_shapes = tf.cond(tf.less(bucket_id, t2t_bucket_num),
                                t2t_shapes, s2t_shapes)
        return grouped_dataset.padded_batch(
            bucket_batch_size,
            padded_shapes=padded_shapes,
            padding_values=padding_values,
            drop_remainder=True
        )

    tfds = dataset.apply(tf.data.experimental.group_by_window(
        key_func=example_to_bucket_id, reduce_func=batching_fn,
        window_size=None, window_size_func=window_size_fn))
    return tfds
def build_metric_layer(self):
    """Metric layers reported during training: audio frame count,
    target token count and batch count."""
    return [AudioFramesMetricLayer("audio"),
            SequenceTokenMetricLayer("trg"), BatchCountMetricLayer("audio")]
def get_eval_metric(self, args, name="metric", ds=None):
    """Build the evaluation metric from ``<name>.class`` / ``<name>.params``
    in `args`, passing the pipeline's target language along."""
    return build_metric(args[name + ".class"], language=self._text_data_pipeline.meta["language"],
                        **args[name + ".params"])
| true | true |
1c323c3fc4e3462da91c1c2ee4031a1da2a11473 | 126 | py | Python | week06_1/static_demo/myproject/myapp/models.py | wasit7/cs459_django2018 | 77c09712ac5328d9bba285c000d25d86dbc363b1 | [
"BSD-2-Clause"
] | 4 | 2018-01-17T06:51:24.000Z | 2019-05-01T15:45:17.000Z | week06_1/static_demo/myproject/myapp/models.py | wasit7/cs459_django2018 | 77c09712ac5328d9bba285c000d25d86dbc363b1 | [
"BSD-2-Clause"
] | null | null | null | week06_1/static_demo/myproject/myapp/models.py | wasit7/cs459_django2018 | 77c09712ac5328d9bba285c000d25d86dbc363b1 | [
"BSD-2-Clause"
] | 3 | 2018-04-04T06:54:08.000Z | 2019-03-06T02:31:16.000Z | from django.db import models
# Create your models here.
class Car(models.Model):
image = models.ImageField(upload_to='cars') | 25.2 | 44 | 0.769841 | from django.db import models
class Car(models.Model):
image = models.ImageField(upload_to='cars') | true | true |
1c323c7ad0cf673c86820e22a1ccf3368afdf661 | 6,276 | py | Python | nuqql_matrixd/server.py | hwipl/nuqql-matrixd | 8120ec8bf5a3818726ad2b30f23f9f05fb37c9ec | [
"MIT"
] | null | null | null | nuqql_matrixd/server.py | hwipl/nuqql-matrixd | 8120ec8bf5a3818726ad2b30f23f9f05fb37c9ec | [
"MIT"
] | null | null | null | nuqql_matrixd/server.py | hwipl/nuqql-matrixd | 8120ec8bf5a3818726ad2b30f23f9f05fb37c9ec | [
"MIT"
] | null | null | null | """
matrixd backend server
"""
import html
import re
from typing import TYPE_CHECKING, Dict, Optional, Tuple
# nuqq-based imports
from nuqql_based.based import Based
from nuqql_based.callback import Callback
from nuqql_based.message import Message
# matrixd imports
from nuqql_matrixd.client import BackendClient
from nuqql_matrixd.matrix import unescape_name
if TYPE_CHECKING: # imports for typing
# pylint: disable=ungrouped-imports
from nuqql_based.based import CallbackList
from nuqql_based.account import Account
# matrixd version
VERSION = "0.5.0"
class BackendServer:
    """
    Backend server class, manages the BackendClients for connections to
    IM networks
    """

    def __init__(self) -> None:
        # account id -> active BackendClient connection
        self.connections: Dict[int, BackendClient] = {}
        self.based = Based("matrixd", VERSION)

    async def start(self) -> None:
        """
        Start server
        """
        # set callbacks
        callbacks: "CallbackList" = [
            # nuqql messages
            (Callback.HELP_WELCOME, self._help_welcome),
            (Callback.HELP_ACCOUNT_ADD, self._help_account_add),
            (Callback.ADD_ACCOUNT, self.add_account),
            (Callback.DEL_ACCOUNT, self.del_account),
            (Callback.GET_BUDDIES, self.handle_command),
            (Callback.SEND_MESSAGE, self.send_message),
            (Callback.SET_STATUS, self.handle_command),
            (Callback.GET_STATUS, self.handle_command),
            (Callback.CHAT_LIST, self.handle_command),
            (Callback.CHAT_JOIN, self.handle_command),
            (Callback.CHAT_PART, self.handle_command),
            (Callback.CHAT_SEND, self.chat_send),
            (Callback.CHAT_USERS, self.handle_command),
            (Callback.CHAT_INVITE, self.handle_command),
        ]
        self.based.set_callbacks(callbacks)
        # start based; returns when the based server stops
        await self.based.start()

    async def handle_command(self, account: Optional["Account"], cmd: Callback,
                             params: Tuple) -> str:
        """
        add commands to the command queue of the account/client

        Returns an empty string; accounts without an active connection
        are silently ignored.
        """
        assert account
        try:
            client = self.connections[account.aid]
        except KeyError:
            # no active connection
            return ""
        await client.handle_command(cmd, params)
        return ""

    async def send_message(self, account: Optional["Account"], cmd: Callback,
                           params: Tuple) -> str:
        """
        send a message to a destination on an account

        params is (dest, msg) or (dest, msg, msg_type); msg_type
        defaults to "chat".
        """
        # parse parameters
        if len(params) > 2:
            dest, msg, msg_type = params
        else:
            dest, msg = params
            msg_type = "chat"
        # nuqql sends a html-escaped message; construct "plain-text" version
        # and xhtml version using nuqql's message and use them as message body
        # later
        html_msg = f'<body xmlns="http://www.w3.org/1999/xhtml">{msg}</body>'
        msg = html.unescape(msg)
        msg = "\n".join(re.split("<br/>", msg, flags=re.IGNORECASE))
        # send message
        await self.handle_command(account, cmd, (unescape_name(dest), msg,
                                                 html_msg, msg_type))
        return ""

    async def chat_send(self, account: Optional["Account"], _cmd: Callback,
                        params: Tuple) -> str:
        """
        Send message to chat on account
        """
        chat, msg = params
        # TODO: use cmd to infer msg type in send_message and remove this
        # function?
        return await self.send_message(account, Callback.SEND_MESSAGE,
                                       (chat, msg, "groupchat"))

    async def run_client(self, account: "Account") -> None:
        """
        Run client connection
        """
        # init client connection
        client = BackendClient(account)
        # save client connection in active connections dictionary
        self.connections[account.aid] = client
        # start client; this returns when client is stopped
        await client.start()

    async def add_account(self, account: Optional["Account"], _cmd: Callback,
                          _params: Tuple) -> str:
        """
        Add a new account (from based) and run a new client thread for it
        """
        # only handle matrix accounts
        assert account
        if account.type != "matrix":
            return ""
        # create and start client
        await self.run_client(account)
        return ""

    async def del_account(self, account: Optional["Account"], _cmd: Callback,
                          _params: Tuple) -> str:
        """
        Delete an existing account (in based) and
        stop matrix client thread for it
        """
        # let client clean up
        assert account
        client = self.connections[account.aid]
        await client.del_account()
        # cleanup
        del self.connections[account.aid]
        return ""

    @staticmethod
    async def _help_account_add(_account: Optional["Account"],
                                _cmd: Callback, _params: Tuple) -> str:
        """
        Handle account add help event
        """
        add_help = Message.info("You do not have any accounts configured.")
        add_help += Message.info("You can add a new matrix account with the "
                                 "following command: "
                                 "account add matrix <username>@<homeserver> "
                                 "<password>")
        add_help += Message.info("Example: account add matrix "
                                 "dummy@matrix.org MyPassword")
        return add_help

    async def _help_welcome(self, _account: Optional["Account"],
                            _cmd: Callback, _params: Tuple) -> str:
        """
        Handle welcome help message event
        """
        welcome = Message.info(f"Welcome to nuqql-matrixd v{VERSION}!")
        welcome += Message.info("Enter \"help\" for a list of available "
                                "commands and their help texts")
        if self.based.config.get_push_accounts():
            welcome += Message.info("Listing your accounts:")
        return welcome
| 31.857868 | 79 | 0.578235 |
import html
import re
from typing import TYPE_CHECKING, Dict, Optional, Tuple
from nuqql_based.based import Based
from nuqql_based.callback import Callback
from nuqql_based.message import Message
from nuqql_matrixd.client import BackendClient
from nuqql_matrixd.matrix import unescape_name
if TYPE_CHECKING:
from nuqql_based.based import CallbackList
from nuqql_based.account import Account
VERSION = "0.5.0"
class BackendServer:
    """Backend server managing BackendClients for IM-network connections."""

    def __init__(self) -> None:
        # account id -> active BackendClient connection
        self.connections: Dict[int, BackendClient] = {}
        self.based = Based("matrixd", VERSION)

    async def start(self) -> None:
        """Register callbacks on based and start the server loop."""
        callbacks: "CallbackList" = [
            (Callback.HELP_WELCOME, self._help_welcome),
            (Callback.HELP_ACCOUNT_ADD, self._help_account_add),
            (Callback.ADD_ACCOUNT, self.add_account),
            (Callback.DEL_ACCOUNT, self.del_account),
            (Callback.GET_BUDDIES, self.handle_command),
            (Callback.SEND_MESSAGE, self.send_message),
            (Callback.SET_STATUS, self.handle_command),
            (Callback.GET_STATUS, self.handle_command),
            (Callback.CHAT_LIST, self.handle_command),
            (Callback.CHAT_JOIN, self.handle_command),
            (Callback.CHAT_PART, self.handle_command),
            (Callback.CHAT_SEND, self.chat_send),
            (Callback.CHAT_USERS, self.handle_command),
            (Callback.CHAT_INVITE, self.handle_command),
        ]
        self.based.set_callbacks(callbacks)
        await self.based.start()

    async def handle_command(self, account: Optional["Account"], cmd: Callback,
                             params: Tuple) -> str:
        """Forward a command to the account's client; ignore accounts
        without an active connection."""
        assert account
        try:
            client = self.connections[account.aid]
        except KeyError:
            # no active connection for this account
            return ""
        await client.handle_command(cmd, params)
        return ""

    async def send_message(self, account: Optional["Account"], cmd: Callback,
                           params: Tuple) -> str:
        """Send a message (plain + xhtml body) to a destination."""
        if len(params) > 2:
            dest, msg, msg_type = params
        else:
            dest, msg = params
            msg_type = "chat"
        # nuqql delivers html-escaped text; build the xhtml body first,
        # then unescape and convert <br/> to newlines for the plain body
        # later
        html_msg = f'<body xmlns="http://www.w3.org/1999/xhtml">{msg}</body>'
        msg = html.unescape(msg)
        msg = "\n".join(re.split("<br/>", msg, flags=re.IGNORECASE))
        # send message
        await self.handle_command(account, cmd, (unescape_name(dest), msg,
                                                 html_msg, msg_type))
        return ""

    async def chat_send(self, account: Optional["Account"], _cmd: Callback,
                        params: Tuple) -> str:
        """Send a group-chat message via send_message."""
        chat, msg = params
        # TODO: use cmd to infer msg type in send_message and remove this
        # function?
        return await self.send_message(account, Callback.SEND_MESSAGE,
                                       (chat, msg, "groupchat"))

    async def run_client(self, account: "Account") -> None:
        """Create, register and run a client for the account."""
        # init client connection
        client = BackendClient(account)
        # save client connection in active connections dictionary
        self.connections[account.aid] = client
        # start client; this returns when client is stopped
        await client.start()

    async def add_account(self, account: Optional["Account"], _cmd: Callback,
                          _params: Tuple) -> str:
        """Start a client for a newly added matrix account."""
        # only handle matrix accounts
        assert account
        if account.type != "matrix":
            return ""
        # create and start client
        await self.run_client(account)
        return ""

    async def del_account(self, account: Optional["Account"], _cmd: Callback,
                          _params: Tuple) -> str:
        """Stop and remove the client of a deleted account."""
        # let client clean up
        assert account
        client = self.connections[account.aid]
        await client.del_account()
        # cleanup
        del self.connections[account.aid]
        return ""

    @staticmethod
    async def _help_account_add(_account: Optional["Account"],
                                _cmd: Callback, _params: Tuple) -> str:
        """Build the "how to add an account" help message."""
        add_help = Message.info("You do not have any accounts configured.")
        add_help += Message.info("You can add a new matrix account with the "
                                 "following command: "
                                 "account add matrix <username>@<homeserver> "
                                 "<password>")
        add_help += Message.info("Example: account add matrix "
                                 "dummy@matrix.org MyPassword")
        return add_help

    async def _help_welcome(self, _account: Optional["Account"],
                            _cmd: Callback, _params: Tuple) -> str:
        """Build the welcome/help banner message."""
        welcome = Message.info(f"Welcome to nuqql-matrixd v{VERSION}!")
        welcome += Message.info("Enter \"help\" for a list of available "
                                "commands and their help texts")
        if self.based.config.get_push_accounts():
            welcome += Message.info("Listing your accounts:")
        return welcome
| true | true |
1c323c889b5ddf67e0a8df74329016f418433b1a | 45,065 | py | Python | api_gateway/models.py | navigateconsulting/va-grievance-redressal | b5a6f28fa0bbaa2c10eb6e6cb553a7407e1027d4 | [
"Apache-2.0"
] | null | null | null | api_gateway/models.py | navigateconsulting/va-grievance-redressal | b5a6f28fa0bbaa2c10eb6e6cb553a7407e1027d4 | [
"Apache-2.0"
] | 1 | 2021-03-10T04:00:41.000Z | 2021-03-10T04:00:41.000Z | api_gateway/models.py | navigateconsulting/va-grievance-redressal | b5a6f28fa0bbaa2c10eb6e6cb553a7407e1027d4 | [
"Apache-2.0"
] | null | null | null | from bson.json_util import dumps
import json
from bson.objectid import ObjectId
from database import ConDatabase
from config import CONFIG
'''motor Mongo Db connection '''
db = ConDatabase.connect()
# noinspection PyMethodMayBeStatic
class RasaConversations:
    """Read-only access to stored Rasa tracker conversations."""

    async def get_conversations(self, sender_id):
        """Return the tracker document for *sender_id* as plain JSON."""
        print("Pulling tracker data for a conversation")
        document = await db.conversations.find_one({"sender_id": sender_id})
        # bson dumps/loads round-trip converts ObjectId etc. to JSON-safe form.
        return json.loads(dumps(document))
# noinspection PyMethodMayBeStatic
class RefreshDb:
    """Drops all collections and re-seeds them from the JSON seed files."""

    @staticmethod
    def _load_seed(path):
        # One seed file -> parsed JSON list of documents.
        with open(path) as json_file:
            return json.load(json_file)

    async def refresh_db(self):
        """Reset the database to the bundled seed data.

        Clears every collection, re-inserts the seed documents and
        re-links intents/responses/stories (project + domain) and
        entities (project only) to the freshly generated ids.

        Returns:
            "Success" once seeding completes.
        """
        print('received request to refresh database')
        # Setting source data path
        seed_data_path = CONFIG.get('api_gateway', 'SEED_DATA_PATH')

        # Cleaning up collections (previously eight duplicated calls).
        for collection in (db.entities, db.projects, db.domains, db.intents,
                           db.responses, db.stories, db.conversations,
                           db.actions):
            await collection.delete_many({})

        # Seed projects and pick up the generated project id.
        await db.projects.insert_many(self._load_seed(seed_data_path + 'projects.json'))
        project = await db.projects.find_one({})
        project_id = project.get('_id')
        print("project ID {}".format(project_id))

        # Seed domains, link them to the project and capture a domain id.
        await db.domains.insert_many(self._load_seed(seed_data_path + 'domains.json'))
        await db.domains.update_many({}, {'$set': {'project_id': str(project_id)}})
        domain = await db.domains.find_one({})
        domain_id = str(domain.get('_id'))

        # Collections linked to both the project and the domain.
        for name in ('intents', 'responses', 'stories'):
            collection = db[name]
            await collection.insert_many(self._load_seed(seed_data_path + name + '.json'))
            await collection.update_many({}, {'$set': {'project_id': str(project_id),
                                                       'domain_id': domain_id}})

        # Entities are linked to the project only.
        await db.entities.insert_many(self._load_seed(seed_data_path + 'entities.json'))
        await db.entities.update_many({}, {'$set': {'project_id': str(project_id)}})

        # Actions are global — no linkage needed.
        await db.actions.insert_many(self._load_seed(seed_data_path + 'actions.json'))
        return "Success"
# noinspection PyMethodMayBeStatic
class ProjectsModel:
    """CRUD and lifecycle operations for chatbot projects."""

    def __init__(self):
        pass

    async def get_projects(self):
        """Return all projects as a JSON-safe list."""
        cursor = db.projects.find()
        result = await cursor.to_list(length=1000)
        print("Projects sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def create_projects(self, record):
        """Create a project unless one with the same name exists."""
        json_record = json.loads(json.dumps(record))
        # Validation to check if project already exists
        val_res = await db.projects.find_one({"project_name": json_record['project_name']})
        if val_res is not None:
            print('Project already exists')
            return {"status": "Error", "message": "Project already exists"}
        else:
            result = await db.projects.insert_one(json_record)
            print("project created {}".format(result.inserted_id))
            return {"status": "Success", "message": "Project Created with ID {}".format(result.inserted_id)}

    async def delete_project(self, object_id):
        """Delete a project and cascade-delete all dependent documents."""
        query = {"_id": ObjectId("{}".format(object_id))}
        # Delete Domains Intents , Entities , Stories , Responses
        result = await db.domains.delete_many({"project_id": object_id})
        print("Domains Deleted - count {}".format(result))
        result = await db.intents.delete_many({"project_id": object_id})
        print("Intents Deleted - count {}".format(result))
        result = await db.entities.delete_many({"project_id": object_id})
        print("Entities Deleted - count {}".format(result))
        result = await db.stories.delete_many({"project_id": object_id})
        print("Stories Deleted - count {}".format(result))
        result = await db.responses.delete_many({"project_id": object_id})
        print("Responses Deleted - count {}".format(result))
        # Delete Project
        result = await db.projects.delete_one(query)
        print("Project Deleted count {}".format(result))
        return {"status": "Success", "message": "Project Deleted Successfully"}

    async def update_project(self, record):
        """Update a project's description (name changes are not supported)."""
        json_record = json.loads(json.dumps(record))
        # NOTE(review): the block below is intentionally disabled dead
        # code (a former duplicate-name check); consider removing it.
        #val_res = await db.projects.find_one({"project_name": json_record['project_name']})
        '''
        if val_res is not None:
            print('Project already exists')
            return {"status": "Error", "message": "Project name already exists"}
        else:
            query = {"_id": ObjectId("{}".format(json_record['object_id']))}
            update_field = {"$set": {"project_description": json_record['project_description']
                                     }}
            result = await db.projects.update_one(query, update_field)
            print("Project Updated , rows modified {}".format(result))
            return {"status": "Success", "message": "Project details updated successfully "}
        '''
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"project_description": json_record['project_description']
                                 }}
        result = await db.projects.update_one(query, update_field)
        print("Project Updated , rows modified {}".format(result))
        return {"status": "Success", "message": "Project details updated successfully "}

    async def update_project_model(self, record):
        """Publish a model: archive any currently published project and
        set this project's model name and state."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"model_name": json_record['model_name'],
                                 "state": json_record['state']
                                 }}
        # Only one project may be Published at a time.
        res_archived = await db.projects.update_many({"state": "Published"}, {"$set": {"state": "Archived"}})
        result = await db.projects.update_one(query, update_field)
        print("Projects set to Archived state {}".format(res_archived))
        print("Project Updated , rows modified {}".format(result))
        return {"status": "Success", "message": "Model Published "}

    async def copy_project(self, record):
        """Deep-copy a project: entities, domains and (per domain)
        intents, responses and stories.

        NOTE(review): if the source project has no domains,
        `source_domain_id` below is referenced before assignment
        (NameError); also the per-domain copies rely on loop-carried
        `source_domain_id`/`new_domain` — confirm the intended nesting.
        """
        json_record = json.loads(json.dumps(record))
        # check if the project name exists
        val_res = await db.projects.find_one({"project_name": json_record['project_name']})
        if val_res is not None:
            print('Project already exists')
            return {"status": "Error", "message": "Project already exists"}
        else:
            # get source project ID
            source_project = await db.projects.find_one({"project_name": json_record['source']})
            source_project_id = source_project.get('_id')
            print("Source project ID {}".format(source_project_id))
            # Create Project
            new_project = await db.projects.insert_one(json_record)
            print("project created {}".format(new_project.inserted_id))
            # Copy Entities
            entities_cursor = db.entities.find({"project_id": str(source_project_id)})
            for entity in await entities_cursor.to_list(length=100):
                del entity['_id']
                entity['project_id'] = "{}".format(new_project.inserted_id)
                new_entity = await db.entities.insert_one(entity)
                print("new entity inserted with id {}".format(new_entity.inserted_id))
            # Copy domains
            domains_cursor = db.domains.find({"project_id": str(source_project_id)})
            for domain in await domains_cursor.to_list(length=100):
                source_domain_id = domain.get('_id')
                del domain['_id']
                domain['project_id'] = "{}".format(new_project.inserted_id)
                new_domain = await db.domains.insert_one(domain)
                print("new domain inserted with id {}".format(new_domain.inserted_id))
            # Copy Intents
            intents_cursor = db.intents.find({"project_id": str(source_project_id), "domain_id": str(source_domain_id)})
            for intents in await intents_cursor.to_list(length=100):
                del intents['_id']
                intents['project_id'] = "{}".format(new_project.inserted_id)
                intents['domain_id'] = "{}".format(new_domain.inserted_id)
                new_intents = await db.intents.insert_one(intents)
                print("new intents inserted with id {}".format(new_intents.inserted_id))
            # Copy Responses
            responses_cursor = db.responses.find({"project_id": str(source_project_id), "domain_id": str(source_domain_id)})
            for response in await responses_cursor.to_list(length=100):
                del response['_id']
                response['project_id'] = "{}".format(new_project.inserted_id)
                response['domain_id'] = "{}".format(new_domain.inserted_id)
                new_responses = await db.responses.insert_one(response)
                print("new response inserted with id {}".format(new_responses.inserted_id))
            # Copy Stories
            stories_cursor = db.stories.find({"project_id": str(source_project_id), "domain_id": str(source_domain_id)})
            for story in await stories_cursor.to_list(length=100):
                del story['_id']
                story['project_id'] = "{}".format(new_project.inserted_id)
                story['domain_id'] = "{}".format(new_domain.inserted_id)
                new_story = await db.stories.insert_one(story)
                print("new story inserted with id {}".format(new_story.inserted_id))
            return {"status": "Success", "message": "Project Copied ID {}".format(new_project.inserted_id)}
# noinspection PyMethodMayBeStatic
class DomainsModel:
    """CRUD operations for domains within a project."""

    def __init__(self):
        pass

    async def get_domains(self, project_id):
        """Return all domains belonging to *project_id* as JSON-safe dicts."""
        query = {"project_id": project_id}
        cursor = db.domains.find(query)
        result = await cursor.to_list(length=1000)
        print("Domains sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def create_domain(self, record):
        """Create a domain unless the name is already used in the project.

        Returns:
            (message dict, refreshed domain list or None on error)
        """
        json_record = json.loads(json.dumps(record))
        insert_record = {"project_id": json_record['project_id'], "domain_name": json_record['domain_name'],
                         "domain_description": json_record['domain_description']}
        # Check if domain exists already
        val_res = await db.domains.find_one({"project_id": json_record['project_id'],
                                             "domain_name": json_record['domain_name']})
        if val_res is not None:
            print('Domain already exists')
            return {"status": "Error", "message": "Domain already exists"}, None
        insert_result = await db.domains.insert_one(json.loads(json.dumps(insert_record)))
        print("Domain created with ID {}".format(insert_result.inserted_id))
        domains_list = await self.get_domains(json_record['project_id'])
        return {"status": "Success", "message": "Domain created successfully"}, domains_list

    async def delete_domain(self, record):
        """Delete a domain and cascade-delete its intents/stories/responses."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        # Cascade-delete dependent documents first.
        result = await db.intents.delete_many({"domain_id": json_record['object_id']})
        print("Intents Deleted - count {}".format(result))
        result = await db.stories.delete_many({"domain_id": json_record['object_id']})
        print("Stories Deleted - count {}".format(result))
        result = await db.responses.delete_many({"domain_id": json_record['object_id']})
        print("Responses Deleted - count {}".format(result))
        delete_record = await db.domains.delete_one(query)
        print("Domain Deleted count {}".format(delete_record))
        domains_list = await self.get_domains(json_record['project_id'])
        return {"status": "Success", "message": "Domain Deleted Successfully"}, domains_list

    async def update_domain(self, record):
        """Rename / re-describe a domain, rejecting duplicate names.

        Bug fix: the previous duplicate check compared the found
        document's name to the requested name — always equal because
        the lookup queried by that very name — so the "already exists"
        branch was unreachable. We now compare document ids instead.
        """
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"domain_name": json_record['domain_name'],
                                 "domain_description": json_record['domain_description']}}
        # Look for any domain in the project that already has the new name.
        val_res = await db.domains.find_one({"project_id": json_record['project_id'],
                                             "domain_name": json_record['domain_name']})
        if val_res is None or str(val_res['_id']) == json_record['object_id']:
            # Name unused, or it belongs to this very domain
            # (description-only update) — safe to proceed.
            update_record = await db.domains.update_one(query, update_field)
            print("Domain Updated , rows modified {}".format(update_record))
            domains_list = await self.get_domains(json_record['project_id'])
            return {"status": "Success", "message": "Domain updated successfully "}, domains_list
        print('Domain already exists')
        return {"status": "Error", "message": "Domain already exists"}, None
# noinspection PyMethodMayBeStatic
class IntentsModel:
def __init__(self):
    # No per-instance state; methods operate on the module-level `db`.
    pass
async def get_intents(self, record):
    """Return intents matching *record* (e.g. a project_id/domain_id
    filter), projected to name/description fields only."""
    json_record = json.loads(json.dumps(record))
    cursor = db.intents.find(json_record, {"project_id": 1, "domain_id": 1, "intent_name": 1, "intent_description": 1})
    result = await cursor.to_list(length=1000)
    json_result = json.loads(dumps(result))
    print("Intents sent {}".format(json_result))
    return json_result
async def create_intent(self, record):
    """Create an intent unless the name is already used in the project.

    Returns:
        (message dict, refreshed intent list or None on error)
    """
    json_record = json.loads(json.dumps(record))
    insert_record = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id'],
                     "intent_name": json_record['intent_name'],
                     "intent_description": json_record['intent_description'], "text_entities": []}
    # Uniqueness is checked project-wide (domain filter intentionally disabled).
    val_res = await db.intents.find_one({"project_id": json_record['project_id'],
                                         #"domain_id": json_record['domain_id'],
                                         "intent_name": json_record['intent_name']})
    if val_res is not None:
        print('Intent already exists')
        return {"status": "Error", "message": "Intent already exists"}, None
    else:
        result = await db.intents.insert_one(json.loads(json.dumps(insert_record)))
        message = {"status": "Success", "message": "Intent created with ID {}".format(result.inserted_id)}
        get_intents = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        intents_list = await self.get_intents(get_intents)
        return message, intents_list
async def delete_intent(self, record):
json_record = json.loads(json.dumps(record))
query = {"_id": ObjectId("{}".format(json_record['object_id']))}
# Query to check intent - {"story": {$elemMatch: {"key": "greet" }}}
# check if intent exists in any story
intent_detail = await db.intents.find_one(query)
exists = await db.stories.find_one({"story": {"$elemMatch": {"key": intent_detail['intent_name']}}})
if exists is None:
result = await db.intents.delete_one(query)
print("Intent deleted successfully {}".format(result))
message = {"status": "Success", "message": "Intent deleted successfully "}
get_intents = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
intents_list = await self.get_intents(get_intents)
return message, intents_list
else:
message = {"status": "Error", "message": "Intent is used in a story cannot delete this intent"}
return message, None
async def update_intent(self, record):
json_record = json.loads(json.dumps(record))
query = {"_id": ObjectId("{}".format(json_record['object_id']))}
update_field = {"$set": {"intent_name": json_record['intent_name'],
"intent_description": json_record['intent_description']}}
# Check if intent already exists
val_res = await db.intents.find_one({"project_id": json_record['project_id'],
#"domain_id": json_record['domain_id'],
"intent_name": json_record['intent_name']})
if val_res is None or val_res['intent_name'] == json_record['intent_name']:
update_record = await db.intents.update_one(query, update_field)
print("Intent Updated , rows modified {}".format(update_record))
get_intents = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
intents_list = await self.get_intents(get_intents)
return {"status": "Success", "message": "Intent Updated Successfully"}, intents_list
else:
return {"status": "Error", "message": "Intent Name already exists"}, None
async def get_intent_details(self, data):
json_record = json.loads(json.dumps(data))
query = {"_id": ObjectId("{}".format(json_record['object_id']))}
result = await db.intents.find_one(query)
print("Intent Details sent {}".format(json.loads(dumps(result))))
return json.loads(dumps(result))
async def insert_intent_detail(self, data):
# Data format - No check for Intent already exists
# {"object_id":"", "text":"I am in india ","entities":[{"start":8,"end":13,"value":"india","entity":"timezone"}]}
json_record = json.loads(json.dumps(data))
query = {"_id": ObjectId("{}".format(json_record['object_id']))}
object_id = json_record['object_id']
del json_record['object_id']
result = await db.intents.update_one(query, {"$addToSet": {"text_entities": json_record}})
print("Inserted new row in Intent {}".format(result))
intent_detail = await self.get_intent_details({"object_id": object_id})
print("Result of Intent Addition {}".format(result.modified_count))
if result.modified_count == 1:
return {"status": "Success", "message": "Intent text added "}, intent_detail
else:
return {"status": "Error", "message": "Intent already exists "}, intent_detail
async def update_intent_detail(self, data):
json_record = json.loads(json.dumps(data))
object_id = json_record['object_id']
index = json_record['doc_index']
del json_record['object_id']
del json_record['doc_index']
query = {"_id": ObjectId("{}".format(object_id))}
result = await db.intents.update_one(query, {"$set": {"text_entities."+index: json_record}})
print("Record updated {}".format(result))
intent_detail = await self.get_intent_details({"object_id": object_id})
return {"status": "Success", "message": "Intent Updated successfully"}, intent_detail
async def delete_intent_detail(self, data):
# {"object_id": "", "text":"I am in india ","entities":[{"start":8,"end":13,"value":"india","entity":"timezone"}] }
json_record = json.loads(json.dumps(data))
object_id = json_record['object_id']
del json_record['object_id']
intent_detail = await self.get_intent_details({"object_id": object_id})
print("Intent Details count {}".format(intent_detail['text_entities'][0]))
try:
res = intent_detail['text_entities'][1]
except IndexError:
return {"status": "Error", "message": "Atleast one record should be present for an Intent"}, intent_detail
query = {"_id": ObjectId("{}".format(object_id))}
result = await db.intents.update_one(query, {"$pull": {"text_entities": json_record}})
print("Removed row from Intent {}".format(result))
intent_detail = await self.get_intent_details({"object_id": object_id})
return {"status": "Success", "message": "Intent text Removed "}, intent_detail
# noinspection PyMethodMayBeStatic
class ResponseModel:
    """CRUD operations for response documents and their text variants."""

    def __init__(self):
        pass

    async def get_responses(self, record):
        """Return responses matching the filter *record* (projection omits text_entities)."""
        json_record = json.loads(json.dumps(record))
        cursor = db.responses.find(json_record, {"project_id": 1, "domain_id": 1, "response_name": 1, "response_description": 1})
        result = await cursor.to_list(length=1000)
        print("Responses sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def create_response(self, record):
        """Create a response unless one with the same name exists in the project."""
        json_record = json.loads(json.dumps(record))
        insert_record = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id'],
                         "response_name": json_record['response_name'],
                         "response_description": json_record['response_description'], "text_entities": []}
        # Uniqueness is enforced per project (domain deliberately not part of the check)
        val_res = await db.responses.find_one({"project_id": json_record['project_id'],
                                               "response_name": json_record['response_name']})
        if val_res is not None:
            print('Response already exists')
            return {"status": "Error", "message": "Response already exists"}, None
        else:
            result = await db.responses.insert_one(json.loads(json.dumps(insert_record)))
            print("Response created with ID {}".format(result.inserted_id))
            get_responses = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
            responses_list = await self.get_responses(get_responses)
            return {"status": "Success", "message": "Response created successfully"}, responses_list

    async def delete_response(self, record):
        """Delete a response only if no story references it by name."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        # A response that appears as a step key in any story must not be deleted
        response_detail = await db.responses.find_one(query)
        exists = await db.stories.find_one({"story": {"$elemMatch": {"key": response_detail['response_name']}}})
        if exists is None:
            result = await db.responses.delete_one(query)
            print("Response Deleted count {}".format(result))
            get_responses = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
            responses_list = await self.get_responses(get_responses)
            return {"status": "Success", "message": "Response Deleted successfully"}, responses_list
        else:
            return {"status": "Error", "message": "Response exists in story cannot delete response"}, None

    async def update_response(self, record):
        """Rename / re-describe a response, rejecting names used by another response.

        Bug fix: the old duplicate check compared ``val_res['response_name']``
        against the requested name, but the lookup already filtered on that
        name, so the "already exists" branch was unreachable.  The clash is
        now detected by comparing document ids.
        """
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"response_name": json_record['response_name'],
                                 "response_description": json_record['response_description']}}
        # Does any response in the project already carry the requested name?
        val_res = await db.responses.find_one({"project_id": json_record['project_id'],
                                               "response_name": json_record['response_name']})
        if val_res is None or str(val_res['_id']) == json_record['object_id']:
            update_record = await db.responses.update_one(query, update_field)
            print("Response Updated , rows modified {}".format(update_record))
            get_responses = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
            responses_list = await self.get_responses(get_responses)
            return {"status": "Success", "message": "Response Updated successfully"}, responses_list
        else:
            return {"status": "Error", "message": "Response Name already exists"}, None

    async def get_response_details(self, data):
        """Return the full response document (including text_entities) as plain JSON."""
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        result = await db.responses.find_one(query)
        print("Response Details sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def insert_response_detail(self, data):
        """Append one text variant to a response; ``$addToSet`` prevents duplicates."""
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        object_id = json_record['object_id']
        del json_record['object_id']
        # modified_count == 0 means the identical variant was already present
        result = await db.responses.update_one(query, {"$addToSet": {"text_entities": json_record['text_entities']}})
        print("Inserted new row in Intent {}".format(result.modified_count))
        intent_detail = await self.get_response_details({"object_id": object_id})
        if result.modified_count == 1:
            return {"status": "Success", "message": "Response added "}, intent_detail
        else:
            return {"status": "Error", "message": "Response Already exists "}, intent_detail

    async def delete_response_detail(self, data):
        """Remove one text variant, keeping at least one per response."""
        json_record = json.loads(json.dumps(data))
        object_id = json_record['object_id']
        del json_record['object_id']
        response_detail = await self.get_response_details({"object_id": object_id})
        # Refuse to delete the last remaining variant (guard replaces the old
        # try/IndexError probe of text_entities[1])
        if len(response_detail['text_entities']) < 2:
            return {"status": "Error", "message": "Atleast one record should be present for an Response"}, response_detail
        query = {"_id": ObjectId("{}".format(object_id))}
        result = await db.responses.update_one(query, {"$pull": {"text_entities": json_record['text_entities']}})
        print("Removed row from Intent {}".format(result))
        response_detail = await self.get_response_details({"object_id": object_id})
        return {"status": "Success", "message": "Response text Removed "}, response_detail
# noinspection PyMethodMayBeStatic
class StoryModel:
    """CRUD operations for stories and their ordered step lists.

    A story document holds project_id, domain_id, story_name,
    story_description and a ``story`` array of step dicts
    ({"key": ..., "value": ..., "type": "intent"|..., "entities": [...]}).
    """

    def __init__(self):
        pass

    async def get_stories(self, record):
        """Return stories matching the filter *record* (projection omits the steps)."""
        json_record = json.loads(json.dumps(record))
        cursor = db.stories.find(json_record, {"project_id": 1, "domain_id": 1, "story_name": 1, "story_description": 1})
        result = await cursor.to_list(length=1000)
        print("Stories sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def create_story(self, record):
        """Create a story unless the name already exists in the same project/domain."""
        json_record = json.loads(json.dumps(record))
        insert_record = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id'],
                         "story_name": json_record['story_name'],
                         "story_description": json_record['story_description'], "story": []}
        val_res = await db.stories.find_one({"project_id": json_record['project_id'],
                                             "domain_id": json_record['domain_id'],
                                             "story_name": json_record['story_name']})
        if val_res is not None:
            print('Story already exists')
            return {"status": "Error", "message": "Story already exists"}, None
        else:
            result = await db.stories.insert_one(json.loads(json.dumps(insert_record)))
            print("Story created with ID {}".format(result.inserted_id))
            get_stories = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
            stories_list = await self.get_stories(get_stories)
            return {"status": "Success", "message": "Story created successfully "}, stories_list

    async def delete_story(self, record):
        """Delete a story by ObjectId and return the refreshed list."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        result = await db.stories.delete_one(query)
        print("Story Deleted count {}".format(result))
        get_stories = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        stories_list = await self.get_stories(get_stories)
        return {"status": "Success", "message": "Story Deleted successfully"}, stories_list

    async def update_story(self, record):
        """Rename / re-describe a story, rejecting names used by another story.

        Bug fix: the old duplicate check compared ``val_res['story_name']``
        against the requested name, but the lookup already filtered on that
        name, so the "already exists" branch was unreachable.  The clash is
        now detected by comparing document ids.
        """
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"story_name": json_record['story_name'],
                                 "story_description": json_record['story_description']}}
        # Does any story in this project/domain already carry the requested name?
        val_res = await db.stories.find_one({"project_id": json_record['project_id'],
                                             "domain_id": json_record['domain_id'],
                                             "story_name": json_record['story_name']})
        if val_res is None or str(val_res['_id']) == json_record['object_id']:
            update_record = await db.stories.update_one(query, update_field)
            print("Story Updated , rows modified {}".format(update_record))
            get_stories = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
            stories_list = await self.get_stories(get_stories)
            return {"status": "Success", "message": "Story Updated successfully "}, stories_list
        else:
            return {"status": "Error", "message": "Story Name already exists"}, None

    async def get_only_story_details(self, data):
        """Return the raw story document (not JSON-serialized, unlike get_story_details)."""
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        result = await db.stories.find_one(query)
        print("Story Details sent {}".format(json.loads(dumps(result))))
        return result

    async def get_story_details(self, data):
        """Return the story plus the project's intents, responses and global actions.

        The extra lists accompany every story-details payload so UI clients
        viewing the story page always receive a consistent picture.
        Intents and responses are looked up project-wide (not per-domain).
        """
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        result = await db.stories.find_one(query)
        print("Story Details sent {}".format(json.loads(dumps(result))))
        # Intents for the whole project
        cursor = db.intents.find({"project_id": json_record['project_id']})
        result_intents = await cursor.to_list(length=1000)
        intents_list = json.loads(dumps(result_intents))
        # Responses for the whole project
        cursor = db.responses.find({"project_id": json_record['project_id']})
        result_response = await cursor.to_list(length=1000)
        response_list = json.loads(dumps(result_response))
        # Actions are global (not project-scoped)
        cursor = db.actions.find({})
        result_action = await cursor.to_list(length=1000)
        action_list = json.loads(dumps(result_action))
        return json.loads(dumps(result)), intents_list, response_list, action_list

    async def insert_story_details(self, data):
        """Insert story steps at the requested position via $push + $position.

        Expected shape:
        {"object_id": "", "position": <int>, "story": [<step dict>, ...]}
        """
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        position = json_record['position']
        result = await db.stories.update_one(query, {"$push": {"story": {"$each": json_record['story'],
                                                                         "$position": position}
                                                               }})
        print("Story Details Updated {}".format(result))
        story_details, intents_list, response_list, actions_list = await self.get_story_details({"object_id": json_record['object_id'],
                                                                                                 "project_id": json_record['project_id'],
                                                                                                 "domain_id": json_record['domain_id']})
        return {"status": "Success", "message": "Story created"}, story_details, intents_list, response_list, actions_list

    async def delete_story_detail(self, data):
        """Remove the story step at position ``doc_index``.

        MongoDB cannot $pull by array index, so the element is first $unset
        (leaving a null placeholder) and then $pull-ed out.
        """
        json_record = json.loads(json.dumps(data))
        object_id = json_record['object_id']
        index = json_record['doc_index']
        query = {"_id": ObjectId("{}".format(object_id))}
        await db.stories.update_one(query, {"$unset": {"story."+str(index): 1}})
        result = await db.stories.update_one(query, {"$pull": {"story": None}})
        print("Removed row from Story {}".format(result))
        story_detail, intents_list, response_list, actions_list = await self.get_story_details({"object_id": json_record['object_id'],
                                                                                                "project_id": json_record['project_id'],
                                                                                                "domain_id": json_record['domain_id']})
        return {"status": "Success", "message": "Story element Removed "}, story_detail, intents_list, response_list, actions_list

    async def update_story_detail(self, data):
        """Replace the story step at position ``doc_index``."""
        json_record = json.loads(json.dumps(data))
        object_id = json_record['object_id']
        index = json_record['doc_index']
        query = {"_id": ObjectId("{}".format(object_id))}
        result = await db.stories.update_one(query, {"$set": {"story."+str(index): json_record['story']}})
        print("Record updated {}".format(result))
        story_detail, intents_list, response_list, actions_list = await self.get_story_details({"object_id": json_record['object_id'],
                                                                                                "project_id": json_record['project_id'],
                                                                                                "domain_id": json_record['domain_id']})
        return {"status": "Success", "message": "Story Updated successfully"}, story_detail, intents_list, response_list, actions_list
# noinspection PyMethodMayBeStatic
class EntityModel:
    """CRUD operations for entity documents (project-scoped)."""

    def __init__(self):
        pass

    async def get_entities(self, record):
        """Return entities matching the filter *record*."""
        json_record = json.loads(json.dumps(record))
        cursor = db.entities.find(json_record)
        result = await cursor.to_list(length=1000)
        print("Entities sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def create_entity(self, record):
        """Create an entity unless one with the same name exists in the project."""
        json_record = json.loads(json.dumps(record))
        val_res = await db.entities.find_one({"project_id": json_record['project_id'],
                                              "entity_name": json_record['entity_name']})
        if val_res is not None:
            print("Entity Already exists ")
            return {"status": "Error", "message": "Entity Already exists "}, None
        else:
            result = await db.entities.insert_one(json_record)
            print("Entity created with ID {}".format(result.inserted_id))
            get_entities = {"project_id": json_record['project_id']}
            entities_list = await self.get_entities(get_entities)
            return {"status": "Success", "message": "Entity created successfully"}, entities_list

    async def delete_entity(self, record):
        """Delete an entity only if it is not referenced by any intent or response."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        entity_detail = await db.entities.find_one(query)
        # Referenced in an intent training example?
        res = await db.intents.find_one({"text_entities": {"$elemMatch": {"entities.entity": entity_detail['entity_name']}}})
        # Referenced in a response text as a /slot/ placeholder?
        res2 = await db.responses.find_one({"text_entities": "/"+entity_detail['entity_name']+"/"})
        if res is None and res2 is None:
            result = await db.entities.delete_one(query)
            print("Entity Deleted count {}".format(result))
            get_entities = {"project_id": json_record['project_id']}
            entities_list = await self.get_entities(get_entities)
            return {"status": "Success", "message": "Entity deleted successfully"}, entities_list
        elif res is None:
            return {"status": "Error", "message": "Unable to delete entity , its used in an Response"}, None
        else:
            return {"status": "Error", "message": "Unable to delete entity , its used in an Intent"}, None

    async def update_entity(self, record):
        """Update an entity document, located by (project_id, entity_name).

        Bug fix: the old code read ``val_res.get('_id')`` before checking
        whether the lookup returned anything, raising AttributeError when the
        entity did not exist; it now returns an error response instead.  The
        incoming record's own '_id' (if present) is stripped before $set,
        using pop() so a record without '_id' no longer raises KeyError.
        """
        json_record = json.loads(json.dumps(record))
        val_res = await db.entities.find_one({"project_id": json_record['project_id'],
                                              "entity_name": json_record['entity_name']})
        if val_res is None:
            # Previously this path crashed with AttributeError on val_res.get
            return {"status": "Error", "message": "Entity does not exist"}, None
        query = {"_id": ObjectId("{}".format(val_res.get('_id')))}
        # '_id' must never appear inside a $set payload
        json_record.pop('_id', None)
        print("Got value ", json_record)
        update_record = await db.entities.update_one(query, {"$set": json_record})
        print("Entity Updated , rows modified {}".format(update_record.modified_count))
        get_entities = {"project_id": json_record['project_id']}
        entities_list = await self.get_entities(get_entities)
        return {"status": "Success", "message": "Entity updated successfully"}, entities_list
class ValidateData:
    """Pre-training sanity checks for a project's data."""

    def __init__(self):
        # Bug fix: this was previously misspelled ``__int__`` and therefore
        # never ran as the constructor (harmless here, but misleading).
        pass

    async def validate_data(self, project_id):
        """Validate that *project_id* holds the minimum data needed to train.

        Returns an accumulated error string; an empty string means the
        project passed all checks.
        """
        ret_val = ''
        query = {"project_id": project_id}
        # Check for count of Intents in project
        cursor = db.intents.find(query)
        result = await cursor.to_list(length=10)
        print("Count of intents in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Atleast one Intent should be defined in the Project \n"
        # Check for count of Responses in project
        cursor = db.responses.find(query)
        result = await cursor.to_list(length=10)
        print("Count of Responses in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Atleast one Response should be defined in the Project \n"
        # Check for count of Story in project
        cursor = db.stories.find(query)
        result = await cursor.to_list(length=10)
        print("Count of Stories in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Atleast one Story should be defined in the Project \n"
        else:
            # The first story must contain at least one step
            try:
                print("First story from the result {}".format(result[0]['story'][0]))
            except IndexError:
                ret_val = ret_val + "Story {} should have atleast one Intent and Response ".format(result[0]['story_name'])
        # Check for count of Entity in project
        cursor = db.entities.find(query)
        result = await cursor.to_list(length=10)
        print("Count of entities in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Atleast one Entity should be defined in the Project \n"
        # Two-stage-fallback prerequisites: a 'negative' intent ...
        cursor = db.intents.find({"project_id": project_id, "intent_name": "negative"})
        result = await cursor.to_list(length=10)
        print("Count of negative intents in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Intent 'negative' should be defined in the Project \n"
        # ... an 'utter_default' response ...
        cursor = db.responses.find({"project_id": project_id, "response_name": "utter_default"})
        result = await cursor.to_list(length=10)
        print("Count of Responses in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Response default should be defined in the Project \n"
        # ... and an 'utter_ask_rephrase' response.
        cursor = db.responses.find({"project_id": project_id, "response_name": "utter_ask_rephrase"})
        result = await cursor.to_list(length=10)
        print("Count of Responses in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Response ask_rephrase should be defined in the Project \n"
        return ret_val
class CustomActionsModel:
    """Data access for the global custom-actions collection."""

    def __init__(self):
        pass

    async def get_custom_actions(self):
        """Fetch every action document (up to 1000) as plain JSON."""
        cursor = db.actions.find({})
        documents = await cursor.to_list(length=1000)
        serialized = json.loads(dumps(documents))
        print("Custom Actions {}".format(serialized))
        return serialized

    async def create_action(self, record):
        """Insert a new action unless the name is already taken."""
        payload = json.loads(json.dumps(record))
        # Uniqueness is on action_name only (actions are global)
        duplicate = await db.actions.find_one({"action_name": payload['action_name']})
        if duplicate is None:
            inserted = await db.actions.insert_one(payload)
            print("Action created {}".format(inserted.inserted_id))
            return {"status": "Success", "message": "Action Has Been Created"}
        print('Action already exists')
        return {"status": "Error", "message": "Action already exists"}

    async def update_action(self, record):
        """Update an action's description, located by its ObjectId."""
        payload = json.loads(json.dumps(record))
        selector = {"_id": ObjectId("{}".format(payload['object_id']))}
        change = {"$set": {"action_description": payload['action_description']}}
        outcome = await db.actions.update_one(selector, change)
        print("Action Updated , rows modified {}".format(outcome))
        return {"status": "Success", "message": "Action details updated successfully "}

    async def delete_action(self, object_id):
        """Remove one action document by its ObjectId."""
        selector = {"_id": ObjectId("{}".format(object_id))}
        outcome = await db.actions.delete_one(selector)
        print("Action Deleted count {}".format(outcome))
        return {"status": "Success", "message": "Action Deleted Successfully"}
class GrievanceModel:
    """Read access for grievance records."""

    def __init__(self):
        pass

    async def get_grievance(self):
        """Fetch every grievance document (up to 1000) as plain JSON."""
        cursor = db.grievance.find({})
        documents = await cursor.to_list(length=1000)
        serialized = json.loads(dumps(documents))
        print("Grievance {}".format(serialized))
        return serialized
import json
from bson.objectid import ObjectId
from database import ConDatabase
from config import CONFIG
db = ConDatabase.connect()
class RasaConversations:
    """Read access to the Rasa tracker-store conversations collection."""

    async def get_conversations(self, sender_id):
        """Fetch the tracker document for one conversation sender as plain JSON."""
        print("Pulling tracker data for a conversation")
        tracker = await db.conversations.find_one({"sender_id": sender_id})
        return json.loads(dumps(tracker))
class RefreshDb:
    """Wipes all collections and reloads them from the bundled seed files."""

    async def refresh_db(self):
        """Reset every collection and re-seed it from SEED_DATA_PATH.

        Freshly inserted seed documents are rewired to the single new project
        (and its first domain) so cross-references stay valid.
        """
        print('received request to refresh database')
        seed_data_path = CONFIG.get('api_gateway', 'SEED_DATA_PATH')

        def read_seed(filename):
            # Load one JSON seed file from the configured seed directory
            with open(seed_data_path + filename) as handle:
                return json.load(handle)

        # Drop all existing documents first
        for collection in (db.entities, db.projects, db.domains, db.intents,
                           db.responses, db.stories, db.conversations, db.actions):
            await collection.delete_many({})

        await db.projects.insert_many(read_seed('projects.json'))
        project = await db.projects.find_one({})
        project_id = project.get('_id')
        print("project ID {}".format(project_id))

        await db.domains.insert_many(read_seed('domains.json'))
        await db.domains.update_many({}, {'$set': {'project_id': str(project_id)}})
        domain = await db.domains.find_one({})

        await db.intents.insert_many(read_seed('intents.json'))
        await db.intents.update_many({}, {'$set': {'project_id': str(project_id), 'domain_id': str(domain.get('_id'))}})

        await db.entities.insert_many(read_seed('entities.json'))
        await db.entities.update_many({}, {'$set': {'project_id': str(project_id)}})

        await db.responses.insert_many(read_seed('responses.json'))
        await db.responses.update_many({}, {'$set': {'project_id': str(project_id), 'domain_id': str(domain.get('_id'))}})

        await db.stories.insert_many(read_seed('stories.json'))
        await db.stories.update_many({}, {'$set': {'project_id': str(project_id), 'domain_id': str(domain.get('_id'))}})

        await db.actions.insert_many(read_seed('actions.json'))
        return "Success"
class ProjectsModel:
    """CRUD and lifecycle operations (publish, copy) for project documents."""
    def __init__(self):
        pass
    async def get_projects(self):
        """Return every project document (up to 1000) as plain JSON."""
        cursor = db.projects.find()
        result = await cursor.to_list(length=1000)
        print("Projects sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))
    async def create_projects(self, record):
        """Create a project unless one with the same name already exists."""
        json_record = json.loads(json.dumps(record))
        # Uniqueness is on project_name only
        val_res = await db.projects.find_one({"project_name": json_record['project_name']})
        if val_res is not None:
            print('Project already exists')
            return {"status": "Error", "message": "Project already exists"}
        else:
            result = await db.projects.insert_one(json_record)
            print("project created {}".format(result.inserted_id))
            return {"status": "Success", "message": "Project Created with ID {}".format(result.inserted_id)}
    async def delete_project(self, object_id):
        """Delete a project and cascade-delete its domains, intents, entities,
        stories and responses (conversations/actions are not touched here)."""
        query = {"_id": ObjectId("{}".format(object_id))}
        result = await db.domains.delete_many({"project_id": object_id})
        print("Domains Deleted - count {}".format(result))
        result = await db.intents.delete_many({"project_id": object_id})
        print("Intents Deleted - count {}".format(result))
        result = await db.entities.delete_many({"project_id": object_id})
        print("Entities Deleted - count {}".format(result))
        result = await db.stories.delete_many({"project_id": object_id})
        print("Stories Deleted - count {}".format(result))
        result = await db.responses.delete_many({"project_id": object_id})
        print("Responses Deleted - count {}".format(result))
        result = await db.projects.delete_one(query)
        print("Project Deleted count {}".format(result))
        return {"status": "Success", "message": "Project Deleted Successfully"}
    async def update_project(self, record):
        """Update a project's description, located by its ObjectId."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"project_description": json_record['project_description']
                                 }}
        result = await db.projects.update_one(query, update_field)
        print("Project Updated , rows modified {}".format(result))
        return {"status": "Success", "message": "Project details updated successfully "}
    async def update_project_model(self, record):
        """Publish a trained model for one project.

        Any project currently in 'Published' state is moved to 'Archived'
        first, so at most one project is Published at a time.
        """
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"model_name": json_record['model_name'],
                                 "state": json_record['state']
                                 }}
        res_archived = await db.projects.update_many({"state": "Published"}, {"$set": {"state": "Archived"}})
        result = await db.projects.update_one(query, update_field)
        print("Projects set to Archived state {}".format(res_archived))
        print("Project Updated , rows modified {}".format(result))
        return {"status": "Success", "message": "Model Published "}
    async def copy_project(self, record):
        """Deep-copy an existing project (named in record['source']) into a new one.

        Copies entities, then each domain together with its intents,
        responses and stories, rewiring project_id/domain_id references to
        the freshly inserted documents.  NOTE(review): the new project
        document is inserted from *record* as-is, so the 'source' key is
        stored with it — confirm whether that is intended.
        """
        json_record = json.loads(json.dumps(record))
        # Refuse to overwrite an existing project with the target name
        val_res = await db.projects.find_one({"project_name": json_record['project_name']})
        if val_res is not None:
            print('Project already exists')
            return {"status": "Error", "message": "Project already exists"}
        else:
            source_project = await db.projects.find_one({"project_name": json_record['source']})
            source_project_id = source_project.get('_id')
            print("Source project ID {}".format(source_project_id))
            new_project = await db.projects.insert_one(json_record)
            print("project created {}".format(new_project.inserted_id))
            # Entities are project-scoped only: copy them once, outside the domain loop
            entities_cursor = db.entities.find({"project_id": str(source_project_id)})
            for entity in await entities_cursor.to_list(length=100):
                # Drop the source _id so Mongo assigns a fresh one on insert
                del entity['_id']
                entity['project_id'] = "{}".format(new_project.inserted_id)
                new_entity = await db.entities.insert_one(entity)
                print("new entity inserted with id {}".format(new_entity.inserted_id))
            # Copy each domain, then the per-domain collections nested below it
            domains_cursor = db.domains.find({"project_id": str(source_project_id)})
            for domain in await domains_cursor.to_list(length=100):
                source_domain_id = domain.get('_id')
                del domain['_id']
                domain['project_id'] = "{}".format(new_project.inserted_id)
                new_domain = await db.domains.insert_one(domain)
                print("new domain inserted with id {}".format(new_domain.inserted_id))
                intents_cursor = db.intents.find({"project_id": str(source_project_id), "domain_id": str(source_domain_id)})
                for intents in await intents_cursor.to_list(length=100):
                    del intents['_id']
                    intents['project_id'] = "{}".format(new_project.inserted_id)
                    intents['domain_id'] = "{}".format(new_domain.inserted_id)
                    new_intents = await db.intents.insert_one(intents)
                    print("new intents inserted with id {}".format(new_intents.inserted_id))
                responses_cursor = db.responses.find({"project_id": str(source_project_id), "domain_id": str(source_domain_id)})
                for response in await responses_cursor.to_list(length=100):
                    del response['_id']
                    response['project_id'] = "{}".format(new_project.inserted_id)
                    response['domain_id'] = "{}".format(new_domain.inserted_id)
                    new_responses = await db.responses.insert_one(response)
                    print("new response inserted with id {}".format(new_responses.inserted_id))
                stories_cursor = db.stories.find({"project_id": str(source_project_id), "domain_id": str(source_domain_id)})
                for story in await stories_cursor.to_list(length=100):
                    del story['_id']
                    story['project_id'] = "{}".format(new_project.inserted_id)
                    story['domain_id'] = "{}".format(new_domain.inserted_id)
                    new_story = await db.stories.insert_one(story)
                    print("new story inserted with id {}".format(new_story.inserted_id))
            return {"status": "Success", "message": "Project Copied ID {}".format(new_project.inserted_id)}
class DomainsModel:
    """CRUD operations for domain documents within a project."""

    def __init__(self):
        pass

    async def get_domains(self, project_id):
        """Return all domains belonging to *project_id* as plain JSON."""
        query = {"project_id": project_id}
        cursor = db.domains.find(query)
        result = await cursor.to_list(length=1000)
        print("Domains sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def create_domain(self, record):
        """Create a domain unless the name is already used in the project."""
        json_record = json.loads(json.dumps(record))
        insert_record = {"project_id": json_record['project_id'], "domain_name": json_record['domain_name'],
                         "domain_description": json_record['domain_description']}
        val_res = await db.domains.find_one({"project_id": json_record['project_id'],
                                             "domain_name": json_record['domain_name']})
        if val_res is not None:
            print('Domain already exists')
            return {"status": "Error", "message": "Domain already exists"}, None
        else:
            insert_result = await db.domains.insert_one(json.loads(json.dumps(insert_record)))
            print("Domain created with ID {}".format(insert_result.inserted_id))
            domains_list = await self.get_domains(json_record['project_id'])
            return {"status": "Success", "message": "Domain created successfully"}, domains_list

    async def delete_domain(self, record):
        """Delete a domain and cascade-delete its intents, stories and responses."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        result = await db.intents.delete_many({"domain_id": json_record['object_id']})
        print("Intents Deleted - count {}".format(result))
        result = await db.stories.delete_many({"domain_id": json_record['object_id']})
        print("Stories Deleted - count {}".format(result))
        result = await db.responses.delete_many({"domain_id": json_record['object_id']})
        print("Responses Deleted - count {}".format(result))
        delete_record = await db.domains.delete_one(query)
        print("Domain Deleted count {}".format(delete_record))
        domains_list = await self.get_domains(json_record['project_id'])
        return {"status": "Success", "message": "Domain Deleted Successfully"}, domains_list

    async def update_domain(self, record):
        """Rename / re-describe a domain, rejecting names used by another domain.

        Bug fix: the old ``elif val_res['domain_name'] == ...`` compared the
        found document's name against the requested name, but the lookup
        already filtered on that name, so the comparison was always True and
        the "Domain already exists" branch was unreachable — a rename onto
        another domain's name was silently accepted, creating duplicates.
        The clash is now detected by comparing document ids.
        """
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"domain_name": json_record['domain_name'],
                                 "domain_description": json_record['domain_description']}}
        # Does any domain in the project already carry the requested name?
        val_res = await db.domains.find_one({"project_id": json_record['project_id'],
                                             "domain_name": json_record['domain_name']})
        if val_res is None or str(val_res['_id']) == json_record['object_id']:
            update_record = await db.domains.update_one(query, update_field)
            print("Domain Updated , rows modified {}".format(update_record))
            domains_list = await self.get_domains(json_record['project_id'])
            return {"status": "Success", "message": "Domain updated successfully "}, domains_list
        else:
            print('Domain already exists')
            return {"status": "Error", "message": "Domain already exists"}, None
class IntentsModel:
    """CRUD operations for intents stored in the ``db.intents`` collection.

    Each intent document carries a ``text_entities`` array of training
    examples; the ``*_intent_detail`` methods edit that array in place.
    """

    def __init__(self):
        pass

    async def get_intents(self, record):
        """Return intents matching *record* (projection excludes ``text_entities``)."""
        json_record = json.loads(json.dumps(record))
        cursor = db.intents.find(json_record, {"project_id": 1, "domain_id": 1, "intent_name": 1, "intent_description": 1})
        result = await cursor.to_list(length=1000)
        json_result = json.loads(dumps(result))
        print("Intents sent {}".format(json_result))
        return json_result

    async def create_intent(self, record):
        """Create an intent unless one with the same name exists in the project."""
        json_record = json.loads(json.dumps(record))
        insert_record = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id'],
                         "intent_name": json_record['intent_name'],
                         "intent_description": json_record['intent_description'], "text_entities": []}
        val_res = await db.intents.find_one({"project_id": json_record['project_id'],
                                             "intent_name": json_record['intent_name']})
        if val_res is not None:
            print('Intent already exists')
            return {"status": "Error", "message": "Intent already exists"}, None
        result = await db.intents.insert_one(json.loads(json.dumps(insert_record)))
        message = {"status": "Success", "message": "Intent created with ID {}".format(result.inserted_id)}
        get_intents = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        intents_list = await self.get_intents(get_intents)
        return message, intents_list

    async def delete_intent(self, record):
        """Delete an intent, refusing if any story still references its name."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        intent_detail = await db.intents.find_one(query)
        exists = await db.stories.find_one({"story": {"$elemMatch": {"key": intent_detail['intent_name']}}})
        if exists is not None:
            message = {"status": "Error", "message": "Intent is used in a story cannot delete this intent"}
            return message, None
        result = await db.intents.delete_one(query)
        print("Intent deleted successfully {}".format(result))
        message = {"status": "Success", "message": "Intent deleted successfully "}
        get_intents = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        intents_list = await self.get_intents(get_intents)
        return message, intents_list

    async def update_intent(self, record):
        """Rename / re-describe an intent.

        Bug fix: the previous duplicate check queried by the *new* name, so a
        match always satisfied ``val_res['intent_name'] == ...`` and the
        "Name already exists" branch was unreachable.  Now only a *different*
        record owning the name blocks the update.
        """
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"intent_name": json_record['intent_name'],
                                 "intent_description": json_record['intent_description']}}
        val_res = await db.intents.find_one({"project_id": json_record['project_id'],
                                             "intent_name": json_record['intent_name']})
        if val_res is not None and str(val_res['_id']) != json_record['object_id']:
            return {"status": "Error", "message": "Intent Name already exists"}, None
        update_record = await db.intents.update_one(query, update_field)
        print("Intent Updated , rows modified {}".format(update_record))
        get_intents = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        intents_list = await self.get_intents(get_intents)
        return {"status": "Success", "message": "Intent Updated Successfully"}, intents_list

    async def get_intent_details(self, data):
        """Return the full intent document (including ``text_entities``)."""
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        result = await db.intents.find_one(query)
        print("Intent Details sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def insert_intent_detail(self, data):
        """Append a training example; $addToSet keeps duplicates out (modified_count 0)."""
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        object_id = json_record['object_id']
        del json_record['object_id']
        result = await db.intents.update_one(query, {"$addToSet": {"text_entities": json_record}})
        print("Inserted new row in Intent {}".format(result))
        intent_detail = await self.get_intent_details({"object_id": object_id})
        print("Result of Intent Addition {}".format(result.modified_count))
        if result.modified_count == 1:
            return {"status": "Success", "message": "Intent text added "}, intent_detail
        else:
            return {"status": "Error", "message": "Intent already exists "}, intent_detail

    async def update_intent_detail(self, data):
        """Replace the training example at positional index ``doc_index``."""
        json_record = json.loads(json.dumps(data))
        object_id = json_record['object_id']
        index = json_record['doc_index']
        del json_record['object_id']
        del json_record['doc_index']
        query = {"_id": ObjectId("{}".format(object_id))}
        result = await db.intents.update_one(query, {"$set": {"text_entities."+index: json_record}})
        print("Record updated {}".format(result))
        intent_detail = await self.get_intent_details({"object_id": object_id})
        return {"status": "Success", "message": "Intent Updated successfully"}, intent_detail

    async def delete_intent_detail(self, data):
        """Remove a training example, keeping at least one example per intent."""
        json_record = json.loads(json.dumps(data))
        object_id = json_record['object_id']
        del json_record['object_id']
        intent_detail = await self.get_intent_details({"object_id": object_id})
        print("Intent Details count {}".format(intent_detail['text_entities'][0]))
        # EAFP length check: index 1 missing means only one example remains
        try:
            res = intent_detail['text_entities'][1]
        except IndexError:
            return {"status": "Error", "message": "Atleast one record should be present for an Intent"}, intent_detail
        query = {"_id": ObjectId("{}".format(object_id))}
        result = await db.intents.update_one(query, {"$pull": {"text_entities": json_record}})
        print("Removed row from Intent {}".format(result))
        intent_detail = await self.get_intent_details({"object_id": object_id})
        return {"status": "Success", "message": "Intent text Removed "}, intent_detail
class ResponseModel:
    """CRUD operations for bot responses stored in ``db.responses``.

    Each response document carries a ``text_entities`` array of response
    texts; the ``*_response_detail`` methods edit that array in place.
    """

    def __init__(self):
        pass

    async def get_responses(self, record):
        """Return responses matching *record* (projection excludes ``text_entities``)."""
        json_record = json.loads(json.dumps(record))
        cursor = db.responses.find(json_record, {"project_id": 1, "domain_id": 1, "response_name": 1, "response_description": 1})
        result = await cursor.to_list(length=1000)
        print("Responses sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def create_response(self, record):
        """Create a response unless one with the same name exists in the project."""
        json_record = json.loads(json.dumps(record))
        insert_record = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id'],
                         "response_name": json_record['response_name'],
                         "response_description": json_record['response_description'], "text_entities": []}
        val_res = await db.responses.find_one({"project_id": json_record['project_id'],
                                               "response_name": json_record['response_name']})
        if val_res is not None:
            print('Response already exists')
            return {"status": "Error", "message": "Response already exists"}, None
        result = await db.responses.insert_one(json.loads(json.dumps(insert_record)))
        print("Response created with ID {}".format(result.inserted_id))
        get_responses = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        responses_list = await self.get_responses(get_responses)
        return {"status": "Success", "message": "Response created successfully"}, responses_list

    async def delete_response(self, record):
        """Delete a response, refusing if any story still references its name."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        response_detail = await db.responses.find_one(query)
        exists = await db.stories.find_one({"story": {"$elemMatch": {"key": response_detail['response_name']}}})
        if exists is not None:
            return {"status": "Error", "message": "Response exists in story cannot delete response"}, None
        result = await db.responses.delete_one(query)
        print("Response Deleted count {}".format(result))
        get_responses = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        responses_list = await self.get_responses(get_responses)
        return {"status": "Success", "message": "Response Deleted successfully"}, responses_list

    async def update_response(self, record):
        """Rename / re-describe a response.

        Bug fix: the previous duplicate check queried by the *new* name, so a
        match always satisfied ``val_res['response_name'] == ...`` and the
        "Name already exists" branch was unreachable.  Now only a *different*
        record owning the name blocks the update.
        """
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"response_name": json_record['response_name'],
                                 "response_description": json_record['response_description']}}
        val_res = await db.responses.find_one({"project_id": json_record['project_id'],
                                               "response_name": json_record['response_name']})
        if val_res is not None and str(val_res['_id']) != json_record['object_id']:
            return {"status": "Error", "message": "Response Name already exists"}, None
        update_record = await db.responses.update_one(query, update_field)
        print("Response Updated , rows modified {}".format(update_record))
        get_responses = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        responses_list = await self.get_responses(get_responses)
        return {"status": "Success", "message": "Response Updated successfully"}, responses_list

    async def get_response_details(self, data):
        """Return the full response document (including ``text_entities``)."""
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        result = await db.responses.find_one(query)
        print("Response Details sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def insert_response_detail(self, data):
        """Append a response text; $addToSet keeps duplicates out (modified_count 0)."""
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        object_id = json_record['object_id']
        del json_record['object_id']
        result = await db.responses.update_one(query, {"$addToSet": {"text_entities": json_record['text_entities']}})
        print("Inserted new row in Intent {}".format(result.modified_count))
        intent_detail = await self.get_response_details({"object_id": object_id})
        if result.modified_count == 1:
            return {"status": "Success", "message": "Response added "}, intent_detail
        else:
            return {"status": "Error", "message": "Response Already exists "}, intent_detail

    async def delete_response_detail(self, data):
        """Remove a response text, keeping at least one text per response."""
        json_record = json.loads(json.dumps(data))
        object_id = json_record['object_id']
        del json_record['object_id']
        response_detail = await self.get_response_details({"object_id": object_id})
        # EAFP length check: index 1 missing means only one text remains
        try:
            res = response_detail['text_entities'][1]
        except IndexError:
            return {"status": "Error", "message": "Atleast one record should be present for an Response"}, response_detail
        query = {"_id": ObjectId("{}".format(object_id))}
        result = await db.responses.update_one(query, {"$pull": {"text_entities": json_record['text_entities']}})
        print("Removed row from Intent {}".format(result))
        response_detail = await self.get_response_details({"object_id": object_id})
        return {"status": "Success", "message": "Response text Removed "}, response_detail
class StoryModel:
    """CRUD operations for stories stored in ``db.stories``.

    A story document carries a ``story`` array of intent/response/action
    steps; detail methods edit that array by positional index.
    """

    def __init__(self):
        pass

    async def get_stories(self, record):
        """Return stories matching *record* (projection excludes the ``story`` array)."""
        json_record = json.loads(json.dumps(record))
        cursor = db.stories.find(json_record, {"project_id": 1, "domain_id": 1, "story_name": 1, "story_description": 1})
        result = await cursor.to_list(length=1000)
        print("Stories sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def create_story(self, record):
        """Create a story unless one with the same name exists in the domain."""
        json_record = json.loads(json.dumps(record))
        insert_record = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id'],
                         "story_name": json_record['story_name'],
                         "story_description": json_record['story_description'], "story": []}
        val_res = await db.stories.find_one({"project_id": json_record['project_id'],
                                             "domain_id": json_record['domain_id'],
                                             "story_name": json_record['story_name']})
        if val_res is not None:
            print('Story already exists')
            return {"status": "Error", "message": "Story already exists"}, None
        result = await db.stories.insert_one(json.loads(json.dumps(insert_record)))
        print("Story created with ID {}".format(result.inserted_id))
        get_stories = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        stories_list = await self.get_stories(get_stories)
        return {"status": "Success", "message": "Story created successfully "}, stories_list

    async def delete_story(self, record):
        """Delete a story and return the refreshed story list."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        result = await db.stories.delete_one(query)
        print("Story Deleted count {}".format(result))
        get_stories = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        stories_list = await self.get_stories(get_stories)
        return {"status": "Success", "message": "Story Deleted successfully"}, stories_list

    async def update_story(self, record):
        """Rename / re-describe a story.

        Bug fix: the previous duplicate check queried by the *new* name, so a
        match always satisfied ``val_res['story_name'] == ...`` and the
        "Name already exists" branch was unreachable.  Now only a *different*
        record owning the name blocks the update.
        """
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        update_field = {"$set": {"story_name": json_record['story_name'],
                                 "story_description": json_record['story_description']}}
        val_res = await db.stories.find_one({"project_id": json_record['project_id'],
                                             "domain_id": json_record['domain_id'],
                                             "story_name": json_record['story_name']})
        if val_res is not None and str(val_res['_id']) != json_record['object_id']:
            return {"status": "Error", "message": "Story Name already exists"}, None
        update_record = await db.stories.update_one(query, update_field)
        print("Story Updated , rows modified {}".format(update_record))
        get_stories = {"project_id": json_record['project_id'], "domain_id": json_record['domain_id']}
        stories_list = await self.get_stories(get_stories)
        return {"status": "Success", "message": "Story Updated successfully "}, stories_list

    async def get_only_story_details(self, data):
        """Return the raw story document (not JSON-normalized)."""
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        result = await db.stories.find_one(query)
        print("Story Details sent {}".format(json.loads(dumps(result))))
        return result

    async def get_story_details(self, data):
        """Return (story doc, project intents, project responses, all actions)."""
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        result = await db.stories.find_one(query)
        print("Story Details sent {}".format(json.loads(dumps(result))))
        cursor = db.intents.find({"project_id": json_record['project_id']})
        result_intents = await cursor.to_list(length=1000)
        intents_list = json.loads(dumps(result_intents))
        cursor = db.responses.find({"project_id": json_record['project_id']})
        result_response = await cursor.to_list(length=1000)
        response_list = json.loads(dumps(result_response))
        cursor = db.actions.find({})
        result_action = await cursor.to_list(length=1000)
        action_list = json.loads(dumps(result_action))
        return json.loads(dumps(result)), intents_list, response_list, action_list

    async def insert_story_details(self, data):
        """Insert story steps at a given position within the ``story`` array."""
        json_record = json.loads(json.dumps(data))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        position = json_record['position']
        result = await db.stories.update_one(query, {"$push": {"story": {"$each": json_record['story'],
                                                                         "$position": position}
                                                               }})
        print("Story Details Updated {}".format(result))
        story_details, intents_list, response_list, actions_list = await self.get_story_details({"object_id": json_record['object_id'],
                                                                                                 "project_id": json_record['project_id'],
                                                                                                 "domain_id": json_record['domain_id']})
        return {"status": "Success", "message": "Story created"}, story_details, intents_list, response_list, actions_list

    async def delete_story_detail(self, data):
        """Remove the story step at ``doc_index``.

        MongoDB cannot $pull by index, so the element is first $unset to
        null and then the null entry is $pull-ed out (two-step idiom).
        """
        json_record = json.loads(json.dumps(data))
        object_id = json_record['object_id']
        index = json_record['doc_index']
        query = {"_id": ObjectId("{}".format(object_id))}
        result1 = await db.stories.update_one(query, {"$unset": {"story."+str(index): 1}})
        result = await db.stories.update_one(query, {"$pull": {"story": None}})
        print("Removed row from Story {}".format(result))
        story_detail, intents_list, response_list, actions_list = await self.get_story_details({"object_id": json_record['object_id'],
                                                                                                "project_id": json_record['project_id'],
                                                                                                "domain_id": json_record['domain_id']})
        return {"status": "Success", "message": "Story element Removed "}, story_detail, intents_list, response_list, actions_list

    async def update_story_detail(self, data):
        """Replace the story step at ``doc_index`` with the supplied step."""
        json_record = json.loads(json.dumps(data))
        object_id = json_record['object_id']
        index = json_record['doc_index']
        query = {"_id": ObjectId("{}".format(object_id))}
        result = await db.stories.update_one(query, {"$set": {"story."+str(index): json_record['story']}})
        print("Record updated {}".format(result))
        story_detail, intents_list, response_list, actions_list = await self.get_story_details({"object_id": json_record['object_id'],
                                                                                                "project_id": json_record['project_id'],
                                                                                                "domain_id": json_record['domain_id']})
        return {"status": "Success", "message": "Story Updated successfully"}, story_detail, intents_list, response_list, actions_list
class EntityModel:
    """CRUD operations for entities stored in ``db.entities``."""

    def __init__(self):
        pass

    async def get_entities(self, record):
        """Return all entities matching *record* as a JSON-serializable list."""
        json_record = json.loads(json.dumps(record))
        cursor = db.entities.find(json_record)
        result = await cursor.to_list(length=1000)
        print("Entities sent {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def create_entity(self, record):
        """Create an entity unless one with the same name exists in the project."""
        json_record = json.loads(json.dumps(record))
        val_res = await db.entities.find_one({"project_id": json_record['project_id'],
                                              "entity_name": json_record['entity_name']})
        if val_res is not None:
            print("Entity Already exists ")
            return {"status": "Error", "message": "Entity Already exists "}, None
        result = await db.entities.insert_one(json_record)
        print("Entity created with ID {}".format(result.inserted_id))
        get_entities = {"project_id": json_record['project_id']}
        entities_list = await self.get_entities(get_entities)
        return {"status": "Success", "message": "Entity created successfully"}, entities_list

    async def delete_entity(self, record):
        """Delete an entity only when no intent or response still references it."""
        json_record = json.loads(json.dumps(record))
        query = {"_id": ObjectId("{}".format(json_record['object_id']))}
        entity_detail = await db.entities.find_one(query)
        # an entity may be tagged inside an intent example, or embedded in a
        # response text as /entity_name/
        res = await db.intents.find_one({"text_entities": {"$elemMatch": {"entities.entity": entity_detail['entity_name']}}})
        res2 = await db.responses.find_one({"text_entities": "/"+entity_detail['entity_name']+"/"})
        if res is None and res2 is None:
            result = await db.entities.delete_one(query)
            print("Entity Deleted count {}".format(result))
            get_entities = {"project_id": json_record['project_id']}
            entities_list = await self.get_entities(get_entities)
            return {"status": "Success", "message": "Entity deleted successfully"}, entities_list
        elif res is None:
            return {"status": "Error", "message": "Unable to delete entity , its used in an Response"}, None
        else:
            return {"status": "Error", "message": "Unable to delete entity , its used in an Intent"}, None

    async def update_entity(self, record):
        """Update an entity record in place.

        Bug fix: the original looked the record up by the *new* entity name
        and read ``val_res.get('_id')`` before checking for None, so renaming
        an entity (no record with the new name yet) crashed with
        AttributeError.  The target id is now taken from the incoming record
        itself, and the update is rejected only when a *different* record
        already owns the requested name.
        """
        json_record = json.loads(json.dumps(record))
        raw_id = json_record['_id']
        if isinstance(raw_id, dict):
            # records round-tripped through bson dumps() carry {"$oid": "..."}
            # — TODO confirm against the payload shape sent by the frontend
            raw_id = raw_id.get('$oid')
        del json_record['_id']
        val_res = await db.entities.find_one({"project_id": json_record['project_id'],
                                              "entity_name": json_record['entity_name']})
        if val_res is not None and str(val_res['_id']) != str(raw_id):
            return {"status": "Error", "message": "Entity Name already exists"}, None
        query = {"_id": ObjectId("{}".format(raw_id))}
        print("Got value ", json_record)
        update_record = await db.entities.update_one(query, {"$set": json_record})
        print("Entity Updated , rows modified {}".format(update_record.modified_count))
        get_entities = {"project_id": json_record['project_id']}
        entities_list = await self.get_entities(get_entities)
        return {"status": "Success", "message": "Entity updated successfully"}, entities_list
class ValidateData:
    """Pre-training sanity checks over a project's intents, responses,
    stories and entities.

    ``validate_data`` returns a newline-separated string of problems;
    an empty string means the project passed every check.
    """

    def __init__(self):
        # bug fix: the original defined ``__int__`` (typo for ``__init__``),
        # so this initializer never ran; harmless here but corrected
        pass

    async def validate_data(self, project_id):
        """Validate the project and return '' or a string describing failures."""
        ret_val = ''
        query = {"project_id": project_id}
        cursor = db.intents.find(query)
        result = await cursor.to_list(length=10)
        print("Count of intents in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Atleast one Intent should be defined in the Project \n"
        cursor = db.responses.find(query)
        result = await cursor.to_list(length=10)
        print("Count of Responses in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Atleast one Response should be defined in the Project \n"
        cursor = db.stories.find(query)
        result = await cursor.to_list(length=10)
        print("Count of Stories in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Atleast one Story should be defined in the Project \n"
        else:
            # a story exists but may still have an empty step list
            try:
                print("First story from the result {}".format(result[0]['story'][0]))
            except IndexError:
                ret_val = ret_val + "Story {} should have atleast one Intent and Response ".format(result[0]['story_name'])
        cursor = db.entities.find(query)
        result = await cursor.to_list(length=10)
        print("Count of entities in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Atleast one Entity should be defined in the Project \n"
        # the bot framework requires these well-known fallback intent/responses
        cursor = db.intents.find({"project_id": project_id, "intent_name": "negative"})
        result = await cursor.to_list(length=10)
        print("Count of negative intents in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Intent 'negative' should be defined in the Project \n"
        cursor = db.responses.find({"project_id": project_id, "response_name": "utter_default"})
        result = await cursor.to_list(length=10)
        print("Count of Responses in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Response default should be defined in the Project \n"
        cursor = db.responses.find({"project_id": project_id, "response_name": "utter_ask_rephrase"})
        result = await cursor.to_list(length=10)
        print("Count of Responses in Project {}".format(len(result)))
        if len(result) < 1:
            ret_val = ret_val + "Response ask_rephrase should be defined in the Project \n"
        return ret_val
class CustomActionsModel:
    """Maintains the global ``db.actions`` collection of custom actions."""

    def __init__(self):
        pass

    async def get_custom_actions(self):
        """Return every custom action as a JSON-serializable list."""
        result = await db.actions.find({}).to_list(length=1000)
        print("Custom Actions {}".format(json.loads(dumps(result))))
        return json.loads(dumps(result))

    async def create_action(self, record):
        """Insert a new action; duplicates (matched by name) are rejected."""
        payload = json.loads(json.dumps(record))
        existing = await db.actions.find_one({"action_name": payload['action_name']})
        if existing is None:
            inserted = await db.actions.insert_one(payload)
            print("Action created {}".format(inserted.inserted_id))
            return {"status": "Success", "message": "Action Has Been Created"}
        print('Action already exists')
        return {"status": "Error", "message": "Action already exists"}

    async def update_action(self, record):
        """Update only the description of the action identified by object_id."""
        payload = json.loads(json.dumps(record))
        selector = {"_id": ObjectId("{}".format(payload['object_id']))}
        change = {"$set": {"action_description": payload['action_description']
                           }}
        outcome = await db.actions.update_one(selector, change)
        print("Action Updated , rows modified {}".format(outcome))
        return {"status": "Success", "message": "Action details updated successfully "}

    async def delete_action(self, object_id):
        """Remove the action identified by *object_id*."""
        outcome = await db.actions.delete_one({"_id": ObjectId("{}".format(object_id))})
        print("Action Deleted count {}".format(outcome))
        return {"status": "Success", "message": "Action Deleted Successfully"}
class GrievanceModel:
    """Read-only access to the ``db.grievance`` collection."""

    def __init__(self):
        pass

    async def get_grievance(self):
        """Return all grievance documents as a JSON-serializable list."""
        documents = await db.grievance.find({}).to_list(length=1000)
        print("Grievance {}".format(json.loads(dumps(documents))))
        return json.loads(dumps(documents))
1c323d974d565283fe3537ebf68aa347c5a46301 | 13,147 | py | Python | Utils.py | AGDCservices/Ghidra-Scripts | 4b891cb5bc1593e398652be50f27001964575bf9 | [
"Apache-2.0"
] | 38 | 2020-04-09T18:25:52.000Z | 2022-03-22T23:51:58.000Z | Ghidra-Scripts/Utils.py | paulveillard/cybersecurity-networking | 097fda0bb22baf0610ca274cf65af324908b53ce | [
"Apache-2.0"
] | null | null | null | Ghidra-Scripts/Utils.py | paulveillard/cybersecurity-networking | 097fda0bb22baf0610ca274cf65af324908b53ce | [
"Apache-2.0"
] | 2 | 2022-01-31T10:53:53.000Z | 2022-02-25T17:55:27.000Z | from __main__ import *
'''
Utility module of common helper functions used
in building Ghidra scripts
Contained function prototypes below:
Get_Bytes_List(targetEa, nLen)
Get_Bytes_String(targetEa, nLen)
Get_Ascii_String(targetEa)
Get_Call_Xrefs_To(targetEa)
Get_Prev_Target_Instruction(curInstr, mnem, N, MAX_INSTRUCTIONS = 9999)
Get_Next_Target_Instruction(curInstr, mnem, N, MAX_INSTRUCTIONS = 9999)
Get_Operand_As_Address(targetInstr, operandIndex)
Get_Operand_As_Immediate_Value(targetInstr, operandIndex)
Get_Operand_As_String(targetInstr, operandIndex)
'''
def Get_Bytes_List(targetEa, nLen):
    '''
    Reads nLen bytes starting at targetEa and returns them as a list of
    unsigned byte values (0 - 255).

    Ghidra's getBytes() hands back *signed* Java bytes (0xfe arrives as -2),
    which breaks naive comparisons against raw hex.  Masking each value with
    0xff is the standard two's-complement conversion back to unsigned and
    replaces the original's manual abs()-based arithmetic.
    '''
    return [b & 0xff for b in getBytes(targetEa, nLen)]
def Get_Bytes_String(targetEa, nLen):
    '''
    Reads nLen bytes starting at targetEa and returns them as a string of
    unsigned byte characters.

    Ghidra's getBytes() hands back *signed* Java bytes (0xfe arrives as -2);
    masking with 0xff converts each to its unsigned value before chr(),
    replacing the original's manual abs()-based arithmetic.
    '''
    return ''.join(chr(b & 0xff) for b in getBytes(targetEa, nLen))
def Get_Ascii_String(targetEa):
    '''
    Returns the NUL-terminated string starting at targetEa (an Address
    object) as a Python str; the terminating NUL is not included.

    Bug fix: getByte() returns *signed* bytes, so any byte >= 0x80 came
    back negative and chr() raised ValueError; mask with 0xff first.
    '''
    result = ''
    i = 0
    while True:
        curByte = getByte(targetEa.add(i)) & 0xff
        if curByte == 0:
            break
        result += chr(curByte)
        i += 1
    return result
def Get_Call_Xrefs_To(targetEa):
    '''
    Returns a list of addresses holding `call` instructions that reference
    targetEa.

    Robustness fix: a reference can originate from data, in which case
    getInstructionAt() returns None; the original dereferenced the result
    unconditionally and raised AttributeError on such references.
    '''
    callEaList = []
    for ref in getReferencesTo(targetEa):
        instr = getInstructionAt(ref.getFromAddress())
        if instr is not None and instr.getMnemonicString().lower() == 'call':
            callEaList.append(ref.getFromAddress())
    return callEaList
def Get_Prev_Target_Instruction(curInstr, mnem, N, MAX_INSTRUCTIONS = 9999):
    '''
    Walks backwards from curInstr and returns the N'th previous instruction
    whose mnemonic equals mnem (case-insensitive), or None on failure.

    At most MAX_INSTRUCTIONS are examined, and the walk never leaves the
    function containing curInstr (when it lies inside a defined function).
    '''
    # bare try/except replaced with an explicit None check so unrelated
    # errors are no longer silently swallowed
    containingFunc = getFunctionContaining(curInstr.getAddress())
    funcBody = containingFunc.getBody() if containingFunc is not None else None

    # get Nth prev instruction
    totalInstructionCount = 0
    targetInstructionCount = 0
    while (totalInstructionCount < MAX_INSTRUCTIONS) and (targetInstructionCount < N):
        curInstr = curInstr.getPrevious()
        if curInstr is None:
            break
        if (funcBody is not None) and (funcBody.contains(curInstr.getAddress()) == False):
            break
        if curInstr.getMnemonicString().lower() == mnem.lower():
            targetInstructionCount += 1
        totalInstructionCount += 1

    # only a full count of N matches is a success
    return curInstr if targetInstructionCount == N else None
def Get_Next_Target_Instruction(curInstr, mnem, N, MAX_INSTRUCTIONS = 9999):
    '''
    Walks forward from curInstr and returns the N'th next instruction whose
    mnemonic equals mnem (case-insensitive), or None on failure.

    At most MAX_INSTRUCTIONS are examined, and the walk never leaves the
    function containing curInstr (when it lies inside a defined function).
    '''
    # bare try/except replaced with an explicit None check so unrelated
    # errors are no longer silently swallowed
    containingFunc = getFunctionContaining(curInstr.getAddress())
    funcBody = containingFunc.getBody() if containingFunc is not None else None

    # get Nth next instruction
    totalInstructionCount = 0
    targetInstructionCount = 0
    while (totalInstructionCount < MAX_INSTRUCTIONS) and (targetInstructionCount < N):
        curInstr = curInstr.getNext()
        if curInstr is None:
            break
        if (funcBody is not None) and (funcBody.contains(curInstr.getAddress()) == False):
            break
        if curInstr.getMnemonicString().lower() == mnem.lower():
            targetInstructionCount += 1
        totalInstructionCount += 1

    # only a full count of N matches is a success
    return curInstr if targetInstructionCount == N else None
def Get_Operand_As_Address(targetInstr, operandIndex):
    '''
    Returns the value of operand `operandIndex` (0-based) of targetInstr
    interpreted as an Address object, or None when it cannot be treated as
    an address.  For jumps / calls this yields the final target address.
    No real validity checks are performed; the caller must know the target
    operand should be an address.
    '''
    # error check
    if operandIndex >= targetInstr.getNumOperands():
        print('[*] Error in Get_Operand_As_Address. operandIndex is too large at {:s}'.format(targetInstr.getAddress().toString()))
        return None
    elif targetInstr.getNumOperands() == 0:
        return None

    opObjects = targetInstr.getOpObjects(operandIndex)
    if len(opObjects) == 0:
        # robustness fix: some operands carry no objects at all; the
        # original indexed [0] unconditionally and raised IndexError
        return None

    operand = opObjects[0]
    if isinstance(operand, ghidra.program.model.scalar.Scalar):
        return toAddr(operand.getValue())
    elif isinstance(operand, ghidra.program.model.address.GenericAddress):
        return operand
    return None
def Get_Operand_As_Immediate_Value(targetInstr, operandIndex):
    '''
    returns the value for the operandIndex operand of the target instruction
    if the target operand is not an immediate value, the function will attempt
    to find where the variable was previously set. It will ONLY search within
    the current function to find where the variable was previously set.
    if operand value can not be determined, returns None
    operandIndex starts at 0
    '''
    # operand types are typically different if operand is
    # used in a call versus not a call and if there is a
    # reference or not
    OP_TYPE_IMMEDIATE = 16384
    OP_TYPE_NO_CALL_REG = 512
    OP_TYPE_NO_CALL_STACK = 4202496
    # global variables have numerous reference types
    # unsure how to differentiate the different types
    # error check: zero-operand case first (previously unreachable behind the
    # 'too large' branch, which always fired when there were no operands)
    if targetInstr.getNumOperands() == 0:
        return None
    if operandIndex >= targetInstr.getNumOperands():
        print('[*] Error in Get_Operand_As_Immediate_Value. operandIndex is too large at {:s}'.format(targetInstr.getAddress().toString()))
        return None
    # get address set of current function to use in determining
    # if prev instruction is outside of current function
    try:
        funcBody = getFunctionContaining(targetInstr.getAddress()).getBody()
    except:
        funcBody = None
    # find the actual operand value
    targetValue = None
    opType = targetInstr.getOperandType(operandIndex)
    # if operand is a direct number
    if opType == OP_TYPE_IMMEDIATE:
        targetValue = targetInstr.getOpObjects(operandIndex)[0].getValue()
    # else if operand is a register
    elif opType == OP_TYPE_NO_CALL_REG:
        regName = targetInstr.getOpObjects(operandIndex)[0].getName().lower()
        # search for previous location where register value was set
        curInstr = targetInstr
        while True:
            curInstr = curInstr.getPrevious()
            # check to make sure curInstr is valid
            if curInstr == None: break
            if funcBody != None:
                if funcBody.contains(curInstr.getAddress()) == False: break
            # check different variations of how register values get set
            curMnem = curInstr.getMnemonicString().lower()
            if (curMnem == 'mov') and (curInstr.getOperandType(0) == OP_TYPE_NO_CALL_REG):
                if curInstr.getOpObjects(0)[0].getName().lower() == regName:
                    if curInstr.getOperandType(1) == OP_TYPE_IMMEDIATE:
                        targetValue = curInstr.getOpObjects(1)[0].getValue()
                    elif curInstr.getOperandType(1) == OP_TYPE_NO_CALL_REG:
                        # source is another register - resolve it recursively
                        targetValue = Get_Operand_As_Immediate_Value(curInstr, 1)
                    break
            elif (curMnem == 'xor'):
                operand1 = curInstr.getOpObjects(0)[0]
                operand2 = curInstr.getOpObjects(1)[0]
                op1Type = curInstr.getOperandType(0)
                op2Type = curInstr.getOperandType(1)
                if (op1Type == OP_TYPE_NO_CALL_REG) and (op2Type == OP_TYPE_NO_CALL_REG):
                    # 'xor reg, reg' zeroes the register
                    if (operand1.getName().lower() == regName) and (operand2.getName().lower() == regName):
                        targetValue = 0
                        break
            elif (curMnem == 'pop') and (curInstr.getOperandType(0) == OP_TYPE_NO_CALL_REG):
                if curInstr.getOpObjects(0)[0].getName().lower() == regName:
                    # find previous push
                    # NOTE: assumes previous push corresponds to pop but
                    # will fail if there is a function call in-between
                    tmpCurInstr = curInstr.getPrevious()
                    while True:
                        # check to make sure tmpCurInstr is valid
                        if tmpCurInstr == None: break
                        if funcBody != None:
                            if funcBody.contains(tmpCurInstr.getAddress()) == False: break
                        if tmpCurInstr.getMnemonicString().lower() == 'push':
                            if tmpCurInstr.getOperandType(0) == OP_TYPE_IMMEDIATE:
                                targetValue = tmpCurInstr.getOpObjects(0)[0].getValue()
                            break
                        # BUGFIX: step backwards; without this the loop never
                        # advanced and spun forever on any non-push instruction
                        tmpCurInstr = tmpCurInstr.getPrevious()
                    # break out of outer while loop
                    break
    # if operand is a stack variable
    elif opType == OP_TYPE_NO_CALL_STACK:
        stackOffset = targetInstr.getOperandReferences(operandIndex)[0].getStackOffset()
        # search for previous location where stack variable value was set
        curInstr = targetInstr
        while True:
            curInstr = curInstr.getPrevious()
            # check to make sure curInstr is valid
            if curInstr == None: break
            if funcBody != None:
                if funcBody.contains(curInstr.getAddress()) == False: break
            # find where stack variable was set
            curMnem = curInstr.getMnemonicString().lower()
            if (curMnem == 'mov') and (curInstr.getOperandType(0) == OP_TYPE_NO_CALL_STACK):
                if curInstr.getOperandReferences(0)[0].getStackOffset() == stackOffset:
                    if curInstr.getOperandType(1) == OP_TYPE_IMMEDIATE:
                        targetValue = curInstr.getOpObjects(1)[0].getValue()
                    break
    return targetValue
def Get_Operand_As_String(targetInstr, operandIndex):
    '''
    Return the string representation of operand number operandIndex
    (0-based) of targetInstr, or None when the index is invalid.
    For jumps and calls this is the final address jumped to / called.
    '''
    numOperands = targetInstr.getNumOperands()
    if operandIndex >= numOperands:
        print('[*] Error in Get_Operand_As_String. operandIndex is too large at {:s}'.format(targetInstr.getAddress().toString()))
        return None
    if numOperands == 0:
        return None
    # first operand object carries the printable representation
    firstOpObject = targetInstr.getOpObjects(operandIndex)[0]
    return firstOpObject.toString()
| 35.822888 | 141 | 0.633376 | from __main__ import *
def Get_Bytes_List(targetEa, nLen):
    '''
    Read nLen bytes from the program at address targetEa and return them as
    a list of unsigned ints (0-255).

    getBytes() returns signed Java bytes (-128..127); masking with 0xff is
    equivalent to the old (0xff - abs(b) + 1) dance for negatives but works
    in a single expression for every byte.
    '''
    return [curByte & 0xff for curByte in getBytes(targetEa, nLen)]
def Get_Bytes_String(targetEa, nLen):
    '''
    Read nLen bytes from the program at address targetEa and return them as
    a str, one character per byte.

    getBytes() returns signed Java bytes (-128..127); masking with 0xff
    maps each byte to its unsigned value before chr().
    '''
    return ''.join(chr(curByte & 0xff) for curByte in getBytes(targetEa, nLen))
def Get_Ascii_String(targetEa):
    '''
    Read a NUL-terminated string starting at address targetEa and return it
    (terminator excluded).

    Bytes are masked with 0xff because getByte() returns signed values;
    without the mask chr() raised ValueError for any byte >= 0x80. Characters
    are collected in a list and joined once instead of quadratic str +=.
    '''
    chars = []
    i = 0
    while True:
        curByte = getByte(targetEa.add(i)) & 0xff
        if curByte == 0:
            break
        chars.append(chr(curByte))
        i += 1
    return ''.join(chars)
def Get_Call_Xrefs_To(targetEa):
    '''
    Return a list of addresses of all 'call' instructions that reference
    targetEa.

    Non-code references (e.g. data pointers) have no instruction at the
    source address - getInstructionAt returns None there - so those are
    skipped instead of raising AttributeError as before.
    '''
    callEaList = []
    for ref in getReferencesTo(targetEa):
        fromAddr = ref.getFromAddress()
        curInstr = getInstructionAt(fromAddr)
        if curInstr is None:
            continue
        if curInstr.getMnemonicString().lower() == 'call':
            callEaList.append(fromAddr)
    return callEaList
def Get_Prev_Target_Instruction(curInstr, mnem, N, MAX_INSTRUCTIONS = 9999):
    '''
    gets the N'th previous instruction before curInstr whose mnemonic
    matches mnem (case-insensitive)
    searches at most MAX_INSTRUCTIONS backwards and will not leave the
    function containing curInstr (when that function is known)
    returns None on failure
    '''
    # address set of the enclosing function, used to stop at its boundary;
    # getFunctionContaining returns None outside a function, hence try/except
    try:
        funcBody = getFunctionContaining(curInstr.getAddress()).getBody()
    except:
        funcBody = None
    totalInstructionCount = 0
    targetInstructionCount = 0
    while (totalInstructionCount < MAX_INSTRUCTIONS) and (targetInstructionCount < N):
        curInstr = curInstr.getPrevious()
        # stop at the start of the listing or outside the current function
        if curInstr == None: break
        if funcBody != None:
            if funcBody.contains(curInstr.getAddress()) == False: break
        if curInstr.getMnemonicString().lower() == mnem.lower(): targetInstructionCount += 1
        totalInstructionCount += 1
    # N matches found -> curInstr is the N'th match; otherwise report failure
    if targetInstructionCount == N:
        result = curInstr
    else:
        result = None
    return result
def Get_Next_Target_Instruction(curInstr, mnem, N, MAX_INSTRUCTIONS = 9999):
    '''
    gets the N'th next instruction after curInstr whose mnemonic matches
    mnem (case-insensitive)
    searches at most MAX_INSTRUCTIONS forward and will not leave the
    function containing curInstr (when that function is known)
    returns None on failure
    '''
    # address set of the enclosing function, used to stop at its boundary;
    # getFunctionContaining returns None outside a function, hence try/except
    try:
        funcBody = getFunctionContaining(curInstr.getAddress()).getBody()
    except:
        funcBody = None
    totalInstructionCount = 0
    targetInstructionCount = 0
    while (totalInstructionCount < MAX_INSTRUCTIONS) and (targetInstructionCount < N):
        curInstr = curInstr.getNext()
        # stop at the end of the listing or outside the current function
        if curInstr == None: break
        if funcBody != None:
            if funcBody.contains(curInstr.getAddress()) == False: break
        if curInstr.getMnemonicString().lower() == mnem.lower(): targetInstructionCount += 1
        totalInstructionCount += 1
    # N matches found -> curInstr is the N'th match; otherwise report failure
    if targetInstructionCount == N:
        result = curInstr
    else:
        result = None
    return result
def Get_Operand_As_Address(targetInstr, operandIndex):
    '''
    returns the value for the operandIndex operand of the target
    instruction treated as an address; returns None if the operand
    can not be treated as an address. operandIndex starts at 0.
    For jumps or calls, the final address jumped to / called is returned.
    '''
    # error check
    if operandIndex >= targetInstr.getNumOperands():
        print('[*] Error in Get_Operand_As_Address. operandIndex is too large at {:s}'.format(targetInstr.getAddress().toString()))
        return None
    # NOTE(review): this elif is unreachable - when there are no operands the
    # branch above already matched (operandIndex >= 0) and printed the error
    elif targetInstr.getNumOperands() == 0:
        return None
    operand = targetInstr.getOpObjects(operandIndex)[0]
    # a Scalar operand is converted to an address; an address is returned as-is
    if type(operand) == ghidra.program.model.scalar.Scalar:
        targetValue = toAddr(operand.getValue())
    elif type(operand) == ghidra.program.model.address.GenericAddress:
        targetValue = operand
    else:
        targetValue = None
    return targetValue
def Get_Operand_As_Immediate_Value(targetInstr, operandIndex):
    '''
    returns the value for the operandIndex operand of the target instruction
    if the target operand is not an immediate value, the function will attempt
    to find where the variable was previously set. It will ONLY search within
    the current function. if the operand value can not be determined,
    returns None. operandIndex starts at 0
    '''
    # OperandType bit masks observed for the cases handled below
    OP_TYPE_IMMEDIATE = 16384
    OP_TYPE_NO_CALL_REG = 512
    OP_TYPE_NO_CALL_STACK = 4202496
    # error check: handle the zero-operand case first; it used to sit in an
    # unreachable elif because operandIndex >= 0 always matched the first test
    if targetInstr.getNumOperands() == 0:
        return None
    if operandIndex >= targetInstr.getNumOperands():
        print('[*] Error in Get_Operand_As_Immediate_Value. operandIndex is too large at {:s}'.format(targetInstr.getAddress().toString()))
        return None
    # address set of the current function, used to stop the backward search
    try:
        funcBody = getFunctionContaining(targetInstr.getAddress()).getBody()
    except:
        funcBody = None
    targetValue = None
    opType = targetInstr.getOperandType(operandIndex)
    # operand is a direct number
    if opType == OP_TYPE_IMMEDIATE:
        targetValue = targetInstr.getOpObjects(operandIndex)[0].getValue()
    # operand is a register - walk backwards to where it was last set
    elif opType == OP_TYPE_NO_CALL_REG:
        regName = targetInstr.getOpObjects(operandIndex)[0].getName().lower()
        curInstr = targetInstr
        while True:
            curInstr = curInstr.getPrevious()
            if curInstr == None: break
            if funcBody != None:
                if funcBody.contains(curInstr.getAddress()) == False: break
            # check different variations of how register values get set
            curMnem = curInstr.getMnemonicString().lower()
            if (curMnem == 'mov') and (curInstr.getOperandType(0) == OP_TYPE_NO_CALL_REG):
                if curInstr.getOpObjects(0)[0].getName().lower() == regName:
                    if curInstr.getOperandType(1) == OP_TYPE_IMMEDIATE:
                        targetValue = curInstr.getOpObjects(1)[0].getValue()
                    elif curInstr.getOperandType(1) == OP_TYPE_NO_CALL_REG:
                        # source is another register - resolve it recursively
                        targetValue = Get_Operand_As_Immediate_Value(curInstr, 1)
                    break
            elif (curMnem == 'xor'):
                operand1 = curInstr.getOpObjects(0)[0]
                operand2 = curInstr.getOpObjects(1)[0]
                op1Type = curInstr.getOperandType(0)
                op2Type = curInstr.getOperandType(1)
                if (op1Type == OP_TYPE_NO_CALL_REG) and (op2Type == OP_TYPE_NO_CALL_REG):
                    # 'xor reg, reg' zeroes the register
                    if (operand1.getName().lower() == regName) and (operand2.getName().lower() == regName):
                        targetValue = 0
                        break
            elif (curMnem == 'pop') and (curInstr.getOperandType(0) == OP_TYPE_NO_CALL_REG):
                if curInstr.getOpObjects(0)[0].getName().lower() == regName:
                    # find previous push
                    # NOTE: assumes that push corresponds to this pop but
                    # will fail if there is a function call in-between
                    tmpCurInstr = curInstr.getPrevious()
                    while True:
                        if tmpCurInstr == None: break
                        if funcBody != None:
                            if funcBody.contains(tmpCurInstr.getAddress()) == False: break
                        if tmpCurInstr.getMnemonicString().lower() == 'push':
                            if tmpCurInstr.getOperandType(0) == OP_TYPE_IMMEDIATE:
                                targetValue = tmpCurInstr.getOpObjects(0)[0].getValue()
                            break
                        # BUGFIX: step backwards; without this the loop never
                        # advanced and spun forever on any non-push instruction
                        tmpCurInstr = tmpCurInstr.getPrevious()
                    # break out of outer while loop
                    break
    # operand is a stack variable - walk backwards to where it was last set
    elif opType == OP_TYPE_NO_CALL_STACK:
        stackOffset = targetInstr.getOperandReferences(operandIndex)[0].getStackOffset()
        curInstr = targetInstr
        while True:
            curInstr = curInstr.getPrevious()
            if curInstr == None: break
            if funcBody != None:
                if funcBody.contains(curInstr.getAddress()) == False: break
            curMnem = curInstr.getMnemonicString().lower()
            if (curMnem == 'mov') and (curInstr.getOperandType(0) == OP_TYPE_NO_CALL_STACK):
                if curInstr.getOperandReferences(0)[0].getStackOffset() == stackOffset:
                    if curInstr.getOperandType(1) == OP_TYPE_IMMEDIATE:
                        targetValue = curInstr.getOpObjects(1)[0].getValue()
                    break
    return targetValue
def Get_Operand_As_String(targetInstr, operandIndex):
    '''
    returns the string representation of the operandIndex operand
    (0-based) of the target instruction, or None when the index is
    invalid. For jumps or calls this is the final address jumped
    to / called.
    '''
    # error check
    if operandIndex >= targetInstr.getNumOperands():
        print('[*] Error in Get_Operand_As_String. operandIndex is too large at {:s}'.format(targetInstr.getAddress().toString()))
        return None
    elif targetInstr.getNumOperands() == 0:
        return None
    operand = targetInstr.getOpObjects(operandIndex)[0]
    return operand.toString()
| true | true |
1c323dd44e1673bf9dd42ec3a02856b6ac02b476 | 14,959 | py | Python | tests/integration_tests/tests/agent_tests/test_plugin_update.py | ilan-WS/cloudify-manager | 510d8a277c848db351f38fc5b264806b2cb36d0b | [
"Apache-2.0"
] | null | null | null | tests/integration_tests/tests/agent_tests/test_plugin_update.py | ilan-WS/cloudify-manager | 510d8a277c848db351f38fc5b264806b2cb36d0b | [
"Apache-2.0"
] | 2 | 2021-05-31T15:12:21.000Z | 2021-05-31T19:03:05.000Z | tests/integration_tests/tests/agent_tests/test_plugin_update.py | ilan-WS/cloudify-manager | 510d8a277c848db351f38fc5b264806b2cb36d0b | [
"Apache-2.0"
] | null | null | null | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
import shutil
import pytest
from functools import wraps
import integration_tests_plugins
from manager_rest.plugins_update.constants import STATES
from integration_tests import AgentTestWithPlugins
from integration_tests.tests.utils import (
get_resource as resource,
wait_for_blueprint_upload,
wait_for_deployment_creation_to_complete
)
from integration_tests.framework.utils import zip_files
def setup_for_sourced_plugins(f):
    """Decorator for tests where the blueprint sources the plugin directly.

    Generates fresh deployment/blueprint IDs, points the test at the
    '*_sourced' blueprints and lays out per-version copies of the plugin
    files (cleaned up after the test).
    """
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        # unique IDs so repeated runs don't collide on the manager
        self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
        self.setup_node_id = 'node'
        self.plugin_name = 'version_aware'
        self.plugin_dir_name_prefix = self.plugin_name + '_'
        self.base_name = 'base_sourced'
        self.base_blueprint_id = 'b{0}'.format(uuid.uuid4())
        self.mod_name = 'mod_sourced'
        self.mod_blueprint_id = 'b{0}'.format(uuid.uuid4())
        self.addCleanup(self._remove_files)
        self._prepare_files()
        return f(self, *args, **kwargs)
    return wrapper
def setup_for_plugins_update(f):
    """Decorator for tests exercising the plugins-update API.

    Switches the test to the 'plugins_update' blueprints, assigns a fresh
    base blueprint ID and layers on the mock-plugin upload/cleanup wrapper.
    """
    @pytest.mark.usefixtures('version_aware_plugin')
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        self.blueprint_name_prefix = 'plugins_update'
        self.base_blueprint_id = 'b{0}'.format(uuid.uuid4())
        # delegate to uploads_mock_plugins so managed plugins get cleaned up
        _f = uploads_mock_plugins(f)
        return _f(self, *args, **kwargs)
    return wrapper
def uploads_mock_plugins(f):
    """Decorator: request both mock plugin fixtures (v1 and v2) and make
    sure all managed copies of the plugin are deleted after the test."""
    @pytest.mark.usefixtures('version_aware_plugin')
    @pytest.mark.usefixtures('version_aware_v2_plugin')
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        self.addCleanup(self._clear_managed_plugins)
        return f(self, *args, **kwargs)
    return wrapper
class TestPluginUpdate(AgentTestWithPlugins):
    """Integration tests for upgrading the 'version_aware' plugin from
    version 1.0 to 2.0 on live deployments, via deployment updates and via
    the dedicated plugins-update API (single/many deployments and with
    additional version constraints)."""
    # the two plugin versions the tests upgrade between
    versions = ['1.0', '2.0']
    dsl_resources_path = resource(os.path.join('dsl', 'agent_tests'))
    blueprint_name_prefix = 'plugin_update_'
    # set by tests that operate on several deployments at once
    setup_deployment_ids = None
    @uploads_mock_plugins
    def test_plugin_update(self):
        self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
        self.setup_node_id = 'node'
        self.plugin_name = 'version_aware'
        self.base_name = 'base'
        self.base_blueprint_id = 'b{0}'.format(uuid.uuid4())
        self.mod_name = 'mod'
        self.mod_blueprint_id = 'b{0}'.format(uuid.uuid4())
        self._upload_blueprints_and_deploy_base()
        # Execute base (V 1.0) workflows
        self._execute_workflows()
        self._assert_on_values(self.versions[0])
        self._perform_update()
        # Execute mod (V 2.0) workflows
        self._execute_workflows()
        self._assert_on_values(self.versions[1])
    @uploads_mock_plugins
    def test_host_agent_plugin_update(self):
        def execute_host_op():
            # run the plugin's host-side operation on the agent host
            execution = self.client.executions.start(
                self.setup_deployment_id,
                'execute_operation',
                parameters={
                    'operation': 'test_host.host_op',
                    'node_ids': ['node']
                })
            self.wait_for_execution_to_end(execution)
        self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
        self.setup_node_id = 'node'
        self.plugin_name = 'version_aware'
        self.base_name = 'host_agent'
        self.base_blueprint_id = 'b{0}'.format(uuid.uuid4())
        self.mod_name = 'host_agent_mod'
        self.mod_blueprint_id = 'b{0}'.format(uuid.uuid4())
        self._upload_blueprints_and_deploy_base()
        # Execute base (V 1.0) workflows
        execute_host_op()
        self._assert_host_values(self.versions[0])
        self._perform_update()
        # Execute mod (V 2.0) workflows
        execute_host_op()
        self._assert_host_values(self.versions[1])
    @setup_for_sourced_plugins
    def test_sourced_plugin_updates(self):
        self._upload_blueprints_and_deploy_base()
        # Execute base (V 1.0) workflows
        self._execute_workflows()
        self._assert_on_values(self.versions[0])
        self._perform_update()
        # Execute mod (V 2.0) workflows
        self._execute_workflows()
        self._assert_on_values(self.versions[1])
    @setup_for_plugins_update
    def test_single_deployment_is_updated(self):
        self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
        self.setup_node_id = 'node'
        self.plugin_name = 'version_aware'
        self.deploy_application(
            dsl_path=self._get_dsl_blueprint_path(''),
            blueprint_id=self.base_blueprint_id,
            deployment_id=self.setup_deployment_id
        )
        self._upload_v_2_plugin()
        # Execute base (V 1.0) workflows
        self._execute_workflows()
        self._assert_host_values(self.versions[0])
        plugins_update = self._perform_plugins_update()
        self.assertEqual(plugins_update.state, STATES.SUCCESSFUL)
        # Execute mod (V 2.0) workflows
        self._execute_workflows()
        self._assert_host_values(self.versions[1])
        # a second update has nothing left to do
        plugins_update = self._perform_plugins_update()
        self.assertEqual(plugins_update.state, STATES.NO_CHANGES_REQUIRED)
        # Execute mod (V 2.0) workflows
        self._execute_workflows()
        self._assert_host_values(self.versions[1])
    @setup_for_plugins_update
    def test_many_deployments_are_updated(self):
        self.setup_deployment_ids = ['d{0}'.format(uuid.uuid4())
                                     for _ in range(5)]
        self.setup_node_id = 'node'
        self.plugin_name = 'version_aware'
        self.client.blueprints.upload(
            path=self._get_dsl_blueprint_path(''),
            entity_id=self.base_blueprint_id)
        wait_for_blueprint_upload(self.base_blueprint_id, self.client)
        blueprint = self.client.blueprints.get(self.base_blueprint_id)
        for dep_id in self.setup_deployment_ids:
            self.client.deployments.create(blueprint.id, dep_id)
            wait_for_deployment_creation_to_complete(
                self.env.container_id,
                dep_id,
                self.client
            )
            self.execute_workflow('install', dep_id)
        self._upload_v_2_plugin()
        # Execute base (V 1.0) workflows
        for dep_id in self.setup_deployment_ids:
            self.setup_deployment_id = dep_id
            self._execute_workflows()
            self._assert_host_values(self.versions[0])
        # one plugins-update must cover every deployment of the blueprint
        plugins_update = self._perform_plugins_update()
        self.assertEqual(plugins_update.state, STATES.SUCCESSFUL)
        # Execute mod (V 2.0) workflows
        for dep_id in self.setup_deployment_ids:
            self.setup_deployment_id = dep_id
            self._execute_workflows()
            self._assert_host_values(self.versions[1])
    @setup_for_plugins_update
    def test_additional_constraints(self):
        self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
        self.setup_node_id = 'node'
        self.plugin_name = 'version_aware'
        self.deploy_application(
            dsl_path=self._get_dsl_blueprint_path(''),
            blueprint_id=self.base_blueprint_id,
            deployment_id=self.setup_deployment_id
        )
        self._upload_v_2_plugin()
        # Execute base (V 1.0) workflows
        self._execute_workflows()
        self._assert_host_values(self.versions[0])
        # Update a different (non-existent) plugin - nothing should change
        plugins_update = self._perform_plugins_update(plugin_names=['asd'])
        self.assertEqual(plugins_update.state, STATES.NO_CHANGES_REQUIRED)
        self._execute_workflows()
        self._assert_host_values(self.versions[0])
        # Update only minor version - nothing should change
        plugins_update = self._perform_plugins_update(all_to_minor=True,
                                                      all_to_latest=False)
        self.assertEqual(plugins_update.state, STATES.NO_CHANGES_REQUIRED)
        self._execute_workflows()
        self._assert_host_values(self.versions[0])
        # This should do the update
        plugins_update = self._perform_plugins_update(
            to_latest=[self.plugin_name])
        self.assertEqual(plugins_update.state, STATES.SUCCESSFUL)
        self._execute_workflows()
        self._assert_host_values(self.versions[1])
    def _perform_plugins_update(self, **kwargs):
        """Run a plugins-update for the base blueprint, wait for its
        execution to finish and return the refreshed plugins-update."""
        plugins_update = self.client.plugins_update.update_plugins(
            self.base_blueprint_id, **kwargs)
        execution_id = plugins_update.execution_id
        execution = self.client.executions.get(execution_id)
        self.wait_for_execution_to_end(execution)
        return self.client.plugins_update.get(plugins_update.id)
    def _prepare_files(self):
        """Create v1.0 and v2.0 copies of the sourced plugin under the DSL
        resources directory."""
        # Copy v1.0 twice to different directories
        source_dir = self._get_source_dir_for_plugin()
        self._copy_plugin_files_to_resources(source_dir, self.versions[0])
        target_dir = self._copy_plugin_files_to_resources(source_dir,
                                                          self.versions[1])
        # Replace 1.0 strings with 2.0
        self._replace_version(target_dir, *self.versions)
    def _copy_plugin_files_to_resources(self, source_dir, version):
        """Copy the plugin sources into a version-suffixed resources
        directory and return that directory."""
        target_dir = os.path.join(self.dsl_resources_path,
                                  'plugins',
                                  self.plugin_dir_name_prefix + version)
        if not os.path.exists(target_dir):
            shutil.copytree(source_dir, target_dir)
        return target_dir
    def _remove_files(self):
        """Cleanup: delete the per-version plugin copies."""
        for version in self.versions:
            dir_to_rm = os.path.join(
                self.dsl_resources_path,
                'plugins',
                self.plugin_dir_name_prefix + version)
            shutil.rmtree(dir_to_rm, ignore_errors=True)
    def _perform_update(self, update_plugins=True):
        """Run a deployment update to the modified blueprint and wait for
        its execution to finish."""
        execution_id = \
            self.client.deployment_updates.update_with_existing_blueprint(
                deployment_id=self.setup_deployment_id,
                blueprint_id=self.mod_blueprint_id,
                update_plugins=update_plugins
            ).execution_id
        execution = self.client.executions.get(execution_id)
        self.wait_for_execution_to_end(execution)
    def _get_work_dir_for_plugin(self):
        # per-test scratch location for plugin files
        return str(self.workdir / self.plugin_name)
    def _get_source_dir_for_plugin(self):
        # location of the plugin sources inside integration_tests_plugins
        plugins_dir = os.path.dirname(integration_tests_plugins.__file__)
        return os.path.join(plugins_dir, self.plugin_name)
    def _copy_plugin_files_to_work_dir(self):
        """Copy the plugin sources into the test work dir (once) and return
        the target directory."""
        source_dir = self._get_source_dir_for_plugin()
        target_dir = self._get_work_dir_for_plugin()
        if not os.path.exists(target_dir):
            shutil.copytree(source_dir, target_dir)
        return target_dir
    def _upload_v_2_plugin(self):
        """Build (or reuse) the v2 wagon and upload it together with its
        plugin.yaml as a globally-visible managed plugin."""
        plugin_name = 'version_aware_v2'
        plugins_dir = os.path.dirname(integration_tests_plugins.__file__)
        plugin_source = os.path.join(plugins_dir, plugin_name)
        wagon_paths = self._get_or_create_wagon(plugin_source)
        for wagon_path in wagon_paths:
            yaml_path = os.path.join(plugin_source, 'plugin.yaml')
            with zip_files([wagon_path, yaml_path]) as zip_path:
                self.client.plugins.upload(zip_path, visibility='global')
                self.logger.info(
                    'Finished uploading {0}...'.format(plugin_name))
    @staticmethod
    def _replace_version(target_dir, v1, v2):
        """Replace every occurrence of the string v1 with v2 in all files
        under target_dir (https://stackoverflow.com/a/4205918/978089)."""
        for dname, dirs, files in os.walk(target_dir):
            for fname in files:
                fpath = os.path.join(dname, fname)
                with open(fpath) as f:
                    s = f.read()
                s = s.replace(v1, v2)
                with open(fpath, 'w') as f:
                    f.write(s)
    def _assert_cda_values(self, version):
        # the central-deployment-agent operation must have reported `version`
        cda_data = self.get_runtime_property(self.setup_deployment_id,
                                             'cda_op')
        self.assertEqual(len(cda_data), 1)
        self.assertEqual(cda_data[0], version)
    def _assert_host_values(self, version):
        # the host-agent operation must have reported `version`
        host_data = self.get_runtime_property(self.setup_deployment_id,
                                              'host_op')
        self.assertEqual(len(host_data), 1)
        self.assertEqual(host_data[0], version)
    def _assert_on_values(self, version):
        """Check both operation results and that, among the managed copies
        of the plugin, exactly the one matching `version` is installed."""
        self._assert_cda_values(version)
        self._assert_host_values(version)
        plugins = self.client.plugins.list(package_name='version_aware')
        if not plugins:
            return
        target_plugin = next(p for p in plugins
                             if p.package_version == version)
        other_plugins = [p for p in plugins if p.package_version != version]
        assert all(pstate['state'] == 'installed'
                   for pstate in target_plugin.installation_state)
        for other_plugin in other_plugins:
            assert all(pstate['state'] != 'installed'
                       for pstate in other_plugin.installation_state)
    def _upload_blueprints_and_deploy_base(self):
        """Deploy the base blueprint and upload the modified one for the
        later deployment update."""
        self.deploy_application(
            dsl_path=self._get_dsl_blueprint_path(self.base_name),
            blueprint_id=self.base_blueprint_id,
            deployment_id=self.setup_deployment_id
        )
        self.client.blueprints.upload(
            path=self._get_dsl_blueprint_path(self.mod_name),
            entity_id=self.mod_blueprint_id
        )
        wait_for_blueprint_upload(self.mod_blueprint_id, self.client)
    def _get_dsl_blueprint_path(self, name):
        # blueprint file name is '<prefix><name>.yaml' under the resources dir
        plugin_path = '{0}{1}.yaml'.format(self.blueprint_name_prefix, name)
        return os.path.join(self.dsl_resources_path, plugin_path)
    def _execute_workflows(self):
        # run the plugin's workflow + operations on the current deployment
        for wf in ('test_cda_wf', 'test_cda_op', 'test_host_op'):
            self.execute_workflow(wf, self.setup_deployment_id)
    def _clear_managed_plugins(self):
        """Cleanup: force-delete every uploaded copy of the test plugin and
        wait for the uninstall-plugin executions to finish."""
        plugins = self.client.plugins.list()
        plugin_name = getattr(self, 'plugin_name', None)
        if not plugin_name:
            return
        for p in plugins:
            if plugin_name in p.package_name:
                self.client.plugins.delete(p.id, force=True)
        self._wait_for_execution_by_wf_name('uninstall_plugin')
| 37.775253 | 79 | 0.652584 | ort uuid
import shutil
import pytest
from functools import wraps
import integration_tests_plugins
from manager_rest.plugins_update.constants import STATES
from integration_tests import AgentTestWithPlugins
from integration_tests.tests.utils import (
get_resource as resource,
wait_for_blueprint_upload,
wait_for_deployment_creation_to_complete
)
from integration_tests.framework.utils import zip_files
def setup_for_sourced_plugins(f):
    """Decorator preparing a test that sources the plugin from the
    blueprint: fresh IDs, '*_sourced' blueprint names and per-version
    plugin file copies (removed again on cleanup)."""
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        # unique IDs so repeated runs don't collide on the manager
        self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
        self.setup_node_id = 'node'
        self.plugin_name = 'version_aware'
        self.plugin_dir_name_prefix = self.plugin_name + '_'
        self.base_name = 'base_sourced'
        self.base_blueprint_id = 'b{0}'.format(uuid.uuid4())
        self.mod_name = 'mod_sourced'
        self.mod_blueprint_id = 'b{0}'.format(uuid.uuid4())
        self.addCleanup(self._remove_files)
        self._prepare_files()
        return f(self, *args, **kwargs)
    return wrapper
def setup_for_plugins_update(f):
    """Decorator preparing a test of the plugins-update API: switch to the
    'plugins_update' blueprints, fresh base blueprint ID, plus the
    mock-plugin upload/cleanup wrapper."""
    @pytest.mark.usefixtures('version_aware_plugin')
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        self.blueprint_name_prefix = 'plugins_update'
        self.base_blueprint_id = 'b{0}'.format(uuid.uuid4())
        # delegate to uploads_mock_plugins so managed plugins get cleaned up
        _f = uploads_mock_plugins(f)
        return _f(self, *args, **kwargs)
    return wrapper
def uploads_mock_plugins(f):
    """Decorator: pull in the v1 and v2 mock plugin fixtures and register
    a cleanup that removes all managed copies of the plugin."""
    @pytest.mark.usefixtures('version_aware_plugin')
    @pytest.mark.usefixtures('version_aware_v2_plugin')
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        self.addCleanup(self._clear_managed_plugins)
        return f(self, *args, **kwargs)
    return wrapper
class TestPluginUpdate(AgentTestWithPlugins):
versions = ['1.0', '2.0']
dsl_resources_path = resource(os.path.join('dsl', 'agent_tests'))
blueprint_name_prefix = 'plugin_update_'
setup_deployment_ids = None
@uploads_mock_plugins
def test_plugin_update(self):
self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
self.setup_node_id = 'node'
self.plugin_name = 'version_aware'
self.base_name = 'base'
self.base_blueprint_id = 'b{0}'.format(uuid.uuid4())
self.mod_name = 'mod'
self.mod_blueprint_id = 'b{0}'.format(uuid.uuid4())
self._upload_blueprints_and_deploy_base()
self._execute_workflows()
self._assert_on_values(self.versions[0])
self._perform_update()
self._execute_workflows()
self._assert_on_values(self.versions[1])
@uploads_mock_plugins
def test_host_agent_plugin_update(self):
def execute_host_op():
execution = self.client.executions.start(
self.setup_deployment_id,
'execute_operation',
parameters={
'operation': 'test_host.host_op',
'node_ids': ['node']
})
self.wait_for_execution_to_end(execution)
self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
self.setup_node_id = 'node'
self.plugin_name = 'version_aware'
self.base_name = 'host_agent'
self.base_blueprint_id = 'b{0}'.format(uuid.uuid4())
self.mod_name = 'host_agent_mod'
self.mod_blueprint_id = 'b{0}'.format(uuid.uuid4())
self._upload_blueprints_and_deploy_base()
execute_host_op()
self._assert_host_values(self.versions[0])
self._perform_update()
execute_host_op()
self._assert_host_values(self.versions[1])
@setup_for_sourced_plugins
def test_sourced_plugin_updates(self):
self._upload_blueprints_and_deploy_base()
self._execute_workflows()
self._assert_on_values(self.versions[0])
self._perform_update()
self._execute_workflows()
self._assert_on_values(self.versions[1])
@setup_for_plugins_update
def test_single_deployment_is_updated(self):
self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
self.setup_node_id = 'node'
self.plugin_name = 'version_aware'
self.deploy_application(
dsl_path=self._get_dsl_blueprint_path(''),
blueprint_id=self.base_blueprint_id,
deployment_id=self.setup_deployment_id
)
self._upload_v_2_plugin()
self._execute_workflows()
self._assert_host_values(self.versions[0])
plugins_update = self._perform_plugins_update()
self.assertEqual(plugins_update.state, STATES.SUCCESSFUL)
self._execute_workflows()
self._assert_host_values(self.versions[1])
plugins_update = self._perform_plugins_update()
self.assertEqual(plugins_update.state, STATES.NO_CHANGES_REQUIRED)
self._execute_workflows()
self._assert_host_values(self.versions[1])
@setup_for_plugins_update
def test_many_deployments_are_updated(self):
self.setup_deployment_ids = ['d{0}'.format(uuid.uuid4())
for _ in range(5)]
self.setup_node_id = 'node'
self.plugin_name = 'version_aware'
self.client.blueprints.upload(
path=self._get_dsl_blueprint_path(''),
entity_id=self.base_blueprint_id)
wait_for_blueprint_upload(self.base_blueprint_id, self.client)
blueprint = self.client.blueprints.get(self.base_blueprint_id)
for dep_id in self.setup_deployment_ids:
self.client.deployments.create(blueprint.id, dep_id)
wait_for_deployment_creation_to_complete(
self.env.container_id,
dep_id,
self.client
)
self.execute_workflow('install', dep_id)
self._upload_v_2_plugin()
for dep_id in self.setup_deployment_ids:
self.setup_deployment_id = dep_id
self._execute_workflows()
self._assert_host_values(self.versions[0])
plugins_update = self._perform_plugins_update()
self.assertEqual(plugins_update.state, STATES.SUCCESSFUL)
for dep_id in self.setup_deployment_ids:
self.setup_deployment_id = dep_id
self._execute_workflows()
self._assert_host_values(self.versions[1])
@setup_for_plugins_update
def test_additional_constraints(self):
self.setup_deployment_id = 'd{0}'.format(uuid.uuid4())
self.setup_node_id = 'node'
self.plugin_name = 'version_aware'
self.deploy_application(
dsl_path=self._get_dsl_blueprint_path(''),
blueprint_id=self.base_blueprint_id,
deployment_id=self.setup_deployment_id
)
self._upload_v_2_plugin()
self._execute_workflows()
self._assert_host_values(self.versions[0])
plugins_update = self._perform_plugins_update(plugin_names=['asd'])
self.assertEqual(plugins_update.state, STATES.NO_CHANGES_REQUIRED)
self._execute_workflows()
self._assert_host_values(self.versions[0])
plugins_update = self._perform_plugins_update(all_to_minor=True,
all_to_latest=False)
self.assertEqual(plugins_update.state, STATES.NO_CHANGES_REQUIRED)
self._execute_workflows()
self._assert_host_values(self.versions[0])
plugins_update = self._perform_plugins_update(
to_latest=[self.plugin_name])
self.assertEqual(plugins_update.state, STATES.SUCCESSFUL)
self._execute_workflows()
self._assert_host_values(self.versions[1])
def _perform_plugins_update(self, **kwargs):
plugins_update = self.client.plugins_update.update_plugins(
self.base_blueprint_id, **kwargs)
execution_id = plugins_update.execution_id
execution = self.client.executions.get(execution_id)
self.wait_for_execution_to_end(execution)
return self.client.plugins_update.get(plugins_update.id)
def _prepare_files(self):
source_dir = self._get_source_dir_for_plugin()
self._copy_plugin_files_to_resources(source_dir, self.versions[0])
target_dir = self._copy_plugin_files_to_resources(source_dir,
self.versions[1])
self._replace_version(target_dir, *self.versions)
def _copy_plugin_files_to_resources(self, source_dir, version):
target_dir = os.path.join(self.dsl_resources_path,
'plugins',
self.plugin_dir_name_prefix + version)
if not os.path.exists(target_dir):
shutil.copytree(source_dir, target_dir)
return target_dir
def _remove_files(self):
for version in self.versions:
dir_to_rm = os.path.join(
self.dsl_resources_path,
'plugins',
self.plugin_dir_name_prefix + version)
shutil.rmtree(dir_to_rm, ignore_errors=True)
def _perform_update(self, update_plugins=True):
execution_id = \
self.client.deployment_updates.update_with_existing_blueprint(
deployment_id=self.setup_deployment_id,
blueprint_id=self.mod_blueprint_id,
update_plugins=update_plugins
).execution_id
execution = self.client.executions.get(execution_id)
self.wait_for_execution_to_end(execution)
def _get_work_dir_for_plugin(self):
return str(self.workdir / self.plugin_name)
def _get_source_dir_for_plugin(self):
plugins_dir = os.path.dirname(integration_tests_plugins.__file__)
return os.path.join(plugins_dir, self.plugin_name)
def _copy_plugin_files_to_work_dir(self):
source_dir = self._get_source_dir_for_plugin()
target_dir = self._get_work_dir_for_plugin()
if not os.path.exists(target_dir):
shutil.copytree(source_dir, target_dir)
return target_dir
def _upload_v_2_plugin(self):
plugin_name = 'version_aware_v2'
plugins_dir = os.path.dirname(integration_tests_plugins.__file__)
plugin_source = os.path.join(plugins_dir, plugin_name)
wagon_paths = self._get_or_create_wagon(plugin_source)
for wagon_path in wagon_paths:
yaml_path = os.path.join(plugin_source, 'plugin.yaml')
with zip_files([wagon_path, yaml_path]) as zip_path:
self.client.plugins.upload(zip_path, visibility='global')
self.logger.info(
'Finished uploading {0}...'.format(plugin_name))
@staticmethod
def _replace_version(target_dir, v1, v2):
for dname, dirs, files in os.walk(target_dir):
for fname in files:
fpath = os.path.join(dname, fname)
with open(fpath) as f:
s = f.read()
s = s.replace(v1, v2)
with open(fpath, 'w') as f:
f.write(s)
def _assert_cda_values(self, version):
cda_data = self.get_runtime_property(self.setup_deployment_id,
'cda_op')
self.assertEqual(len(cda_data), 1)
self.assertEqual(cda_data[0], version)
def _assert_host_values(self, version):
host_data = self.get_runtime_property(self.setup_deployment_id,
'host_op')
self.assertEqual(len(host_data), 1)
self.assertEqual(host_data[0], version)
    def _assert_on_values(self, version):
        """Assert runtime properties and plugin installation states all agree
        that *version* of the version-aware plugin is active: the matching
        plugin is fully installed and every other version is not."""
        self._assert_cda_values(version)
        self._assert_host_values(version)
        plugins = self.client.plugins.list(package_name='version_aware')
        # Nothing else to check when no managed plugin versions exist.
        if not plugins:
            return
        target_plugin = next(p for p in plugins
                             if p.package_version == version)
        other_plugins = [p for p in plugins if p.package_version != version]
        assert all(pstate['state'] == 'installed'
                   for pstate in target_plugin.installation_state)
        for other_plugin in other_plugins:
            assert all(pstate['state'] != 'installed'
                       for pstate in other_plugin.installation_state)
    def _upload_blueprints_and_deploy_base(self):
        """Deploy the base blueprint and upload the modified one, waiting for
        the modified blueprint's upload to complete."""
        self.deploy_application(
            dsl_path=self._get_dsl_blueprint_path(self.base_name),
            blueprint_id=self.base_blueprint_id,
            deployment_id=self.setup_deployment_id
        )
        self.client.blueprints.upload(
            path=self._get_dsl_blueprint_path(self.mod_name),
            entity_id=self.mod_blueprint_id
        )
        wait_for_blueprint_upload(self.mod_blueprint_id, self.client)
def _get_dsl_blueprint_path(self, name):
plugin_path = '{0}{1}.yaml'.format(self.blueprint_name_prefix, name)
return os.path.join(self.dsl_resources_path, plugin_path)
def _execute_workflows(self):
for wf in ('test_cda_wf', 'test_cda_op', 'test_host_op'):
self.execute_workflow(wf, self.setup_deployment_id)
def _clear_managed_plugins(self):
plugins = self.client.plugins.list()
plugin_name = getattr(self, 'plugin_name', None)
if not plugin_name:
return
for p in plugins:
if plugin_name in p.package_name:
self.client.plugins.delete(p.id, force=True)
self._wait_for_execution_by_wf_name('uninstall_plugin')
| true | true |
1c323dd5c55b25237b61591ef9e174f70d4ac376 | 717 | py | Python | tests/project/ethpm/test_release_package.py | ActorForth/brownie | ef0d5af3bb48edcd11abf985626fc99dbc577c7d | [
"MIT"
] | 1,595 | 2020-06-01T19:41:53.000Z | 2022-03-31T16:09:54.000Z | tests/project/ethpm/test_release_package.py | ActorForth/brownie | ef0d5af3bb48edcd11abf985626fc99dbc577c7d | [
"MIT"
] | 532 | 2020-05-30T12:06:17.000Z | 2022-03-31T22:33:41.000Z | tests/project/ethpm/test_release_package.py | ActorForth/brownie | ef0d5af3bb48edcd11abf985626fc99dbc577c7d | [
"MIT"
] | 303 | 2020-06-17T00:38:34.000Z | 2022-03-31T10:59:48.000Z | #!/usr/bin/python3
from brownie.project import ethpm
# Manifest configuration shared by the release test. NOTE: the nested
# "settings" dict is mutable module state — tests must deep-copy before
# mutating it.
ETHPM_CONFIG = {
    "package_name": "testpackage",
    "version": "1.0.0",
    "settings": {"deployment_networks": False, "include_dependencies": False},
}
def test_release_package(dep_project, accounts):
    """Release a package to an on-chain registry and verify the stored URI.

    ``dict.copy`` is shallow, so mutating the nested ``settings`` mapping of a
    plain copy would leak into the module-level ETHPM_CONFIG; copy the nested
    dict explicitly instead.
    """
    registry = dep_project.PackageRegistry.deploy({"from": accounts[0]})
    package_config = dict(ETHPM_CONFIG)
    package_config["settings"] = dict(package_config["settings"])
    package_config["settings"]["include_dependencies"] = False
    manifest, uri = ethpm.create_manifest(dep_project._path, package_config, True)
    ethpm.release_package(registry.address, accounts[0], "testpackage", "1.0.0", uri)
    id_ = registry.getReleaseId("testpackage", "1.0.0")
    assert registry.getReleaseData(id_)[-1] == uri
| 34.142857 | 85 | 0.716876 |
from brownie.project import ethpm
# Manifest configuration shared by the release test. NOTE: the nested
# "settings" dict is mutable module state — tests must deep-copy before
# mutating it.
ETHPM_CONFIG = {
    "package_name": "testpackage",
    "version": "1.0.0",
    "settings": {"deployment_networks": False, "include_dependencies": False},
}
def test_release_package(dep_project, accounts):
    """Release a package to an on-chain registry and verify the stored URI.

    ``dict.copy`` is shallow, so mutating the nested ``settings`` mapping of a
    plain copy would leak into the module-level ETHPM_CONFIG; copy the nested
    dict explicitly instead.
    """
    registry = dep_project.PackageRegistry.deploy({"from": accounts[0]})
    package_config = dict(ETHPM_CONFIG)
    package_config["settings"] = dict(package_config["settings"])
    package_config["settings"]["include_dependencies"] = False
    manifest, uri = ethpm.create_manifest(dep_project._path, package_config, True)
    ethpm.release_package(registry.address, accounts[0], "testpackage", "1.0.0", uri)
    id_ = registry.getReleaseId("testpackage", "1.0.0")
    assert registry.getReleaseData(id_)[-1] == uri
| true | true |
1c32403f57f0bfce2a895a27800a35689f7f3a13 | 13,264 | py | Python | client/buck.py | HybridDeveloper/pyre-check | 48d1f8bbf19003417e64950561c786e28af1251a | [
"MIT"
] | null | null | null | client/buck.py | HybridDeveloper/pyre-check | 48d1f8bbf19003417e64950561c786e28af1251a | [
"MIT"
] | null | null | null | client/buck.py | HybridDeveloper/pyre-check | 48d1f8bbf19003417e64950561c786e28af1251a | [
"MIT"
] | null | null | null | # Copyright 2004-present Facebook. All rights reserved.
import functools
import glob
import json
import logging
import os
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from json.decoder import JSONDecodeError
from logging import Logger
from typing import Dict, Iterable, List, NamedTuple, Optional, Set, Tuple
from .filesystem import find_root
# Module-level logger for all buck interactions.
LOG: Logger = logging.getLogger(__name__)
class BuckOut(NamedTuple):
    """Result of scanning buck output: the link-tree directories found on
    disk, plus the targets for which no link tree could be located."""
    source_directories: Set[str]
    targets_not_found: Set[str]
class BuckException(Exception):
    """Raised when an interaction with buck fails (normalize, build, query)."""
    pass
class BuckBuilder:
    """Abstract interface for building buck targets into output directories."""
    def build(self, targets: Iterable[str]) -> Iterable[str]:
        """
        Build the given targets, and return a list of output directories
        containing the target output.
        """
        raise NotImplementedError
class FastBuckBuilder(BuckBuilder):
    """Builds targets by shelling out to an external "fast buck builder"
    binary that writes all target output into one shared directory."""
    def __init__(
        self,
        buck_root: str,
        output_directory: Optional[str] = None,
        buck_builder_binary: Optional[str] = None,
        debug_mode: bool = False,
        buck_mode: Optional[str] = None,
        project_name: Optional[str] = None,
    ) -> None:
        """Store the builder configuration; a temporary output directory is
        created when none is supplied."""
        self._buck_root = buck_root
        self._output_directory: str = output_directory or tempfile.mkdtemp(
            prefix="pyre_tmp_"
        )
        self._buck_builder_binary = buck_builder_binary
        self._debug_mode = debug_mode
        self._buck_mode = buck_mode
        self._project_name = project_name
        # Populated from the builder's debug JSON output when debug_mode is on.
        self.conflicting_files: List[str] = []
        self.unsupported_files: List[str] = []
    def _get_builder_executable(self) -> str:
        """Return the configured builder binary, raising if it was not set."""
        builder_binary = self._buck_builder_binary
        if builder_binary is None:
            raise BuckException(
                "--buck-builder-binary must be provided "
                "if fast buck builder is used."
            )
        return builder_binary
    def build(self, targets: Iterable[str]) -> List[str]:
        """Run the builder binary over *targets*, re-logging its stderr, and
        return the single output directory. Raises BuckException on a
        non-zero exit code."""
        command = [
            self._get_builder_executable(),
            "-J-Djava.net.preferIPv6Addresses=true",
            "-J-Djava.net.preferIPv6Stack=true",
            "--buck_root",
            self._buck_root,
            "--output_directory",
            self._output_directory,
        ] + list(targets)
        if self._debug_mode:
            command.append("--debug")
        buck_mode = self._buck_mode
        if buck_mode:
            command.extend(["--mode", buck_mode])
        project_name = self._project_name
        if project_name:
            command.extend(["--project_name", project_name])
        LOG.info("Building buck targets...")
        LOG.debug("Buck builder command: `{}`".format(" ".join(command)))
        with subprocess.Popen(
            command,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        ) as buck_builder_process:
            # Java's logging conflicts with Python's logging, we capture the
            # logs and re-log them with python's logger.
            log_processor = threading.Thread(
                target=self._read_stderr,
                args=(buck_builder_process.stderr, logging.DEBUG),
            )
            log_processor.daemon = True
            log_processor.start()
            return_code = buck_builder_process.wait()
            # Wait until all stderr have been printed.
            log_processor.join()
            if return_code == 0:
                LOG.info("Finished building targets.")
                if self._debug_mode:
                    # pyre-fixme[6]: Expected `_Reader` for 1st param but got
                    # `Optional[typing.IO[typing.Any]]`.
                    debug_output = json.load(buck_builder_process.stdout)
                    self.conflicting_files += debug_output["conflictingFiles"]
                    self.unsupported_files += debug_output["unsupportedFiles"]
                return [self._output_directory]
            else:
                raise BuckException(
                    f"Failed to build targets with:\n`{' '.join(command)}`"
                )
    def _read_stderr(
        self, stream: Iterable[str], default_logging_section: int = logging.ERROR
    ) -> None:
        """Re-emit the builder's stderr through Python's logger, mapping the
        builder's level prefixes onto the corresponding logging levels."""
        for line in stream:
            line = line.rstrip()
            if line.startswith("INFO: "):
                LOG.info(line[6:])
            elif line.startswith("WARNING: "):
                LOG.warning(line[9:])
            elif line.startswith("ERROR: "):
                LOG.error(line[7:])
            elif line.startswith("[WARNING:"):
                # Filter away thrift warnings.
                pass
            else:
                LOG.log(default_logging_section, line)
class SimpleBuckBuilder(BuckBuilder):
    """Builder that delegates to plain ``buck build`` and collects the
    resulting per-target link trees."""
    def build(self, targets: Iterable[str]) -> Iterable[str]:
        """
        Shell out to buck to build the targets, then yield the paths to the
        link trees.
        """
        return generate_source_directories(targets)
def presumed_target_root(target: str) -> str:
    """Return the directory a buck target presumably lives in: everything
    after any ``//`` cell separator, with ``/...`` globs and the ``:name``
    suffix stripped."""
    _, separator, remainder = target.partition("//")
    if separator:
        target = remainder
    target = target.replace("/...", "")
    return target.split(":")[0]
# Expects the targets to be already normalized.
def _find_built_source_directories(
    targets_to_destinations: Iterable[Tuple[str, str]]
) -> BuckOut:
    """Glob buck-out for the ``#link-tree`` directories belonging to the
    given (target, output-path) pairs, skipping the debugger/interp/ipython
    flavors.

    Returns the discovered directories plus the targets with no link tree.
    Raises when no ``.buckconfig`` ancestor (buck root) exists.
    """
    targets_not_found = []
    source_directories = []
    buck_root = find_buck_root(os.getcwd())
    if buck_root is None:
        raise Exception("No .buckconfig found in ancestors of the current directory.")
    directories = set()
    for target, destination in targets_to_destinations:
        directories.add((target, os.path.dirname(destination)))
    for target, directory in directories:
        target_name = target.split(":")[1]
        discovered_source_directories = glob.glob(
            os.path.join(buck_root, directory, "{}#*link-tree".format(target_name))
        )
        if len(discovered_source_directories) == 0:
            targets_not_found.append(target)
        source_directories.extend(
            [
                tree
                for tree in discovered_source_directories
                if not tree.endswith(
                    (
                        "-vs_debugger#link-tree",
                        "-interp#link-tree",
                        "-ipython#link-tree",
                    )
                )
            ]
        )
    return BuckOut(set(source_directories), set(targets_not_found))
def _normalize(targets: List[str]) -> List[Tuple[str, str]]:
    """Resolve the given target patterns into concrete (target, output-path)
    pairs via ``buck targets --show-output``, keeping only ``python_binary``
    and ``python_test`` targets.

    Raises BuckException when buck times out or exits non-zero.
    """
    LOG.info(
        "Normalizing target%s `%s`",
        "s:" if len(targets) > 1 else "",
        "`, `".join(targets),
    )
    # Build the command outside the `try` so the timeout handler below can
    # reference it unconditionally (previously it was only bound inside).
    command = (
        ["buck", "targets", "--show-output"]
        + targets
        + ["--type", "python_binary", "python_test"]
    )
    try:
        targets_to_destinations: List[str] = (
            subprocess.check_output(command, stderr=subprocess.PIPE, timeout=600)
            .decode()
            .strip()
            .split("\n")
        )
        targets_to_destinations = list(filter(bool, targets_to_destinations))
        # Each output line is of the form `//target /path/to/corresponding.par`.
        result = []
        for line in targets_to_destinations:
            pair = line.split(" ")
            if len(pair) == 2:
                result.append((pair[0], pair[1]))
        if not result:
            LOG.warning(
                "Provided targets do not contain any binary or unittest targets."
            )
            return []
        LOG.info(
            "Found %d buck target%s.", len(result), "s" if len(result) > 1 else ""
        )
        return result
    except subprocess.TimeoutExpired as error:
        LOG.error("Buck output so far: %s", error.stderr.decode().strip())
        raise BuckException(
            "Seems like `{}` is hanging.\n "
            "Try running `buck clean` before trying again.".format(
                " ".join(command[:-1])
            )
        )
    except subprocess.CalledProcessError as error:
        LOG.error("Buck returned error: %s" % error.stderr.decode().strip())
        raise BuckException(
            "Could not normalize targets. Check the paths or run `buck clean`."
        )
def _build_targets(targets: List[str], original_targets: List[str]) -> None:
    """Invoke ``buck build`` on the normalized *targets*; *original_targets*
    is only used for log messages. Raises BuckException on build failure."""
    LOG.info(
        "Building target%s `%s`",
        "s:" if len(original_targets) > 1 else "",
        "`, `".join(original_targets),
    )
    command = ["buck", "build"] + targets
    try:
        subprocess.check_output(command, stderr=subprocess.PIPE)
        LOG.warning("Finished building targets.")
    except subprocess.CalledProcessError as error:
        # The output can be overwhelming, hence print only the last 20 lines.
        lines = error.stderr.decode().splitlines()
        LOG.error("Buck returned error: %s" % "\n".join(lines[-20:]))
        raise BuckException(
            "Could not build targets. Check the paths or run `buck clean`."
        )
def _map_normalized_targets_to_original(
unbuilt_targets: Iterable[str], original_targets: Iterable[str]
) -> List[str]:
mapped_targets = set()
for target in unbuilt_targets:
# Each original target is either a `/...` glob or a proper target.
# If it's a glob, we're looking for the glob to be a prefix of the unbuilt
# target. Otherwise, we care about exact matches.
name = None
for original in original_targets:
if original.endswith("/..."):
if target.startswith(original[:-4]):
name = original
else:
if target == original:
name = original
# No original target matched, fallback to normalized.
if name is None:
name = target
mapped_targets.add(name)
return list(mapped_targets)
@functools.lru_cache()
def find_buck_root(path: str) -> Optional[str]:
    """Return the nearest ancestor of *path* that contains a ``.buckconfig``
    (i.e. the buck project root), or ``None``. Results are cached."""
    return find_root(path, ".buckconfig")
def query_buck_relative_paths(
    project_paths: Iterable[str], targets: Iterable[str]
) -> Dict[str, str]:
    """Return a mapping from each absolute project path to its relative location
    in the buck output directory.
    This queries buck and only returns paths that are covered by `targets`."""
    buck_root = find_buck_root(os.getcwd())
    if buck_root is None:
        LOG.error(
            "Buck root couldn't be found. Returning empty analysis directory mapping."
        )
        return {}
    target_string = " ".join(targets)
    command = [
        "buck",
        "query",
        "--json",
        "--output-attribute",
        ".*",
        # This will get only those owner targets that are beneath our targets or
        # the dependencies of our targets.
        f"owner(%s) ^ deps(set({target_string}))",
        *project_paths,
    ]
    LOG.info(f"Running command: {command}")
    try:
        owner_output = json.loads(
            subprocess.check_output(command, timeout=30, stderr=subprocess.DEVNULL)
            .decode()
            .strip()
        )
    except (
        subprocess.TimeoutExpired,
        subprocess.CalledProcessError,
        JSONDecodeError,
    ) as error:
        raise BuckException("Querying buck for relative paths failed: {}".format(error))
    results = {}
    for project_path in project_paths:
        for target_data in owner_output.values():
            # A path belongs to a target when it sits under the target's base
            # path and is listed in the target's `srcs` mapping.
            prefix = os.path.join(buck_root, target_data["buck.base_path"]) + os.sep
            suffix = project_path[len(prefix) :]
            if not project_path.startswith(prefix) or suffix not in target_data["srcs"]:
                continue
            if "buck.base_module" in target_data:
                base_path = os.path.join(*target_data["buck.base_module"].split("."))
            elif "base_module" in target_data:
                base_path = os.path.join(*target_data["base_module"].split("."))
            else:
                base_path = target_data["buck.base_path"]
            results[project_path] = os.path.join(base_path, target_data["srcs"][suffix])
            # Break after the first one because there might be multiple matches.
            break
    return results
def generate_source_directories(original_targets: Iterable[str]) -> Set[str]:
    """Normalize, build and locate the link trees for *original_targets*.

    Raises BuckException if any target's link tree cannot be found; the error
    message refers to the user-supplied target names.
    """
    original_targets = list(original_targets)
    targets_to_destinations = _normalize(original_targets)
    targets = [pair[0] for pair in targets_to_destinations]
    _build_targets(targets, original_targets)
    buck_out = _find_built_source_directories(targets_to_destinations)
    source_directories = buck_out.source_directories
    if buck_out.targets_not_found:
        message_targets = _map_normalized_targets_to_original(
            buck_out.targets_not_found, original_targets
        )
        raise BuckException(
            "Could not find link trees for:\n `{}`.\n "
            "See `{} --help` for more information.".format(
                " \n".join(message_targets), sys.argv[0]
            )
        )
    return source_directories
| 35.370667 | 88 | 0.592355 |
import functools
import glob
import json
import logging
import os
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from json.decoder import JSONDecodeError
from logging import Logger
from typing import Dict, Iterable, List, NamedTuple, Optional, Set, Tuple
from .filesystem import find_root
# Module-level logger for all buck interactions.
LOG: Logger = logging.getLogger(__name__)
class BuckOut(NamedTuple):
    """Result of scanning buck output: the link-tree directories found on
    disk, plus the targets for which no link tree could be located."""
    source_directories: Set[str]
    targets_not_found: Set[str]
class BuckException(Exception):
    """Raised when an interaction with buck fails (normalize, build, query)."""
    pass
class BuckBuilder:
    """Abstract interface for building buck targets into output directories."""
    def build(self, targets: Iterable[str]) -> Iterable[str]:
        """
        Build the given targets, and return a list of output directories
        containing the target output.
        """
        raise NotImplementedError
class FastBuckBuilder(BuckBuilder):
    """Builds targets by shelling out to an external "fast buck builder"
    binary that writes all target output into one shared directory."""
    def __init__(
        self,
        buck_root: str,
        output_directory: Optional[str] = None,
        buck_builder_binary: Optional[str] = None,
        debug_mode: bool = False,
        buck_mode: Optional[str] = None,
        project_name: Optional[str] = None,
    ) -> None:
        """Store the builder configuration; a temporary output directory is
        created when none is supplied."""
        self._buck_root = buck_root
        self._output_directory: str = output_directory or tempfile.mkdtemp(
            prefix="pyre_tmp_"
        )
        self._buck_builder_binary = buck_builder_binary
        self._debug_mode = debug_mode
        self._buck_mode = buck_mode
        self._project_name = project_name
        # Populated from the builder's debug JSON output when debug_mode is on.
        self.conflicting_files: List[str] = []
        self.unsupported_files: List[str] = []
    def _get_builder_executable(self) -> str:
        """Return the configured builder binary, raising if it was not set."""
        builder_binary = self._buck_builder_binary
        if builder_binary is None:
            raise BuckException(
                "--buck-builder-binary must be provided "
                "if fast buck builder is used."
            )
        return builder_binary
    def build(self, targets: Iterable[str]) -> List[str]:
        """Run the builder binary over *targets*, re-logging its stderr, and
        return the single output directory. Raises BuckException on a
        non-zero exit code."""
        command = [
            self._get_builder_executable(),
            "-J-Djava.net.preferIPv6Addresses=true",
            "-J-Djava.net.preferIPv6Stack=true",
            "--buck_root",
            self._buck_root,
            "--output_directory",
            self._output_directory,
        ] + list(targets)
        if self._debug_mode:
            command.append("--debug")
        buck_mode = self._buck_mode
        if buck_mode:
            command.extend(["--mode", buck_mode])
        project_name = self._project_name
        if project_name:
            command.extend(["--project_name", project_name])
        LOG.info("Building buck targets...")
        LOG.debug("Buck builder command: `{}`".format(" ".join(command)))
        with subprocess.Popen(
            command,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        ) as buck_builder_process:
            # Java's logging conflicts with Python's logging, we capture the
            # logs and re-log them with python's logger.
            log_processor = threading.Thread(
                target=self._read_stderr,
                args=(buck_builder_process.stderr, logging.DEBUG),
            )
            log_processor.daemon = True
            log_processor.start()
            return_code = buck_builder_process.wait()
            # Wait until all stderr have been printed.
            log_processor.join()
            if return_code == 0:
                LOG.info("Finished building targets.")
                if self._debug_mode:
                    # pyre-fixme[6]: Expected `_Reader` for 1st param but got
                    # `Optional[typing.IO[typing.Any]]`.
                    debug_output = json.load(buck_builder_process.stdout)
                    self.conflicting_files += debug_output["conflictingFiles"]
                    self.unsupported_files += debug_output["unsupportedFiles"]
                return [self._output_directory]
            else:
                raise BuckException(
                    f"Failed to build targets with:\n`{' '.join(command)}`"
                )
    def _read_stderr(
        self, stream: Iterable[str], default_logging_section: int = logging.ERROR
    ) -> None:
        """Re-emit the builder's stderr through Python's logger, mapping the
        builder's level prefixes onto the corresponding logging levels."""
        for line in stream:
            line = line.rstrip()
            if line.startswith("INFO: "):
                LOG.info(line[6:])
            elif line.startswith("WARNING: "):
                LOG.warning(line[9:])
            elif line.startswith("ERROR: "):
                LOG.error(line[7:])
            elif line.startswith("[WARNING:"):
                # Filter away thrift warnings.
                pass
            else:
                LOG.log(default_logging_section, line)
class SimpleBuckBuilder(BuckBuilder):
    """Builder that delegates to plain ``buck build`` and collects the
    resulting per-target link trees."""
    def build(self, targets: Iterable[str]) -> Iterable[str]:
        """
        Shell out to buck to build the targets, then yield the paths to the
        link trees.
        """
        return generate_source_directories(targets)
def presumed_target_root(target: str) -> str:
    """Return the directory a buck target presumably lives in: everything
    after any ``//`` cell separator, with ``/...`` globs and the ``:name``
    suffix stripped."""
    _, separator, remainder = target.partition("//")
    if separator:
        target = remainder
    target = target.replace("/...", "")
    return target.split(":")[0]
# Expects the targets to be already normalized.
def _find_built_source_directories(
    targets_to_destinations: Iterable[Tuple[str, str]]
) -> BuckOut:
    """Glob buck-out for the ``#link-tree`` directories belonging to the
    given (target, output-path) pairs, skipping the debugger/interp/ipython
    flavors.

    Returns the discovered directories plus the targets with no link tree.
    Raises when no ``.buckconfig`` ancestor (buck root) exists.
    """
    targets_not_found = []
    source_directories = []
    buck_root = find_buck_root(os.getcwd())
    if buck_root is None:
        raise Exception("No .buckconfig found in ancestors of the current directory.")
    directories = set()
    for target, destination in targets_to_destinations:
        directories.add((target, os.path.dirname(destination)))
    for target, directory in directories:
        target_name = target.split(":")[1]
        discovered_source_directories = glob.glob(
            os.path.join(buck_root, directory, "{}#*link-tree".format(target_name))
        )
        if len(discovered_source_directories) == 0:
            targets_not_found.append(target)
        source_directories.extend(
            [
                tree
                for tree in discovered_source_directories
                if not tree.endswith(
                    (
                        "-vs_debugger#link-tree",
                        "-interp#link-tree",
                        "-ipython#link-tree",
                    )
                )
            ]
        )
    return BuckOut(set(source_directories), set(targets_not_found))
def _normalize(targets: List[str]) -> List[Tuple[str, str]]:
    """Resolve the given target patterns into concrete (target, output-path)
    pairs via ``buck targets --show-output``, keeping only ``python_binary``
    and ``python_test`` targets.

    Raises BuckException when buck times out or exits non-zero.
    """
    LOG.info(
        "Normalizing target%s `%s`",
        "s:" if len(targets) > 1 else "",
        "`, `".join(targets),
    )
    # Build the command outside the `try` so the timeout handler below can
    # reference it unconditionally (previously it was only bound inside).
    command = (
        ["buck", "targets", "--show-output"]
        + targets
        + ["--type", "python_binary", "python_test"]
    )
    try:
        targets_to_destinations: List[str] = (
            subprocess.check_output(command, stderr=subprocess.PIPE, timeout=600)
            .decode()
            .strip()
            .split("\n")
        )
        targets_to_destinations = list(filter(bool, targets_to_destinations))
        # Each output line is of the form `//target /path/to/corresponding.par`.
        result = []
        for line in targets_to_destinations:
            pair = line.split(" ")
            if len(pair) == 2:
                result.append((pair[0], pair[1]))
        if not result:
            LOG.warning(
                "Provided targets do not contain any binary or unittest targets."
            )
            return []
        LOG.info(
            "Found %d buck target%s.", len(result), "s" if len(result) > 1 else ""
        )
        return result
    except subprocess.TimeoutExpired as error:
        LOG.error("Buck output so far: %s", error.stderr.decode().strip())
        raise BuckException(
            "Seems like `{}` is hanging.\n "
            "Try running `buck clean` before trying again.".format(
                " ".join(command[:-1])
            )
        )
    except subprocess.CalledProcessError as error:
        LOG.error("Buck returned error: %s" % error.stderr.decode().strip())
        raise BuckException(
            "Could not normalize targets. Check the paths or run `buck clean`."
        )
def _build_targets(targets: List[str], original_targets: List[str]) -> None:
    """Invoke ``buck build`` on the normalized *targets*; *original_targets*
    is only used for log messages. Raises BuckException on build failure."""
    LOG.info(
        "Building target%s `%s`",
        "s:" if len(original_targets) > 1 else "",
        "`, `".join(original_targets),
    )
    command = ["buck", "build"] + targets
    try:
        subprocess.check_output(command, stderr=subprocess.PIPE)
        LOG.warning("Finished building targets.")
    except subprocess.CalledProcessError as error:
        # The output can be overwhelming, hence print only the last 20 lines.
        lines = error.stderr.decode().splitlines()
        LOG.error("Buck returned error: %s" % "\n".join(lines[-20:]))
        raise BuckException(
            "Could not build targets. Check the paths or run `buck clean`."
        )
def _map_normalized_targets_to_original(
unbuilt_targets: Iterable[str], original_targets: Iterable[str]
) -> List[str]:
mapped_targets = set()
for target in unbuilt_targets:
# Each original target is either a `/...` glob or a proper target.
# If it's a glob, we're looking for the glob to be a prefix of the unbuilt
# target. Otherwise, we care about exact matches.
name = None
for original in original_targets:
if original.endswith("/..."):
if target.startswith(original[:-4]):
name = original
else:
if target == original:
name = original
# No original target matched, fallback to normalized.
if name is None:
name = target
mapped_targets.add(name)
return list(mapped_targets)
@functools.lru_cache()
def find_buck_root(path: str) -> Optional[str]:
    """Return the nearest ancestor of *path* that contains a ``.buckconfig``
    (i.e. the buck project root), or ``None``. Results are cached."""
    return find_root(path, ".buckconfig")
def query_buck_relative_paths(
    project_paths: Iterable[str], targets: Iterable[str]
) -> Dict[str, str]:
    """Return a mapping from each absolute project path to its relative location
    in the buck output directory.
    This queries buck and only returns paths that are covered by `targets`."""
    buck_root = find_buck_root(os.getcwd())
    if buck_root is None:
        LOG.error(
            "Buck root couldn't be found. Returning empty analysis directory mapping."
        )
        return {}
    target_string = " ".join(targets)
    command = [
        "buck",
        "query",
        "--json",
        "--output-attribute",
        ".*",
        # This will get only those owner targets that are beneath our targets or
        # the dependencies of our targets.
        f"owner(%s) ^ deps(set({target_string}))",
        *project_paths,
    ]
    LOG.info(f"Running command: {command}")
    try:
        owner_output = json.loads(
            subprocess.check_output(command, timeout=30, stderr=subprocess.DEVNULL)
            .decode()
            .strip()
        )
    except (
        subprocess.TimeoutExpired,
        subprocess.CalledProcessError,
        JSONDecodeError,
    ) as error:
        raise BuckException("Querying buck for relative paths failed: {}".format(error))
    results = {}
    for project_path in project_paths:
        for target_data in owner_output.values():
            # A path belongs to a target when it sits under the target's base
            # path and is listed in the target's `srcs` mapping.
            prefix = os.path.join(buck_root, target_data["buck.base_path"]) + os.sep
            suffix = project_path[len(prefix) :]
            if not project_path.startswith(prefix) or suffix not in target_data["srcs"]:
                continue
            if "buck.base_module" in target_data:
                base_path = os.path.join(*target_data["buck.base_module"].split("."))
            elif "base_module" in target_data:
                base_path = os.path.join(*target_data["base_module"].split("."))
            else:
                base_path = target_data["buck.base_path"]
            results[project_path] = os.path.join(base_path, target_data["srcs"][suffix])
            # Break after the first one because there might be multiple matches.
            break
    return results
def generate_source_directories(original_targets: Iterable[str]) -> Set[str]:
    """Normalize, build and locate the link trees for *original_targets*.

    Raises BuckException if any target's link tree cannot be found; the error
    message refers to the user-supplied target names.
    """
    original_targets = list(original_targets)
    targets_to_destinations = _normalize(original_targets)
    targets = [pair[0] for pair in targets_to_destinations]
    _build_targets(targets, original_targets)
    buck_out = _find_built_source_directories(targets_to_destinations)
    source_directories = buck_out.source_directories
    if buck_out.targets_not_found:
        message_targets = _map_normalized_targets_to_original(
            buck_out.targets_not_found, original_targets
        )
        raise BuckException(
            "Could not find link trees for:\n `{}`.\n "
            "See `{} --help` for more information.".format(
                " \n".join(message_targets), sys.argv[0]
            )
        )
    return source_directories
| true | true |
1c3240a9ef8ce71b19868e80a1044a5fad11c103 | 15,393 | py | Python | train_tar_visda.py | Albert0147/G-SFDA | a927c67afd71152090d5415142fe8dec9330eeec | [
"MIT"
] | 50 | 2021-07-25T23:58:13.000Z | 2022-03-24T06:01:19.000Z | train_tar_visda.py | davidpengiupui/G-SFDA | 6ded750224266cd4cdb100a7fcedfa95688d22da | [
"MIT"
] | 10 | 2021-08-10T07:14:29.000Z | 2022-02-11T15:51:53.000Z | train_tar_visda.py | davidpengiupui/G-SFDA | 6ded750224266cd4cdb100a7fcedfa95688d22da | [
"MIT"
] | 9 | 2021-08-11T06:48:39.000Z | 2022-03-03T06:14:41.000Z | import argparse
import os, sys
import os.path as osp
import torchvision
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import network, loss
from torch.utils.data import DataLoader
from data_list import ImageList, ImageList_idx
import random, pdb, math, copy
from sklearn.metrics import confusion_matrix
import torch.nn.functional as F
def op_copy(optimizer):
    """Snapshot each param group's current learning rate into ``lr0`` so the
    scheduler can later decay from the initial value. Returns the optimizer."""
    for group in optimizer.param_groups:
        group['lr0'] = group['lr']
    return optimizer
def lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75):
    """Polynomially decay each param group's learning rate from its stored
    ``lr0`` value, also (re)asserting fixed SGD hyper-parameters.

    Returns the optimizer for chaining.
    """
    decay = (1 + gamma * iter_num / max_iter) ** (-power)
    for group in optimizer.param_groups:
        group.update(
            lr=group['lr0'] * decay,
            weight_decay=1e-3,
            momentum=0.9,
            nesterov=True,
        )
    return optimizer
def image_train(resize_size=256, crop_size=224, alexnet=False):
    """Training-time transform: resize, random crop, random horizontal flip,
    tensor conversion, then per-channel normalization (ImageNet statistics)."""
    if not alexnet:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        # NOTE(review): `Normalize` (meanfile variant) is not defined in this
        # module's imports — this branch would raise NameError. Confirm the
        # intended import before using alexnet=True.
        normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
    return transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.RandomCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])
def image_test(resize_size=256, crop_size=224, alexnet=False):
    """Evaluation-time transform: resize, deterministic center crop, tensor
    conversion, then per-channel normalization (ImageNet statistics)."""
    if not alexnet:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        # NOTE(review): `Normalize` (meanfile variant) is not defined in this
        # module's imports — this branch would raise NameError. Confirm the
        # intended import before using alexnet=True.
        normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
    return transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(), normalize
    ])
def data_load(args):
    """Build datasets and loaders for the source train/val splits, the target
    set and the test set.

    The source list is split 90/10 into train/validation. Returns a dict of
    DataLoaders keyed by "source_tr", "source_te", "target" and "test".
    File handles are now closed promptly via context managers (previously the
    ``open(...)`` handles were leaked).
    """
    ## prepare data
    dsets = {}
    dset_loaders = {}
    train_bs = args.batch_size
    with open(args.s_dset_path) as f:
        txt_src = f.readlines()
    with open(args.t_dset_path) as f:
        txt_tar = f.readlines()
    with open(args.test_dset_path) as f:
        txt_test = f.readlines()
    dsize = len(txt_src)
    tr_size = int(0.9 * dsize)
    # 90/10 random split of the source image list.
    tr_txt, te_txt = torch.utils.data.random_split(txt_src, [tr_size, dsize - tr_size])
    dsets["source_tr"] = ImageList(tr_txt, transform=image_train())
    dset_loaders["source_tr"] = DataLoader(dsets["source_tr"],
                                           batch_size=train_bs,
                                           shuffle=True,
                                           num_workers=args.worker,
                                           drop_last=False)
    dsets["source_te"] = ImageList(te_txt, transform=image_test())
    dset_loaders["source_te"] = DataLoader(dsets["source_te"],
                                           batch_size=train_bs,
                                           shuffle=True,
                                           num_workers=args.worker,
                                           drop_last=False)
    dsets["target"] = ImageList_idx(txt_tar, transform=image_train())
    dset_loaders["target"] = DataLoader(dsets["target"],
                                        batch_size=train_bs,
                                        shuffle=True,
                                        num_workers=args.worker,
                                        drop_last=False)
    dsets["test"] = ImageList_idx(txt_test, transform=image_test())
    dset_loaders["test"] = DataLoader(dsets["test"],
                                      batch_size=train_bs * 3,
                                      shuffle=False,
                                      num_workers=args.worker,
                                      drop_last=False)
    return dset_loaders
def cal_acc(loader, netF, netB, netC,t=0, flag=False):
    """Evaluate accuracy over *loader* using the F->B->C network stack.

    :param t: domain/task index forwarded to the bottleneck.
    :param flag: when True, return (mean per-class accuracy, per-class
        accuracy string); otherwise (overall accuracy %, mean entropy).

    Fixed: uses the built-in ``next()`` instead of the Python-2-era
    ``iterator.next()``, which does not exist on Python 3 iterators.
    """
    start_test = True
    with torch.no_grad():
        iter_test = iter(loader)
        for i in range(len(loader)):
            data = next(iter_test)
            inputs = data[0]
            labels = data[1]
            inputs = inputs.cuda()
            outputs = netC(netB(netF(inputs),t=t)[0])
            if start_test:
                all_output = outputs.float().cpu()
                all_label = labels.float()
                start_test = False
            else:
                all_output = torch.cat((all_output, outputs.float().cpu()), 0)
                all_label = torch.cat((all_label, labels.float()), 0)
    _, predict = torch.max(all_output, 1)
    accuracy = torch.sum(
        torch.squeeze(predict).float() == all_label).item() / float(
            all_label.size()[0])
    mean_ent = torch.mean(loss.Entropy(
        nn.Softmax(dim=1)(all_output))).cpu().data.item()
    if flag:
        # Per-class accuracy from the confusion-matrix diagonal.
        matrix = confusion_matrix(all_label, torch.squeeze(predict).float())
        acc = matrix.diagonal() / matrix.sum(axis=1) * 100
        aacc = acc.mean()
        aa = [str(np.round(i, 2)) for i in acc]
        acc = ' '.join(aa)
        return aacc, acc
    else:
        return accuracy * 100, mean_ent
def train_target(args):
    """Source-free domain adaptation on the target domain (G-SFDA style).

    Loads the pretrained source networks, builds feature/score banks over the
    target set, then trains with a neighborhood-consistency loss plus a
    diversity (negative entropy) term, masking gradients so source-reserved
    units are untouched. Periodically evaluates and saves the best model.

    Fixes over the original: Python-2-era ``iterator.next()`` replaced with
    built-in ``next()``; the bare ``except:`` narrowed to ``StopIteration``
    (the only exception expected from iterator exhaustion); the local loss
    variable renamed so it no longer shadows the imported ``loss`` module.
    Returns (netF, netB, netC).
    """
    dset_loaders = data_load(args)
    ## set base network
    netF = network.ResBase(res_name=args.net).cuda()
    netB = network.feat_bootleneck_sdaE(type=args.classifier,
                                        feature_dim=netF.in_features,
                                        bottleneck_dim=args.bottleneck).cuda()
    netC = network.feat_classifier(type=args.layer,
                                   class_num=args.class_num,
                                   bottleneck_dim=args.bottleneck).cuda()
    modelpath = args.output_dir_src + '/source_F.pt'
    netF.load_state_dict(torch.load(modelpath))
    modelpath = args.output_dir_src + '/source_B.pt'
    netB.load_state_dict(torch.load(modelpath))
    modelpath = args.output_dir_src + '/source_C.pt'
    netC.load_state_dict(torch.load(modelpath))
    param_group = []
    # Only batch-norm parameters of the backbone are tuned (at 0.1x lr).
    for k, v in netF.named_parameters():
        if k.find('bn')!=-1:
            param_group += [{'params': v, 'lr': args.lr * 0.1}]
    for k, v in netB.named_parameters():
        #if k.find('em')==-1: # the embedding layer can be either trained or not
        if True:
            param_group += [{'params': v, 'lr': args.lr * 1}]
    for k, v in netC.named_parameters():
        param_group += [{'params': v, 'lr': args.lr * 1}]
    optimizer = optim.SGD(param_group)
    optimizer = op_copy(optimizer)
    #building feature bank and score bank
    loader = dset_loaders["target"]
    num_sample=len(loader.dataset)
    fea_bank=torch.randn(num_sample,256)
    score_bank = torch.randn(num_sample, 12).cuda()
    netF.eval()
    netB.eval()
    netC.eval()
    with torch.no_grad():
        iter_test = iter(loader)
        for i in range(len(loader)):
            data = next(iter_test)
            inputs = data[0]
            indx=data[-1]
            #labels = data[1]
            inputs = inputs.cuda()
            output, _ = netB(netF(inputs), t=1)  # a^t
            output_norm=F.normalize(output)
            outputs = netC(output)
            outputs=nn.Softmax(-1)(outputs)
            fea_bank[indx] = output_norm.detach().clone().cpu()
            score_bank[indx] = outputs.detach().clone()  #.cpu()
    max_iter = args.max_epoch * len(dset_loaders["target"])
    interval_iter = max_iter // args.interval
    iter_num = 0
    netF.train()
    netB.train()
    netC.train()
    acc_log=0
    while iter_num < max_iter:
        try:
            inputs_test, _, tar_idx = next(iter_test)
        except StopIteration:
            iter_test = iter(dset_loaders["target"])
            inputs_test, _, tar_idx = next(iter_test)
        if inputs_test.size(0) == 1:
            continue
        inputs_test = inputs_test.cuda()
        iter_num += 1
        lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter)
        features_test, masks = netB(netF(inputs_test),t=1)
        masks_old = masks
        outputs_test = netC(features_test)
        softmax_out = nn.Softmax(dim=1)(outputs_test)
        output_re = softmax_out.unsqueeze(1)
        with torch.no_grad():
            output_f_norm=F.normalize(features_test)
            fea_bank[tar_idx].fill_(-0.1)  #do not use the current mini-batch in fea_bank
            output_f_=output_f_norm.cpu().detach().clone()
            distance = output_f_@fea_bank.T
            _, idx_near = torch.topk(distance,
                                     dim=-1,
                                     largest=True,
                                     k=10)
            score_near = score_bank[idx_near]  #batch x K x num_class
            score_near=score_near.permute(0,2,1)
            # update banks
            fea_bank[tar_idx] = output_f_.detach().clone().cpu()
            score_bank[tar_idx] = softmax_out.detach().clone()  #.cpu()
        # Neighborhood consistency: encourage agreement with K nearest banks.
        const=torch.log(torch.bmm(output_re,score_near)).sum(-1)
        total_loss=-torch.mean(const)
        # Diversity term: negative entropy of the mean prediction.
        msoftmax = softmax_out.mean(dim=0)
        gentropy_loss = torch.sum(msoftmax *
                                  torch.log(msoftmax + args.epsilon))
        total_loss += gentropy_loss
        optimizer.zero_grad()
        total_loss.backward()
        # Mask gradients of source-reserved units so they are not overwritten.
        for n, p in netB.bottleneck.named_parameters():
            if n.find('bias') == -1:
                mask_ = ((1 - masks_old)).view(-1, 1).expand(256, 2048).cuda()
                p.grad.data *= mask_
            else: #no bias here
                mask_ = ((1 - masks_old)).squeeze().cuda()
                p.grad.data *= mask_
        for n, p in netC.named_parameters():
            if n.find('weight_v') != -1:
                masks__=masks_old.view(1,-1).expand(12,256)
                mask_ = ((1 - masks__)).cuda()
                p.grad.data *= mask_
        for n, p in netB.bn.named_parameters():
            mask_ = ((1 - masks_old)).view(-1).cuda()
            p.grad.data *= mask_
        optimizer.step()
        if iter_num % interval_iter == 0 or iter_num == max_iter:
            netF.eval()
            netB.eval()
            netC.eval()
            if args.dset == 'visda-2017':
                acc_s_te, acc_list = cal_acc(dset_loaders['test'], netF, netB,
                                             netC,t=1,flag= True)
                accS_s_te, accS_list = cal_acc(dset_loaders['source_te'], netF, netB,
                                               netC,t=0,flag= True)
                log_str = 'Task: {}, Iter:{}/{}; Accuracy on target = {:.2f}%, Accuracy on source = {:.2f}%'.format(
                    args.name, iter_num, max_iter, acc_s_te, accS_s_te
                ) + '\n' + 'T: ' + acc_list + '\n' + 'S: ' + accS_list
            args.out_file.write(log_str + '\n')
            args.out_file.flush()
            print(log_str + '\n')
            netF.train()
            netB.train()
            netC.train()
            if args.issave:
                # Persist the best-so-far target-accuracy checkpoint.
                if acc_s_te>acc_log:
                    acc_log=acc_s_te
                    torch.save(
                        netF.state_dict(),
                        osp.join(args.output_dir, "target_F_" + 'final' + ".pt"))
                    torch.save(
                        netB.state_dict(),
                        osp.join(args.output_dir, "target_B_" + 'final' + ".pt"))
                    torch.save(
                        netC.state_dict(),
                        osp.join(args.output_dir, "target_C_" + 'final' + ".pt"))
    return netF, netB, netC
def print_args(args):
s = "==========================================\n"
for arg, content in args.__dict__.items():
s += "{}:{}\n".format(arg, content)
return s
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Ours')
parser.add_argument('--gpu_id',
type=str,
nargs='?',
default='8',
help="device id to run")
parser.add_argument('--s', type=int, default=0, help="source")
parser.add_argument('--t', type=int, default=1, help="target")
parser.add_argument('--max_epoch',
type=int,
default=15,
help="max iterations")
parser.add_argument('--interval', type=int, default=15)
parser.add_argument('--batch_size',
type=int,
default=64,
help="batch_size")
parser.add_argument('--worker',
type=int,
default=4,
help="number of workers")
parser.add_argument(
'--dset',
type=str,
default='visda-2017')
parser.add_argument('--lr', type=float, default=1e-3, help="learning rate")
parser.add_argument('--net',
type=str,
default='resnet101')
parser.add_argument('--seed', type=int, default=2020, help="random seed")
parser.add_argument('--bottleneck', type=int, default=256)
parser.add_argument('--epsilon', type=float, default=1e-5)
parser.add_argument('--layer',
type=str,
default="wn",
choices=["linear", "wn"])
parser.add_argument('--classifier',
type=str,
default="bn",
choices=["ori", "bn"])
parser.add_argument('--output', type=str, default='visda/target/')
parser.add_argument('--output_src', type=str, default='visda/source/')
parser.add_argument('--da',
type=str,
default='uda')
parser.add_argument('--issave', type=bool, default=True)
args = parser.parse_args()
if args.dset == 'office-home':
names = ['Art', 'Clipart', 'Product', 'RealWorld']
args.class_num = 65
if args.dset == 'visda-2017':
names = ['train', 'validation']
args.class_num = 12
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
SEED = args.seed
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
torch.backends.cudnn.deterministic = True
for i in range(len(names)):
if i == args.s:
continue
args.t = i
folder = './data/'
args.s_dset_path = folder + args.dset + '/' + names[
args.s] + '_list.txt'
args.t_dset_path = folder + args.dset + '/' + names[
args.t] + '_list.txt'
args.test_dset_path = folder + args.dset + '/' + names[
args.t] + '_list.txt'
args.output_dir_src = osp.join(args.output_src, args.da, args.dset,
names[args.s][0].upper())
args.output_dir = osp.join(
args.output, args.da, args.dset,
names[args.s][0].upper() + names[args.t][0].upper())
args.name = names[args.s][0].upper() + names[args.t][0].upper()
if not osp.exists(args.output_dir):
os.system('mkdir -p ' + args.output_dir)
if not osp.exists(args.output_dir):
os.mkdir(args.output_dir)
args.out_file = open(
osp.join(args.output_dir, 'log_target' + '.txt'), 'w')
args.out_file.write(print_args(args) + '\n')
args.out_file.flush()
train_target(args)
| 37.181159 | 116 | 0.535438 | import argparse
import os, sys
import os.path as osp
import torchvision
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
import network, loss
from torch.utils.data import DataLoader
from data_list import ImageList, ImageList_idx
import random, pdb, math, copy
from sklearn.metrics import confusion_matrix
import torch.nn.functional as F
def op_copy(optimizer):
for param_group in optimizer.param_groups:
param_group['lr0'] = param_group['lr']
return optimizer
def lr_scheduler(optimizer, iter_num, max_iter, gamma=10, power=0.75):
decay = (1 + gamma * iter_num / max_iter)**(-power)
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr0'] * decay
param_group['weight_decay'] = 1e-3
param_group['momentum'] = 0.9
param_group['nesterov'] = True
return optimizer
def image_train(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.RandomCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(), normalize
])
def image_test(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.CenterCrop(crop_size),
transforms.ToTensor(), normalize
])
def data_load(args):
}
dset_loaders = {}
train_bs = args.batch_size
txt_src = open(args.s_dset_path).readlines()
txt_tar = open(args.t_dset_path).readlines()
txt_test = open(args.test_dset_path).readlines()
dsize = len(txt_src)
tr_size = int(0.9*dsize)
tr_txt, te_txt = torch.utils.data.random_split(txt_src, [tr_size, dsize - tr_size])
dsets["source_tr"] = ImageList(tr_txt, transform=image_train())
dset_loaders["source_tr"] = DataLoader(dsets["source_tr"],
batch_size=train_bs,
shuffle=True,
num_workers=args.worker,
drop_last=False)
dsets["source_te"] = ImageList(te_txt, transform=image_test())
dset_loaders["source_te"] = DataLoader(dsets["source_te"],
batch_size=train_bs,
shuffle=True,
num_workers=args.worker,
drop_last=False)
dsets["target"] = ImageList_idx(txt_tar, transform=image_train())
dset_loaders["target"] = DataLoader(dsets["target"],
batch_size=train_bs,
shuffle=True,
num_workers=args.worker,
drop_last=False)
dsets["test"] = ImageList_idx(txt_test, transform=image_test())
dset_loaders["test"] = DataLoader(dsets["test"],
batch_size=train_bs * 3,
shuffle=False,
num_workers=args.worker,
drop_last=False)
return dset_loaders
def cal_acc(loader, netF, netB, netC,t=0, flag=False):
start_test = True
with torch.no_grad():
iter_test = iter(loader)
for i in range(len(loader)):
data = iter_test.next()
inputs = data[0]
labels = data[1]
inputs = inputs.cuda()
outputs = netC(netB(netF(inputs),t=t)[0])
if start_test:
all_output = outputs.float().cpu()
all_label = labels.float()
start_test = False
else:
all_output = torch.cat((all_output, outputs.float().cpu()), 0)
all_label = torch.cat((all_label, labels.float()), 0)
_, predict = torch.max(all_output, 1)
accuracy = torch.sum(
torch.squeeze(predict).float() == all_label).item() / float(
all_label.size()[0])
mean_ent = torch.mean(loss.Entropy(
nn.Softmax(dim=1)(all_output))).cpu().data.item()
if flag:
matrix = confusion_matrix(all_label, torch.squeeze(predict).float())
acc = matrix.diagonal() / matrix.sum(axis=1) * 100
aacc = acc.mean()
aa = [str(np.round(i, 2)) for i in acc]
acc = ' '.join(aa)
return aacc, acc
else:
return accuracy * 100, mean_ent
def train_target(args):
dset_loaders = data_load(args)
k.ResBase(res_name=args.net).cuda()
netB = network.feat_bootleneck_sdaE(type=args.classifier,
feature_dim=netF.in_features,
bottleneck_dim=args.bottleneck).cuda()
netC = network.feat_classifier(type=args.layer,
class_num=args.class_num,
bottleneck_dim=args.bottleneck).cuda()
modelpath = args.output_dir_src + '/source_F.pt'
netF.load_state_dict(torch.load(modelpath))
modelpath = args.output_dir_src + '/source_B.pt'
netB.load_state_dict(torch.load(modelpath))
modelpath = args.output_dir_src + '/source_C.pt'
netC.load_state_dict(torch.load(modelpath))
param_group = []
for k, v in netF.named_parameters():
if k.find('bn')!=-1:
param_group += [{'params': v, 'lr': args.lr * 0.1}]
for k, v in netB.named_parameters():
rams': v, 'lr': args.lr * 1}]
for k, v in netC.named_parameters():
param_group += [{'params': v, 'lr': args.lr * 1}]
optimizer = optim.SGD(param_group)
optimizer = op_copy(optimizer)
loader = dset_loaders["target"]
num_sample=len(loader.dataset)
fea_bank=torch.randn(num_sample,256)
score_bank = torch.randn(num_sample, 12).cuda()
netF.eval()
netB.eval()
netC.eval()
with torch.no_grad():
iter_test = iter(loader)
for i in range(len(loader)):
data = iter_test.next()
inputs = data[0]
indx=data[-1]
inputs = inputs.cuda()
output, _ = netB(netF(inputs), t=1)
output_norm=F.normalize(output)
outputs = netC(output)
outputs=nn.Softmax(-1)(outputs)
fea_bank[indx] = output_norm.detach().clone().cpu()
score_bank[indx] = outputs.detach().clone()
max_iter = args.max_epoch * len(dset_loaders["target"])
interval_iter = max_iter // args.interval
iter_num = 0
netF.train()
netB.train()
netC.train()
acc_log=0
while iter_num < max_iter:
try:
inputs_test, _, tar_idx = iter_test.next()
except:
iter_test = iter(dset_loaders["target"])
inputs_test, _, tar_idx = iter_test.next()
if inputs_test.size(0) == 1:
continue
inputs_test = inputs_test.cuda()
iter_num += 1
lr_scheduler(optimizer, iter_num=iter_num, max_iter=max_iter)
features_test, masks = netB(netF(inputs_test),t=1)
masks_old = masks
outputs_test = netC(features_test)
softmax_out = nn.Softmax(dim=1)(outputs_test)
output_re = softmax_out.unsqueeze(1)
with torch.no_grad():
output_f_norm=F.normalize(features_test)
fea_bank[tar_idx].fill_(-0.1)
output_f_=output_f_norm.cpu().detach().clone()
distance = output_f_@fea_bank.T
_, idx_near = torch.topk(distance,
dim=-1,
largest=True,
k=10)
score_near = score_bank[idx_near]
score_near=score_near.permute(0,2,1)
fea_bank[tar_idx] = output_f_.detach().clone().cpu()
score_bank[tar_idx] = softmax_out.detach().clone()
const=torch.log(torch.bmm(output_re,score_near)).sum(-1)
loss=-torch.mean(const)
msoftmax = softmax_out.mean(dim=0)
gentropy_loss = torch.sum(msoftmax *
torch.log(msoftmax + args.epsilon))
loss += gentropy_loss
optimizer.zero_grad()
loss.backward()
for n, p in netB.bottleneck.named_parameters():
if n.find('bias') == -1:
mask_ = ((1 - masks_old)).view(-1, 1).expand(256, 2048).cuda()
p.grad.data *= mask_
else:
mask_ = ((1 - masks_old)).squeeze().cuda()
p.grad.data *= mask_
for n, p in netC.named_parameters():
if n.find('weight_v') != -1:
masks__=masks_old.view(1,-1).expand(12,256)
mask_ = ((1 - masks__)).cuda()
p.grad.data *= mask_
for n, p in netB.bn.named_parameters():
mask_ = ((1 - masks_old)).view(-1).cuda()
p.grad.data *= mask_
optimizer.step()
if iter_num % interval_iter == 0 or iter_num == max_iter:
netF.eval()
netB.eval()
netC.eval()
if args.dset == 'visda-2017':
acc_s_te, acc_list = cal_acc(dset_loaders['test'], netF, netB,
netC,t=1,flag= True)
accS_s_te, accS_list = cal_acc(dset_loaders['source_te'], netF, netB,
netC,t=0,flag= True)
log_str = 'Task: {}, Iter:{}/{}; Accuracy on target = {:.2f}%, Accuracy on source = {:.2f}%'.format(
args.name, iter_num, max_iter, acc_s_te, accS_s_te
) + '\n' + 'T: ' + acc_list + '\n' + 'S: ' + accS_list
args.out_file.write(log_str + '\n')
args.out_file.flush()
print(log_str + '\n')
netF.train()
netB.train()
netC.train()
if args.issave:
if acc_s_te>acc_log:
acc_log=acc_s_te
torch.save(
netF.state_dict(),
osp.join(args.output_dir, "target_F_" + 'final' + ".pt"))
torch.save(
netB.state_dict(),
osp.join(args.output_dir, "target_B_" + 'final' + ".pt"))
torch.save(
netC.state_dict(),
osp.join(args.output_dir, "target_C_" + 'final' + ".pt"))
return netF, netB, netC
def print_args(args):
s = "==========================================\n"
for arg, content in args.__dict__.items():
s += "{}:{}\n".format(arg, content)
return s
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Ours')
parser.add_argument('--gpu_id',
type=str,
nargs='?',
default='8',
help="device id to run")
parser.add_argument('--s', type=int, default=0, help="source")
parser.add_argument('--t', type=int, default=1, help="target")
parser.add_argument('--max_epoch',
type=int,
default=15,
help="max iterations")
parser.add_argument('--interval', type=int, default=15)
parser.add_argument('--batch_size',
type=int,
default=64,
help="batch_size")
parser.add_argument('--worker',
type=int,
default=4,
help="number of workers")
parser.add_argument(
'--dset',
type=str,
default='visda-2017')
parser.add_argument('--lr', type=float, default=1e-3, help="learning rate")
parser.add_argument('--net',
type=str,
default='resnet101')
parser.add_argument('--seed', type=int, default=2020, help="random seed")
parser.add_argument('--bottleneck', type=int, default=256)
parser.add_argument('--epsilon', type=float, default=1e-5)
parser.add_argument('--layer',
type=str,
default="wn",
choices=["linear", "wn"])
parser.add_argument('--classifier',
type=str,
default="bn",
choices=["ori", "bn"])
parser.add_argument('--output', type=str, default='visda/target/')
parser.add_argument('--output_src', type=str, default='visda/source/')
parser.add_argument('--da',
type=str,
default='uda')
parser.add_argument('--issave', type=bool, default=True)
args = parser.parse_args()
if args.dset == 'office-home':
names = ['Art', 'Clipart', 'Product', 'RealWorld']
args.class_num = 65
if args.dset == 'visda-2017':
names = ['train', 'validation']
args.class_num = 12
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
SEED = args.seed
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
torch.backends.cudnn.deterministic = True
for i in range(len(names)):
if i == args.s:
continue
args.t = i
folder = './data/'
args.s_dset_path = folder + args.dset + '/' + names[
args.s] + '_list.txt'
args.t_dset_path = folder + args.dset + '/' + names[
args.t] + '_list.txt'
args.test_dset_path = folder + args.dset + '/' + names[
args.t] + '_list.txt'
args.output_dir_src = osp.join(args.output_src, args.da, args.dset,
names[args.s][0].upper())
args.output_dir = osp.join(
args.output, args.da, args.dset,
names[args.s][0].upper() + names[args.t][0].upper())
args.name = names[args.s][0].upper() + names[args.t][0].upper()
if not osp.exists(args.output_dir):
os.system('mkdir -p ' + args.output_dir)
if not osp.exists(args.output_dir):
os.mkdir(args.output_dir)
args.out_file = open(
osp.join(args.output_dir, 'log_target' + '.txt'), 'w')
args.out_file.write(print_args(args) + '\n')
args.out_file.flush()
train_target(args)
| true | true |
1c3240f5219e381a34891036fae6c23256494899 | 1,255 | py | Python | qlworkbench/ui/event_manager.py | threkk/software-construction | ddabf113b8a58b9e6f6fc2d4c6539df1a38c010a | [
"MIT"
] | null | null | null | qlworkbench/ui/event_manager.py | threkk/software-construction | ddabf113b8a58b9e6f6fc2d4c6539df1a38c010a | [
"MIT"
] | 7 | 2017-04-22T14:51:38.000Z | 2017-12-10T18:51:36.000Z | qlworkbench/ui/event_manager.py | threkk/software-construction | ddabf113b8a58b9e6f6fc2d4c6539df1a38c010a | [
"MIT"
] | 1 | 2017-12-10T18:51:06.000Z | 2017-12-10T18:51:06.000Z | # -*- coding: utf-8 -*-
"""
This module defines a simple event manager that works on a basis of publishers
and subscribers. This is used in combination with the class `ui.context` to
update the values of the variables when they change.
"""
class EventManager(object):
"""
Simple event manager based on the bulletin board pattern. It defines a
unique handler and a unique event for simplicity.
"""
def __init__(self, updater):
"""Initialises the event manager and stores the update function."""
self._subscriptions = {}
self.updater = updater
def get_publishers(self):
"""Retrieves all the declared publishers."""
return self._subscriptions.keys()
def subscribe(self, subscriber, publisher):
"""Subscribers the `subscriber` to the `publisher` events"""
if publisher in self._subscriptions:
self._subscriptions[publisher].append(subscriber)
else:
self._subscriptions[publisher] = [subscriber]
def publish(self, publisher):
"""Updates all the subscribers of the `publisher`"""
if publisher in self._subscriptions:
for subscriber in self._subscriptions[publisher]:
self.updater(subscriber)
| 35.857143 | 78 | 0.67012 |
class EventManager(object):
def __init__(self, updater):
self._subscriptions = {}
self.updater = updater
def get_publishers(self):
return self._subscriptions.keys()
def subscribe(self, subscriber, publisher):
if publisher in self._subscriptions:
self._subscriptions[publisher].append(subscriber)
else:
self._subscriptions[publisher] = [subscriber]
def publish(self, publisher):
if publisher in self._subscriptions:
for subscriber in self._subscriptions[publisher]:
self.updater(subscriber)
| true | true |
1c32419673cc54be598a53cc174e927a74ba172d | 3,866 | py | Python | sdk/identity/azure-identity/azure/identity/aio/_credentials/imds.py | rnestler/azure-sdk-for-python | 3d9c14789a5f468b3a1c8b57575e54cbdad708eb | [
"MIT"
] | null | null | null | sdk/identity/azure-identity/azure/identity/aio/_credentials/imds.py | rnestler/azure-sdk-for-python | 3d9c14789a5f468b3a1c8b57575e54cbdad708eb | [
"MIT"
] | null | null | null | sdk/identity/azure-identity/azure/identity/aio/_credentials/imds.py | rnestler/azure-sdk-for-python | 3d9c14789a5f468b3a1c8b57575e54cbdad708eb | [
"MIT"
] | null | null | null | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
from typing import TYPE_CHECKING
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError
from ... import CredentialUnavailableError
from ..._constants import EnvironmentVariables
from .._internal import AsyncContextManager
from .._internal.get_token_mixin import GetTokenMixin
from .._internal.managed_identity_client import AsyncManagedIdentityClient
from ..._credentials.imds import get_request, PIPELINE_SETTINGS
if TYPE_CHECKING:
from typing import Any, Optional
from azure.core.credentials import AccessToken
class ImdsCredential(AsyncContextManager, GetTokenMixin):
def __init__(self, **kwargs: "Any") -> None:
super().__init__()
self._client = AsyncManagedIdentityClient(get_request, **PIPELINE_SETTINGS, **kwargs)
if EnvironmentVariables.AZURE_POD_IDENTITY_AUTHORITY_HOST in os.environ:
self._endpoint_available = True # type: Optional[bool]
else:
self._endpoint_available = None
self._error_message = None # type: Optional[str]
self._user_assigned_identity = "client_id" in kwargs or "identity_config" in kwargs
async def __aenter__(self):
await self._client.__aenter__()
return self
async def close(self) -> None:
await self._client.close()
async def _acquire_token_silently(self, *scopes: str, **kwargs: "Any") -> "Optional[AccessToken]":
return self._client.get_cached_token(*scopes)
async def _request_token(self, *scopes, **kwargs: "Any") -> "AccessToken": # pylint:disable=unused-argument
if self._endpoint_available is None:
# Lacking another way to determine whether the IMDS endpoint is listening,
# we send a request it would immediately reject (because it lacks the Metadata header),
# setting a short timeout.
try:
await self._client.request_token(*scopes, connection_timeout=0.3, retry_total=0)
self._endpoint_available = True
except HttpResponseError:
# IMDS responded
self._endpoint_available = True
except Exception as ex: # pylint:disable=broad-except
# if anything else was raised, assume the endpoint is unavailable
self._endpoint_available = False
self._error_message = (
"ManagedIdentityCredential authentication unavailable, no response from the IMDS endpoint."
)
raise CredentialUnavailableError(message=self._error_message) from ex
if not self._endpoint_available:
raise CredentialUnavailableError(message=self._error_message)
try:
token = await self._client.request_token(*scopes, headers={"Metadata": "true"})
except HttpResponseError as ex:
# 400 in response to a token request indicates managed identity is disabled,
# or the identity with the specified client_id is not available
if ex.status_code == 400:
self._endpoint_available = False
self._error_message = "ManagedIdentityCredential authentication unavailable. "
if self._user_assigned_identity:
self._error_message += "The requested identity has not been assigned to this resource."
else:
self._error_message += "No identity has been assigned to this resource."
raise CredentialUnavailableError(message=self._error_message) from ex
# any other error is unexpected
raise ClientAuthenticationError(message=ex.message, response=ex.response) from ex
return token
| 46.578313 | 112 | 0.667874 |
import os
from typing import TYPE_CHECKING
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError
from ... import CredentialUnavailableError
from ..._constants import EnvironmentVariables
from .._internal import AsyncContextManager
from .._internal.get_token_mixin import GetTokenMixin
from .._internal.managed_identity_client import AsyncManagedIdentityClient
from ..._credentials.imds import get_request, PIPELINE_SETTINGS
if TYPE_CHECKING:
from typing import Any, Optional
from azure.core.credentials import AccessToken
class ImdsCredential(AsyncContextManager, GetTokenMixin):
def __init__(self, **kwargs: "Any") -> None:
super().__init__()
self._client = AsyncManagedIdentityClient(get_request, **PIPELINE_SETTINGS, **kwargs)
if EnvironmentVariables.AZURE_POD_IDENTITY_AUTHORITY_HOST in os.environ:
self._endpoint_available = True
else:
self._endpoint_available = None
self._error_message = None
self._user_assigned_identity = "client_id" in kwargs or "identity_config" in kwargs
async def __aenter__(self):
await self._client.__aenter__()
return self
async def close(self) -> None:
await self._client.close()
async def _acquire_token_silently(self, *scopes: str, **kwargs: "Any") -> "Optional[AccessToken]":
return self._client.get_cached_token(*scopes)
async def _request_token(self, *scopes, **kwargs: "Any") -> "AccessToken":
if self._endpoint_available is None:
try:
await self._client.request_token(*scopes, connection_timeout=0.3, retry_total=0)
self._endpoint_available = True
except HttpResponseError:
self._endpoint_available = True
except Exception as ex:
self._endpoint_available = False
self._error_message = (
"ManagedIdentityCredential authentication unavailable, no response from the IMDS endpoint."
)
raise CredentialUnavailableError(message=self._error_message) from ex
if not self._endpoint_available:
raise CredentialUnavailableError(message=self._error_message)
try:
token = await self._client.request_token(*scopes, headers={"Metadata": "true"})
except HttpResponseError as ex:
if ex.status_code == 400:
self._endpoint_available = False
self._error_message = "ManagedIdentityCredential authentication unavailable. "
if self._user_assigned_identity:
self._error_message += "The requested identity has not been assigned to this resource."
else:
self._error_message += "No identity has been assigned to this resource."
raise CredentialUnavailableError(message=self._error_message) from ex
raise ClientAuthenticationError(message=ex.message, response=ex.response) from ex
return token
| true | true |
1c3241db512ddaf8a597300f679ee9b33bd2b0ae | 3,087 | py | Python | superviselySDK/help/jupyterlab_scripts/src/backup/project_inference.py | nicehuster/mmdetection-supervisely-person-datasets | ff1b57e16a71378510571dbb9cebfdb712656927 | [
"Apache-2.0"
] | 40 | 2019-05-05T08:08:18.000Z | 2021-10-17T00:07:58.000Z | superviselySDK/help/jupyterlab_scripts/src/backup/project_inference.py | nicehuster/mmdetection-supervisely-person-datasets | ff1b57e16a71378510571dbb9cebfdb712656927 | [
"Apache-2.0"
] | 8 | 2019-06-13T06:00:08.000Z | 2021-07-24T05:25:33.000Z | superviselySDK/help/jupyterlab_scripts/src/backup/project_inference.py | nicehuster/mmdetection-supervisely-person-datasets | ff1b57e16a71378510571dbb9cebfdb712656927 | [
"Apache-2.0"
] | 6 | 2019-07-30T06:36:27.000Z | 2021-06-03T11:57:36.000Z | # coding: utf-8
import time
from tqdm import tqdm
import supervisely_lib as sly
address = 'http://192.168.1.69:5555'
token = 'YGPDnuBkhFmcQ7VNzSEjhgavjg4eFR4Eq1C3jIY4HgV3SQq2JgkXCNtgZy1Fu2ftd4IKui8DsjrdtXjB853cMtBevpSJqFDYiaG1A5qphlH6fFiYYmcVZ5fMR8dDrt5l'
team_name = 'dima'
workspace_name = 'work'
agent_name = 'dima_agent'
model_name = 'road_model'
src_project_name = 'roads_inf'
dst_project_name = 'res'
api = sly.Api(address, token)
team_id = api.team.get_id_by_name(team_name)
workspace_id = api.workspace.get_id_by_name(workspace_name, team_id)
agent_id = api.agent.get_id_by_name(agent_name, team_id)
model_info = api.model.get_info_by_name(model_name, workspace_id)
plugin_id = model_info['pluginId']
plugin_info = api.plugin.get_info_by_id(plugin_id, team_id)
plugin_version = plugin_info['defaultVersion']
tasks_ids = api.model.get_deploy_tasks(model_info['id'])
if len(tasks_ids) == 0:
task_id = api.task.deploy_model(agent_id, model_info['id'], workspace_id, 'never', {}, plugin_id, plugin_version)['taskId']
else:
task_id = tasks_ids[0]
while True:
status = api.task.get_status(task_id)
api.task.raise_for_status(status)
if status is api.task.Status.DEPLOYED:
break
time.sleep(2)
print('Model deployed')
dst_project_name = api.project.get_free_name(dst_project_name, workspace_id)
dst_project_id = api.project.create(dst_project_name, workspace_id)['id']
src_project_id = api.project.get_info_by_name(src_project_name, workspace_id)['id']
src_meta_json = api.project.get_meta(src_project_id)
src_meta = sly.ProjectMeta.from_json(src_meta_json)
model_meta_json = api.model.get_project_meta(model_info['id'])
model_meta = sly.ProjectMeta.from_json(model_meta_json)
dst_meta = src_meta.clone()
dst_meta = dst_meta.merge(model_meta)
api.project.update_meta(dst_project_id, dst_meta.to_json())
def process(img, ann_json):
settings = {'annotation': ann_json, 'meta': src_meta_json}
response = api.model.inference(model_info['id'], img, settings=settings)
return img, response
for dataset_info in api.dataset.get_list(src_project_id):
src_dataset_id = dataset_info['id']
src_dataset_name = dataset_info['name']
print('Project/Dataset: {}/{}'.format(src_project_name, src_dataset_name))
dst_dataset_name = api.dataset.get_free_name(src_dataset_name, dst_project_id)
dst_dataset_id = api.dataset.create(dst_dataset_name, dst_project_id)['id']
for image_info in tqdm(api.image.get_list(src_dataset_id)):
src_image_ext = image_info['meta']['mime'].split('/')[1]
img = api.image.download_np(image_info['id'])
ann_json = api.annotation.download(src_dataset_id, image_info['id'])
inf_results = process(img, ann_json)
inf_image, inf_ann_json = inf_results
dst_img_name = api.image.get_free_name(image_info['name'], dst_dataset_id)
dst_img_hash = api.image.upload_np(src_image_ext, inf_image)['hash']
dst_img_id = api.image.add(dst_img_name, dst_img_hash, dst_dataset_id)['id']
api.annotation.upload(inf_ann_json, dst_img_id)
| 35.482759 | 138 | 0.761905 |
import time
from tqdm import tqdm
import supervisely_lib as sly
address = 'http://192.168.1.69:5555'
token = 'YGPDnuBkhFmcQ7VNzSEjhgavjg4eFR4Eq1C3jIY4HgV3SQq2JgkXCNtgZy1Fu2ftd4IKui8DsjrdtXjB853cMtBevpSJqFDYiaG1A5qphlH6fFiYYmcVZ5fMR8dDrt5l'
team_name = 'dima'
workspace_name = 'work'
agent_name = 'dima_agent'
model_name = 'road_model'
src_project_name = 'roads_inf'
dst_project_name = 'res'
api = sly.Api(address, token)
team_id = api.team.get_id_by_name(team_name)
workspace_id = api.workspace.get_id_by_name(workspace_name, team_id)
agent_id = api.agent.get_id_by_name(agent_name, team_id)
model_info = api.model.get_info_by_name(model_name, workspace_id)
plugin_id = model_info['pluginId']
plugin_info = api.plugin.get_info_by_id(plugin_id, team_id)
plugin_version = plugin_info['defaultVersion']
tasks_ids = api.model.get_deploy_tasks(model_info['id'])
if len(tasks_ids) == 0:
task_id = api.task.deploy_model(agent_id, model_info['id'], workspace_id, 'never', {}, plugin_id, plugin_version)['taskId']
else:
task_id = tasks_ids[0]
while True:
status = api.task.get_status(task_id)
api.task.raise_for_status(status)
if status is api.task.Status.DEPLOYED:
break
time.sleep(2)
print('Model deployed')
dst_project_name = api.project.get_free_name(dst_project_name, workspace_id)
dst_project_id = api.project.create(dst_project_name, workspace_id)['id']
src_project_id = api.project.get_info_by_name(src_project_name, workspace_id)['id']
src_meta_json = api.project.get_meta(src_project_id)
src_meta = sly.ProjectMeta.from_json(src_meta_json)
model_meta_json = api.model.get_project_meta(model_info['id'])
model_meta = sly.ProjectMeta.from_json(model_meta_json)
dst_meta = src_meta.clone()
dst_meta = dst_meta.merge(model_meta)
api.project.update_meta(dst_project_id, dst_meta.to_json())
def process(img, ann_json):
settings = {'annotation': ann_json, 'meta': src_meta_json}
response = api.model.inference(model_info['id'], img, settings=settings)
return img, response
for dataset_info in api.dataset.get_list(src_project_id):
src_dataset_id = dataset_info['id']
src_dataset_name = dataset_info['name']
print('Project/Dataset: {}/{}'.format(src_project_name, src_dataset_name))
dst_dataset_name = api.dataset.get_free_name(src_dataset_name, dst_project_id)
dst_dataset_id = api.dataset.create(dst_dataset_name, dst_project_id)['id']
for image_info in tqdm(api.image.get_list(src_dataset_id)):
src_image_ext = image_info['meta']['mime'].split('/')[1]
img = api.image.download_np(image_info['id'])
ann_json = api.annotation.download(src_dataset_id, image_info['id'])
inf_results = process(img, ann_json)
inf_image, inf_ann_json = inf_results
dst_img_name = api.image.get_free_name(image_info['name'], dst_dataset_id)
dst_img_hash = api.image.upload_np(src_image_ext, inf_image)['hash']
dst_img_id = api.image.add(dst_img_name, dst_img_hash, dst_dataset_id)['id']
api.annotation.upload(inf_ann_json, dst_img_id)
| true | true |
1c32444a262755d1f0837b4dccf7b218ba34597b | 3,247 | py | Python | python/TypeHandlers.py | rsejaa/oracle-db-examples | 8b91baf801e9a67b357f38f187f103824f70d310 | [
"Apache-2.0"
] | 4 | 2019-10-26T06:21:32.000Z | 2021-02-15T15:28:02.000Z | python/TypeHandlers.py | rsejaa/oracle-db-examples | 8b91baf801e9a67b357f38f187f103824f70d310 | [
"Apache-2.0"
] | null | null | null | python/TypeHandlers.py | rsejaa/oracle-db-examples | 8b91baf801e9a67b357f38f187f103824f70d310 | [
"Apache-2.0"
] | 5 | 2019-10-26T06:21:31.000Z | 2022-03-10T12:47:13.000Z | #------------------------------------------------------------------------------
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
#
# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved.
#
# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta,
# Canada. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# TypeHandlers.py
# This script demonstrates the use of input and output type handlers as well
# as variable input and output converters. These methods can be used to extend
# cx_Oracle in many ways. This script demonstrates the binding and querying of
# SQL objects as Python objects.
#
# This script requires cx_Oracle 5.0 and higher.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import datetime
import SampleEnv
con = cx_Oracle.connect(SampleEnv.GetMainConnectString())
objType = con.gettype("UDT_BUILDING")
class Building(object):
def __init__(self, buildingId, description, numFloors, dateBuilt):
self.buildingId = buildingId
self.description = description
self.numFloors = numFloors
self.dateBuilt = dateBuilt
def __repr__(self):
return "<Building %s: %s>" % (self.buildingId, self.description)
def BuildingInConverter(value):
obj = objType.newobject()
obj.BUILDINGID = value.buildingId
obj.DESCRIPTION = value.description
obj.NUMFLOORS = value.numFloors
obj.DATEBUILT = value.dateBuilt
return obj
def BuildingOutConverter(obj):
return Building(int(obj.BUILDINGID), obj.DESCRIPTION, int(obj.NUMFLOORS),
obj.DATEBUILT)
def InputTypeHandler(cursor, value, numElements):
if isinstance(value, Building):
return cursor.var(cx_Oracle.OBJECT, arraysize = numElements,
inconverter = BuildingInConverter, typename = objType.name)
def OutputTypeHandler(cursor, name, defaultType, size, precision, scale):
if defaultType == cx_Oracle.OBJECT:
return cursor.var(cx_Oracle.OBJECT, arraysize = cursor.arraysize,
outconverter = BuildingOutConverter, typename = objType.name)
buildings = [
Building(1, "The First Building", 5, datetime.date(2007, 5, 18)),
Building(2, "The Second Building", 87, datetime.date(2010, 2, 7)),
Building(3, "The Third Building", 12, datetime.date(2005, 6, 19)),
]
cur = con.cursor()
cur.inputtypehandler = InputTypeHandler
for building in buildings:
try:
cur.execute("insert into TestBuildings values (:1, :2)",
(building.buildingId, building))
except cx_Oracle.DatabaseError as e:
error, = e.args
print("CONTEXT:", error.context)
print("MESSAGE:", error.message)
raise
print("NO OUTPUT TYPE HANDLER:")
for row in cur.execute("select * from TestBuildings order by BuildingId"):
print(row)
print()
cur = con.cursor()
cur.outputtypehandler = OutputTypeHandler
print("WITH OUTPUT TYPE HANDLER:")
for row in cur.execute("select * from TestBuildings order by BuildingId"):
print(row)
print()
| 33.822917 | 79 | 0.642439 |
from __future__ import print_function
import cx_Oracle
import datetime
import SampleEnv
con = cx_Oracle.connect(SampleEnv.GetMainConnectString())
objType = con.gettype("UDT_BUILDING")
class Building(object):
def __init__(self, buildingId, description, numFloors, dateBuilt):
self.buildingId = buildingId
self.description = description
self.numFloors = numFloors
self.dateBuilt = dateBuilt
def __repr__(self):
return "<Building %s: %s>" % (self.buildingId, self.description)
def BuildingInConverter(value):
obj = objType.newobject()
obj.BUILDINGID = value.buildingId
obj.DESCRIPTION = value.description
obj.NUMFLOORS = value.numFloors
obj.DATEBUILT = value.dateBuilt
return obj
def BuildingOutConverter(obj):
return Building(int(obj.BUILDINGID), obj.DESCRIPTION, int(obj.NUMFLOORS),
obj.DATEBUILT)
def InputTypeHandler(cursor, value, numElements):
if isinstance(value, Building):
return cursor.var(cx_Oracle.OBJECT, arraysize = numElements,
inconverter = BuildingInConverter, typename = objType.name)
def OutputTypeHandler(cursor, name, defaultType, size, precision, scale):
if defaultType == cx_Oracle.OBJECT:
return cursor.var(cx_Oracle.OBJECT, arraysize = cursor.arraysize,
outconverter = BuildingOutConverter, typename = objType.name)
buildings = [
Building(1, "The First Building", 5, datetime.date(2007, 5, 18)),
Building(2, "The Second Building", 87, datetime.date(2010, 2, 7)),
Building(3, "The Third Building", 12, datetime.date(2005, 6, 19)),
]
cur = con.cursor()
cur.inputtypehandler = InputTypeHandler
for building in buildings:
try:
cur.execute("insert into TestBuildings values (:1, :2)",
(building.buildingId, building))
except cx_Oracle.DatabaseError as e:
error, = e.args
print("CONTEXT:", error.context)
print("MESSAGE:", error.message)
raise
print("NO OUTPUT TYPE HANDLER:")
for row in cur.execute("select * from TestBuildings order by BuildingId"):
print(row)
print()
cur = con.cursor()
cur.outputtypehandler = OutputTypeHandler
print("WITH OUTPUT TYPE HANDLER:")
for row in cur.execute("select * from TestBuildings order by BuildingId"):
print(row)
print()
| true | true |
1c3245ac2a6e13af8b1c8775f8d82b178321ba8f | 10,263 | py | Python | Day33_Transfer_learning_Multiclass_classification/assignment3_pythonformat.py | activatedbonkers/100-Days-of-Code-Challenge | a1a376e5373d8fc5fda5df4004115760aa92bfda | [
"MIT"
] | 8 | 2020-07-12T22:45:30.000Z | 2021-06-01T10:44:28.000Z | Day33_Transfer_learning_Multiclass_classification/assignment3_pythonformat.py | activatedbonkers/100-Days-of-Code-Challenge | a1a376e5373d8fc5fda5df4004115760aa92bfda | [
"MIT"
] | null | null | null | Day33_Transfer_learning_Multiclass_classification/assignment3_pythonformat.py | activatedbonkers/100-Days-of-Code-Challenge | a1a376e5373d8fc5fda5df4004115760aa92bfda | [
"MIT"
] | 6 | 2020-06-29T18:36:27.000Z | 2022-01-16T21:18:03.000Z | #!/usr/bin/env python
# coding: utf-8
# In[28]:
# ATTENTION: Please do not alter any of the provided code in the exercise. Only add your own code where indicated
# ATTENTION: Please do not add or remove any cells in the exercise. The grader will check specific cells based on the cell position.
# ATTENTION: Please use the provided epoch values when training.
# Import all the necessary files!
import os
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Model
from os import getcwd
# In[29]:
path_inception = f"{getcwd()}/../tmp2/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"
# Import the inception model
from tensorflow.keras.applications.inception_v3 import InceptionV3
# Create an instance of the inception model from the local pre-trained weights
local_weights_file = path_inception
pre_trained_model = InceptionV3(
input_shape = (150, 150, 3),
include_top = False,
weights = None)
# Your Code Here
pre_trained_model.load_weights(local_weights_file)
# Make all the layers in the pre-trained model non-trainable
for layer in pre_trained_model.layers:
# Your Code Here
layer.trainable = False
# Print the model summary
pre_trained_model.summary()
# Expected Output is extremely large, but should end with:
#batch_normalization_v1_281 (Bat (None, 3, 3, 192) 576 conv2d_281[0][0]
#__________________________________________________________________________________________________
#activation_273 (Activation) (None, 3, 3, 320) 0 batch_normalization_v1_273[0][0]
#__________________________________________________________________________________________________
#mixed9_1 (Concatenate) (None, 3, 3, 768) 0 activation_275[0][0]
# activation_276[0][0]
#__________________________________________________________________________________________________
#concatenate_5 (Concatenate) (None, 3, 3, 768) 0 activation_279[0][0]
# activation_280[0][0]
#__________________________________________________________________________________________________
#activation_281 (Activation) (None, 3, 3, 192) 0 batch_normalization_v1_281[0][0]
#__________________________________________________________________________________________________
#mixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_273[0][0]
# mixed9_1[0][0]
# concatenate_5[0][0]
# activation_281[0][0]
#==================================================================================================
#Total params: 21,802,784
#Trainable params: 0
#Non-trainable params: 21,802,784
# In[30]:
last_layer = pre_trained_model.get_layer('mixed7')
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output# Your Code Here
# Expected Output:
# ('last layer output shape: ', (None, 7, 7, 768))
# In[31]:
# Define a Callback class that stops training once accuracy reaches 97.0%
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('acc')>0.97):
print("\nReached 97.0% accuracy so cancelling training!")
self.model.stop_training = True
# In[41]:
from tensorflow.keras.optimizers import RMSprop
# Flatten the output layer to 1 dimension
x = layers.Flatten()(last_output)
# Add a fully connected layer with 1,024 hidden units and ReLU activation
x = layers.Dense(1024, activation = 'relu')(x)
# Add a dropout rate of 0.2
x = layers.Dropout(0.2)(x)
# Add a final sigmoid layer for classification
x = layers.Dense(1, activation = 'sigmoid')(x)
model = Model(pre_trained_model.input, x)
model.compile(optimizer = RMSprop(lr=0.0001),
loss = 'binary_crossentropy',
metrics = ['acc'])
model.summary()
# Expected output will be large. Last few lines should be:
# mixed7 (Concatenate) (None, 7, 7, 768) 0 activation_248[0][0]
# activation_251[0][0]
# activation_256[0][0]
# activation_257[0][0]
# __________________________________________________________________________________________________
# flatten_4 (Flatten) (None, 37632) 0 mixed7[0][0]
# __________________________________________________________________________________________________
# dense_8 (Dense) (None, 1024) 38536192 flatten_4[0][0]
# __________________________________________________________________________________________________
# dropout_4 (Dropout) (None, 1024) 0 dense_8[0][0]
# __________________________________________________________________________________________________
# dense_9 (Dense) (None, 1) 1025 dropout_4[0][0]
# ==================================================================================================
# Total params: 47,512,481
# Trainable params: 38,537,217
# Non-trainable params: 8,975,264
# In[42]:
# Get the Horse or Human dataset
path_horse_or_human = f"{getcwd()}/../tmp2/horse-or-human.zip"
# Get the Horse or Human Validation dataset
path_validation_horse_or_human = f"{getcwd()}/../tmp2/validation-horse-or-human.zip"
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import zipfile
import shutil
shutil.rmtree('/tmp')
local_zip = path_horse_or_human
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/training')
zip_ref.close()
local_zip = path_validation_horse_or_human
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/validation')
zip_ref.close()
# In[43]:
# Define our example directories and files
train_dir = '/tmp/training'
validation_dir = '/tmp/validation'
train_horses_dir = os.path.join(train_dir, 'horses')
train_humans_dir = os.path.join(train_dir, 'humans')
validation_horses_dir = os.path.join(validation_dir, 'horses')
validation_humans_dir = os.path.join(validation_dir, 'humans')
train_horses_fnames = os.listdir(train_horses_dir)
train_humans_fnames = os.listdir(train_humans_dir)
validation_horses_fnames = os.listdir(validation_horses_dir)
validation_humans_fnames = os.listdir(validation_humans_dir)
print(len(train_horses_fnames))
print(len(train_humans_fnames))
print(len(validation_horses_fnames))
print(len(validation_humans_fnames))
# Expected Output:
# 500
# 527
# 128
# 128
# In[44]:
# Add our data-augmentation parameters to ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
rotation_range = 40,
width_shift_range = 0.2,
height_shift_range = 0.2,
zoom_range = 0.2,
shear_range = 0.2,
horizontal_flip = True
)
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale = 1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size = 20,
class_mode = 'binary',
target_size = (150, 150))
# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(validation_dir,
batch_size = 20,
class_mode = 'binary',
target_size = (150, 150)
)
# Expected Output:
# Found 1027 images belonging to 2 classes.
# Found 256 images belonging to 2 classes.
# In[48]:
# Run this and see how many epochs it should take before the callback
# fires, and stops training at 97% accuracy
callbacks = myCallback()# Your Code Here
history = model.fit_generator(train_generator,
validation_data = validation_generator,
steps_per_epoch = 100,
epochs = 3,
validation_steps = 50,
callbacks = [callbacks])# Your Code Here (set epochs = 3))
# In[49]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
# # Submission Instructions
# In[ ]:
# Now click the 'Submit Assignment' button above.
# # When you're done or would like to take a break, please run the two cells below to save your work and close the Notebook. This will free up resources for your fellow learners.
# In[ ]:
get_ipython().run_cell_magic('javascript', '', '<!-- Save the notebook -->\nIPython.notebook.save_checkpoint();')
# In[ ]:
get_ipython().run_cell_magic('javascript', '', 'IPython.notebook.session.delete();\nwindow.onbeforeunload = null\nsetTimeout(function() { window.close(); }, 1000);')
| 36.393617 | 179 | 0.614343 |
import os
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Model
from os import getcwd
path_inception = f"{getcwd()}/../tmp2/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"
from tensorflow.keras.applications.inception_v3 import InceptionV3
local_weights_file = path_inception
pre_trained_model = InceptionV3(
input_shape = (150, 150, 3),
include_top = False,
weights = None)
pre_trained_model.load_weights(local_weights_file)
for layer in pre_trained_model.layers:
layer.trainable = False
pre_trained_model.summary()
last_layer = pre_trained_model.get_layer('mixed7')
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('acc')>0.97):
print("\nReached 97.0% accuracy so cancelling training!")
self.model.stop_training = True
from tensorflow.keras.optimizers import RMSprop
x = layers.Flatten()(last_output)
x = layers.Dense(1024, activation = 'relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(1, activation = 'sigmoid')(x)
model = Model(pre_trained_model.input, x)
model.compile(optimizer = RMSprop(lr=0.0001),
loss = 'binary_crossentropy',
metrics = ['acc'])
model.summary()
path_horse_or_human = f"{getcwd()}/../tmp2/horse-or-human.zip"
path_validation_horse_or_human = f"{getcwd()}/../tmp2/validation-horse-or-human.zip"
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import zipfile
import shutil
shutil.rmtree('/tmp')
local_zip = path_horse_or_human
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/training')
zip_ref.close()
local_zip = path_validation_horse_or_human
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp/validation')
zip_ref.close()
train_dir = '/tmp/training'
validation_dir = '/tmp/validation'
train_horses_dir = os.path.join(train_dir, 'horses')
train_humans_dir = os.path.join(train_dir, 'humans')
validation_horses_dir = os.path.join(validation_dir, 'horses')
validation_humans_dir = os.path.join(validation_dir, 'humans')
train_horses_fnames = os.listdir(train_horses_dir)
train_humans_fnames = os.listdir(train_humans_dir)
validation_horses_fnames = os.listdir(validation_horses_dir)
validation_humans_fnames = os.listdir(validation_humans_dir)
print(len(train_horses_fnames))
print(len(train_humans_fnames))
print(len(validation_horses_fnames))
print(len(validation_humans_fnames))
train_datagen = ImageDataGenerator(rescale = 1./255,
rotation_range = 40,
width_shift_range = 0.2,
height_shift_range = 0.2,
zoom_range = 0.2,
shear_range = 0.2,
horizontal_flip = True
)
test_datagen = ImageDataGenerator(rescale = 1./255)
train_generator = train_datagen.flow_from_directory(train_dir,
batch_size = 20,
class_mode = 'binary',
target_size = (150, 150))
validation_generator = test_datagen.flow_from_directory(validation_dir,
batch_size = 20,
class_mode = 'binary',
target_size = (150, 150)
)
callbacks = myCallback()
history = model.fit_generator(train_generator,
validation_data = validation_generator,
steps_per_epoch = 100,
epochs = 3,
validation_steps = 50,
callbacks = [callbacks])
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
n.notebook.session.delete();\nwindow.onbeforeunload = null\nsetTimeout(function() { window.close(); }, 1000);')
| true | true |
1c3246003698d71f80462f458c37d5f898f28798 | 2,481 | py | Python | tests/unit/model_selection/test_time_splitter.py | betatim/hcrystalball | 693b9b406f05afa23cfc4647c43260166a7076fe | [
"MIT"
] | null | null | null | tests/unit/model_selection/test_time_splitter.py | betatim/hcrystalball | 693b9b406f05afa23cfc4647c43260166a7076fe | [
"MIT"
] | null | null | null | tests/unit/model_selection/test_time_splitter.py | betatim/hcrystalball | 693b9b406f05afa23cfc4647c43260166a7076fe | [
"MIT"
] | null | null | null | import pytest
import types
import numpy as np
from hcrystalball.model_selection import FinerTimeSplit
@pytest.mark.parametrize(
"ts_data, expected_error",
[
("series", None),
("series_with_NaN", None),
("series_with_Inf", None),
("series_with_name", None),
("series_with_index_name", None),
("dataframe", None),
("dataframe_with_name", None),
("dataframe_with_index_name", None),
("dataframe_multicolumn", None),
("dataframe_integer_index", None),
("empty_dataframe", ValueError),
("empty_series", ValueError),
],
indirect=["ts_data"],
)
def test_cv_finertimesplit_split_pandas_container_data(ts_data, expected_error):
n_splits = 2
horizon = 3
fts = FinerTimeSplit(n_splits=n_splits, horizon=horizon)
if expected_error is None:
result = fts.split(ts_data)
assert isinstance(result, types.GeneratorType)
result = list(result)
assert len(result) == n_splits
for i, isplit in enumerate(result):
assert len(isplit) == 2
assert len(isplit[0]) == len(ts_data) - (n_splits - i) * horizon
assert len(isplit[1]) == horizon
assert np.array_equal(isplit[0], np.arange(len(ts_data) - (n_splits - i) * horizon))
assert np.array_equal(isplit[1], np.arange(horizon) + len(ts_data) - (n_splits - i) * horizon)
else:
with pytest.raises(expected_error):
_ = list(fts.split(ts_data))
@pytest.mark.parametrize(
"test_data, expected_error",
[(np.arange(6), None), ([0, 1, 2, 3, 4, 5], None), ((0, 1, 2, 3, 4, 5), None), (13, TypeError)],
)
def test_cv_finertimesplit_split_input_data_types(test_data, expected_error):
n_splits = 2
horizon = 3
fts = FinerTimeSplit(n_splits=n_splits, horizon=horizon)
if expected_error is None:
result = list(fts.split(test_data))
assert len(result) == n_splits
for i, isplit in enumerate(result):
assert len(isplit) == 2
assert len(isplit[0]) == len(test_data) - (n_splits - i) * horizon
assert len(isplit[1]) == horizon
assert np.array_equal(isplit[0], np.arange(len(test_data) - (n_splits - i) * horizon))
assert np.array_equal(isplit[1], np.arange(horizon) + len(test_data) - (n_splits - i) * horizon,)
else:
with pytest.raises(expected_error):
_ = list(fts.split(test_data))
| 37.029851 | 109 | 0.622733 | import pytest
import types
import numpy as np
from hcrystalball.model_selection import FinerTimeSplit
@pytest.mark.parametrize(
"ts_data, expected_error",
[
("series", None),
("series_with_NaN", None),
("series_with_Inf", None),
("series_with_name", None),
("series_with_index_name", None),
("dataframe", None),
("dataframe_with_name", None),
("dataframe_with_index_name", None),
("dataframe_multicolumn", None),
("dataframe_integer_index", None),
("empty_dataframe", ValueError),
("empty_series", ValueError),
],
indirect=["ts_data"],
)
def test_cv_finertimesplit_split_pandas_container_data(ts_data, expected_error):
n_splits = 2
horizon = 3
fts = FinerTimeSplit(n_splits=n_splits, horizon=horizon)
if expected_error is None:
result = fts.split(ts_data)
assert isinstance(result, types.GeneratorType)
result = list(result)
assert len(result) == n_splits
for i, isplit in enumerate(result):
assert len(isplit) == 2
assert len(isplit[0]) == len(ts_data) - (n_splits - i) * horizon
assert len(isplit[1]) == horizon
assert np.array_equal(isplit[0], np.arange(len(ts_data) - (n_splits - i) * horizon))
assert np.array_equal(isplit[1], np.arange(horizon) + len(ts_data) - (n_splits - i) * horizon)
else:
with pytest.raises(expected_error):
_ = list(fts.split(ts_data))
@pytest.mark.parametrize(
"test_data, expected_error",
[(np.arange(6), None), ([0, 1, 2, 3, 4, 5], None), ((0, 1, 2, 3, 4, 5), None), (13, TypeError)],
)
def test_cv_finertimesplit_split_input_data_types(test_data, expected_error):
n_splits = 2
horizon = 3
fts = FinerTimeSplit(n_splits=n_splits, horizon=horizon)
if expected_error is None:
result = list(fts.split(test_data))
assert len(result) == n_splits
for i, isplit in enumerate(result):
assert len(isplit) == 2
assert len(isplit[0]) == len(test_data) - (n_splits - i) * horizon
assert len(isplit[1]) == horizon
assert np.array_equal(isplit[0], np.arange(len(test_data) - (n_splits - i) * horizon))
assert np.array_equal(isplit[1], np.arange(horizon) + len(test_data) - (n_splits - i) * horizon,)
else:
with pytest.raises(expected_error):
_ = list(fts.split(test_data))
| true | true |
1c324767b609766f14f0328cf7c1effc49be065a | 5,067 | py | Python | geoprisma/core/proxies/proxy.py | groupe-conseil-nutshimit-nippour/django-geoprisma | 4732fdb8a0684eb4d7fd50aa43e11b454ee71d08 | [
"BSD-3-Clause"
] | null | null | null | geoprisma/core/proxies/proxy.py | groupe-conseil-nutshimit-nippour/django-geoprisma | 4732fdb8a0684eb4d7fd50aa43e11b454ee71d08 | [
"BSD-3-Clause"
] | 5 | 2020-02-12T00:23:17.000Z | 2021-12-13T19:46:33.000Z | geoprisma/core/proxies/proxy.py | groupe-conseil-nutshimit-nippour/django-geoprisma | 4732fdb8a0684eb4d7fd50aa43e11b454ee71d08 | [
"BSD-3-Clause"
] | null | null | null | import urllib
import requests
from geoprisma.utils import isAuthorized
from geoprisma.models import Resource, Datastore
from django.contrib.auth.models import User
class Proxy(object):
"""
Class Proxy de base.
Tout les autres proxy herite de cette classe.
"""
CRUD_CREATE = "create"
CRUD_READ = "read"
CRUD_UPDATE = "update"
CRUD_DELETE = "delete"
def __init__(self, service, prequest):
"""
Constructeur
Args:
service: Le service
prequest: La requete
"""
self.m_objService = service
self.m_objRequest = prequest
self.m_objResource = []
#def setResources(self, pobjArrayResources):
# self.m_objResource = pobjArrayResources
def getLayers(self):
pass
def addParam(self, pstrUrl):
"""
Ajoute des parametres a un url.
Args:
pstrUrl: L'url
Returns:
un Url
"""
if '?' in pstrUrl:
return pstrUrl+"&"+urllib.urlencode(self.getRequestParams())
else:
return pstrUrl+"?"+urllib.urlencode(self.getRequestParams())
def getPathInfo(self):
"""
Recupere le pathinfo.
Return:
Le path info
"""
if self.m_objRequest.path_info:
return self.m_objRequest.path_info
return ''
def getRequestParams(self):
"""
Recupere les parametres du querystring.
Returns:
Un tableau contenant les parametres
"""
objArrayParams = {}
for (strKey, strParam) in self.m_objRequest.GET.iteritems():
if strKey[:3] != "osm":
objArrayParams[strKey] = strParam
return objArrayParams
def getResourcesFromRequest(self, pobjConfig, pobjRequest=None):
if pobjRequest:
objRequest = pobjRequest
else:
objRequest = pobjConfig
if objRequest.get('service_slug') is None:
raise Exception("osmservice param is missing")
if objRequest.get('resource_slug') is list:
pass
else:
self.m_objResource.append(Resource.objects.getResource(objRequest.get('resource_slug')))
def validateResourcesFromRequest(self, pobjService=None, pobjArrayResources=None):
"""
Valide si la resources est valide avec le service.
Args:
pobjService: Object service optionnel
pobjArrayResources: Object Resource optionnel
Raise:
Exception: Not Authorized by config - Service
"""
if pobjService is None:
objService = self.m_objService
else:
objService = pobjService
if pobjArrayResources is None:
objArrayResources = self.m_objResource
else:
objArrayResources = pobjArrayResources
for objResource in objArrayResources:
if not objResource.datastores.filter(service=objService):
raise Exception('Not Authorized by Config - Service')
def validateLayersFromRequest(self, pobjService=None, pobjArrayResources=None, pobjArrayLayers=None):
"""
Valide les couches d'une requete.
Args:
pobjService: Object service optionnel
pobjArrayResources: Object resource optionnel
pobjArrayLayers: Object couche optionnel
Raise:
Exception: Not Authorized by Datastore layer
"""
if pobjService is None:
objService = self.m_objService
else:
objService = pobjService
if pobjArrayResources is None:
objArrayResource = self.m_objResource
else:
objArrayResource = pobjArrayResources
if pobjArrayLayers is None:
objArraylayers = self.getLayers()
else:
objArraylayers = pobjArrayLayers
bAllAuthorized = True
for strLayer in objArraylayers:
bIsAuthorized = False
for objResource in self.m_objResource:
objDatastore = objResource.datastores.filter(service=objService)
if objDatastore.filter(layers__contains=strLayer):
bIsAuthorized = True
break
if not bIsAuthorized:
bAllAuthorized = False
break
if not bAllAuthorized:
raise Exception('Not Authorized by Datastore layer')
def areResourcesFromRequestAuthorized(self, pobjArrayResources=None):
if pobjArrayResources is None:
objArrayResource = self.m_objResource
else:
objArrayResource = pobjArrayResources
try:
user = User.objects.get(id=self.m_objRequest.user.pk)
except User.DoesNotExist:
raise Exception("Authentification required")
for objResource in objArrayResource:
if not isAuthorized(user, objResource.name, self.getAction()):
raise Exception("Not Authorized by ACL")
| 29.982249 | 105 | 0.606671 | import urllib
import requests
from geoprisma.utils import isAuthorized
from geoprisma.models import Resource, Datastore
from django.contrib.auth.models import User
class Proxy(object):
CRUD_CREATE = "create"
CRUD_READ = "read"
CRUD_UPDATE = "update"
CRUD_DELETE = "delete"
def __init__(self, service, prequest):
self.m_objService = service
self.m_objRequest = prequest
self.m_objResource = []
def getLayers(self):
pass
def addParam(self, pstrUrl):
if '?' in pstrUrl:
return pstrUrl+"&"+urllib.urlencode(self.getRequestParams())
else:
return pstrUrl+"?"+urllib.urlencode(self.getRequestParams())
def getPathInfo(self):
if self.m_objRequest.path_info:
return self.m_objRequest.path_info
return ''
def getRequestParams(self):
objArrayParams = {}
for (strKey, strParam) in self.m_objRequest.GET.iteritems():
if strKey[:3] != "osm":
objArrayParams[strKey] = strParam
return objArrayParams
def getResourcesFromRequest(self, pobjConfig, pobjRequest=None):
if pobjRequest:
objRequest = pobjRequest
else:
objRequest = pobjConfig
if objRequest.get('service_slug') is None:
raise Exception("osmservice param is missing")
if objRequest.get('resource_slug') is list:
pass
else:
self.m_objResource.append(Resource.objects.getResource(objRequest.get('resource_slug')))
def validateResourcesFromRequest(self, pobjService=None, pobjArrayResources=None):
if pobjService is None:
objService = self.m_objService
else:
objService = pobjService
if pobjArrayResources is None:
objArrayResources = self.m_objResource
else:
objArrayResources = pobjArrayResources
for objResource in objArrayResources:
if not objResource.datastores.filter(service=objService):
raise Exception('Not Authorized by Config - Service')
def validateLayersFromRequest(self, pobjService=None, pobjArrayResources=None, pobjArrayLayers=None):
if pobjService is None:
objService = self.m_objService
else:
objService = pobjService
if pobjArrayResources is None:
objArrayResource = self.m_objResource
else:
objArrayResource = pobjArrayResources
if pobjArrayLayers is None:
objArraylayers = self.getLayers()
else:
objArraylayers = pobjArrayLayers
bAllAuthorized = True
for strLayer in objArraylayers:
bIsAuthorized = False
for objResource in self.m_objResource:
objDatastore = objResource.datastores.filter(service=objService)
if objDatastore.filter(layers__contains=strLayer):
bIsAuthorized = True
break
if not bIsAuthorized:
bAllAuthorized = False
break
if not bAllAuthorized:
raise Exception('Not Authorized by Datastore layer')
def areResourcesFromRequestAuthorized(self, pobjArrayResources=None):
if pobjArrayResources is None:
objArrayResource = self.m_objResource
else:
objArrayResource = pobjArrayResources
try:
user = User.objects.get(id=self.m_objRequest.user.pk)
except User.DoesNotExist:
raise Exception("Authentification required")
for objResource in objArrayResource:
if not isAuthorized(user, objResource.name, self.getAction()):
raise Exception("Not Authorized by ACL")
| true | true |
1c3247a0889f339178ae9f87828ca4db9cc9425f | 2,017 | py | Python | profiler/tests/test_metrics_use_case.py | Hydrospheredata/hydro-profiler | bca177f8d71d9d8c93091f9a9fdf9a337582e194 | [
"Apache-2.0"
] | null | null | null | profiler/tests/test_metrics_use_case.py | Hydrospheredata/hydro-profiler | bca177f8d71d9d8c93091f9a9fdf9a337582e194 | [
"Apache-2.0"
] | 14 | 2021-11-22T18:07:28.000Z | 2022-01-14T08:51:45.000Z | profiler/tests/test_metrics_use_case.py | Hydrospheredata/hydro-profiler | bca177f8d71d9d8c93091f9a9fdf9a337582e194 | [
"Apache-2.0"
] | 1 | 2021-11-19T11:16:50.000Z | 2021-11-19T11:16:50.000Z | from datetime import datetime
import logging
import pandas as pd
from profiler.use_cases.aggregation_use_case import AggregationUseCase
from profiler.use_cases.metrics_use_case import MetricsUseCase
from profiler.use_cases.overall_reports_use_case import OverallReportsUseCase
from profiler.use_cases.report_use_case import ReportUseCase
class TestMetricsUseCase:
def test_x(
self,
caplog,
models_repo,
metrics_use_case,
aggregations_use_case,
reports_use_case,
overall_reports_use_case,
adult_model_wrapper,
):
caplog.set_level(logging.INFO)
agg_uc: AggregationUseCase = aggregations_use_case
metrics_uc: MetricsUseCase = metrics_use_case
reports_uc: ReportUseCase = reports_use_case
overall_uc: OverallReportsUseCase = overall_reports_use_case
model = adult_model_wrapper.model
train_file = pd.read_csv(adult_model_wrapper.get_batch("train.csv"))
batch_file = pd.read_csv(adult_model_wrapper.get_batch("batch_3.csv"))
models_repo.save(model)
metrics_uc.generate_metrics(model, train_file)
metrics = metrics_uc.get_by_model(model)
training_report = reports_uc.generate_report(
model, "training", datetime.now(), train_file
)
reports_uc.save_report(training_report)
report = reports_uc.generate_report(
model, "batch_2", datetime.now(), batch_file
)
reports_uc.save_report(report)
agg_uc.generate_aggregation(report)
agg = agg_uc.get(model.name, model.version)
overall_uc.generate_overall_report(training_report)
overall_uc.generate_overall_report(report)
overall_report = overall_uc.get_report(model.name, model.version, "batch_2")
stat = overall_uc.calculate_batch_stats(model.name, model.version, "batch_2")
assert overall_report
assert metrics
assert report
assert agg
assert stat
| 33.065574 | 85 | 0.71294 | from datetime import datetime
import logging
import pandas as pd
from profiler.use_cases.aggregation_use_case import AggregationUseCase
from profiler.use_cases.metrics_use_case import MetricsUseCase
from profiler.use_cases.overall_reports_use_case import OverallReportsUseCase
from profiler.use_cases.report_use_case import ReportUseCase
class TestMetricsUseCase:
def test_x(
self,
caplog,
models_repo,
metrics_use_case,
aggregations_use_case,
reports_use_case,
overall_reports_use_case,
adult_model_wrapper,
):
caplog.set_level(logging.INFO)
agg_uc: AggregationUseCase = aggregations_use_case
metrics_uc: MetricsUseCase = metrics_use_case
reports_uc: ReportUseCase = reports_use_case
overall_uc: OverallReportsUseCase = overall_reports_use_case
model = adult_model_wrapper.model
train_file = pd.read_csv(adult_model_wrapper.get_batch("train.csv"))
batch_file = pd.read_csv(adult_model_wrapper.get_batch("batch_3.csv"))
models_repo.save(model)
metrics_uc.generate_metrics(model, train_file)
metrics = metrics_uc.get_by_model(model)
training_report = reports_uc.generate_report(
model, "training", datetime.now(), train_file
)
reports_uc.save_report(training_report)
report = reports_uc.generate_report(
model, "batch_2", datetime.now(), batch_file
)
reports_uc.save_report(report)
agg_uc.generate_aggregation(report)
agg = agg_uc.get(model.name, model.version)
overall_uc.generate_overall_report(training_report)
overall_uc.generate_overall_report(report)
overall_report = overall_uc.get_report(model.name, model.version, "batch_2")
stat = overall_uc.calculate_batch_stats(model.name, model.version, "batch_2")
assert overall_report
assert metrics
assert report
assert agg
assert stat
| true | true |
1c324880a6a23d41039b545031dffb2f396c642f | 640 | py | Python | pokebot/bots/utils.py | nacharya114/pokebot | b9028c86c5ee58178f348c75c39225f7b55507aa | [
"MIT"
] | 1 | 2020-05-20T04:52:24.000Z | 2020-05-20T04:52:24.000Z | pokebot/bots/utils.py | nacharya114/pokebot | b9028c86c5ee58178f348c75c39225f7b55507aa | [
"MIT"
] | null | null | null | pokebot/bots/utils.py | nacharya114/pokebot | b9028c86c5ee58178f348c75c39225f7b55507aa | [
"MIT"
] | null | null | null | #########################
# Author: Neil Acharya
#
# Helper Functions
#########################
def teampreview_performance(mon_a, mon_b):
    """Score mon_a against mon_b as the difference of best type advantages.

    For each pokemon we take the maximum damage multiplier any of its
    types achieves against the opponent's types; the result is
    (a's best vs b) minus (b's best vs a).
    """
    def _best_advantage(attacker, defender):
        # Highest damage multiplier among the attacker's (non-None) types.
        score = -np.inf
        for typ in attacker.types:
            if typ:
                score = max(score, typ.damage_multiplier(*defender.types))
        return score

    return _best_advantage(mon_a, mon_b) - _best_advantage(mon_b, mon_a)
| 33.684211 | 79 | 0.604688 | true | true | |
1c3248f3bf60c84c3eccf88a80508685e532ac42 | 110,825 | py | Python | python/pyarrow/tests/test_parquet.py | maxburke/arrow | 344ed4bed675c4913db5cc7b17d0e6cc57ea55c4 | [
"Apache-2.0"
] | 1 | 2019-12-27T14:15:07.000Z | 2019-12-27T14:15:07.000Z | python/pyarrow/tests/test_parquet.py | maxburke/arrow | 344ed4bed675c4913db5cc7b17d0e6cc57ea55c4 | [
"Apache-2.0"
] | null | null | null | python/pyarrow/tests/test_parquet.py | maxburke/arrow | 344ed4bed675c4913db5cc7b17d0e6cc57ea55c4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import datetime
import decimal
import io
import json
import os
import six
import pickle
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.compat import guid, u, BytesIO, unichar, PY2
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.filesystem import LocalFileSystem, FileSystem
try:
import pyarrow.parquet as pq
except ImportError:
pq = None
try:
import pandas as pd
import pandas.util.testing as tm
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
except ImportError:
pd = tm = None
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not parquet'
pytestmark = pytest.mark.parquet
@pytest.fixture(scope='module')
def datadir(datadir):
    # Narrow the shared test-data directory to the parquet subfolder.
    return datadir / 'parquet'
def _write_table(table, path, **kwargs):
    """Write *table* (pyarrow Table or pandas DataFrame) to parquet at
    *path*, forwarding **kwargs to pq.write_table; return the Table written."""
    # So we see the ImportError somewhere
    import pyarrow.parquet as pq
    if _pandas_api.is_data_frame(table):
        table = pa.Table.from_pandas(table)
    pq.write_table(table, path, **kwargs)
    return table
def _read_table(*args, **kwargs):
    """Thin pass-through to pq.read_table so tests share one read entry point."""
    return pq.read_table(*args, **kwargs)
def _roundtrip_table(table, read_table_kwargs=None,
                     write_table_kwargs=None):
    """Write *table* to an in-memory parquet buffer and read it back."""
    reader_opts = read_table_kwargs if read_table_kwargs else {}
    writer_opts = write_table_kwargs if write_table_kwargs else {}

    sink = io.BytesIO()
    _write_table(table, sink, **writer_opts)
    sink.seek(0)
    return _read_table(sink, **reader_opts)
def _check_roundtrip(table, expected=None, read_table_kwargs=None,
                     **write_table_kwargs):
    """Round-trip *table* through parquet twice, asserting each result
    equals *expected* (defaults to *table* itself)."""
    if expected is None:
        expected = table
    reader_opts = read_table_kwargs if read_table_kwargs else {}

    # Two passes on purpose: the second one starts from the already
    # round-tripped table and must still compare equal.
    current = table
    for _ in range(2):
        current = _roundtrip_table(current, read_table_kwargs=reader_opts,
                                   write_table_kwargs=write_table_kwargs)
        assert current.equals(expected)
def _roundtrip_pandas_dataframe(df, write_kwargs):
    """Round-trip *df* through an in-memory parquet file back to pandas."""
    sink = io.BytesIO()
    _write_table(pa.Table.from_pandas(df), sink, **write_kwargs)
    sink.seek(0)
    return _read_table(sink).to_pandas()
@pytest.mark.parametrize('dtype', [int, float])
def test_single_pylist_column_roundtrip(tempdir, dtype):
    """A one-column table built from a Python list survives a file round trip
    with matching field names and chunk data."""
    filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)
    data = [pa.array(list(map(dtype, range(5))))]
    table = pa.Table.from_arrays(data, names=['a'])
    _write_table(table, filename)
    table_read = _read_table(filename)
    for i in range(table.num_columns):
        col_written = table[i]
        col_read = table_read[i]
        assert table.field(i).name == table_read.field(i).name
        assert col_read.num_chunks == 1
        data_written = col_written.chunk(0)
        data_read = col_read.chunk(0)
        assert data_written.equals(data_read)
def alltypes_sample(size=10000, seed=0, categorical=False):
    """Build a DataFrame covering the primitive column types these tests use.

    size: number of rows; seed: numpy RNG seed for reproducibility;
    categorical: additionally include a categorical copy of the 'str' column.
    """
    np.random.seed(seed)
    arrays = {
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        'int8': np.arange(size, dtype=np.int16),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        # TODO(wesm): Test other timestamp resolutions now that arrow supports
        # them
        'datetime': np.arange("2016-01-01T00:00:00.001", size,
                              dtype='datetime64[ms]'),
        'str': pd.Series([str(x) for x in range(size)]),
        'empty_str': [''] * size,
        'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
        'null': [None] * size,
        'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)],
    }
    if categorical:
        arrays['str_category'] = arrays['str'].astype('category')
    return pd.DataFrame(arrays)
@pytest.mark.pandas
@pytest.mark.parametrize('chunk_size', [None, 1000])
def test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):
    """All-types DataFrame round-trips through parquet v2.0, preserving
    pandas schema metadata, with and without row-group chunking."""
    df = alltypes_sample(size=10000, categorical=True)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert arrow_table.schema.pandas_metadata is not None
    _write_table(arrow_table, filename, version="2.0",
                 coerce_timestamps='ms', chunk_size=chunk_size)
    table_read = pq.read_pandas(filename)
    assert table_read.schema.pandas_metadata is not None
    assert arrow_table.schema.metadata == table_read.schema.metadata
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
def test_set_data_page_size():
    """Round-trip works across several explicit data-page size settings."""
    arr = pa.array([1, 2, 3] * 1000000)
    t = pa.Table.from_arrays([arr], names=['f0'])
    # 128K, 256K, 512K
    page_sizes = [2 << 16, 2 << 17, 2 << 18]
    for target_page_size in page_sizes:
        _check_roundtrip(t, data_page_size=target_page_size)
@pytest.mark.pandas
def test_chunked_table_write():
    """Tables assembled from multiple record batches round-trip (ARROW-232)."""
    # ARROW-232
    df = alltypes_sample(size=10)
    batch = pa.RecordBatch.from_pandas(df)
    table = pa.Table.from_batches([batch] * 3)
    _check_roundtrip(table, version='2.0')
    df, _ = dataframe_with_lists()
    batch = pa.RecordBatch.from_pandas(df)
    table = pa.Table.from_batches([batch] * 3)
    _check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_memory_map(tempdir):
    """Reading with memory_map=True yields the same table as a normal read."""
    df = alltypes_sample(size=10)
    table = pa.Table.from_pandas(df)
    _check_roundtrip(table, read_table_kwargs={'memory_map': True},
                     version='2.0')
    filename = str(tempdir / 'tmp_file')
    with open(filename, 'wb') as f:
        _write_table(table, f, version='2.0')
    table_read = pq.read_pandas(filename, memory_map=True)
    assert table_read.equals(table)
@pytest.mark.pandas
def test_enable_buffered_stream(tempdir):
    """Reading with a buffered input stream (buffer_size set) yields the
    same table as an unbuffered read."""
    df = alltypes_sample(size=10)
    table = pa.Table.from_pandas(df)
    _check_roundtrip(table, read_table_kwargs={'buffer_size': 1025},
                     version='2.0')
    filename = str(tempdir / 'tmp_file')
    with open(filename, 'wb') as f:
        _write_table(table, f, version='2.0')
    table_read = pq.read_pandas(filename, buffer_size=4096)
    assert table_read.equals(table)
def test_special_chars_filename(tempdir):
    """Filenames containing '#' and spaces can be written and read back."""
    table = pa.Table.from_arrays([pa.array([42])], ["ints"])
    filename = "foo # bar"
    path = tempdir / filename
    assert not path.exists()
    _write_table(table, str(path))
    assert path.exists()
    table_read = _read_table(str(path))
    assert table_read.equals(table)
@pytest.mark.pandas
def test_empty_table_roundtrip():
    """A zero-row table (including null-typed columns) round-trips."""
    df = alltypes_sample(size=10)
    # Create a non-empty table to infer the types correctly, then slice to 0
    table = pa.Table.from_pandas(df)
    table = pa.Table.from_arrays(
        [col.chunk(0)[:0] for col in table.itercolumns()],
        names=table.schema.names)
    assert table.schema.field('null').type == pa.null()
    assert table.schema.field('null_list').type == pa.list_(pa.null())
    _check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_empty_table_no_columns():
    """A DataFrame with no columns at all round-trips."""
    df = pd.DataFrame()
    empty = pa.Table.from_pandas(df, preserve_index=False)
    _check_roundtrip(empty)
def test_empty_lists_table_roundtrip():
    """Writing a column of empty lists does not crash (ARROW-2744)."""
    # ARROW-2744: Shouldn't crash when writing an array of empty lists
    arr = pa.array([[], []], type=pa.list_(pa.int32()))
    table = pa.Table.from_arrays([arr], ["A"])
    _check_roundtrip(table)
def test_nested_list_nonnullable_roundtrip_bug():
    """Lists with a non-nullable item field round-trip across page
    boundaries (regression for ARROW-5630)."""
    # Reproduce failure in ARROW-5630
    typ = pa.list_(pa.field("item", pa.float32(), False))
    num_rows = 10000
    t = pa.table([
        pa.array(([[0] * ((i + 5) % 10) for i in range(0, 10)]
                  * (num_rows // 10)), type=typ)
    ], ['a'])
    # Small data pages force list values to span multiple pages.
    _check_roundtrip(t, data_page_size=4096)
@pytest.mark.pandas
def test_pandas_parquet_datetime_tz():
    """Timezone-aware timestamps round-trip both as a column and as index."""
    s = pd.Series([datetime.datetime(2017, 9, 6)])
    s = s.dt.tz_localize('utc')
    s.index = s
    # Both a column and an index to hit both use cases
    df = pd.DataFrame({'tz_aware': s,
                       'tz_eastern': s.dt.tz_convert('US/Eastern')},
                      index=s)
    f = BytesIO()
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, f, coerce_timestamps='ms')
    f.seek(0)
    table_read = pq.read_pandas(f)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@pytest.mark.skipif(six.PY2, reason='datetime.timezone is available since '
                                    'python version 3.2')
def test_datetime_timezone_tzinfo():
    """A datetime carrying stdlib tzinfo (datetime.timezone.utc) round-trips."""
    value = datetime.datetime(2018, 1, 1, 1, 23, 45,
                              tzinfo=datetime.timezone.utc)
    df = pd.DataFrame({'foo': [value]})
    _roundtrip_pandas_dataframe(df, write_kwargs={})
@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
    """The 'pandas' key in file metadata survives a write and describes the
    RangeIndex of the original DataFrame."""
    df = alltypes_sample(size=10000)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert b'pandas' in arrow_table.schema.metadata
    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
    metadata = pq.read_metadata(filename).metadata
    assert b'pandas' in metadata
    js = json.loads(metadata[b'pandas'].decode('utf8'))
    assert js['index_columns'] == [{'kind': 'range',
                                    'name': None,
                                    'start': 0, 'stop': 10000,
                                    'step': 1}]
@pytest.mark.pandas
def test_pandas_parquet_column_multiindex(tempdir):
    """A DataFrame with a MultiIndex on its columns round-trips."""
    df = alltypes_sample(size=10)
    df.columns = pd.MultiIndex.from_tuples(
        list(zip(df.columns, df.columns[::-1])),
        names=['level_1', 'level_2']
    )
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert arrow_table.schema.pandas_metadata is not None
    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
    table_read = pq.read_pandas(filename)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):
    """With preserve_index=False the pandas metadata has no index columns but
    still lists the data columns (ARROW-2170)."""
    df = alltypes_sample(size=10000)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    js = arrow_table.schema.pandas_metadata
    assert not js['index_columns']
    # ARROW-2170
    # While index_columns should be empty, columns needs to be filled still.
    assert js['columns']
    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
    table_read = pq.read_pandas(filename)
    js = table_read.schema.pandas_metadata
    assert not js['index_columns']
    assert arrow_table.schema.metadata == table_read.schema.metadata
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_1_0_roundtrip(tempdir):
    """Parquet v1.0 round trip; uint32 is widened to int64 by the writer."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        'int8': np.arange(size, dtype=np.int16),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'str': [str(x) for x in range(size)],
        'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
        'empty_str': [''] * size
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, filename, version='1.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    # We pass uint32_t as int64_t if we write Parquet version 1.0
    df['uint32'] = df['uint32'].values.astype(np.int64)
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_multiple_path_types(tempdir):
    """Both PEP 519 path-like objects and plain strings work as paths."""
    # Test compatibility with PEP 519 path-like objects
    path = tempdir / 'zzz.parquet'
    df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
    _write_table(df, path)
    table_read = _read_table(path)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
    # Test compatibility with plain string paths
    path = str(tempdir) + 'zzz.parquet'
    df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
    _write_table(df, path)
    table_read = _read_table(path)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_column_selection(tempdir):
    """Reading a column subset works, and duplicate names in the selection
    still read each column only once (ARROW-4267)."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16)
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, filename)
    table_read = _read_table(filename, columns=['uint8'])
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df[['uint8']], df_read)
    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    table_read = _read_table(filename, columns=['uint8', 'uint8'])
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df[['uint8']], df_read)
def _random_integers(size, dtype):
# We do not generate integers outside the int64 range
platform_int_info = np.iinfo('int_')
iinfo = np.iinfo(dtype)
return np.random.randint(max(iinfo.min, platform_int_info.min),
min(iinfo.max, platform_int_info.max),
size=size).astype(dtype)
def _test_dataframe(size=10000, seed=0):
    """Build a seeded DataFrame with random values of many dtypes, plus
    random strings and all-None columns, for round-trip tests."""
    np.random.seed(seed)
    df = pd.DataFrame({
        'uint8': _random_integers(size, np.uint8),
        'uint16': _random_integers(size, np.uint16),
        'uint32': _random_integers(size, np.uint32),
        'uint64': _random_integers(size, np.uint64),
        'int8': _random_integers(size, np.int8),
        'int16': _random_integers(size, np.int16),
        'int32': _random_integers(size, np.int32),
        'int64': _random_integers(size, np.int64),
        'float32': np.random.randn(size).astype(np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'strings': [tm.rands(10) for i in range(size)],
        'all_none': [None] * size,
        'all_none_category': [None] * size
    })
    # TODO(PARQUET-1015)
    # df['all_none_category'] = df['all_none_category'].astype('category')
    return df
@pytest.mark.pandas
def test_pandas_parquet_native_file_roundtrip(tempdir):
    """Round trip through Arrow-native BufferOutputStream/BufferReader."""
    df = _test_dataframe(10000)
    arrow_table = pa.Table.from_pandas(df)
    imos = pa.BufferOutputStream()
    _write_table(arrow_table, imos, version="2.0")
    buf = imos.getvalue()
    reader = pa.BufferReader(buf)
    df_read = _read_table(reader).to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_parquet_incremental_file_build(tempdir):
    """ParquetWriter appends ten tables incrementally; the read-back file
    equals the concatenation of all written frames."""
    df = _test_dataframe(100)
    df['unique_id'] = 0
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0')
    frames = []
    for i in range(10):
        df['unique_id'] = i
        arrow_table = pa.Table.from_pandas(df, preserve_index=False)
        writer.write_table(arrow_table)
        frames.append(df.copy())
    writer.close()
    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))
    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_read_pandas_column_subset(tempdir):
    """pq.read_pandas honors the columns= selection."""
    df = _test_dataframe(10000)
    arrow_table = pa.Table.from_pandas(df)
    imos = pa.BufferOutputStream()
    _write_table(arrow_table, imos, version="2.0")
    buf = imos.getvalue()
    reader = pa.BufferReader(buf)
    df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()
    tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@pytest.mark.pandas
def test_pandas_parquet_empty_roundtrip(tempdir):
    """A zero-row DataFrame round-trips through an in-memory buffer."""
    df = _test_dataframe(0)
    arrow_table = pa.Table.from_pandas(df)
    imos = pa.BufferOutputStream()
    _write_table(arrow_table, imos, version="2.0")
    buf = imos.getvalue()
    reader = pa.BufferReader(buf)
    df_read = _read_table(reader).to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_pyfile_roundtrip(tempdir):
    """Round trip writing to an open Python file object and reading from
    an io.BytesIO built from the file bytes."""
    filename = tempdir / 'pandas_pyfile_roundtrip.parquet'
    size = 5
    df = pd.DataFrame({
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'strings': ['foo', 'bar', None, 'baz', 'qux']
    })
    arrow_table = pa.Table.from_pandas(df)
    with filename.open('wb') as f:
        _write_table(arrow_table, f, version="1.0")
    data = io.BytesIO(filename.read_bytes())
    table_read = _read_table(data)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_configuration_options(tempdir):
    """Round trip under each writer option: use_dictionary on/off,
    write_statistics on/off, and every supported compression codec."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        'int8': np.arange(size, dtype=np.int16),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    for use_dictionary in [True, False]:
        _write_table(arrow_table, filename, version='2.0',
                     use_dictionary=use_dictionary)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)
    for write_statistics in [True, False]:
        _write_table(arrow_table, filename, version='2.0',
                     write_statistics=write_statistics)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)
    for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:
        _write_table(arrow_table, filename, version='2.0',
                     compression=compression)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)
def make_sample_file(table_or_df):
    """Write *table_or_df* (Table or DataFrame) to an in-memory parquet
    file (SNAPPY, v2.0, ms timestamps) and return an open ParquetFile."""
    if isinstance(table_or_df, pa.Table):
        sample_table = table_or_df
    else:
        sample_table = pa.Table.from_pandas(table_or_df)

    sink = io.BytesIO()
    _write_table(sample_table, sink, compression='SNAPPY', version='2.0',
                 coerce_timestamps='ms')
    sink.seek(0)
    return pq.ParquetFile(sink)
def test_compression_level():
    """compression_level round-trips for gzip (global and per-column) and
    invalid codec/level combinations raise IOError."""
    arr = pa.array(list(map(int, range(1000))))
    data = [arr, arr]
    table = pa.Table.from_arrays(data, names=['a', 'b'])
    # Check one compression level.
    _check_roundtrip(table, expected=table, compression="gzip",
                     compression_level=1)
    # Check another one to make sure that compression_level=1 does not
    # coincide with the default one in Arrow.
    _check_roundtrip(table, expected=table, compression="gzip",
                     compression_level=5)
    # Check that the user can provide a compression level per column
    _check_roundtrip(table, expected=table, compression="gzip",
                     compression_level=[{'a': 2, 'b': 3}])
    # Check that specifying a compression level for a codec which does not
    # allow specifying one results in an error.
    # Uncompressed, snappy, lz4 and lzo do not support specifying a compression
    # level.
    # GZIP (zlib) allows for specifying a compression level but as of up
    # to version 1.2.11 the valid range is [-1, 9].
    invalid_combinations = [("snappy", 4), ("lz4", 5), ("gzip", -1337),
                            ("None", 444), ("lzo", 14)]
    buf = io.BytesIO()
    for (codec, level) in invalid_combinations:
        with pytest.raises(IOError):
            _write_table(table, buf, compression=codec,
                         compression_level=level)
@pytest.mark.pandas
def test_parquet_metadata_api():
    """Sniff-test the FileMetaData / ParquetSchema / RowGroupMetaData /
    ColumnChunkMetaData accessors on a sample file."""
    df = alltypes_sample(size=10000)
    df = df.reindex(columns=sorted(df.columns))
    df.index = np.random.randint(0, 1000000, size=len(df))
    fileh = make_sample_file(df)
    ncols = len(df.columns)
    # Series of sniff tests
    meta = fileh.metadata
    repr(meta)
    assert meta.num_rows == len(df)
    assert meta.num_columns == ncols + 1  # +1 for index
    assert meta.num_row_groups == 1
    assert meta.format_version == '2.0'
    assert 'parquet-cpp' in meta.created_by
    assert isinstance(meta.serialized_size, int)
    assert isinstance(meta.metadata, dict)
    # Schema
    schema = fileh.schema
    assert meta.schema is schema
    assert len(schema) == ncols + 1  # +1 for index
    repr(schema)
    col = schema[0]
    repr(col)
    assert col.name == df.columns[0]
    assert col.max_definition_level == 1
    assert col.max_repetition_level == 0
    # NOTE(review): duplicate assertion below — possibly intended to check
    # a different property; confirm against upstream history.
    assert col.max_repetition_level == 0
    assert col.physical_type == 'BOOLEAN'
    assert col.converted_type == 'NONE'
    with pytest.raises(IndexError):
        schema[ncols + 1]  # +1 for index
    with pytest.raises(IndexError):
        schema[-1]
    # Row group
    for rg in range(meta.num_row_groups):
        rg_meta = meta.row_group(rg)
        assert isinstance(rg_meta, pq.RowGroupMetaData)
        repr(rg_meta)
        for col in range(rg_meta.num_columns):
            col_meta = rg_meta.column(col)
            assert isinstance(col_meta, pq.ColumnChunkMetaData)
            repr(col_meta)
    with pytest.raises(IndexError):
        meta.row_group(-1)
    with pytest.raises(IndexError):
        meta.row_group(meta.num_row_groups + 1)
    rg_meta = meta.row_group(0)
    assert rg_meta.num_rows == len(df)
    assert rg_meta.num_columns == ncols + 1  # +1 for index
    assert rg_meta.total_byte_size > 0
    with pytest.raises(IndexError):
        col_meta = rg_meta.column(-1)
    with pytest.raises(IndexError):
        col_meta = rg_meta.column(ncols + 2)
    col_meta = rg_meta.column(0)
    assert col_meta.file_offset > 0
    assert col_meta.file_path == ''  # created from BytesIO
    assert col_meta.physical_type == 'BOOLEAN'
    assert col_meta.num_values == 10000
    assert col_meta.path_in_schema == 'bool'
    assert col_meta.is_stats_set is True
    assert isinstance(col_meta.statistics, pq.Statistics)
    assert col_meta.compression == 'SNAPPY'
    assert col_meta.encodings == ('PLAIN', 'RLE')
    assert col_meta.has_dictionary_page is False
    assert col_meta.dictionary_page_offset is None
    assert col_meta.data_page_offset > 0
    assert col_meta.total_compressed_size > 0
    assert col_meta.total_uncompressed_size > 0
    with pytest.raises(NotImplementedError):
        col_meta.has_index_page
    with pytest.raises(NotImplementedError):
        col_meta.index_page_offset
def test_parquet_metadata_lifetime(tempdir):
    """Chained metadata accessors keep their parent objects alive
    (regression for ARROW-6642)."""
    # ARROW-6642 - ensure that chained access keeps parent objects alive
    table = pa.table({'a': [1, 2, 3]})
    pq.write_table(table, tempdir / 'test_metadata_segfault.parquet')
    dataset = pq.ParquetDataset(tempdir / 'test_metadata_segfault.parquet')
    dataset.pieces[0].get_metadata().row_group(0).column(0).statistics
@pytest.mark.pandas
@pytest.mark.parametrize(
    (
        'data',
        'type',
        'physical_type',
        'min_value',
        'max_value',
        'null_count',
        'num_values',
        'distinct_count'
    ),
    [
        ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0),
        (
            [-1.1, 2.2, 2.3, None, 4.4], pa.float32(),
            'FLOAT', -1.1, 4.4, 1, 4, 0
        ),
        (
            [-1.1, 2.2, 2.3, None, 4.4], pa.float64(),
            'DOUBLE', -1.1, 4.4, 1, 4, 0
        ),
        (
            [u'', u'b', unichar(1000), None, u'aaa'], pa.binary(),
            'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0
        ),
        (
            [True, False, False, True, True], pa.bool_(),
            'BOOLEAN', False, True, 0, 5, 0
        ),
        (
            [b'\x00', b'b', b'12', None, b'aaa'], pa.binary(),
            'BYTE_ARRAY', b'\x00', b'b', 1, 4, 0
        ),
    ]
)
def test_parquet_column_statistics_api(data, type, physical_type, min_value,
                                       max_value, null_count, num_values,
                                       distinct_count):
    """Column-chunk statistics (min/max/null_count/num_values/distinct_count
    and physical_type) match expectations for each parametrized type."""
    df = pd.DataFrame({'data': data})
    schema = pa.schema([pa.field('data', type)])
    table = pa.Table.from_pandas(df, schema=schema, safe=False)
    fileh = make_sample_file(table)
    meta = fileh.metadata
    rg_meta = meta.row_group(0)
    col_meta = rg_meta.column(0)
    stat = col_meta.statistics
    assert stat.has_min_max
    assert _close(type, stat.min, min_value)
    assert _close(type, stat.max, max_value)
    assert stat.null_count == null_count
    assert stat.num_values == num_values
    # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount
    # method, missing distinct_count is represented as zero instead of None
    assert stat.distinct_count == distinct_count
    assert stat.physical_type == physical_type
# ARROW-6339
@pytest.mark.pandas
def test_parquet_raise_on_unset_statistics():
    """Statistics with no min/max (all-NaT column) report has_min_max=False
    and a None max instead of raising."""
    df = pd.DataFrame({"t": pd.Series([pd.NaT], dtype="datetime64[ns]")})
    meta = make_sample_file(pa.Table.from_pandas(df)).metadata
    assert not meta.row_group(0).column(0).statistics.has_min_max
    assert meta.row_group(0).column(0).statistics.max is None
def _close(type, left, right):
    """Compare two statistics values, with a tolerance for float types."""
    if type == pa.float32():
        return abs(left - right) < 1E-7
    if type == pa.float64():
        return abs(left - right) < 1E-13
    return left == right
def test_statistics_convert_logical_types(tempdir):
    """Statistics min/max come back as logical Python values (ints, strings,
    times, timestamps) rather than raw physical values (ARROW-5166/4139)."""
    # ARROW-5166, ARROW-4139
    # (min, max, type)
    cases = [(10, 11164359321221007157, pa.uint64()),
             (10, 4294967295, pa.uint32()),
             (u"ähnlich", u"öffentlich", pa.utf8()),
             (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
              pa.time32('ms')),
             (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
              pa.time64('us')),
             (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
              datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
              pa.timestamp('ms')),
             (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
              datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
              pa.timestamp('us'))]
    for i, (min_val, max_val, typ) in enumerate(cases):
        t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)],
                                 ['col'])
        path = str(tempdir / ('example{}.parquet'.format(i)))
        pq.write_table(t, path, version='2.0')
        pf = pq.ParquetFile(path)
        stats = pf.metadata.row_group(0).column(0).statistics
        assert stats.min == min_val
        assert stats.max == max_val
def test_parquet_write_disable_statistics(tempdir):
    """write_statistics accepts True, False, and a per-column name list."""
    table = pa.Table.from_pydict(
        {'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])})
    _write_table(table, tempdir / 'data.parquet')
    meta = pq.read_metadata(tempdir / 'data.parquet')
    # Default: statistics written for every column.
    for col in [0, 1]:
        cc = meta.row_group(0).column(col)
        assert cc.is_stats_set is True
        assert cc.statistics is not None
    _write_table(table, tempdir / 'data2.parquet', write_statistics=False)
    meta = pq.read_metadata(tempdir / 'data2.parquet')
    for col in [0, 1]:
        cc = meta.row_group(0).column(col)
        assert cc.is_stats_set is False
        assert cc.statistics is None
    # A list restricts statistics to the named columns only.
    _write_table(table, tempdir / 'data3.parquet', write_statistics=['a'])
    meta = pq.read_metadata(tempdir / 'data3.parquet')
    cc_a = meta.row_group(0).column(0)
    assert cc_a.is_stats_set is True
    assert cc_a.statistics is not None
    cc_b = meta.row_group(0).column(1)
    assert cc_b.is_stats_set is False
    assert cc_b.statistics is None
@pytest.mark.pandas
def test_compare_schemas():
    """ParquetSchema and ColumnSchema equality: equal for identical files,
    unequal across differing schemas and arbitrary objects."""
    df = alltypes_sample(size=10000)
    fileh = make_sample_file(df)
    fileh2 = make_sample_file(df)
    fileh3 = make_sample_file(df[df.columns[::2]])
    # ParquetSchema
    assert isinstance(fileh.schema, pq.ParquetSchema)
    assert fileh.schema.equals(fileh.schema)
    assert fileh.schema == fileh.schema
    assert fileh.schema.equals(fileh2.schema)
    assert fileh.schema == fileh2.schema
    assert fileh.schema != 'arbitrary object'
    assert not fileh.schema.equals(fileh3.schema)
    assert fileh.schema != fileh3.schema
    # ColumnSchema
    assert isinstance(fileh.schema[0], pq.ColumnSchema)
    assert fileh.schema[0].equals(fileh.schema[0])
    assert fileh.schema[0] == fileh.schema[0]
    assert not fileh.schema[0].equals(fileh.schema[1])
    assert fileh.schema[0] != fileh.schema[1]
    assert fileh.schema[0] != 'arbitrary object'
def test_validate_schema_write_table(tempdir):
    """ParquetWriter rejects a table whose schema does not match the
    writer's declared schema (ARROW-2926)."""
    # ARROW-2926
    simple_fields = [
        pa.field('POS', pa.uint32()),
        pa.field('desc', pa.string())
    ]
    simple_schema = pa.schema(simple_fields)
    # simple_table schema does not match simple_schema
    simple_from_array = [pa.array([1]), pa.array(['bla'])]
    simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])
    path = tempdir / 'simple_validate_schema.parquet'
    with pq.ParquetWriter(path, simple_schema,
                          version='2.0',
                          compression='snappy', flavor='spark') as w:
        with pytest.raises(ValueError):
            w.write_table(simple_table)
@pytest.mark.pandas
def test_column_of_arrays(tempdir):
    """Columns containing numpy arrays (nested data) round-trip."""
    df, schema = dataframe_with_arrays()
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version="2.0", coerce_timestamps='ms')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_coerce_timestamps(tempdir):
    """coerce_timestamps='us' converts list-of-timestamp columns on write;
    an unknown unit raises ValueError (ARROW-622)."""
    from collections import OrderedDict
    # ARROW-622
    arrays = OrderedDict()
    fields = [pa.field('datetime64',
                       pa.list_(pa.timestamp('ms')))]
    arrays['datetime64'] = [
        np.array(['2007-07-13T01:23:34.123456789',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
        None,
        None,
        np.array(['2007-07-13T02',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
    ]
    df = pd.DataFrame(arrays)
    schema = pa.schema(fields)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version="2.0", coerce_timestamps='us')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    df_expected = df.copy()
    # The written data comes back with microsecond resolution.
    for i, x in enumerate(df_expected['datetime64']):
        if isinstance(x, np.ndarray):
            df_expected['datetime64'][i] = x.astype('M8[us]')
    tm.assert_frame_equal(df_expected, df_read)
    with pytest.raises(ValueError):
        _write_table(arrow_table, filename, version='2.0',
                     coerce_timestamps='unknown')
@pytest.mark.pandas
def test_coerce_timestamps_truncated(tempdir):
    """
    ARROW-2555: Test that we can truncate timestamps when coercing if
    explicitly allowed.
    """
    dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1, microsecond=1)
    dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1)
    fields_us = [pa.field('datetime64', pa.timestamp('us'))]
    arrays_us = {'datetime64': [dt_us, dt_ms]}
    df_us = pd.DataFrame(arrays_us)
    schema_us = pa.schema(fields_us)
    filename = tempdir / 'pandas_truncated.parquet'
    table_us = pa.Table.from_pandas(df_us, schema=schema_us)
    # allow_truncated_timestamps lets the sub-millisecond part be dropped.
    _write_table(table_us, filename, version="2.0", coerce_timestamps='ms',
                 allow_truncated_timestamps=True)
    table_ms = _read_table(filename)
    df_ms = table_ms.to_pandas()
    arrays_expected = {'datetime64': [dt_ms, dt_ms]}
    df_expected = pd.DataFrame(arrays_expected)
    tm.assert_frame_equal(df_expected, df_ms)
@pytest.mark.pandas
def test_column_of_lists(tempdir):
    """List-typed columns round-trip; Python 2 date lists are normalized to
    np.datetime64 before comparison."""
    df, schema = dataframe_with_lists(parquet_compatible=True)
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version='2.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()
    if PY2:
        # assert_frame_equal fails when comparing datetime.date and
        # np.datetime64, even with check_datetimelike_compat=True so
        # convert the values to np.datetime64 instead
        for col in ['date32[day]_list', 'date64[ms]_list']:
            df[col] = df[col].apply(
                lambda x: list(map(np.datetime64, x)) if x else x
            )
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_date_time_types(tempdir):
    """Roundtrip every date/time/timestamp arrow type and check the
    expected parquet storage coercions (date64->date32, time32[s]->ms,
    int64 vs int96 timestamp storage)."""
    # date32: days since epoch
    t1 = pa.date32()
    data1 = np.array([17259, 17260, 17261], dtype='int32')
    a1 = pa.array(data1, type=t1)
    # date64: milliseconds since epoch (same dates as above)
    t2 = pa.date64()
    data2 = data1.astype('int64') * 86400000
    a2 = pa.array(data2, type=t2)
    # microsecond timestamps
    t3 = pa.timestamp('us')
    start = pd.Timestamp('2001-01-01').value / 1000
    data3 = np.array([start, start + 1, start + 2], dtype='int64')
    a3 = pa.array(data3, type=t3)
    t4 = pa.time32('ms')
    data4 = np.arange(3, dtype='i4')
    a4 = pa.array(data4, type=t4)
    t5 = pa.time64('us')
    a5 = pa.array(data4.astype('int64'), type=t5)
    # time32[s] has no parquet equivalent; expected to come back as ms
    t6 = pa.time32('s')
    a6 = pa.array(data4, type=t6)
    ex_t6 = pa.time32('ms')
    ex_a6 = pa.array(data4 * 1000, type=ex_t6)
    t7 = pa.timestamp('ns')
    start = pd.Timestamp('2001-01-01').value
    data7 = np.array([start, start + 1000, start + 2000],
                     dtype='int64')
    a7 = pa.array(data7, type=t7)
    table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],
                                 ['date32', 'date64', 'timestamp[us]',
                                  'time32[s]', 'time64[us]',
                                  'time32_from64[s]',
                                  'timestamp[ns]'])
    # date64 as date32
    # time32[s] to time32[ms]
    expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],
                                    ['date32', 'date64', 'timestamp[us]',
                                     'time32[s]', 'time64[us]',
                                     'time32_from64[s]',
                                     'timestamp[ns]'])
    _check_roundtrip(table, expected=expected, version='2.0')
    # Second part: timestamp storage as INT64 vs deprecated INT96
    t0 = pa.timestamp('ms')
    data0 = np.arange(4, dtype='int64')
    a0 = pa.array(data0, type=t0)
    t1 = pa.timestamp('us')
    data1 = np.arange(4, dtype='int64')
    a1 = pa.array(data1, type=t1)
    t2 = pa.timestamp('ns')
    data2 = np.arange(4, dtype='int64')
    a2 = pa.array(data2, type=t2)
    table = pa.Table.from_arrays([a0, a1, a2],
                                 ['ts[ms]', 'ts[us]', 'ts[ns]'])
    expected = pa.Table.from_arrays([a0, a1, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])
    # int64 for all timestamps supported by default
    filename = tempdir / 'int64_timestamps.parquet'
    _write_table(table, filename, version='2.0')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT64'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
    # INT96 storage implies nanosecond resolution on read
    t0_ns = pa.timestamp('ns')
    data0_ns = np.array(data0 * 1000000, dtype='int64')
    a0_ns = pa.array(data0_ns, type=t0_ns)
    t1_ns = pa.timestamp('ns')
    data1_ns = np.array(data1 * 1000, dtype='int64')
    a1_ns = pa.array(data1_ns, type=t1_ns)
    expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])
    # int96 nanosecond timestamps produced upon request
    filename = tempdir / 'explicit_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 use_deprecated_int96_timestamps=True)
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
    # int96 nanosecond timestamps implied by flavor 'spark'
    filename = tempdir / 'spark_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 flavor='spark')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
def test_timestamp_restore_timezone():
    # ARROW-5888, restore timezone from serialized metadata
    tz_type = pa.timestamp('ms', tz='America/New_York')
    values = pa.array([1, 2, 3], type=tz_type)
    table = pa.table([values], names=['f0'])
    _check_roundtrip(table)
@pytest.mark.pandas
def test_list_of_datetime_time_roundtrip():
    # ARROW-4135
    # A cell holding a list of datetime.time values must survive the trip.
    stamps = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',
                             '11:30', '12:00'])
    frame = pd.DataFrame({'time': [stamps.time]})
    _roundtrip_pandas_dataframe(frame, write_kwargs={})
@pytest.mark.pandas
def test_parquet_version_timestamp_differences():
    """Check the default and explicit timestamp coercions applied by the
    v1.0 and v2.0 parquet writers for each arrow timestamp resolution."""
    i_s = pd.Timestamp('2010-01-01').value / 1000000000  # := 1262304000
    # Same instants at second/milli/micro/nano resolution
    d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')
    d_ms = d_s * 1000
    d_us = d_ms * 1000
    d_ns = d_us * 1000
    a_s = pa.array(d_s, type=pa.timestamp('s'))
    a_ms = pa.array(d_ms, type=pa.timestamp('ms'))
    a_us = pa.array(d_us, type=pa.timestamp('us'))
    a_ns = pa.array(d_ns, type=pa.timestamp('ns'))
    names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']
    table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)
    # Using Parquet version 1.0, seconds should be coerced to milliseconds
    # and nanoseconds should be coerced to microseconds by default
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)
    _check_roundtrip(table, expected)
    # Using Parquet version 2.0, seconds should be coerced to milliseconds
    # and nanoseconds should be retained by default
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)
    _check_roundtrip(table, expected, version='2.0')
    # Using Parquet version 1.0, coercing to milliseconds or microseconds
    # is allowed
    expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)
    _check_roundtrip(table, expected, coerce_timestamps='ms')
    # Using Parquet version 2.0, coercing to milliseconds or microseconds
    # is allowed
    expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)
    _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us')
    # TODO: after pyarrow allows coerce_timestamps='ns', tests like the
    # following should pass ...
    # Using Parquet version 1.0, coercing to nanoseconds is not allowed
    # expected = None
    # with pytest.raises(NotImplementedError):
    #     _roundtrip_table(table, coerce_timestamps='ns')
    # Using Parquet version 2.0, coercing to nanoseconds is allowed
    # expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    # _check_roundtrip(table, expected, version='2.0', coerce_timestamps='ns')
    # For either Parquet version, coercing to nanoseconds is allowed
    # if Int96 storage is used
    expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    _check_roundtrip(table, expected,
                     use_deprecated_int96_timestamps=True)
    _check_roundtrip(table, expected, version='2.0',
                     use_deprecated_int96_timestamps=True)
def test_large_list_records():
    # This was fixed in PARQUET-1100
    lengths = np.random.randint(0, 500, size=50)
    lengths[::10] = 0  # sprinkle in empty lists
    # Every 8th entry is a null list; the rest are random int lists.
    values = [None if i % 8 == 0
              else list(map(int, np.random.randint(0, 100, size=n)))
              for i, n in enumerate(lengths)]
    arr = pa.array(values)
    table = pa.Table.from_arrays([arr], ['int_lists'])
    _check_roundtrip(table)
def test_sanitized_spark_field_names():
    column = pa.array([0, 1, 2, 3, 4])
    bad_name = 'prohib; ,\t{}'
    table = pa.Table.from_arrays([column], [bad_name])
    result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'})
    # The 'spark' flavor replaces characters Spark cannot handle in field
    # names with underscores.
    assert result.schema[0].name == 'prohib______'
@pytest.mark.pandas
def test_spark_flavor_preserves_pandas_metadata():
    # The spark flavor must not drop the pandas index metadata.
    frame = _test_dataframe(size=100)
    frame.index = np.arange(0, 10 * len(frame), 10)
    frame.index.name = 'foo'
    roundtripped = _roundtrip_pandas_dataframe(
        frame, {'version': '2.0', 'flavor': 'spark'})
    tm.assert_frame_equal(roundtripped, frame)
def test_fixed_size_binary():
    # Fixed-width (10 byte) binary values, including a null.
    values = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']
    arr = pa.array(values, type=pa.binary(10))
    table = pa.Table.from_arrays([arr], ['binary[10]'])
    _check_roundtrip(table)
@pytest.mark.pandas
def test_multithreaded_read():
    # Threaded and serial reads of the same buffer must agree.
    table = pa.Table.from_pandas(alltypes_sample(size=10000))
    buf = io.BytesIO()
    _write_table(table, buf, compression='SNAPPY', version='2.0')
    buf.seek(0)
    threaded = _read_table(buf, use_threads=True)
    buf.seek(0)
    serial = _read_table(buf, use_threads=False)
    assert threaded.equals(serial)
@pytest.mark.pandas
def test_min_chunksize():
    frame = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])
    table = pa.Table.from_pandas(frame.reset_index())
    buf = io.BytesIO()
    # chunk_size=-1 is accepted (single chunk) ...
    _write_table(table, buf, chunk_size=-1)
    buf.seek(0)
    assert _read_table(buf).equals(table)
    # ... but a zero chunk size is invalid.
    with pytest.raises(ValueError):
        _write_table(table, buf, chunk_size=0)
@pytest.mark.pandas
def test_pass_separate_metadata():
    # ARROW-471
    # ParquetFile accepts externally-read metadata instead of parsing the
    # footer itself.
    frame = alltypes_sample(size=10000)
    buf = io.BytesIO()
    _write_table(pa.Table.from_pandas(frame), buf,
                 compression='snappy', version='2.0')
    buf.seek(0)
    metadata = pq.read_metadata(buf)
    buf.seek(0)
    reader = pq.ParquetFile(buf, metadata=metadata)
    tm.assert_frame_equal(frame, reader.read().to_pandas())
@pytest.mark.pandas
def test_read_single_row_group():
    # ARROW-471: write K row groups and verify that reading them one at a
    # time and concatenating reconstructs the full dataset.
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # Use integer division: under Python 3, N / K is a float, but
    # row_group_size is a row count and should be an int.
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.num_row_groups == K
    row_groups = [pf.read_row_group(i) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df, result.to_pandas())
@pytest.mark.pandas
def test_read_single_row_group_with_column_subset():
    # Per-row-group reads restricted to a column subset.
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # Integer division: row_group_size is a row count, not a float.
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    cols = list(df.columns[:2])
    row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
@pytest.mark.pandas
def test_read_multiple_row_groups():
    # read_row_groups over all K groups must reproduce the whole frame.
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # Integer division: row_group_size is a row count, not a float.
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.num_row_groups == K
    result = pf.read_row_groups(range(K))
    tm.assert_frame_equal(df, result.to_pandas())
@pytest.mark.pandas
def test_read_multiple_row_groups_with_column_subset():
    # read_row_groups with a column subset, including duplicated columns.
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # Integer division: row_group_size is a row count, not a float.
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    cols = list(df.columns[:2])
    result = pf.read_row_groups(range(K), columns=cols)
    tm.assert_frame_equal(df[cols], result.to_pandas())
    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    result = pf.read_row_groups(range(K), columns=cols + cols)
    tm.assert_frame_equal(df[cols], result.to_pandas())
@pytest.mark.pandas
def test_scan_contents():
    # scan_contents returns the total row count, with or without a
    # column subset.
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    # Integer division: row_group_size is a row count, not a float.
    _write_table(a_table, buf, row_group_size=N // K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.scan_contents() == 10000
    assert pf.scan_contents(df.columns[:4]) == 10000
@pytest.mark.pandas
def test_parquet_piece_read(tempdir):
    # A single-file ParquetDatasetPiece reads back the written table.
    frame = _test_dataframe(1000)
    expected = pa.Table.from_pandas(frame)
    target = tempdir / 'parquet_piece_read.parquet'
    _write_table(expected, target, version='2.0')
    piece = pq.ParquetDatasetPiece(target)
    assert piece.read().equals(expected)
@pytest.mark.pandas
def test_parquet_piece_open_and_get_metadata(tempdir):
    frame = _test_dataframe(100)
    table = pa.Table.from_pandas(frame)
    target = tempdir / 'parquet_piece_read.parquet'
    _write_table(table, target, version='2.0')
    piece = pq.ParquetDatasetPiece(target)
    read_back = piece.read()
    assert isinstance(read_back, pa.Table)
    # get_metadata yields the file-level metadata object
    metadata = piece.get_metadata()
    assert isinstance(metadata, pq.FileMetaData)
    assert table == read_back
def test_parquet_piece_basics():
    path = '/baz.parq'
    plain = pq.ParquetDatasetPiece(path)
    with_group = pq.ParquetDatasetPiece(path, row_group=1)
    with_keys = pq.ParquetDatasetPiece(
        path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])
    # String representations include row group and partition keys
    assert str(plain) == path
    assert str(with_group) == '/baz.parq | row_group=1'
    assert str(with_keys) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'
    # Equality is reflexive and distinguishes differing pieces
    assert plain == plain
    assert with_group == with_group
    assert with_keys == with_keys
    assert plain != with_keys
def test_partition_set_dictionary_type():
    string_set = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])
    int_set = pq.PartitionSet('key2', [2007, 2008, 2009])
    assert isinstance(string_set.dictionary, pa.StringArray)
    assert isinstance(int_set.dictionary, pa.IntegerArray)
    # Only string and integer partition keys are supported
    datetime_set = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
    with pytest.raises(TypeError):
        datetime_set.dictionary
@pytest.mark.pandas
def test_read_partitioned_directory(tempdir):
    # Exercise the shared partition-roundtrip helper on the local FS.
    _partition_test_for_filesystem(LocalFileSystem.get_instance(), tempdir)
@pytest.mark.pandas
def test_create_parquet_dataset_multi_threaded(tempdir):
    # Discovering dataset metadata with many threads must produce the same
    # partition structure as a single-threaded manifest.
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    _partition_test_for_filesystem(fs, base_path)
    manifest = pq.ParquetManifest(base_path, filesystem=fs,
                                  metadata_nthreads=1)
    dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)
    assert len(dataset.pieces) > 0
    partitions = dataset.partitions
    assert len(partitions.partition_names) > 0
    assert partitions.partition_names == manifest.partitions.partition_names
    assert len(partitions.levels) == len(manifest.partitions.levels)
@pytest.mark.pandas
def test_equivalency(tempdir):
    """Exercise partition-key filters in both the flat (AND-only) syntax
    and disjunctive normal form, plus rejection of embedded NUL bytes."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # Old filters syntax:
    #  integer == 1 AND string != b AND boolean == True
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', '=', 1), ('string', '!=', 'b'),
                 ('boolean', '==', True)]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    assert 0 not in result_df['integer'].values
    assert 'b' not in result_df['string'].values
    assert False not in result_df['boolean'].values
    # filters in disjunctive normal form:
    #  (integer == 1 AND string != b AND boolean == True) OR
    #  (integer == 2 AND boolean == False)
    # TODO(ARROW-3388): boolean columns are reconstructed as string
    filters = [
        [
            ('integer', '=', 1),
            ('string', '!=', 'b'),
            ('boolean', '==', 'True')
        ],
        [('integer', '=', 0), ('boolean', '==', 'False')]
    ]
    dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    table = dataset.read()
    result_df = table.to_pandas().reset_index(drop=True)
    # Check that all rows in the DF fulfill the filter
    # Pandas 0.23.x has problems with indexing constant memoryviews in
    # categoricals. Thus we need to make an explicity copy here with np.array.
    df_filter_1 = (np.array(result_df['integer']) == 1) \
        & (np.array(result_df['string']) != 'b') \
        & (np.array(result_df['boolean']) == 'True')
    df_filter_2 = (np.array(result_df['integer']) == 0) \
        & (np.array(result_df['boolean']) == 'False')
    assert df_filter_1.sum() > 0
    assert df_filter_2.sum() > 0
    # Every returned row must match exactly one of the two conjunctions
    assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())
    # Check for \0 in predicate values. Until they are correctly implemented
    # in ARROW-3391, they would otherwise lead to weird results with the
    # current code.
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', b'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', u'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
@pytest.mark.pandas
def test_cutoff_exclusive_integer(tempdir):
    # Exclusive '<' / '>' partition filters keep only the strictly
    # interior keys: 1 < x < 4 leaves exactly {2, 3}.
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<', 4),
            ('integers', '>', 1),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # list(map(...)) directly; the original wrapped map() in a redundant
    # identity comprehension
    result_list = list(map(int, result_df['integers'].values))
    assert result_list == [2, 3]
@pytest.mark.pandas
@pytest.mark.xfail(
    raises=TypeError,
    reason='Loss of type information in creation of categoricals.'
)
def test_cutoff_exclusive_datetime(tempdir):
    # Exclusive datetime partition filters; currently expected to fail
    # (see xfail reason above).
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    date_keys = [
        datetime.date(2018, 4, 9),
        datetime.date(2018, 4, 10),
        datetime.date(2018, 4, 11),
        datetime.date(2018, 4, 12),
        datetime.date(2018, 4, 13)
    ]
    partition_spec = [
        ['dates', date_keys]
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'dates': np.array(date_keys, dtype='datetime64'),
    }, columns=['index', 'dates'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('dates', '<', "2018-04-12"),
            ('dates', '>', "2018-04-10")
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # Only 2018-04-11 lies strictly between the two cutoffs
    expected = pd.Categorical(
        np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),
        categories=np.array(date_keys, dtype='datetime64'))
    assert result_df['dates'].values == expected
@pytest.mark.pandas
def test_inclusive_integer(tempdir):
    # Inclusive '<=' / '>=' partition filters: 2 <= x <= 3 keeps {2, 3}.
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<=', 3),
            ('integers', '>=', 2),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # The original applied int() twice (int(x) over map(int, ...));
    # one conversion suffices.
    result_list = list(map(int, result_df['integers'].values))
    assert result_list == [2, 3]
@pytest.mark.pandas
def test_inclusive_set(tempdir):
    # 'in' partition filters with set operands across three key columns.
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}),
                 ('boolean', 'in', {True})]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    # Everything outside the three sets must be filtered out
    assert 0 not in result_df['integer'].values
    assert 'c' not in result_df['string'].values
    assert False not in result_df['boolean'].values
@pytest.mark.pandas
def test_invalid_pred_op(tempdir):
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # Each of these predicates is malformed and must be rejected:
    bad_filter_lists = [
        [('integers', '=<', 3)],       # unknown comparison operator
        [('integers', 'in', set())],   # empty set operand for 'in'
        [('integers', '!=', {3})],     # set operand with a scalar operator
    ]
    for bad_filters in bad_filter_lists:
        with pytest.raises(ValueError):
            pq.ParquetDataset(base_path, filesystem=fs, filters=bad_filters)
@pytest.mark.pandas
def test_filters_read_table(tempdir):
    # test that filters keyword is passed through in read_table
    fs = LocalFileSystem.get_instance()
    keys = [0, 1, 2, 3, 4]
    frame = pd.DataFrame({
        'index': np.arange(len(keys)),
        'integers': np.array(keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, tempdir, [['integers', keys]], frame)
    one_level = [('integers', '<', 3)]
    # Plain (flat) filter list
    table = pq.read_table(tempdir, filesystem=fs, filters=one_level)
    assert table.num_rows == 3
    # Disjunctive-normal-form (nested) filter list
    table = pq.read_table(tempdir, filesystem=fs, filters=[one_level])
    assert table.num_rows == 3
    # Same keyword on read_pandas
    table = pq.read_pandas(tempdir, filters=one_level)
    assert table.num_rows == 3
@pytest.fixture
def s3_bucket(request, minio_server):
    # Create a bucket on the test minio server and return its name.
    # Skips the test when boto3/botocore are not installed.
    boto3 = pytest.importorskip('boto3')
    botocore = pytest.importorskip('botocore')
    address, access_key, secret_key = minio_server
    s3 = boto3.resource(
        's3',
        endpoint_url='http://{}'.format(address),
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        config=botocore.client.Config(signature_version='s3v4'),
        region_name='us-east-1'
    )
    bucket = s3.Bucket('test-s3fs')
    bucket.create()
    return 'test-s3fs'
@pytest.fixture
def s3_example(minio_server, s3_bucket):
    # Yield (s3fs filesystem, unique bucket URI) for a test, removing the
    # directory afterwards.  Skips when s3fs is not installed.
    s3fs = pytest.importorskip('s3fs')
    address, access_key, secret_key = minio_server
    fs = s3fs.S3FileSystem(
        key=access_key,
        secret=secret_key,
        client_kwargs={
            'endpoint_url': 'http://{}'.format(address)
        }
    )
    test_dir = guid()
    bucket_uri = 's3://{0}/{1}'.format(s3_bucket, test_dir)
    fs.mkdir(bucket_uri)
    yield fs, bucket_uri
    # Teardown: remove everything written under the test directory
    fs.rm(bucket_uri, recursive=True)
@pytest.mark.pandas
@pytest.mark.s3
def test_read_partitioned_directory_s3fs(s3_example):
    # Run the shared partition-roundtrip helper against an s3fs-backed
    # filesystem wrapper.
    from pyarrow.filesystem import S3FSWrapper
    fs, bucket_uri = s3_example
    wrapper = S3FSWrapper(fs)
    _partition_test_for_filesystem(wrapper, bucket_uri)
    # Check that we can auto-wrap
    dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)
    dataset.read()
def _partition_test_for_filesystem(fs, base_path):
    """Write a two-level partitioned dataset (foo/bar) under *base_path*
    on filesystem *fs*, read it back as a ParquetDataset, and check the
    partition columns come back as categoricals."""
    foo_keys = [0, 1]
    bar_keys = ['a', 'b', 'c']
    partition_spec = [
        ['foo', foo_keys],
        ['bar', bar_keys]
    ]
    N = 30
    df = pd.DataFrame({
        'index': np.arange(N),
        'foo': np.array(foo_keys, dtype='i4').repeat(15),
        'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
        'values': np.random.randn(N)
    }, columns=['index', 'foo', 'bar', 'values'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    expected_df = (df.sort_values(by='index')
                   .reset_index(drop=True)
                   .reindex(columns=result_df.columns))
    # Partition key columns are reconstructed as categoricals
    expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
    expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)
    # Partition columns come last in the reconstructed frame
    assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()
    tm.assert_frame_equal(result_df, expected_df)
def _generate_partition_directories(fs, base_dir, partition_spec, df):
    """Materialize *df* as a hive-style partitioned directory tree
    ('name=value' directories) under *base_dir* on filesystem *fs*.

    partition_spec : list of lists, e.g. [['foo', [0, 1, 2]],
                                          ['bar', ['a', 'b', 'c']]]
    """
    DEPTH = len(partition_spec)
    def _visit_level(base_dir, level, part_keys):
        # Recursively create one directory per key value at this level;
        # leaves get a parquet file holding the matching rows.
        name, values = partition_spec[level]
        for value in values:
            this_part_keys = part_keys + [(name, value)]
            level_dir = fs._path_join(
                str(base_dir),
                '{0}={1}'.format(name, value)
            )
            fs.mkdir(level_dir)
            if level == DEPTH - 1:
                # Generate example data
                file_path = fs._path_join(level_dir, guid())
                filtered_df = _filter_partition(df, this_part_keys)
                part_table = pa.Table.from_pandas(filtered_df)
                with fs.open(file_path, 'wb') as f:
                    _write_table(part_table, f)
                assert fs.exists(file_path)
                # Empty Spark-style success marker; readers must skip it
                file_success = fs._path_join(level_dir, '_SUCCESS')
                with fs.open(file_success, 'wb') as f:
                    pass
            else:
                _visit_level(level_dir, level + 1, this_part_keys)
                file_success = fs._path_join(level_dir, '_SUCCESS')
                with fs.open(file_success, 'wb') as f:
                    pass
    _visit_level(base_dir, 0, [])
def _test_read_common_metadata_files(fs, base_path):
    """Write a data file plus a '_common_metadata' schema sidecar and
    check ParquetDataset picks the sidecar up."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    base_path = str(base_path)
    data_path = os.path.join(base_path, 'data.parquet')
    table = pa.Table.from_pandas(df)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    # Sidecar holds only the schema, no row groups
    metadata_path = os.path.join(base_path, '_common_metadata')
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    assert dataset.common_metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        common_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(common_schema)
    # handle list of one directory
    dataset2 = pq.ParquetDataset([base_path], filesystem=fs)
    assert dataset2.schema.equals(dataset.schema)
@pytest.mark.pandas
def test_read_common_metadata_files(tempdir):
    # Local-filesystem instantiation of the shared helper.
    _test_read_common_metadata_files(LocalFileSystem.get_instance(), tempdir)
@pytest.mark.pandas
def test_read_metadata_files(tempdir):
    fs = LocalFileSystem.get_instance()
    nrows = 100
    frame = pd.DataFrame({
        'index': np.arange(nrows),
        'values': np.random.randn(nrows)
    }, columns=['index', 'values'])
    data_path = tempdir / 'data.parquet'
    table = pa.Table.from_pandas(frame)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    # Write a '_metadata' sidecar holding only the schema
    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        sidecar_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(sidecar_schema)
@pytest.mark.pandas
def test_read_schema(tempdir):
    nrows = 100
    frame = pd.DataFrame({
        'index': np.arange(nrows),
        'values': np.random.randn(nrows)
    }, columns=['index', 'values'])
    target = tempdir / 'test.parquet'
    table = pa.Table.from_pandas(frame)
    _write_table(table, target)
    plain = pq.read_schema(target)
    mapped = pq.read_schema(target, memory_map=True)
    assert table.schema.equals(plain, check_metadata=False)
    assert table.schema.equals(mapped, check_metadata=False)
    # pandas metadata must survive the roundtrip
    assert table.schema.metadata[b'pandas'] == plain.metadata[b'pandas']
def _filter_partition(df, part_keys):
predicate = np.ones(len(df), dtype=bool)
to_drop = []
for name, value in part_keys:
to_drop.append(name)
# to avoid pandas warning
if isinstance(value, (datetime.date, datetime.datetime)):
value = pd.Timestamp(value)
predicate &= df[name] == value
return df[predicate].drop(to_drop, axis=1)
@pytest.mark.pandas
def test_read_multiple_files(tempdir):
    """Read a directory of uniform parquet files as one dataset, then
    exercise the failure modes when one file has mismatched columns."""
    nfiles = 10
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # Hack so that we don't have a dtype cast in v1 files
        df['uint32'] = df['uint32'].astype(np.int64)
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        paths.append(path)
    # Write a _SUCCESS.crc file
    (dirpath / '_SUCCESS.crc').touch()

    def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):
        dataset = pq.ParquetDataset(paths, **kwargs)
        return dataset.read(columns=columns, use_threads=use_threads)
    result = read_multiple_files(paths)
    expected = pa.concat_tables(test_data)
    assert result.equals(expected)
    # Read with provided metadata
    metadata = pq.read_metadata(paths[0])
    result2 = read_multiple_files(paths, metadata=metadata)
    assert result2.equals(expected)
    result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)
    assert result3.equals(expected)
    # Read column subset
    to_read = [0, 2, 6, result.num_columns - 1]
    col_names = [result.field(i).name for i in to_read]
    out = pa.localfs.read_parquet(dirpath, columns=col_names)
    expected = pa.Table.from_arrays([result.column(i) for i in to_read],
                                    names=col_names,
                                    metadata=result.schema.metadata)
    assert out.equals(expected)
    # Read with multiple threads
    pa.localfs.read_parquet(dirpath, use_threads=True)
    # Test failure modes with non-uniform metadata.  The original used
    # `seed=i`, silently relying on the loop variable leaking out of the
    # for-loop above; use the equivalent explicit value instead.
    bad_apple = _test_dataframe(size, seed=nfiles - 1).iloc[:, :4]
    bad_apple_path = tempdir / '{}.parquet'.format(guid())
    t = pa.Table.from_pandas(bad_apple)
    _write_table(t, bad_apple_path)
    bad_meta = pq.read_metadata(bad_apple_path)
    with pytest.raises(ValueError):
        read_multiple_files(paths + [bad_apple_path])
    with pytest.raises(ValueError):
        read_multiple_files(paths, metadata=bad_meta)
    mixed_paths = [bad_apple_path, paths[0]]
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths, schema=bad_meta.schema)
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths)
@pytest.mark.pandas
def test_dataset_read_pandas(tempdir):
    # read_pandas over a multi-file dataset with a column subset.
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    frames = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        df.index = np.arange(i * size, (i + 1) * size)
        df.index.name = 'index'
        _write_table(pa.Table.from_pandas(df),
                     dirpath / '{}.parquet'.format(i))
        frames.append(df)
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([frame[columns] for frame in frames])
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_dataset_memory_map(tempdir):
    # ARROW-2627: Check that we can use ParquetDataset with memory-mapping
    dirpath = tempdir / guid()
    dirpath.mkdir()
    frame = _test_dataframe(10, seed=0)
    target = dirpath / '{}.parquet'.format(0)
    table = pa.Table.from_pandas(frame)
    _write_table(table, target, version='2.0')
    dataset = pq.ParquetDataset(dirpath, memory_map=True)
    assert dataset.pieces[0].read().equals(table)
@pytest.mark.pandas
def test_dataset_enable_buffered_stream(tempdir):
    dirpath = tempdir / guid()
    dirpath.mkdir()
    table = pa.Table.from_pandas(_test_dataframe(10, seed=0))
    _write_table(table, dirpath / '{}.parquet'.format(0), version='2.0')
    # Negative buffer sizes are rejected outright
    with pytest.raises(ValueError):
        pq.ParquetDataset(dirpath, buffer_size=-64)
    # Any positive buffer size reads back identical data
    for size in [128, 1024]:
        dataset = pq.ParquetDataset(dirpath, buffer_size=size)
        assert dataset.pieces[0].read().equals(table)
@pytest.mark.pandas
@pytest.mark.parametrize('preserve_index', [True, False, None])
def test_dataset_read_pandas_common_metadata(tempdir, preserve_index):
    # ARROW-1103: pandas metadata stripped from the data files must be
    # recovered from the '_metadata' common file.
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df, preserve_index=preserve_index)
        # Obliterate metadata
        table = table.replace_schema_metadata(None)
        assert table.schema.metadata is None
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    # Write _metadata common file
    table_for_metadata = pa.Table.from_pandas(
        df, preserve_index=preserve_index
    )
    pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    # The index name survives only when the index was preserved
    expected.index.name = (
        df.index.name if preserve_index is not False else None)
    tm.assert_frame_equal(result, expected)
def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):
    """Write *nfiles* small parquet files under *base_path* and return
    the list of file paths."""
    # The original also accumulated _write_table's return value in an
    # unused `test_data` list; that dead code is removed.
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(file_nrows, seed=i)
        path = base_path / '{}.parquet'.format(i)
        _write_table(df, path)
        paths.append(path)
    return paths
@pytest.mark.pandas
def test_ignore_private_directories(tempdir):
    dirpath = tempdir / guid()
    dirpath.mkdir()
    written = _make_example_multifile_dataset(dirpath, nfiles=10,
                                              file_nrows=5)
    # Underscore-prefixed directories are private and must be skipped
    (dirpath / '_impala_staging').mkdir()
    dataset = pq.ParquetDataset(dirpath)
    assert set(map(str, written)) == {piece.path for piece in dataset.pieces}
@pytest.mark.pandas
def test_ignore_hidden_files_dot(tempdir):
    dirpath = tempdir / guid()
    dirpath.mkdir()
    written = _make_example_multifile_dataset(dirpath, nfiles=10,
                                              file_nrows=5)
    # Dot-files must not be picked up as dataset pieces
    for hidden in ('.DS_Store', '.private'):
        with (dirpath / hidden).open('wb') as f:
            f.write(b'gibberish')
    dataset = pq.ParquetDataset(dirpath)
    assert set(map(str, written)) == {piece.path for piece in dataset.pieces}
@pytest.mark.pandas
def test_ignore_hidden_files_underscore(tempdir):
    dirpath = tempdir / guid()
    dirpath.mkdir()
    written = _make_example_multifile_dataset(dirpath, nfiles=10,
                                              file_nrows=5)
    # Underscore-prefixed marker files are skipped during discovery
    for marker in ('_committed_123', '_started_321'):
        with (dirpath / marker).open('wb') as f:
            f.write(b'abcd')
    dataset = pq.ParquetDataset(dirpath)
    assert set(map(str, written)) == {piece.path for piece in dataset.pieces}
@pytest.mark.pandas
def test_multiindex_duplicate_values(tempdir):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = pd.DataFrame({'numbers': numbers}, index=index)
table = pa.Table.from_pandas(df)
filename = tempdir / 'dup_multi_index_levels.parquet'
_write_table(table, filename)
result_table = _read_table(filename)
assert table.equals(result_table)
result_df = result_table.to_pandas()
tm.assert_frame_equal(result_df, df)
@pytest.mark.pandas
def test_write_error_deletes_incomplete_file(tempdir):
# ARROW-1285
df = pd.DataFrame({'a': list('abc'),
'b': list(range(1, 4)),
'c': np.arange(3, 6).astype('u1'),
'd': np.arange(4.0, 7.0, dtype='float64'),
'e': [True, False, True],
'f': pd.Categorical(list('abc')),
'g': pd.date_range('20130101', periods=3),
'h': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'i': pd.date_range('20130101', periods=3, freq='ns')})
pdf = pa.Table.from_pandas(df)
filename = tempdir / 'tmp_file'
try:
_write_table(pdf, filename)
except pa.ArrowException:
pass
assert not filename.exists()
@pytest.mark.pandas
def test_noncoerced_nanoseconds_written_without_exception(tempdir):
# ARROW-1957: the Parquet version 2.0 writer preserves Arrow
# nanosecond timestamps by default
n = 9
df = pd.DataFrame({'x': range(n)},
index=pd.date_range('2017-01-01', freq='1n', periods=n))
tb = pa.Table.from_pandas(df)
filename = tempdir / 'written.parquet'
try:
pq.write_table(tb, filename, version='2.0')
except Exception:
pass
assert filename.exists()
recovered_table = pq.read_table(filename)
assert tb.equals(recovered_table)
# Loss of data thru coercion (without explicit override) still an error
filename = tempdir / 'not_written.parquet'
with pytest.raises(ValueError):
pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')
def test_read_non_existent_file(tempdir):
    """Reading a missing file must raise, and the message must name the path."""
    path = 'non-existent-file.parquet'
    # Use pytest.raises instead of try/except: the old form silently PASSED
    # if read_table unexpectedly succeeded and raised nothing.
    with pytest.raises(Exception) as exc_info:
        pq.read_table(path)
    assert path in exc_info.value.args[0]
def test_read_table_doesnt_warn(datadir):
with pytest.warns(None) as record:
pq.read_table(datadir / 'v0.7.1.parquet')
assert len(record) == 0
def _test_write_to_dataset_with_partitions(base_path,
                                           filesystem=None,
                                           schema=None,
                                           index_name=None):
    """Round-trip a partitioned dataset and verify schema/partition handling.

    ARROW-1400.  ``index_name`` is accepted for interface compatibility but
    is currently unused by this helper.
    """
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              # np.nan: the pd.np alias is deprecated and
                              # removed in pandas >= 2.0
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,
                                        preserve_index=False)
    pq.write_to_dataset(output_table, base_path, partition_by,
                        filesystem=filesystem)
    metadata_path = os.path.join(base_path, '_common_metadata')
    if filesystem is not None:
        with filesystem.open(metadata_path, 'wb') as f:
            pq.write_metadata(output_table.schema, f)
    else:
        pq.write_metadata(output_table.schema, metadata_path)
    # ARROW-2891: Ensure the output_schema is preserved when writing a
    # partitioned dataset
    dataset = pq.ParquetDataset(base_path,
                                filesystem=filesystem,
                                validate_schema=True)
    # ARROW-2209: Ensure the dataset schema also includes the partition columns
    dataset_cols = set(dataset.schema.to_arrow_schema().names)
    assert dataset_cols == set(output_table.schema.names)
    input_table = dataset.read()
    input_df = input_table.to_pandas()
    # Read data back in and compare with original DataFrame
    # Partitioned columns added to the end of the DataFrame when read
    input_df_cols = input_df.columns.tolist()
    assert partition_by == input_df_cols[-1 * len(partition_by):]
    # Partitioned columns become 'categorical' dtypes
    input_df = input_df[cols]
    for col in partition_by:
        output_df[col] = output_df[col].astype('category')
    assert output_df.equals(input_df)
def _test_write_to_dataset_no_partitions(base_path, filesystem=None):
# ARROW-1400
output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
'group2': list('eefeffgeee'),
'num': list(range(10)),
'date': np.arange('2017-01-01', '2017-01-11',
dtype='datetime64[D]')})
cols = output_df.columns.tolist()
output_table = pa.Table.from_pandas(output_df)
if filesystem is None:
filesystem = LocalFileSystem.get_instance()
# Without partitions, append files to root_path
n = 5
for i in range(n):
pq.write_to_dataset(output_table, base_path,
filesystem=filesystem)
output_files = [file for file in filesystem.ls(base_path)
if file.endswith(".parquet")]
assert len(output_files) == n
# Deduplicated incoming DataFrame should match
# original outgoing Dataframe
input_table = pq.ParquetDataset(base_path,
filesystem=filesystem).read()
input_df = input_table.to_pandas()
input_df = input_df.drop_duplicates()
input_df = input_df[cols]
assert output_df.equals(input_df)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(tempdir):
_test_write_to_dataset_with_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_schema(tempdir):
schema = pa.schema([pa.field('group1', type=pa.string()),
pa.field('group2', type=pa.string()),
pa.field('num', type=pa.int64()),
pa.field('nan', type=pa.int32()),
pa.field('date', type=pa.timestamp(unit='us'))])
_test_write_to_dataset_with_partitions(str(tempdir), schema=schema)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_index_name(tempdir):
_test_write_to_dataset_with_partitions(str(tempdir),
index_name='index_name')
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(tempdir):
_test_write_to_dataset_no_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_custom_filenames(tempdir):
    """Partition filenames must follow the user-supplied callback pattern."""
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              # np.nan: the pd.np alias is deprecated and
                              # removed in pandas >= 2.0
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df)
    path = str(tempdir)

    def partition_filename_callback(keys):
        # keys is the tuple of partition values, e.g. ('a', 'e')
        return "{0}-{1}.parquet".format(*keys)

    pq.write_to_dataset(output_table, path,
                        partition_by, partition_filename_callback)
    dataset = pq.ParquetDataset(path)
    # ARROW-3538: Ensure partition filenames match the given pattern
    # defined in the local function partition_filename_callback
    expected_basenames = [
        'a-e.parquet', 'a-f.parquet',
        'b-e.parquet', 'b-f.parquet',
        'b-g.parquet', 'c-e.parquet'
    ]
    output_basenames = [os.path.basename(p.path) for p in dataset.pieces]
    assert sorted(expected_basenames) == sorted(output_basenames)
@pytest.mark.large_memory
def test_large_table_int32_overflow():
size = np.iinfo('int32').max + 1
arr = np.ones(size, dtype='uint8')
parr = pa.array(arr, type=pa.uint8())
table = pa.Table.from_arrays([parr], names=['one'])
f = io.BytesIO()
_write_table(table, f)
def _simple_table_roundtrip(table, **write_kwargs):
    """Write *table* to an in-memory buffer and read it straight back."""
    sink = pa.BufferOutputStream()
    _write_table(table, sink, **write_kwargs)
    return _read_table(sink.getvalue())
@pytest.mark.large_memory
def test_byte_array_exactly_2gb():
# Test edge case reported in ARROW-3762
val = b'x' * (1 << 10)
base = pa.array([val] * ((1 << 21) - 1))
cases = [
[b'x' * 1023], # 2^31 - 1
[b'x' * 1024], # 2^31
[b'x' * 1025] # 2^31 + 1
]
for case in cases:
values = pa.chunked_array([base, pa.array(case)])
t = pa.table([values], names=['f0'])
result = _simple_table_roundtrip(t, use_dictionary=False)
assert t.equals(result)
@pytest.mark.pandas
@pytest.mark.large_memory
def test_binary_array_overflow_to_chunked():
# ARROW-3762
# 2^31 + 1 bytes
values = [b'x'] + [
b'x' * (1 << 20)
] * 2 * (1 << 10)
df = pd.DataFrame({'byte_col': values})
tbl = pa.Table.from_pandas(df, preserve_index=False)
read_tbl = _simple_table_roundtrip(tbl)
col0_data = read_tbl[0]
assert isinstance(col0_data, pa.ChunkedArray)
# Split up into 2GB chunks
assert col0_data.num_chunks == 2
assert tbl.equals(read_tbl)
@pytest.mark.pandas
@pytest.mark.large_memory
def test_list_of_binary_large_cell():
# ARROW-4688
data = []
# TODO(wesm): handle chunked children
# 2^31 - 1 bytes in a single cell
# data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)])
# A little under 2GB in cell each containing approximately 10MB each
data.extend([[b'x' * 1000000] * 10] * 214)
arr = pa.array(data)
table = pa.Table.from_arrays([arr], ['chunky_cells'])
read_table = _simple_table_roundtrip(table)
assert table.equals(read_table)
@pytest.mark.pandas
def test_index_column_name_duplicate(tempdir):
data = {
'close': {
pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,
pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,
},
'time': {
pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(
'2017-06-30 01:31:00'
),
pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(
'2017-06-30 01:32:00'
),
}
}
path = str(tempdir / 'data.parquet')
dfx = pd.DataFrame(data).set_index('time', drop=False)
tdfx = pa.Table.from_pandas(dfx)
_write_table(tdfx, path)
arrow_table = _read_table(path)
result_df = arrow_table.to_pandas()
tm.assert_frame_equal(result_df, dfx)
@pytest.mark.pandas
def test_parquet_nested_convenience(tempdir):
# ARROW-1684
df = pd.DataFrame({
'a': [[1, 2, 3], None, [4, 5], []],
'b': [[1.], None, None, [6., 7.]],
})
path = str(tempdir / 'nested_convenience.parquet')
table = pa.Table.from_pandas(df, preserve_index=False)
_write_table(table, path)
read = pq.read_table(path, columns=['a'])
tm.assert_frame_equal(read.to_pandas(), df[['a']])
read = pq.read_table(path, columns=['a', 'b'])
tm.assert_frame_equal(read.to_pandas(), df)
@pytest.mark.pandas
def test_backwards_compatible_index_naming(datadir):
expected_string = b"""\
carat cut color clarity depth table price x y z
0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}',
index_col=None, header=0, engine='python')
table = _read_table(datadir / 'v0.7.1.parquet')
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_named(datadir):
expected_string = b"""\
carat cut color clarity depth table price x y z
0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
expected = pd.read_csv(
io.BytesIO(expected_string), sep=r'\s{2,}',
index_col=['cut', 'color', 'clarity'],
header=0, engine='python'
).sort_index()
table = _read_table(datadir / 'v0.7.1.all-named-index.parquet')
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_some_named(datadir):
expected_string = b"""\
carat cut color clarity depth table price x y z
0.23 Ideal E SI2 61.5 55.0 326 3.95 3.98 2.43
0.21 Premium E SI1 59.8 61.0 326 3.89 3.84 2.31
0.23 Good E VS1 56.9 65.0 327 4.05 4.07 2.31
0.29 Premium I VS2 62.4 58.0 334 4.20 4.23 2.63
0.31 Good J SI2 63.3 58.0 335 4.34 4.35 2.75
0.24 Very Good J VVS2 62.8 57.0 336 3.94 3.96 2.48
0.24 Very Good I VVS1 62.3 57.0 336 3.95 3.98 2.47
0.26 Very Good H SI1 61.9 55.0 337 4.07 4.11 2.53
0.22 Fair E VS2 65.1 61.0 337 3.87 3.78 2.49
0.23 Very Good H VS1 59.4 61.0 338 4.00 4.05 2.39"""
expected = pd.read_csv(
io.BytesIO(expected_string),
sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'],
header=0, engine='python'
).sort_index()
expected.index = expected.index.set_names(['cut', None, 'clarity'])
table = _read_table(datadir / 'v0.7.1.some-named-index.parquet')
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_column_metadata_handling(datadir):
expected = pd.DataFrame(
{'a': [1, 2, 3], 'b': [.1, .2, .3],
'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
expected.index = pd.MultiIndex.from_arrays(
[['a', 'b', 'c'],
pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')],
names=['index', None])
path = datadir / 'v0.7.1.column-metadata-handling.parquet'
table = _read_table(path)
result = table.to_pandas()
tm.assert_frame_equal(result, expected)
table = _read_table(path, columns=['a'])
result = table.to_pandas()
tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))
def _make_dataset_for_pickling(tempdir, N=100):
    """Build a ParquetDataset fixture with multiple row groups and a
    sidecar ``_metadata`` file, for pickling tests.

    Writes one parquet file containing the same N-row table three times
    (three row groups), writes the schema to ``_metadata``, and returns
    the opened ParquetDataset.
    """
    path = tempdir / 'data.parquet'
    fs = LocalFileSystem.get_instance()
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    table = pa.Table.from_pandas(df)
    num_groups = 3
    # Each write_table call appends one row group; the context manager
    # closes the writer so the footer is flushed before re-reading below.
    with pq.ParquetWriter(path, table.schema) as writer:
        for i in range(num_groups):
            writer.write_table(table)
    reader = pq.ParquetFile(path)
    assert reader.metadata.num_row_groups == num_groups
    # Sidecar schema-only metadata file alongside the data file.
    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)
    return dataset
@pytest.mark.pandas
@pytest.mark.parametrize('pickler', [
pytest.param(pickle, id='builtin'),
pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle')
])
def test_pickle_dataset(tempdir, datadir, pickler):
def is_pickleable(obj):
return obj == pickler.loads(pickler.dumps(obj))
dataset = _make_dataset_for_pickling(tempdir)
assert is_pickleable(dataset)
assert is_pickleable(dataset.metadata)
assert is_pickleable(dataset.metadata.schema)
assert len(dataset.metadata.schema)
for column in dataset.metadata.schema:
assert is_pickleable(column)
for piece in dataset.pieces:
assert is_pickleable(piece)
metadata = piece.get_metadata()
assert metadata.num_row_groups
for i in range(metadata.num_row_groups):
assert is_pickleable(metadata.row_group(i))
@pytest.mark.pandas
def test_decimal_roundtrip(tempdir):
num_values = 10
columns = {}
for precision in range(1, 39):
for scale in range(0, precision + 1):
with util.random_seed(0):
random_decimal_values = [
util.randdecimal(precision, scale)
for _ in range(num_values)
]
column_name = ('dec_precision_{:d}_scale_{:d}'
.format(precision, scale))
columns[column_name] = random_decimal_values
expected = pd.DataFrame(columns)
filename = tempdir / 'decimals.parquet'
string_filename = str(filename)
table = pa.Table.from_pandas(expected)
_write_table(table, string_filename)
result_table = _read_table(string_filename)
result = result_table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@pytest.mark.xfail(
raises=pa.ArrowException, reason='Parquet does not support negative scale'
)
def test_decimal_roundtrip_negative_scale(tempdir):
expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]})
filename = tempdir / 'decimals.parquet'
string_filename = str(filename)
t = pa.Table.from_pandas(expected)
_write_table(t, string_filename)
result_table = _read_table(string_filename)
result = result_table.to_pandas()
tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj(tempdir):
df = _test_dataframe(100)
df['unique_id'] = 0
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
out = pa.BufferOutputStream()
with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:
frames = []
for i in range(10):
df['unique_id'] = i
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
writer.write_table(arrow_table)
frames.append(df.copy())
buf = out.getvalue()
result = _read_table(pa.BufferReader(buf))
expected = pd.concat(frames, ignore_index=True)
tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj_with_exception(tempdir):
df = _test_dataframe(100)
df['unique_id'] = 0
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
out = pa.BufferOutputStream()
error_text = 'Artificial Error'
try:
with pq.ParquetWriter(out,
arrow_table.schema,
version='2.0') as writer:
frames = []
for i in range(10):
df['unique_id'] = i
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
writer.write_table(arrow_table)
frames.append(df.copy())
if i == 5:
raise ValueError(error_text)
except Exception as e:
assert str(e) == error_text
buf = out.getvalue()
result = _read_table(pa.BufferReader(buf))
expected = pd.concat(frames, ignore_index=True)
tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_zlib_compression_bug():
    """Regression test for ARROW-3514:
    "zlib deflate failed, output buffer too small" on gzip writes.
    """
    table = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])
    f = io.BytesIO()
    pq.write_table(table, f, compression='gzip')
    f.seek(0)
    roundtrip = pq.read_table(f)
    tm.assert_frame_equal(roundtrip.to_pandas(), table.to_pandas())
@pytest.mark.pandas
def test_merging_parquet_tables_with_different_pandas_metadata(tempdir):
    """Tables whose schemas differ only in (pandas) metadata can be written
    through one ParquetWriter.

    ARROW-3728: Merging Parquet Files - Pandas Meta in Schema Mismatch.
    """
    schema = pa.schema([
        pa.field('int', pa.int16()),
        pa.field('float', pa.float32()),
        pa.field('string', pa.string())
    ])
    df1 = pd.DataFrame({
        'int': np.arange(3, dtype=np.uint8),
        'float': np.arange(3, dtype=np.float32),
        'string': ['ABBA', 'EDDA', 'ACDC']
    })
    df2 = pd.DataFrame({
        'int': [4, 5],
        'float': [1.1, None],
        'string': [None, None]
    })
    table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
    table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)
    # The schemas differ only in metadata.
    assert not table1.schema.equals(table2.schema)
    assert table1.schema.equals(table2.schema, check_metadata=False)
    # Use the context manager so the writer is closed and the parquet
    # footer flushed (the old code leaked an open writer).
    with pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema) as writer:
        writer.write_table(table1)
        writer.write_table(table2)
def test_empty_row_groups(tempdir):
    """Zero-row row groups survive a write/read round trip (ARROW-3020)."""
    table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])
    path = tempdir / 'empty_row_groups.parquet'
    num_groups = 3
    # Write the empty table three times -> three empty row groups.
    with pq.ParquetWriter(path, table.schema) as writer:
        for i in range(num_groups):
            writer.write_table(table)
    reader = pq.ParquetFile(path)
    assert reader.metadata.num_row_groups == num_groups
    for i in range(num_groups):
        assert reader.read_row_group(i).equals(table)
@pytest.mark.pandas
def test_parquet_writer_with_caller_provided_filesystem():
    """ParquetWriter opens paths through a caller-provided filesystem,
    and rejects a filesystem when given a file-like sink.
    """
    out = pa.BufferOutputStream()

    class CustomFS(FileSystem):
        # Minimal filesystem stub: records the path/mode it was asked to
        # open and always hands back the shared buffer stream.
        def __init__(self):
            self.path = None
            self.mode = None

        def open(self, path, mode='rb'):
            self.path = path
            self.mode = mode
            return out

    fs = CustomFS()
    fname = 'expected_fname.parquet'
    df = _test_dataframe(100)
    table = pa.Table.from_pandas(df, preserve_index=False)

    with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \
            as writer:
        writer.write_table(table)

    assert fs.path == fname
    assert fs.mode == 'wb'
    assert out.closed

    buf = out.getvalue()
    table_read = _read_table(pa.BufferReader(buf))
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df_read, df)

    # Should raise ValueError when filesystem is passed with file-like object
    with pytest.raises(ValueError) as err_info:
        pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)
    expected_msg = ("filesystem passed but where is file-like, so"
                    " there is nothing to open with filesystem.")
    # Compare against the exception's message, not str(err_info): the
    # latter is the repr of the ExceptionInfo object and never matched.
    assert str(err_info.value) == expected_msg
def test_writing_empty_lists():
    """Writing a list column containing only empty lists must not crash.

    ARROW-2591: [Python] Segmentation fault issue in pq.write_table.
    """
    arr1 = pa.array([[], []], pa.list_(pa.int32()))
    table = pa.Table.from_arrays([arr1], ['list(int32)'])
    _check_roundtrip(table)
def test_write_nested_zero_length_array_chunk_failure():
# Bug report in ARROW-3792
cols = OrderedDict(
int32=pa.int32(),
list_string=pa.list_(pa.string())
)
data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]
# This produces a table with a column like
# <Column name='list_string' type=ListType(list<item: string>)>
# [
# [],
# [
# [
# "G"
# ]
# ]
# ]
#
# Each column is a ChunkedArray with 2 elements
my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()
for batch in data]
my_batches = [pa.RecordBatch.from_arrays(batch, schema=pa.schema(cols))
for batch in my_arrays]
tbl = pa.Table.from_batches(my_batches, pa.schema(cols))
_check_roundtrip(tbl)
@pytest.mark.pandas
def test_partitioned_dataset(tempdir):
# ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset
# to a Parquet file
path = tempdir / "ARROW-3208"
df = pd.DataFrame({
'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],
'two': [-1, 10, 2, 100, 1000, 1, 11],
'three': [0, 0, 0, 0, 0, 0, 0]
})
table = pa.Table.from_pandas(df)
pq.write_to_dataset(table, root_path=str(path),
partition_cols=['one', 'two'])
table = pq.ParquetDataset(path).read()
pq.write_table(table, path / "output.parquet")
def test_read_column_invalid_index():
    """read_column accepts valid column indices and raises on out-of-range."""
    table = pa.table([pa.array([4, 5]), pa.array(["foo", "bar"])],
                     names=['ints', 'strs'])
    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    f = pq.ParquetFile(bio.getvalue())
    assert f.reader.read_column(0).to_pylist() == [4, 5]
    assert f.reader.read_column(1).to_pylist() == ["foo", "bar"]
    # Negative and past-the-end indices must both fail.
    for index in (-1, 2):
        with pytest.raises((ValueError, IndexError)):
            f.reader.read_column(index)
@pytest.mark.pandas
def test_direct_read_dictionary():
# ARROW-3325
repeats = 10
nunique = 5
data = [
[tm.rands(10) for i in range(nunique)] * repeats,
]
table = pa.table(data, names=['f0'])
bio = pa.BufferOutputStream()
pq.write_table(table, bio)
contents = bio.getvalue()
result = pq.read_table(pa.BufferReader(contents),
read_dictionary=['f0'])
# Compute dictionary-encoded subfield
expected = pa.table([table[0].dictionary_encode()], names=['f0'])
assert result.equals(expected)
@pytest.mark.pandas
def test_dataset_read_dictionary(tempdir):
path = tempdir / "ARROW-3325-dataset"
t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
pq.write_to_dataset(t1, root_path=str(path))
pq.write_to_dataset(t2, root_path=str(path))
result = pq.ParquetDataset(path, read_dictionary=['f0']).read()
# The order of the chunks is non-deterministic
ex_chunks = [t1[0].chunk(0).dictionary_encode(),
t2[0].chunk(0).dictionary_encode()]
assert result[0].num_chunks == 2
c0, c1 = result[0].chunk(0), result[0].chunk(1)
if c0.equals(ex_chunks[0]):
assert c1.equals(ex_chunks[1])
else:
assert c0.equals(ex_chunks[1])
assert c1.equals(ex_chunks[0])
@pytest.mark.pandas
def test_direct_read_dictionary_subfield():
repeats = 10
nunique = 5
data = [
[[tm.rands(10)] for i in range(nunique)] * repeats,
]
table = pa.table(data, names=['f0'])
bio = pa.BufferOutputStream()
pq.write_table(table, bio)
contents = bio.getvalue()
result = pq.read_table(pa.BufferReader(contents),
read_dictionary=['f0.list.item'])
arr = pa.array(data[0])
values_as_dict = arr.values.dictionary_encode()
inner_indices = values_as_dict.indices.cast('int32')
new_values = pa.DictionaryArray.from_arrays(inner_indices,
values_as_dict.dictionary)
offsets = pa.array(range(51), type='int32')
expected_arr = pa.ListArray.from_arrays(offsets, new_values)
expected = pa.table([expected_arr], names=['f0'])
assert result.equals(expected)
assert result[0].num_chunks == 1
@pytest.mark.pandas
def test_dataset_metadata(tempdir):
path = tempdir / "ARROW-1983-dataset"
# create and write a test dataset
df = pd.DataFrame({
'one': [1, 2, 3],
'two': [-1, -2, -3],
'three': [[1, 2], [2, 3], [3, 4]],
})
table = pa.Table.from_pandas(df)
metadata_list = []
pq.write_to_dataset(table, root_path=str(path),
partition_cols=['one', 'two'],
metadata_collector=metadata_list)
# open the dataset and collect metadata from pieces:
dataset = pq.ParquetDataset(path)
metadata_list2 = [p.get_metadata() for p in dataset.pieces]
# compare metadata list content:
assert len(metadata_list) == len(metadata_list2)
for md, md2 in zip(metadata_list, metadata_list2):
d = md.to_dict()
d2 = md2.to_dict()
# serialized_size is initialized in the reader:
assert d.pop('serialized_size') == 0
assert d2.pop('serialized_size') > 0
assert d == d2
def test_parquet_file_too_small(tempdir):
path = str(tempdir / "test.parquet")
with pytest.raises(pa.ArrowIOError,
match='size is 0 bytes'):
with open(path, 'wb') as f:
pass
pq.read_table(path)
with pytest.raises(pa.ArrowIOError,
match='size is 4 bytes'):
with open(path, 'wb') as f:
f.write(b'ffff')
pq.read_table(path)
@pytest.mark.pandas
def test_categorical_index_survives_roundtrip():
# ARROW-3652, addressed by ARROW-3246
df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])
df['c1'] = df['c1'].astype('category')
df = df.set_index(['c1'])
table = pa.Table.from_pandas(df)
bos = pa.BufferOutputStream()
pq.write_table(table, bos)
ref_df = pq.read_pandas(bos.getvalue()).to_pandas()
assert isinstance(ref_df.index, pd.CategoricalIndex)
assert ref_df.index.equals(df.index)
@pytest.mark.pandas
def test_categorical_order_survives_roundtrip():
# ARROW-6302
df = pd.DataFrame({"a": pd.Categorical(
["a", "b", "c", "a"], categories=["b", "c", "d"], ordered=True)})
table = pa.Table.from_pandas(df)
bos = pa.BufferOutputStream()
pq.write_table(table, bos)
contents = bos.getvalue()
result = pq.read_pandas(contents).to_pandas()
tm.assert_frame_equal(result, df)
def test_dictionary_array_automatically_read():
# ARROW-3246
# Make a large dictionary, a little over 4MB of data
dict_length = 4000
dict_values = pa.array([('x' * 1000 + '_{}'.format(i))
for i in range(dict_length)])
num_chunks = 10
chunk_size = 100
chunks = []
for i in range(num_chunks):
indices = np.random.randint(0, dict_length,
size=chunk_size).astype(np.int32)
chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices),
dict_values))
table = pa.table([pa.chunked_array(chunks)], names=['f0'])
bio = pa.BufferOutputStream()
pq.write_table(table, bio)
contents = bio.getvalue()
result = pq.read_table(pa.BufferReader(contents))
assert result.equals(table)
# The only key in the metadata was the Arrow schema key
assert result.schema.metadata is None
@pytest.mark.pandas
def test_pandas_categorical_na_type_row_groups():
# ARROW-5085
df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100})
df_category = df.astype({"col": "category", "int": "category"})
table = pa.Table.from_pandas(df)
table_cat = pa.Table.from_pandas(df_category)
buf = pa.BufferOutputStream()
# it works
pq.write_table(table_cat, buf, version="2.0", chunk_size=10)
result = pq.read_table(buf.getvalue())
# Result is non-categorical
assert result[0].equals(table[0])
assert result[1].equals(table[1])
@pytest.mark.pandas
def test_pandas_categorical_roundtrip():
# ARROW-5480, this was enabled by ARROW-3246
# Have one of the categories unobserved and include a null (-1)
codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')
categories = ['foo', 'bar', 'baz']
df = pd.DataFrame({'x': pd.Categorical.from_codes(
codes, categories=categories)})
buf = pa.BufferOutputStream()
pq.write_table(pa.table(df), buf)
result = pq.read_table(buf.getvalue()).to_pandas()
assert result.x.dtype == 'category'
assert (result.x.cat.categories == categories).all()
tm.assert_frame_equal(result, df)
@pytest.mark.pandas
def test_multi_dataset_metadata(tempdir):
filenames = ["ARROW-1983-dataset.0", "ARROW-1983-dataset.1"]
metapath = str(tempdir / "_metadata")
# create a test dataset
df = pd.DataFrame({
'one': [1, 2, 3],
'two': [-1, -2, -3],
'three': [[1, 2], [2, 3], [3, 4]],
})
table = pa.Table.from_pandas(df)
# write dataset twice and collect/merge metadata
_meta = None
for filename in filenames:
meta = []
pq.write_table(table, str(tempdir / filename),
metadata_collector=meta)
meta[0].set_file_path(filename)
if _meta is None:
_meta = meta[0]
else:
_meta.append_row_groups(meta[0])
# Write merged metadata-only file
with open(metapath, "wb") as f:
_meta.write_metadata_file(f)
# Read back the metadata
meta = pq.read_metadata(metapath)
md = meta.to_dict()
_md = _meta.to_dict()
for key in _md:
if key != 'serialized_size':
assert _md[key] == md[key]
assert _md['num_columns'] == 3
assert _md['num_rows'] == 6
assert _md['num_row_groups'] == 2
assert _md['serialized_size'] == 0
assert md['serialized_size'] > 0
@pytest.mark.pandas
def test_filter_before_validate_schema(tempdir):
# ARROW-4076 apply filter before schema validation
# to avoid checking unneeded schemas
# create partitioned dataset with mismatching schemas which would
# otherwise raise if first validation all schemas
dir1 = tempdir / 'A=0'
dir1.mkdir()
table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]}))
pq.write_table(table1, dir1 / 'data.parquet')
dir2 = tempdir / 'A=1'
dir2.mkdir()
table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']}))
pq.write_table(table2, dir2 / 'data.parquet')
# read single file using filter
table = pq.read_table(tempdir, filters=[[('A', '==', 0)]])
assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))
@pytest.mark.pandas
@pytest.mark.fastparquet
@pytest.mark.filterwarnings("ignore:RangeIndex:DeprecationWarning")
def test_fastparquet_cross_compatibility(tempdir):
fp = pytest.importorskip('fastparquet')
df = pd.DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(4.0, 7.0, dtype="float64"),
"d": [True, False, True],
"e": pd.date_range("20130101", periods=3),
"f": pd.Categorical(["a", "b", "a"]),
# fastparquet writes list as BYTE_ARRAY JSON, so no roundtrip
# "g": [[1, 2], None, [1, 2, 3]],
}
)
table = pa.table(df)
# Arrow -> fastparquet
file_arrow = str(tempdir / "cross_compat_arrow.parquet")
pq.write_table(table, file_arrow, compression=None)
fp_file = fp.ParquetFile(file_arrow)
df_fp = fp_file.to_pandas()
tm.assert_frame_equal(df, df_fp)
# Fastparquet -> arrow
file_fastparquet = str(tempdir / "cross_compat_fastparquet.parquet")
fp.write(file_fastparquet, df)
table_fp = pq.read_pandas(file_fastparquet)
# for fastparquet written file, categoricals comes back as strings
# (no arrow schema in parquet metadata)
df['f'] = df['f'].astype(object)
tm.assert_frame_equal(table_fp.to_pandas(), df)
| 32.471433 | 79 | 0.62808 |
from collections import OrderedDict
import datetime
import decimal
import io
import json
import os
import six
import pickle
import pytest
import numpy as np
import pyarrow as pa
from pyarrow.compat import guid, u, BytesIO, unichar, PY2
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.filesystem import LocalFileSystem, FileSystem
try:
import pyarrow.parquet as pq
except ImportError:
pq = None
try:
import pandas as pd
import pandas.util.testing as tm
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
except ImportError:
pd = tm = None
pytestmark = pytest.mark.parquet
@pytest.fixture(scope='module')
def datadir(datadir):
    # Narrow the shared ``datadir`` fixture to the parquet test-data subdir.
    return datadir / 'parquet'
def _write_table(table, path, **kwargs):
    """Write *table* to *path* as parquet, coercing DataFrames to Tables.

    Returns the (possibly converted) pyarrow Table that was written.
    """
    import pyarrow.parquet as pq
    is_frame = _pandas_api.is_data_frame(table)
    out_table = pa.Table.from_pandas(table) if is_frame else table
    pq.write_table(out_table, path, **kwargs)
    return out_table
def _read_table(*args, **kwargs):
    """Forward all arguments to :func:`pq.read_table`."""
    result = pq.read_table(*args, **kwargs)
    return result
def _roundtrip_table(table, read_table_kwargs=None,
                     write_table_kwargs=None):
    """Serialize *table* to an in-memory buffer and deserialize it again."""
    write_kwargs = write_table_kwargs or {}
    read_kwargs = read_table_kwargs or {}
    sink = io.BytesIO()
    _write_table(table, sink, **write_kwargs)
    sink.seek(0)
    return _read_table(sink, **read_kwargs)
def _check_roundtrip(table, expected=None, read_table_kwargs=None,
                     **write_table_kwargs):
    """Assert that *table* survives two successive parquet roundtrips.

    The second pass catches defects that only appear when re-writing the
    data that was just read back.  ``expected`` defaults to *table* itself.
    """
    if expected is None:
        expected = table
    read_table_kwargs = read_table_kwargs or {}

    current = table
    for _ in range(2):
        current = _roundtrip_table(current,
                                   read_table_kwargs=read_table_kwargs,
                                   write_table_kwargs=write_table_kwargs)
        assert current.equals(expected)
def _roundtrip_pandas_dataframe(df, write_kwargs):
    """Write *df* as parquet into memory and return the DataFrame read back."""
    buf = io.BytesIO()
    _write_table(pa.Table.from_pandas(df), buf, **write_kwargs)
    buf.seek(0)
    return _read_table(buf).to_pandas()
@pytest.mark.parametrize('dtype', [int, float])
def test_single_pylist_column_roundtrip(tempdir, dtype):
    """A one-column table built from a Python list roundtrips intact."""
    filename = tempdir / 'single_{}_column.parquet'.format(dtype.__name__)
    data = [pa.array(list(map(dtype, range(5))))]
    table = pa.Table.from_arrays(data, names=['a'])
    _write_table(table, filename)
    table_read = _read_table(filename)
    for i in range(table.num_columns):
        col_written = table[i]
        col_read = table_read[i]
        assert table.field(i).name == table_read.field(i).name
        # The file is small enough that data should come back in one chunk.
        assert col_read.num_chunks == 1
        data_written = col_written.chunk(0)
        data_read = col_read.chunk(0)
        assert data_written.equals(data_read)
def alltypes_sample(size=10000, seed=0, categorical=False):
    """Build a DataFrame with one column per commonly-tested logical type.

    Parameters
    ----------
    size : int
        Number of rows.
    seed : int
        numpy RNG seed so the random 'bool' column is reproducible.
    categorical : bool
        If True, also add a categorical copy of the 'str' column.
    """
    np.random.seed(seed)
    arrays = {
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # NOTE(review): the 'int8' column deliberately(?) uses int16 —
        # presumably because arange(size) would overflow int8 at the
        # default size=10000; confirm before "fixing".
        'int8': np.arange(size, dtype=np.int16),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'datetime': np.arange("2016-01-01T00:00:00.001", size,
                              dtype='datetime64[ms]'),
        'str': pd.Series([str(x) for x in range(size)]),
        'empty_str': [''] * size,
        'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
        'null': [None] * size,
        'null_list': [None] * 2 + [[None] * (x % 4) for x in range(size - 2)],
    }
    if categorical:
        arrays['str_category'] = arrays['str'].astype('category')
    return pd.DataFrame(arrays)
@pytest.mark.pandas
@pytest.mark.parametrize('chunk_size', [None, 1000])
def test_pandas_parquet_2_0_roundtrip(tempdir, chunk_size):
    """All-types DataFrame roundtrips via parquet 2.0, preserving pandas
    metadata, at both default and explicit row-group chunk sizes."""
    df = alltypes_sample(size=10000, categorical=True)

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert arrow_table.schema.pandas_metadata is not None

    _write_table(arrow_table, filename, version="2.0",
                 coerce_timestamps='ms', chunk_size=chunk_size)
    table_read = pq.read_pandas(filename)
    assert table_read.schema.pandas_metadata is not None

    # Metadata (including the serialized pandas schema) must survive.
    assert arrow_table.schema.metadata == table_read.schema.metadata

    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
def test_set_data_page_size():
    """Roundtrip a large column at several explicit data page sizes."""
    values = pa.array([1, 2, 3] * 1000000)
    table = pa.Table.from_arrays([values], names=['f0'])
    for page_size in (2 << 16, 2 << 17, 2 << 18):
        _check_roundtrip(table, data_page_size=page_size)
@pytest.mark.pandas
def test_chunked_table_write():
    """A table whose columns hold multiple chunks writes/reads correctly."""
    # ARROW-232
    df = alltypes_sample(size=10)

    batch = pa.RecordBatch.from_pandas(df)
    table = pa.Table.from_batches([batch] * 3)
    _check_roundtrip(table, version='2.0')

    # Same check for list-typed columns.
    df, _ = dataframe_with_lists()
    batch = pa.RecordBatch.from_pandas(df)
    table = pa.Table.from_batches([batch] * 3)
    _check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_memory_map(tempdir):
    """Reading with memory_map=True yields the same table as a normal read."""
    df = alltypes_sample(size=10)

    table = pa.Table.from_pandas(df)
    _check_roundtrip(table, read_table_kwargs={'memory_map': True},
                     version='2.0')

    filename = str(tempdir / 'tmp_file')
    with open(filename, 'wb') as f:
        _write_table(table, f, version='2.0')
    table_read = pq.read_pandas(filename, memory_map=True)
    assert table_read.equals(table)
@pytest.mark.pandas
def test_enable_buffered_stream(tempdir):
    """Reading with a non-zero buffer_size yields the same table as a
    normal read."""
    df = alltypes_sample(size=10)

    table = pa.Table.from_pandas(df)
    _check_roundtrip(table, read_table_kwargs={'buffer_size': 1025},
                     version='2.0')

    filename = str(tempdir / 'tmp_file')
    with open(filename, 'wb') as f:
        _write_table(table, f, version='2.0')
    table_read = pq.read_pandas(filename, buffer_size=4096)
    assert table_read.equals(table)
def test_special_chars_filename(tempdir):
    """A '#' in the file name must not confuse path handling."""
    table = pa.Table.from_arrays([pa.array([42])], ["ints"])
    path = tempdir / "foo # bar"

    assert not path.exists()
    _write_table(table, str(path))
    assert path.exists()

    assert _read_table(str(path)).equals(table)
@pytest.mark.pandas
def test_empty_table_roundtrip():
    """Zero-row slices of every column type (including null types) roundtrip."""
    df = alltypes_sample(size=10)

    # Create a non-empty table, then slice each column to zero rows so the
    # schema (including null / list<null> columns) is fully preserved.
    table = pa.Table.from_pandas(df)
    table = pa.Table.from_arrays(
        [col.chunk(0)[:0] for col in table.itercolumns()],
        names=table.schema.names)

    assert table.schema.field('null').type == pa.null()
    assert table.schema.field('null_list').type == pa.list_(pa.null())
    _check_roundtrip(table, version='2.0')
@pytest.mark.pandas
def test_empty_table_no_columns():
    """A completely empty DataFrame (no rows, no columns) roundtrips."""
    df = pd.DataFrame()
    empty = pa.Table.from_pandas(df, preserve_index=False)
    _check_roundtrip(empty)
def test_empty_lists_table_roundtrip():
    """A list<int32> column containing only empty lists must roundtrip."""
    # ARROW-2744
    empty_lists = pa.array([[], []], type=pa.list_(pa.int32()))
    _check_roundtrip(pa.Table.from_arrays([empty_lists], ["A"]))
def test_nested_list_nonnullable_roundtrip_bug():
    """Roundtrip a list column with a non-nullable item field (ARROW-5630)."""
    # Reproduce failure in ARROW-5630
    typ = pa.list_(pa.field("item", pa.float32(), False))
    num_rows = 10000
    t = pa.table([
        pa.array(([[0] * ((i + 5) % 10) for i in range(0, 10)]
                  * (num_rows // 10)), type=typ)
    ], ['a'])
    # Small page size forces multiple pages, which triggered the bug.
    _check_roundtrip(t, data_page_size=4096)
@pytest.mark.pandas
def test_pandas_parquet_datetime_tz():
    """tz-aware timestamps roundtrip with their timezone, both as a column
    and as the index."""
    s = pd.Series([datetime.datetime(2017, 9, 6)])
    s = s.dt.tz_localize('utc')

    s.index = s

    # Both a column and an index to hit both use cases
    df = pd.DataFrame({'tz_aware': s,
                       'tz_eastern': s.dt.tz_convert('US/Eastern')},
                      index=s)

    f = BytesIO()

    arrow_table = pa.Table.from_pandas(df)

    _write_table(arrow_table, f, coerce_timestamps='ms')
    f.seek(0)

    table_read = pq.read_pandas(f)

    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
@pytest.mark.skipif(six.PY2, reason='datetime.timezone is available since '
                                    'python version 3.2')
def test_datetime_timezone_tzinfo():
    """A datetime carrying a stdlib ``datetime.timezone`` tzinfo roundtrips."""
    value = datetime.datetime(2018, 1, 1, 1, 23, 45,
                              tzinfo=datetime.timezone.utc)
    df = pd.DataFrame({'foo': [value]})

    _roundtrip_pandas_dataframe(df, write_kwargs={})
@pytest.mark.pandas
def test_pandas_parquet_custom_metadata(tempdir):
    """The serialized pandas metadata (including RangeIndex info) is
    written into the parquet file-level key/value metadata."""
    df = alltypes_sample(size=10000)

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert b'pandas' in arrow_table.schema.metadata

    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')

    metadata = pq.read_metadata(filename).metadata
    assert b'pandas' in metadata

    js = json.loads(metadata[b'pandas'].decode('utf8'))
    # The default RangeIndex is stored compactly, not as a real column.
    assert js['index_columns'] == [{'kind': 'range',
                                    'name': None,
                                    'start': 0, 'stop': 10000,
                                    'step': 1}]
@pytest.mark.pandas
def test_pandas_parquet_column_multiindex(tempdir):
    """A DataFrame with a MultiIndex on its *columns* roundtrips."""
    df = alltypes_sample(size=10)
    df.columns = pd.MultiIndex.from_tuples(
        list(zip(df.columns, df.columns[::-1])),
        names=['level_1', 'level_2']
    )

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    assert arrow_table.schema.pandas_metadata is not None

    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')

    table_read = pq.read_pandas(filename)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_2_0_roundtrip_read_pandas_no_index_written(tempdir):
    """With preserve_index=False the pandas metadata must have empty
    index_columns but still list the data columns (ARROW-2170)."""
    df = alltypes_sample(size=10000)

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    js = arrow_table.schema.pandas_metadata
    assert not js['index_columns']
    # ARROW-2170
    # While index_columns should be empty, columns needs to be filled still.
    assert js['columns']

    _write_table(arrow_table, filename, version='2.0', coerce_timestamps='ms')
    table_read = pq.read_pandas(filename)

    js = table_read.schema.pandas_metadata
    assert not js['index_columns']

    assert arrow_table.schema.metadata == table_read.schema.metadata

    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_1_0_roundtrip(tempdir):
    """Roundtrip through parquet 1.0; uint32 is widened to int64 because
    format 1.0 has no unsigned 32-bit logical type."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # NOTE(review): 'int8' uses int16 — presumably to avoid int8
        # overflow for size=10000; confirm before changing.
        'int8': np.arange(size, dtype=np.int16),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'str': [str(x) for x in range(size)],
        'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
        'empty_str': [''] * size
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, filename, version='1.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()

    # We pass uint32_t as int64_t if we write Parquet version 1.0
    df['uint32'] = df['uint32'].values.astype(np.int64)

    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_multiple_path_types(tempdir):
    """Writing/reading should accept both PEP 519 path objects and strings."""
    # Test compatibility with PEP 519 path-like objects
    path = tempdir / 'zzz.parquet'
    df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
    _write_table(df, path)
    table_read = _read_table(path)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)

    # Test compatibility with plain string paths.  Join with the path
    # separator so the file stays inside the tmp fixture directory
    # (``str(tempdir) + 'zzz.parquet'`` previously created a *sibling*
    # file outside tempdir that was never cleaned up).
    path = str(tempdir / 'zzz.parquet')
    df = pd.DataFrame({'x': np.arange(10, dtype=np.int64)})
    _write_table(df, path)
    table_read = _read_table(path)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_column_selection(tempdir):
    """``columns=`` reads only the requested columns; duplicates in the
    selection are deduplicated (ARROW-4267)."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16)
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)
    _write_table(arrow_table, filename)
    table_read = _read_table(filename, columns=['uint8'])
    df_read = table_read.to_pandas()

    tm.assert_frame_equal(df[['uint8']], df_read)

    # ARROW-4267: Selection of duplicate columns still leads to these columns
    # being read uniquely.
    table_read = _read_table(filename, columns=['uint8', 'uint8'])
    df_read = table_read.to_pandas()

    tm.assert_frame_equal(df[['uint8']], df_read)
def _random_integers(size, dtype):
# We do not generate integers outside the int64 range
platform_int_info = np.iinfo('int_')
iinfo = np.iinfo(dtype)
return np.random.randint(max(iinfo.min, platform_int_info.min),
min(iinfo.max, platform_int_info.max),
size=size).astype(dtype)
def _test_dataframe(size=10000, seed=0):
    """Build a seeded DataFrame with numeric, bool, string and all-null
    columns for roundtrip tests."""
    np.random.seed(seed)
    df = pd.DataFrame({
        'uint8': _random_integers(size, np.uint8),
        'uint16': _random_integers(size, np.uint16),
        'uint32': _random_integers(size, np.uint32),
        'uint64': _random_integers(size, np.uint64),
        'int8': _random_integers(size, np.int8),
        'int16': _random_integers(size, np.int16),
        'int32': _random_integers(size, np.int32),
        'int64': _random_integers(size, np.int64),
        'float32': np.random.randn(size).astype(np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'strings': [tm.rands(10) for i in range(size)],
        'all_none': [None] * size,
        'all_none_category': [None] * size
    })
    # TODO(PARQUET-1015)
    # df['all_none_category'] = df['all_none_category'].astype('category')
    return df
@pytest.mark.pandas
def test_pandas_parquet_native_file_roundtrip(tempdir):
    """Roundtrip a DataFrame through pyarrow's native in-memory streams."""
    df = _test_dataframe(10000)
    sink = pa.BufferOutputStream()
    _write_table(pa.Table.from_pandas(df), sink, version="2.0")

    reader = pa.BufferReader(sink.getvalue())
    df_read = _read_table(reader).to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_parquet_incremental_file_build(tempdir):
    """ParquetWriter can append row groups one table at a time; the reader
    sees the concatenation of all writes."""
    df = _test_dataframe(100)
    df['unique_id'] = 0

    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()

    writer = pq.ParquetWriter(out, arrow_table.schema, version='2.0')

    frames = []
    for i in range(10):
        df['unique_id'] = i
        arrow_table = pa.Table.from_pandas(df, preserve_index=False)
        writer.write_table(arrow_table)

        frames.append(df.copy())

    writer.close()

    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))

    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_read_pandas_column_subset(tempdir):
    """``read_pandas(columns=...)`` returns exactly the requested columns."""
    df = _test_dataframe(10000)
    sink = pa.BufferOutputStream()
    _write_table(pa.Table.from_pandas(df), sink, version="2.0")

    reader = pa.BufferReader(sink.getvalue())
    df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()
    tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@pytest.mark.pandas
def test_pandas_parquet_empty_roundtrip(tempdir):
    """A zero-row DataFrame roundtrips through an in-memory parquet file."""
    df = _test_dataframe(0)
    sink = pa.BufferOutputStream()
    _write_table(pa.Table.from_pandas(df), sink, version="2.0")

    df_read = _read_table(pa.BufferReader(sink.getvalue())).to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_pyfile_roundtrip(tempdir):
    """Roundtrip using plain Python file objects for both write and read."""
    filename = tempdir / 'pandas_pyfile_roundtrip.parquet'
    size = 5
    df = pd.DataFrame({
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0,
        'strings': ['foo', 'bar', None, 'baz', 'qux']
    })

    arrow_table = pa.Table.from_pandas(df)

    with filename.open('wb') as f:
        _write_table(arrow_table, f, version="1.0")

    data = io.BytesIO(filename.read_bytes())

    table_read = _read_table(data)
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_pandas_parquet_configuration_options(tempdir):
    """Roundtrip under each writer option: dictionary encoding on/off,
    statistics on/off, and every supported compression codec."""
    size = 10000
    np.random.seed(0)
    df = pd.DataFrame({
        'uint8': np.arange(size, dtype=np.uint8),
        'uint16': np.arange(size, dtype=np.uint16),
        'uint32': np.arange(size, dtype=np.uint32),
        'uint64': np.arange(size, dtype=np.uint64),
        # NOTE(review): 'int8' uses int16 — presumably to avoid int8
        # overflow at size=10000; confirm before changing.
        'int8': np.arange(size, dtype=np.int16),
        'int16': np.arange(size, dtype=np.int16),
        'int32': np.arange(size, dtype=np.int32),
        'int64': np.arange(size, dtype=np.int64),
        'float32': np.arange(size, dtype=np.float32),
        'float64': np.arange(size, dtype=np.float64),
        'bool': np.random.randn(size) > 0
    })
    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df)

    for use_dictionary in [True, False]:
        _write_table(arrow_table, filename, version='2.0',
                     use_dictionary=use_dictionary)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)

    for write_statistics in [True, False]:
        _write_table(arrow_table, filename, version='2.0',
                     write_statistics=write_statistics)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)

    for compression in ['NONE', 'SNAPPY', 'GZIP', 'LZ4', 'ZSTD']:
        _write_table(arrow_table, filename, version='2.0',
                     compression=compression)
        table_read = _read_table(filename)
        df_read = table_read.to_pandas()
        tm.assert_frame_equal(df, df_read)
def make_sample_file(table_or_df):
    """Write *table_or_df* to an in-memory parquet file and open it as a
    ``pq.ParquetFile``."""
    a_table = (table_or_df if isinstance(table_or_df, pa.Table)
               else pa.Table.from_pandas(table_or_df))

    buf = io.BytesIO()
    _write_table(a_table, buf, compression='SNAPPY', version='2.0',
                 coerce_timestamps='ms')

    buf.seek(0)
    return pq.ParquetFile(buf)
def test_compression_level():
    """Per-file and per-column gzip compression levels roundtrip; invalid
    codec/level combinations raise IOError."""
    arr = pa.array(list(map(int, range(1000))))
    data = [arr, arr]
    table = pa.Table.from_arrays(data, names=['a', 'b'])

    # Check one compression level.
    _check_roundtrip(table, expected=table, compression="gzip",
                     compression_level=1)

    # Check another one to make sure that compression_level=1 does not
    # coincide with the default one in Arrow.
    _check_roundtrip(table, expected=table, compression="gzip",
                     compression_level=5)

    # Check that the user can provide a compression level per column
    _check_roundtrip(table, expected=table, compression="gzip",
                     compression_level=[{'a': 2, 'b': 3}])

    # Check that specifying a compression level for a codec which does NOT
    # allow specifying one, results into an error.
    # Uncompressed, snappy, lz4 and lzo do not support specifying a compression
    # level.
    # GZIP (zlib) allows for specifying a compression level but as of up
    # to version 1.2.11 the valid range is [-1, 9].
    invalid_combinations = [("snappy", 4), ("lz4", 5), ("gzip", -1337),
                            ("None", 444), ("lzo", 14)]
    buf = io.BytesIO()
    for (codec, level) in invalid_combinations:
        with pytest.raises(IOError):
            _write_table(table, buf, compression=codec,
                         compression_level=level)
@pytest.mark.pandas
def test_parquet_metadata_api():
    """Smoke-test the FileMetaData / ParquetSchema / RowGroupMetaData /
    ColumnChunkMetaData accessors on a sample file."""
    df = alltypes_sample(size=10000)
    df = df.reindex(columns=sorted(df.columns))
    df.index = np.random.randint(0, 1000000, size=len(df))

    fileh = make_sample_file(df)
    ncols = len(df.columns)

    # Series of sniff tests
    meta = fileh.metadata
    repr(meta)
    assert meta.num_rows == len(df)
    assert meta.num_columns == ncols + 1  # +1 for index
    assert meta.num_row_groups == 1
    assert meta.format_version == '2.0'
    assert 'parquet-cpp' in meta.created_by
    assert isinstance(meta.serialized_size, int)
    assert isinstance(meta.metadata, dict)

    # Schema
    schema = fileh.schema
    assert meta.schema is schema
    assert len(schema) == ncols + 1  # +1 for index
    repr(schema)

    col = schema[0]
    repr(col)
    assert col.name == df.columns[0]
    assert col.max_definition_level == 1
    # (a duplicated ``max_repetition_level`` assertion was removed here)
    assert col.max_repetition_level == 0
    assert col.physical_type == 'BOOLEAN'
    assert col.converted_type == 'NONE'

    with pytest.raises(IndexError):
        schema[ncols + 1]  # +1 for index

    with pytest.raises(IndexError):
        schema[-1]

    # Row group
    for rg in range(meta.num_row_groups):
        rg_meta = meta.row_group(rg)
        assert isinstance(rg_meta, pq.RowGroupMetaData)
        repr(rg_meta)

        for col in range(rg_meta.num_columns):
            col_meta = rg_meta.column(col)
            assert isinstance(col_meta, pq.ColumnChunkMetaData)
            repr(col_meta)

    with pytest.raises(IndexError):
        meta.row_group(-1)

    with pytest.raises(IndexError):
        meta.row_group(meta.num_row_groups + 1)

    rg_meta = meta.row_group(0)
    assert rg_meta.num_rows == len(df)
    assert rg_meta.num_columns == ncols + 1  # +1 for index
    assert rg_meta.total_byte_size > 0

    with pytest.raises(IndexError):
        col_meta = rg_meta.column(-1)

    with pytest.raises(IndexError):
        col_meta = rg_meta.column(ncols + 2)

    col_meta = rg_meta.column(0)
    assert col_meta.file_offset > 0
    assert col_meta.file_path == ''  # created from BytesIO
    assert col_meta.physical_type == 'BOOLEAN'
    assert col_meta.num_values == 10000
    assert col_meta.path_in_schema == 'bool'
    assert col_meta.is_stats_set is True
    assert isinstance(col_meta.statistics, pq.Statistics)
    assert col_meta.compression == 'SNAPPY'
    assert col_meta.encodings == ('PLAIN', 'RLE')
    assert col_meta.has_dictionary_page is False
    assert col_meta.dictionary_page_offset is None
    assert col_meta.data_page_offset > 0
    assert col_meta.total_compressed_size > 0
    assert col_meta.total_uncompressed_size > 0
    with pytest.raises(NotImplementedError):
        col_meta.has_index_page
    with pytest.raises(NotImplementedError):
        col_meta.index_page_offset
def test_parquet_metadata_lifetime(tempdir):
    """Chained metadata accessors must keep their parents alive (ARROW-6642)."""
    # ARROW-6642 - ensure that chained access keeps parent objects alive
    table = pa.table({'a': [1, 2, 3]})
    pq.write_table(table, tempdir / 'test_metadata_segfault.parquet')
    dataset = pq.ParquetDataset(tempdir / 'test_metadata_segfault.parquet')
    # Previously this chained expression could segfault once intermediate
    # objects were garbage-collected.
    dataset.pieces[0].get_metadata().row_group(0).column(0).statistics
@pytest.mark.pandas
@pytest.mark.parametrize(
    (
        'data',
        'type',
        'physical_type',
        'min_value',
        'max_value',
        'null_count',
        'num_values',
        'distinct_count'
    ),
    [
        ([1, 2, 2, None, 4], pa.uint8(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint16(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint32(), 'INT32', 1, 4, 1, 4, 0),
        ([1, 2, 2, None, 4], pa.uint64(), 'INT64', 1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int8(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0),
        ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0),
        (
            [-1.1, 2.2, 2.3, None, 4.4], pa.float32(),
            'FLOAT', -1.1, 4.4, 1, 4, 0
        ),
        (
            [-1.1, 2.2, 2.3, None, 4.4], pa.float64(),
            'DOUBLE', -1.1, 4.4, 1, 4, 0
        ),
        (
            [u'', u'b', unichar(1000), None, u'aaa'], pa.binary(),
            'BYTE_ARRAY', b'', unichar(1000).encode('utf-8'), 1, 4, 0
        ),
        (
            [True, False, False, True, True], pa.bool_(),
            'BOOLEAN', False, True, 0, 5, 0
        ),
        (
            [b'\x00', b'b', b'12', None, b'aaa'], pa.binary(),
            'BYTE_ARRAY', b'\x00', b'b', 1, 4, 0
        ),
    ]
)
def test_parquet_column_statistics_api(data, type, physical_type, min_value,
                                       max_value, null_count, num_values,
                                       distinct_count):
    """Column-chunk statistics (min/max/null_count/...) match the written
    data for each parametrized Arrow type."""
    df = pd.DataFrame({'data': data})
    schema = pa.schema([pa.field('data', type)])
    table = pa.Table.from_pandas(df, schema=schema, safe=False)
    fileh = make_sample_file(table)

    meta = fileh.metadata

    rg_meta = meta.row_group(0)
    col_meta = rg_meta.column(0)

    stat = col_meta.statistics
    assert stat.has_min_max
    # Floats are compared with a tolerance via _close().
    assert _close(type, stat.min, min_value)
    assert _close(type, stat.max, max_value)
    assert stat.null_count == null_count
    assert stat.num_values == num_values
    # TODO(kszucs) until parquet-cpp API doesn't expose HasDistinctCount
    # it always returns zero.
    assert stat.distinct_count == distinct_count
    assert stat.physical_type == physical_type
@pytest.mark.pandas
def test_parquet_raise_on_unset_statistics():
    """An all-NaT column yields has_min_max=False and a None max."""
    df = pd.DataFrame({"t": pd.Series([pd.NaT], dtype="datetime64[ns]")})
    meta = make_sample_file(pa.Table.from_pandas(df)).metadata

    stats = meta.row_group(0).column(0).statistics
    assert not stats.has_min_max
    assert stats.max is None
def _close(type, left, right):
    """Compare *left* and *right* with a tolerance appropriate for *type*.

    Floats use an absolute tolerance; everything else is exact equality.
    (``type`` shadows the builtin but is kept for interface stability.)
    """
    if type == pa.float32():
        tolerance = 1E-7
    elif type == pa.float64():
        tolerance = 1E-13
    else:
        return left == right
    return abs(left - right) < tolerance
def test_statistics_convert_logical_types(tempdir):
    """Statistics for logical types (unsigned ints, utf8, time, timestamp)
    are converted back to the corresponding Python values."""
    # ARROW-5166, ARROW-4139
    # (min, max, type) triples; each is written as a two-row column and the
    # statistics read back must equal the original Python values.
    cases = [(10, 11164359321221007157, pa.uint64()),
             (10, 4294967295, pa.uint32()),
             (u"ähnlich", u"öffentlich", pa.utf8()),
             (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
              pa.time32('ms')),
             (datetime.time(10, 30, 0, 1000), datetime.time(15, 30, 0, 1000),
              pa.time64('us')),
             (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
              datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
              pa.timestamp('ms')),
             (datetime.datetime(2019, 6, 24, 0, 0, 0, 1000),
              datetime.datetime(2019, 6, 25, 0, 0, 0, 1000),
              pa.timestamp('us'))]

    for i, (min_val, max_val, typ) in enumerate(cases):
        t = pa.Table.from_arrays([pa.array([min_val, max_val], type=typ)],
                                 ['col'])
        path = str(tempdir / ('example{}.parquet'.format(i)))
        pq.write_table(t, path, version='2.0')
        pf = pq.ParquetFile(path)
        stats = pf.metadata.row_group(0).column(0).statistics
        assert stats.min == min_val
        assert stats.max == max_val
def test_parquet_write_disable_statistics(tempdir):
    """write_statistics accepts True (all columns), False (none), or a list
    of column names (subset)."""
    # ARROW-1429: statistics enabled by default
    table = pa.Table.from_pydict(
        {'a': pa.array([1, 2, 3]), 'b': pa.array(['a', 'b', 'c'])})
    _write_table(table, tempdir / 'data.parquet')
    meta = pq.read_metadata(tempdir / 'data.parquet')
    for col in [0, 1]:
        cc = meta.row_group(0).column(col)
        assert cc.is_stats_set is True
        assert cc.statistics is not None

    # Disabled for every column.
    _write_table(table, tempdir / 'data2.parquet', write_statistics=False)
    meta = pq.read_metadata(tempdir / 'data2.parquet')
    for col in [0, 1]:
        cc = meta.row_group(0).column(col)
        assert cc.is_stats_set is False
        assert cc.statistics is None

    # Enabled only for column 'a'.
    _write_table(table, tempdir / 'data3.parquet', write_statistics=['a'])
    meta = pq.read_metadata(tempdir / 'data3.parquet')
    cc_a = meta.row_group(0).column(0)
    assert cc_a.is_stats_set is True
    assert cc_a.statistics is not None
    cc_b = meta.row_group(0).column(1)
    assert cc_b.is_stats_set is False
    assert cc_b.statistics is None
@pytest.mark.pandas
def test_compare_schemas():
    """ParquetSchema / ColumnSchema equality: equal for identical files,
    unequal for differing schemas and for arbitrary objects."""
    df = alltypes_sample(size=10000)

    fileh = make_sample_file(df)
    fileh2 = make_sample_file(df)
    # fileh3 drops every other column, so its schema must differ.
    fileh3 = make_sample_file(df[df.columns[::2]])

    # ParquetSchema
    assert isinstance(fileh.schema, pq.ParquetSchema)
    assert fileh.schema.equals(fileh.schema)
    assert fileh.schema == fileh.schema
    assert fileh.schema.equals(fileh2.schema)
    assert fileh.schema == fileh2.schema
    assert fileh.schema != 'arbitrary object'
    assert not fileh.schema.equals(fileh3.schema)
    assert fileh.schema != fileh3.schema

    # ColumnSchema
    assert isinstance(fileh.schema[0], pq.ColumnSchema)
    assert fileh.schema[0].equals(fileh.schema[0])
    assert fileh.schema[0] == fileh.schema[0]
    assert not fileh.schema[0].equals(fileh.schema[1])
    assert fileh.schema[0] != fileh.schema[1]
    assert fileh.schema[0] != 'arbitrary object'
def test_validate_schema_write_table(tempdir):
    """Writing a table whose schema does not match the writer's declared
    schema raises ValueError (here: int64 data vs. uint32 field)."""
    # ARROW-2926
    simple_fields = [
        pa.field('POS', pa.uint32()),
        pa.field('desc', pa.string())
    ]

    simple_schema = pa.schema(simple_fields)

    # simple_table schema does not match simple_schema
    simple_from_array = [pa.array([1]), pa.array(['bla'])]
    simple_table = pa.Table.from_arrays(simple_from_array, ['POS', 'desc'])

    path = tempdir / 'simple_validate_schema.parquet'

    with pq.ParquetWriter(path, simple_schema,
                          version='2.0',
                          compression='snappy', flavor='spark') as w:
        with pytest.raises(ValueError):
            w.write_table(simple_table)
@pytest.mark.pandas
def test_column_of_arrays(tempdir):
    """Columns whose cells are numpy arrays survive a parquet roundtrip."""
    df, schema = dataframe_with_arrays()

    target = tempdir / 'pandas_roundtrip.parquet'
    table = pa.Table.from_pandas(df, schema=schema)
    _write_table(table, target, version="2.0", coerce_timestamps='ms')

    df_read = _read_table(target).to_pandas()
    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_coerce_timestamps(tempdir):
    """coerce_timestamps='us' converts list<timestamp[ms]> data on write;
    an unknown coercion unit raises ValueError.
    """
    # OrderedDict comes from the file-level import; the redundant
    # function-local ``from collections import OrderedDict`` was removed.
    arrays = OrderedDict()
    fields = [pa.field('datetime64',
                       pa.list_(pa.timestamp('ms')))]
    arrays['datetime64'] = [
        np.array(['2007-07-13T01:23:34.123456789',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
        None,
        None,
        np.array(['2007-07-13T02',
                  None,
                  '2010-08-13T05:46:57.437699912'],
                 dtype='datetime64[ms]'),
    ]

    df = pd.DataFrame(arrays)
    schema = pa.schema(fields)

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)

    _write_table(arrow_table, filename, version="2.0", coerce_timestamps='us')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()

    # The data read back has microsecond resolution.
    df_expected = df.copy()
    for i, x in enumerate(df_expected['datetime64']):
        if isinstance(x, np.ndarray):
            df_expected['datetime64'][i] = x.astype('M8[us]')

    tm.assert_frame_equal(df_expected, df_read)

    with pytest.raises(ValueError):
        _write_table(arrow_table, filename, version='2.0',
                     coerce_timestamps='unknown')
@pytest.mark.pandas
def test_coerce_timestamps_truncated(tempdir):
    """allow_truncated_timestamps=True permits lossy us -> ms coercion
    without raising."""
    # ARROW-2555: the first value has a sub-millisecond component that is
    # dropped by the coercion.
    dt_us = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1, microsecond=1)
    dt_ms = datetime.datetime(year=2017, month=1, day=1, hour=1, minute=1,
                              second=1)

    fields_us = [pa.field('datetime64', pa.timestamp('us'))]
    arrays_us = {'datetime64': [dt_us, dt_ms]}

    df_us = pd.DataFrame(arrays_us)
    schema_us = pa.schema(fields_us)

    filename = tempdir / 'pandas_truncated.parquet'
    table_us = pa.Table.from_pandas(df_us, schema=schema_us)

    _write_table(table_us, filename, version="2.0", coerce_timestamps='ms',
                 allow_truncated_timestamps=True)
    table_ms = _read_table(filename)
    df_ms = table_ms.to_pandas()

    arrays_expected = {'datetime64': [dt_ms, dt_ms]}
    df_expected = pd.DataFrame(arrays_expected)
    tm.assert_frame_equal(df_expected, df_ms)
@pytest.mark.pandas
def test_column_of_lists(tempdir):
    """Columns whose cells are Python lists survive a parquet roundtrip."""
    df, schema = dataframe_with_lists(parquet_compatible=True)

    filename = tempdir / 'pandas_roundtrip.parquet'
    arrow_table = pa.Table.from_pandas(df, schema=schema)
    _write_table(arrow_table, filename, version='2.0')
    table_read = _read_table(filename)
    df_read = table_read.to_pandas()

    if PY2:
        # On Python 2, date objects are converted to datetime64 on read,
        # so normalize the expected frame the same way.
        for col in ['date32[day]_list', 'date64[ms]_list']:
            df[col] = df[col].apply(
                lambda x: list(map(np.datetime64, x)) if x else x
            )

    tm.assert_frame_equal(df, df_read)
@pytest.mark.pandas
def test_date_time_types(tempdir):
    """Date/time/timestamp types roundtrip; date64 reads back as date32,
    time32[s] widens to ms, and INT96 storage is exercised via both the
    deprecated flag and flavor='spark'."""
    t1 = pa.date32()
    data1 = np.array([17259, 17260, 17261], dtype='int32')
    a1 = pa.array(data1, type=t1)

    t2 = pa.date64()
    data2 = data1.astype('int64') * 86400000
    a2 = pa.array(data2, type=t2)

    t3 = pa.timestamp('us')
    start = pd.Timestamp('2001-01-01').value / 1000
    data3 = np.array([start, start + 1, start + 2], dtype='int64')
    a3 = pa.array(data3, type=t3)

    t4 = pa.time32('ms')
    data4 = np.arange(3, dtype='i4')
    a4 = pa.array(data4, type=t4)

    t5 = pa.time64('us')
    a5 = pa.array(data4.astype('int64'), type=t5)

    t6 = pa.time32('s')
    a6 = pa.array(data4, type=t6)

    # Parquet has no seconds time type, so time32[s] comes back as ms.
    ex_t6 = pa.time32('ms')
    ex_a6 = pa.array(data4 * 1000, type=ex_t6)

    t7 = pa.timestamp('ns')
    start = pd.Timestamp('2001-01-01').value
    data7 = np.array([start, start + 1000, start + 2000],
                     dtype='int64')
    a7 = pa.array(data7, type=t7)

    table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],
                                 ['date32', 'date64', 'timestamp[us]',
                                  'time32[s]', 'time64[us]',
                                  'time32_from64[s]',
                                  'timestamp[ns]'])

    # date64 as date32
    # time32[s] to time32[ms]
    expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],
                                    ['date32', 'date64', 'timestamp[us]',
                                     'time32[s]', 'time64[us]',
                                     'time32_from64[s]',
                                     'timestamp[ns]'])

    _check_roundtrip(table, expected=expected, version='2.0')

    t0 = pa.timestamp('ms')
    data0 = np.arange(4, dtype='int64')
    a0 = pa.array(data0, type=t0)

    t1 = pa.timestamp('us')
    data1 = np.arange(4, dtype='int64')
    a1 = pa.array(data1, type=t1)

    t2 = pa.timestamp('ns')
    data2 = np.arange(4, dtype='int64')
    a2 = pa.array(data2, type=t2)

    table = pa.Table.from_arrays([a0, a1, a2],
                                 ['ts[ms]', 'ts[us]', 'ts[ns]'])
    expected = pa.Table.from_arrays([a0, a1, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])

    # int64 for all timestamps supported by default
    filename = tempdir / 'int64_timestamps.parquet'
    _write_table(table, filename, version='2.0')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT64'
    read_table = _read_table(filename)
    assert read_table.equals(expected)

    t0_ns = pa.timestamp('ns')
    data0_ns = np.array(data0 * 1000000, dtype='int64')
    a0_ns = pa.array(data0_ns, type=t0_ns)

    t1_ns = pa.timestamp('ns')
    data1_ns = np.array(data1 * 1000, dtype='int64')
    a1_ns = pa.array(data1_ns, type=t1_ns)

    # INT96 stores nanoseconds, so everything reads back as timestamp[ns].
    expected = pa.Table.from_arrays([a0_ns, a1_ns, a2],
                                    ['ts[ms]', 'ts[us]', 'ts[ns]'])

    # int96 nanosecond timestamps produced upon request
    filename = tempdir / 'explicit_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 use_deprecated_int96_timestamps=True)
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)

    # int96 nanosecond timestamps implied by flavor 'spark'
    filename = tempdir / 'spark_int96_timestamps.parquet'
    _write_table(table, filename, version='2.0',
                 flavor='spark')
    parquet_schema = pq.ParquetFile(filename).schema
    for i in range(3):
        assert parquet_schema.column(i).physical_type == 'INT96'
    read_table = _read_table(filename)
    assert read_table.equals(expected)
def test_timestamp_restore_timezone():
    """Timezone metadata on a timestamp column is preserved on roundtrip."""
    # ARROW-5888
    tz_type = pa.timestamp('ms', tz='America/New_York')
    column = pa.array([1, 2, 3], type=tz_type)
    _check_roundtrip(pa.table([column], names=['f0']))
@pytest.mark.pandas
def test_list_of_datetime_time_roundtrip():
    """A column holding a list of ``datetime.time`` values roundtrips."""
    # ARROW-4135
    times = pd.to_datetime(['09:00', '09:30', '10:00', '10:30', '11:00',
                            '11:30', '12:00'])
    df = pd.DataFrame({'time': [times.time]})
    _roundtrip_pandas_dataframe(df, write_kwargs={})
@pytest.mark.pandas
def test_parquet_version_timestamp_differences():
    """Resulting timestamp units depend on format version, explicit
    coercion, and the deprecated INT96 flag."""
    i_s = pd.Timestamp('2010-01-01').value / 1000000000  # := 1262304000

    d_s = np.arange(i_s, i_s + 10, 1, dtype='int64')
    d_ms = d_s * 1000
    d_us = d_ms * 1000
    d_ns = d_us * 1000

    a_s = pa.array(d_s, type=pa.timestamp('s'))
    a_ms = pa.array(d_ms, type=pa.timestamp('ms'))
    a_us = pa.array(d_us, type=pa.timestamp('us'))
    a_ns = pa.array(d_ns, type=pa.timestamp('ns'))

    names = ['ts:s', 'ts:ms', 'ts:us', 'ts:ns']
    table = pa.Table.from_arrays([a_s, a_ms, a_us, a_ns], names)

    # Using Parquet version 1.0, seconds should be coerced to milliseconds
    # and nanoseconds should be coerced to microseconds by default
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_us], names)
    _check_roundtrip(table, expected)

    # Using Parquet version 2.0, seconds should be coerced to milliseconds
    # and nanoseconds should be retained by default
    expected = pa.Table.from_arrays([a_ms, a_ms, a_us, a_ns], names)
    _check_roundtrip(table, expected, version='2.0')

    # Explicit coercion to ms / us overrides the defaults.
    expected = pa.Table.from_arrays([a_ms, a_ms, a_ms, a_ms], names)
    _check_roundtrip(table, expected, coerce_timestamps='ms')

    expected = pa.Table.from_arrays([a_us, a_us, a_us, a_us], names)
    _check_roundtrip(table, expected, version='2.0', coerce_timestamps='us')

    # With the deprecated INT96 flag everything is stored as nanoseconds.
    expected = pa.Table.from_arrays([a_ns, a_ns, a_ns, a_ns], names)
    _check_roundtrip(table, expected,
                     use_deprecated_int96_timestamps=True)
    _check_roundtrip(table, expected, version='2.0',
                     use_deprecated_int96_timestamps=True)
def test_large_list_records():
    """List column with varied lengths, empty lists and nulls round-trips."""
    list_lengths = np.random.randint(0, 500, size=50)
    # force some empty lists
    list_lengths[::10] = 0
    # every 8th entry is a null list
    list_values = [list(map(int, np.random.randint(0, 100, size=x)))
                   if i % 8 else None
                   for i, x in enumerate(list_lengths)]
    a1 = pa.array(list_values)
    table = pa.Table.from_arrays([a1], ['int_lists'])
    _check_roundtrip(table)
def test_sanitized_spark_field_names():
    """flavor='spark' replaces characters Spark prohibits in field names."""
    a0 = pa.array([0, 1, 2, 3, 4])
    name = 'prohib; ,\t{}'
    table = pa.Table.from_arrays([a0], [name])
    result = _roundtrip_table(table, write_table_kwargs={'flavor': 'spark'})
    expected_name = 'prohib______'
    assert result.schema[0].name == expected_name
@pytest.mark.pandas
def test_spark_flavor_preserves_pandas_metadata():
    """Spark-flavored writes keep pandas index metadata intact."""
    df = _test_dataframe(size=100)
    df.index = np.arange(0, 10 * len(df), 10)
    df.index.name = 'foo'
    result = _roundtrip_pandas_dataframe(df, {'version': '2.0',
                                              'flavor': 'spark'})
    tm.assert_frame_equal(result, df)
def test_fixed_size_binary():
    """Fixed-size binary column (with a null) round-trips."""
    t0 = pa.binary(10)
    data = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']
    a0 = pa.array(data, type=t0)
    table = pa.Table.from_arrays([a0],
                                 ['binary[10]'])
    _check_roundtrip(table)
@pytest.mark.pandas
def test_multithreaded_read():
    """Threaded and single-threaded reads of the same file agree."""
    df = alltypes_sample(size=10000)
    table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(table, buf, compression='SNAPPY', version='2.0')
    buf.seek(0)
    table1 = _read_table(buf, use_threads=True)
    buf.seek(0)
    table2 = _read_table(buf, use_threads=False)
    assert table1.equals(table2)
@pytest.mark.pandas
def test_min_chunksize():
    """chunk_size=-1 writes a single chunk; chunk_size=0 is rejected."""
    data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])
    table = pa.Table.from_pandas(data.reset_index())
    buf = io.BytesIO()
    _write_table(table, buf, chunk_size=-1)
    buf.seek(0)
    result = _read_table(buf)
    assert result.equals(table)
    with pytest.raises(ValueError):
        _write_table(table, buf, chunk_size=0)
@pytest.mark.pandas
def test_pass_separate_metadata():
    """ParquetFile accepts externally-read metadata for the same file."""
    df = alltypes_sample(size=10000)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, compression='snappy', version='2.0')
    buf.seek(0)
    metadata = pq.read_metadata(buf)
    buf.seek(0)
    fileh = pq.ParquetFile(buf, metadata=metadata)
    tm.assert_frame_equal(df, fileh.read().to_pandas())
@pytest.mark.pandas
def test_read_single_row_group():
    """Reading each row group individually and concatenating recovers the data."""
    # N rows split into K row groups
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.num_row_groups == K
    row_groups = [pf.read_row_group(i) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df, result.to_pandas())
@pytest.mark.pandas
def test_read_single_row_group_with_column_subset():
    """Per-row-group reads honor a column subset; duplicates are deduplicated."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    cols = list(df.columns[:2])
    row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
    # ARROW-4267: requesting the same column twice must not duplicate it
    row_groups = [pf.read_row_group(i, columns=cols + cols) for i in range(K)]
    result = pa.concat_tables(row_groups)
    tm.assert_frame_equal(df[cols], result.to_pandas())
@pytest.mark.pandas
def test_read_multiple_row_groups():
    """read_row_groups with all group indices recovers the full table."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.num_row_groups == K
    result = pf.read_row_groups(range(K))
    tm.assert_frame_equal(df, result.to_pandas())
@pytest.mark.pandas
def test_read_multiple_row_groups_with_column_subset():
    """read_row_groups honors a column subset; duplicates are deduplicated."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    cols = list(df.columns[:2])
    result = pf.read_row_groups(range(K), columns=cols)
    tm.assert_frame_equal(df[cols], result.to_pandas())
    # requesting the same column twice must not duplicate it
    result = pf.read_row_groups(range(K), columns=cols + cols)
    tm.assert_frame_equal(df[cols], result.to_pandas())
@pytest.mark.pandas
def test_scan_contents():
    """scan_contents counts all rows, with or without a column subset."""
    N, K = 10000, 4
    df = alltypes_sample(size=N)
    a_table = pa.Table.from_pandas(df)
    buf = io.BytesIO()
    _write_table(a_table, buf, row_group_size=N / K,
                 compression='snappy', version='2.0')
    buf.seek(0)
    pf = pq.ParquetFile(buf)
    assert pf.scan_contents() == 10000
    assert pf.scan_contents(df.columns[:4]) == 10000
@pytest.mark.pandas
def test_parquet_piece_read(tempdir):
    """A ParquetDatasetPiece pointing at a single file reads that file."""
    df = _test_dataframe(1000)
    table = pa.Table.from_pandas(df)
    path = tempdir / 'parquet_piece_read.parquet'
    _write_table(table, path, version='2.0')
    piece1 = pq.ParquetDatasetPiece(path)
    result = piece1.read()
    assert result.equals(table)
@pytest.mark.pandas
def test_parquet_piece_open_and_get_metadata(tempdir):
    """Piece read() yields a Table and get_metadata() yields FileMetaData."""
    df = _test_dataframe(100)
    table = pa.Table.from_pandas(df)
    path = tempdir / 'parquet_piece_read.parquet'
    _write_table(table, path, version='2.0')
    piece = pq.ParquetDatasetPiece(path)
    table1 = piece.read()
    assert isinstance(table1, pa.Table)
    meta1 = piece.get_metadata()
    assert isinstance(meta1, pq.FileMetaData)
    assert table == table1
def test_parquet_piece_basics():
    """str() and equality of ParquetDatasetPiece for path/row-group/keys."""
    path = '/baz.parq'
    piece1 = pq.ParquetDatasetPiece(path)
    piece2 = pq.ParquetDatasetPiece(path, row_group=1)
    piece3 = pq.ParquetDatasetPiece(
        path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])
    assert str(piece1) == path
    assert str(piece2) == '/baz.parq | row_group=1'
    assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'
    assert piece1 == piece1
    assert piece2 == piece2
    assert piece3 == piece3
    assert piece1 != piece3
def test_partition_set_dictionary_type():
    """PartitionSet infers string/integer dictionaries; others raise."""
    set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])
    set2 = pq.PartitionSet('key2', [2007, 2008, 2009])
    assert isinstance(set1.dictionary, pa.StringArray)
    assert isinstance(set2.dictionary, pa.IntegerArray)
    # datetime partition keys are not supported as dictionary values
    set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
    with pytest.raises(TypeError):
        set3.dictionary
@pytest.mark.pandas
def test_read_partitioned_directory(tempdir):
    """Partitioned-directory round-trip on the local filesystem."""
    fs = LocalFileSystem.get_instance()
    _partition_test_for_filesystem(fs, tempdir)
@pytest.mark.pandas
def test_create_parquet_dataset_multi_threaded(tempdir):
    """Multi-threaded metadata discovery matches single-threaded discovery."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    _partition_test_for_filesystem(fs, base_path)
    # reference: walk the directory with a single thread
    manifest = pq.ParquetManifest(base_path, filesystem=fs,
                                  metadata_nthreads=1)
    dataset = pq.ParquetDataset(base_path, filesystem=fs, metadata_nthreads=16)
    assert len(dataset.pieces) > 0
    partitions = dataset.partitions
    assert len(partitions.partition_names) > 0
    assert partitions.partition_names == manifest.partitions.partition_names
    assert len(partitions.levels) == len(manifest.partitions.levels)
@pytest.mark.pandas
def test_equivalency(tempdir):
    """Dataset filters: a single conjunction and a list-of-lists DNF form.

    Also checks that embedded NUL characters in filter values are rejected.
    """
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    # Old-style single-conjunction filter (AND of all predicates)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', '=', 1), ('string', '!=', 'b'),
                 ('boolean', '==', True)]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    assert 0 not in result_df['integer'].values
    assert 'b' not in result_df['string'].values
    assert False not in result_df['boolean'].values
    # New-style DNF filter: OR of two AND-groups.  NOTE: partition values
    # read back as strings here, hence the 'True'/'False' comparisons.
    filters = [
        [
            ('integer', '=', 1),
            ('string', '!=', 'b'),
            ('boolean', '==', 'True')
        ],
        [('integer', '=', 0), ('boolean', '==', 'False')]
    ]
    dataset = pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    table = dataset.read()
    result_df = table.to_pandas().reset_index(drop=True)
    # Check that all rows in the DF fulfill the filter
    df_filter_1 = (np.array(result_df['integer']) == 1) \
        & (np.array(result_df['string']) != 'b') \
        & (np.array(result_df['boolean']) == 'True')
    df_filter_2 = (np.array(result_df['integer']) == 0) \
        & (np.array(result_df['boolean']) == 'False')
    assert df_filter_1.sum() > 0
    assert df_filter_2.sum() > 0
    assert result_df.shape[0] == (df_filter_1.sum() + df_filter_2.sum())
    # Embedded NULs in filter values are not supported
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', b'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
    with pytest.raises(NotImplementedError):
        filters = [[('string', '==', u'1\0a')]]
        pq.ParquetDataset(base_path, filesystem=fs, filters=filters)
@pytest.mark.pandas
def test_cutoff_exclusive_integer(tempdir):
    """Strict < and > filters exclude the boundary partitions."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<', 4),
            ('integers', '>', 1),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    result_list = [x for x in map(int, result_df['integers'].values)]
    assert result_list == [2, 3]
@pytest.mark.pandas
@pytest.mark.xfail(
    # Known limitation: partition keys lose the datetime type when turned
    # into categoricals, so this comparison currently raises
    raises=TypeError,
    reason='Loss of type information in creation of categoricals.'
)
def test_cutoff_exclusive_datetime(tempdir):
    """Strict < and > filters on date partitions (currently expected to fail)."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    date_keys = [
        datetime.date(2018, 4, 9),
        datetime.date(2018, 4, 10),
        datetime.date(2018, 4, 11),
        datetime.date(2018, 4, 12),
        datetime.date(2018, 4, 13)
    ]
    partition_spec = [
        ['dates', date_keys]
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'dates': np.array(date_keys, dtype='datetime64'),
    }, columns=['index', 'dates'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('dates', '<', "2018-04-12"),
            ('dates', '>', "2018-04-10")
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    expected = pd.Categorical(
        np.array([datetime.date(2018, 4, 11)], dtype='datetime64'),
        categories=np.array(date_keys, dtype='datetime64'))
    assert result_df['dates'].values == expected
@pytest.mark.pandas
def test_inclusive_integer(tempdir):
    """Inclusive <= and >= filters keep the boundary partitions."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[
            ('integers', '<=', 3),
            ('integers', '>=', 2),
        ]
    )
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    # single int() coercion (the original applied int() twice redundantly);
    # matches the pattern used in test_cutoff_exclusive_integer
    result_list = [int(x) for x in result_df['integers'].values]
    assert result_list == [2, 3]
@pytest.mark.pandas
def test_inclusive_set(tempdir):
    """'in' filters restrict each partition column to the given value set."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1]
    string_keys = ['a', 'b', 'c']
    boolean_keys = [True, False]
    partition_spec = [
        ['integer', integer_keys],
        ['string', string_keys],
        ['boolean', boolean_keys]
    ]
    df = pd.DataFrame({
        'integer': np.array(integer_keys, dtype='i4').repeat(15),
        'string': np.tile(np.tile(np.array(string_keys, dtype=object), 5), 2),
        'boolean': np.tile(np.tile(np.array(boolean_keys, dtype='bool'), 5),
                           3),
    }, columns=['integer', 'string', 'boolean'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(
        base_path, filesystem=fs,
        filters=[('integer', 'in', {1}), ('string', 'in', {'a', 'b'}),
                 ('boolean', 'in', {True})]
    )
    table = dataset.read()
    result_df = (table.to_pandas().reset_index(drop=True))
    # values excluded by the 'in' sets must not appear
    assert 0 not in result_df['integer'].values
    assert 'c' not in result_df['string'].values
    assert False not in result_df['boolean'].values
@pytest.mark.pandas
def test_invalid_pred_op(tempdir):
    """Malformed filter predicates raise ValueError.

    Covers: unknown operator ('=<'), empty set for 'in', and a set value
    used with a scalar operator ('!=').
    """
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '=<', 3),
                          ])
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', 'in', set()),
                          ])
    with pytest.raises(ValueError):
        pq.ParquetDataset(base_path,
                          filesystem=fs,
                          filters=[
                              ('integers', '!=', {3}),
                          ])
@pytest.mark.pandas
def test_filters_read_table(tempdir):
    """read_table/read_pandas accept filters in flat and DNF forms."""
    fs = LocalFileSystem.get_instance()
    base_path = tempdir
    integer_keys = [0, 1, 2, 3, 4]
    partition_spec = [
        ['integers', integer_keys],
    ]
    N = 5
    df = pd.DataFrame({
        'index': np.arange(N),
        'integers': np.array(integer_keys, dtype='i4'),
    }, columns=['index', 'integers'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    table = pq.read_table(
        base_path, filesystem=fs, filters=[('integers', '<', 3)])
    assert table.num_rows == 3
    # same filter wrapped as a one-group DNF
    table = pq.read_table(
        base_path, filesystem=fs, filters=[[('integers', '<', 3)]])
    assert table.num_rows == 3
    table = pq.read_pandas(
        base_path, filters=[('integers', '<', 3)])
    assert table.num_rows == 3
@pytest.fixture
def s3_bucket(request, minio_server):
    """Create a test bucket on the minio server and return its name."""
    boto3 = pytest.importorskip('boto3')
    botocore = pytest.importorskip('botocore')
    address, access_key, secret_key = minio_server
    s3 = boto3.resource(
        's3',
        endpoint_url='http://{}'.format(address),
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        config=botocore.client.Config(signature_version='s3v4'),
        region_name='us-east-1'
    )
    bucket = s3.Bucket('test-s3fs')
    bucket.create()
    return 'test-s3fs'
@pytest.fixture
def s3_example(minio_server, s3_bucket):
    """Yield (s3fs filesystem, unique bucket URI); clean up afterwards."""
    s3fs = pytest.importorskip('s3fs')
    address, access_key, secret_key = minio_server
    fs = s3fs.S3FileSystem(
        key=access_key,
        secret=secret_key,
        client_kwargs={
            'endpoint_url': 'http://{}'.format(address)
        }
    )
    test_dir = guid()
    bucket_uri = 's3://{0}/{1}'.format(s3_bucket, test_dir)
    fs.mkdir(bucket_uri)
    yield fs, bucket_uri
    # remove the test directory and everything under it
    fs.rm(bucket_uri, recursive=True)
@pytest.mark.pandas
@pytest.mark.s3
def test_read_partitioned_directory_s3fs(s3_example):
    """Partitioned-directory round-trip against an s3fs filesystem."""
    from pyarrow.filesystem import S3FSWrapper
    fs, bucket_uri = s3_example
    wrapper = S3FSWrapper(fs)
    _partition_test_for_filesystem(wrapper, bucket_uri)
    # ensure ParquetDataset also works with the raw s3fs filesystem
    dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)
    dataset.read()
def _partition_test_for_filesystem(fs, base_path):
    """Write a two-level partitioned dataset on *fs* and verify the read.

    Partition columns come back last and as pandas categoricals.
    """
    foo_keys = [0, 1]
    bar_keys = ['a', 'b', 'c']
    partition_spec = [
        ['foo', foo_keys],
        ['bar', bar_keys]
    ]
    N = 30
    df = pd.DataFrame({
        'index': np.arange(N),
        'foo': np.array(foo_keys, dtype='i4').repeat(15),
        'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
        'values': np.random.randn(N)
    }, columns=['index', 'foo', 'bar', 'values'])
    _generate_partition_directories(fs, base_path, partition_spec, df)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    table = dataset.read()
    result_df = (table.to_pandas()
                 .sort_values(by='index')
                 .reset_index(drop=True))
    expected_df = (df.sort_values(by='index')
                   .reset_index(drop=True)
                   .reindex(columns=result_df.columns))
    # partition columns are read back as categoricals
    expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
    expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)
    assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()
    tm.assert_frame_equal(result_df, expected_df)
def _generate_partition_directories(fs, base_dir, partition_spec, df):
    """Materialize *df* as a Hive-style partitioned directory tree on *fs*.

    partition_spec is e.g. [['foo', [0, 1]], ['bar', ['a', 'b', 'c']]];
    each level produces 'name=value' directories.  Leaf directories get a
    parquet file with the matching subset of rows; every directory also
    gets an empty _SUCCESS marker file (as Spark/Hadoop writers do).
    """
    DEPTH = len(partition_spec)
    def _visit_level(base_dir, level, part_keys):
        # recurse one partition level, accumulating (name, value) keys
        name, values = partition_spec[level]
        for value in values:
            this_part_keys = part_keys + [(name, value)]
            level_dir = fs._path_join(
                str(base_dir),
                '{0}={1}'.format(name, value)
            )
            fs.mkdir(level_dir)
            if level == DEPTH - 1:
                # leaf: write the rows matching the accumulated keys
                file_path = fs._path_join(level_dir, guid())
                filtered_df = _filter_partition(df, this_part_keys)
                part_table = pa.Table.from_pandas(filtered_df)
                with fs.open(file_path, 'wb') as f:
                    _write_table(part_table, f)
                assert fs.exists(file_path)
                file_success = fs._path_join(level_dir, '_SUCCESS')
                with fs.open(file_success, 'wb') as f:
                    pass
            else:
                _visit_level(level_dir, level + 1, this_part_keys)
                file_success = fs._path_join(level_dir, '_SUCCESS')
                with fs.open(file_success, 'wb') as f:
                    pass
    _visit_level(base_dir, 0, [])
def _test_read_common_metadata_files(fs, base_path):
    """Dataset discovers a _common_metadata file and uses its schema."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    base_path = str(base_path)
    data_path = os.path.join(base_path, 'data.parquet')
    table = pa.Table.from_pandas(df)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    metadata_path = os.path.join(base_path, '_common_metadata')
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(base_path, filesystem=fs)
    assert dataset.common_metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        common_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(common_schema)
    # a list of directories works the same as a single directory
    dataset2 = pq.ParquetDataset([base_path], filesystem=fs)
    assert dataset2.schema.equals(dataset.schema)
@pytest.mark.pandas
def test_read_common_metadata_files(tempdir):
    """_common_metadata discovery on the local filesystem."""
    fs = LocalFileSystem.get_instance()
    _test_read_common_metadata_files(fs, tempdir)
@pytest.mark.pandas
def test_read_metadata_files(tempdir):
    """Dataset discovers a _metadata file and uses its schema."""
    fs = LocalFileSystem.get_instance()
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    data_path = tempdir / 'data.parquet'
    table = pa.Table.from_pandas(df)
    with fs.open(data_path, 'wb') as f:
        _write_table(table, f)
    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)
    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)
    with fs.open(data_path) as f:
        metadata_schema = pq.read_metadata(f).schema
    assert dataset.schema.equals(metadata_schema)
@pytest.mark.pandas
def test_read_schema(tempdir):
    """pq.read_schema returns the file schema, with and without memory_map."""
    N = 100
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])
    data_path = tempdir / 'test.parquet'
    table = pa.Table.from_pandas(df)
    _write_table(table, data_path)
    read1 = pq.read_schema(data_path)
    read2 = pq.read_schema(data_path, memory_map=True)
    assert table.schema.equals(read1, check_metadata=False)
    assert table.schema.equals(read2, check_metadata=False)
    # pandas metadata is preserved in the schema metadata
    assert table.schema.metadata[b'pandas'] == read1.metadata[b'pandas']
def _filter_partition(df, part_keys):
predicate = np.ones(len(df), dtype=bool)
to_drop = []
for name, value in part_keys:
to_drop.append(name)
if isinstance(value, (datetime.date, datetime.datetime)):
value = pd.Timestamp(value)
predicate &= df[name] == value
return df[predicate].drop(to_drop, axis=1)
@pytest.mark.pandas
def test_read_multiple_files(tempdir):
    """Multi-file dataset reads: full, with metadata, column subset, threads,
    and failure modes when one file has a mismatched schema."""
    nfiles = 10
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # Hack so that we don't have a dtype cast in v1 files
        df['uint32'] = df['uint32'].astype(np.int64)
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        paths.append(path)
    # Write a _SUCCESS.crc file
    (dirpath / '_SUCCESS.crc').touch()
    def read_multiple_files(paths, columns=None, use_threads=True, **kwargs):
        dataset = pq.ParquetDataset(paths, **kwargs)
        return dataset.read(columns=columns, use_threads=use_threads)
    result = read_multiple_files(paths)
    expected = pa.concat_tables(test_data)
    assert result.equals(expected)
    # Read with provided metadata
    metadata = pq.read_metadata(paths[0])
    result2 = read_multiple_files(paths, metadata=metadata)
    assert result2.equals(expected)
    result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)
    assert result3.equals(expected)
    # Read column subset
    to_read = [0, 2, 6, result.num_columns - 1]
    col_names = [result.field(i).name for i in to_read]
    out = pa.localfs.read_parquet(dirpath, columns=col_names)
    expected = pa.Table.from_arrays([result.column(i) for i in to_read],
                                    names=col_names,
                                    metadata=result.schema.metadata)
    assert out.equals(expected)
    # Read with multiple threads
    pa.localfs.read_parquet(dirpath, use_threads=True)
    # Test failure modes with non-uniform metadata
    bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]
    bad_apple_path = tempdir / '{}.parquet'.format(guid())
    t = pa.Table.from_pandas(bad_apple)
    _write_table(t, bad_apple_path)
    bad_meta = pq.read_metadata(bad_apple_path)
    with pytest.raises(ValueError):
        read_multiple_files(paths + [bad_apple_path])
    with pytest.raises(ValueError):
        read_multiple_files(paths, metadata=bad_meta)
    mixed_paths = [bad_apple_path, paths[0]]
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths, schema=bad_meta.schema)
    with pytest.raises(ValueError):
        read_multiple_files(mixed_paths)
@pytest.mark.pandas
def test_dataset_read_pandas(tempdir):
    """ParquetDataset.read_pandas with a column subset over several files."""
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        # non-overlapping index ranges so concatenation is well-defined
        df.index = np.arange(i * size, (i + 1) * size)
        df.index.name = 'index'
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df)
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_dataset_memory_map(tempdir):
    """ARROW-2627: ParquetDataset works with memory-mapped reads."""
    # ARROW-2627: Check that we can use ParquetDataset with memory-mapping
    dirpath = tempdir / guid()
    dirpath.mkdir()
    df = _test_dataframe(10, seed=0)
    path = dirpath / '{}.parquet'.format(0)
    table = pa.Table.from_pandas(df)
    _write_table(table, path, version='2.0')
    dataset = pq.ParquetDataset(dirpath, memory_map=True)
    assert dataset.pieces[0].read().equals(table)
@pytest.mark.pandas
def test_dataset_enable_buffered_stream(tempdir):
    """buffer_size: negative values raise; positive sizes still read correctly."""
    dirpath = tempdir / guid()
    dirpath.mkdir()
    df = _test_dataframe(10, seed=0)
    path = dirpath / '{}.parquet'.format(0)
    table = pa.Table.from_pandas(df)
    _write_table(table, path, version='2.0')
    with pytest.raises(ValueError):
        pq.ParquetDataset(dirpath, buffer_size=-64)
    for buffer_size in [128, 1024]:
        dataset = pq.ParquetDataset(dirpath, buffer_size=buffer_size)
        assert dataset.pieces[0].read().equals(table)
@pytest.mark.pandas
@pytest.mark.parametrize('preserve_index', [True, False, None])
def test_dataset_read_pandas_common_metadata(tempdir, preserve_index):
    """ARROW-1103: pandas metadata is taken from _metadata when the data
    files themselves carry none."""
    # ARROW-1103
    nfiles = 5
    size = 5
    dirpath = tempdir / guid()
    dirpath.mkdir()
    test_data = []
    frames = []
    paths = []
    for i in range(nfiles):
        df = _test_dataframe(size, seed=i)
        df.index = pd.Index(np.arange(i * size, (i + 1) * size), name='index')
        path = dirpath / '{}.parquet'.format(i)
        table = pa.Table.from_pandas(df, preserve_index=preserve_index)
        # Obliterate metadata
        table = table.replace_schema_metadata(None)
        assert table.schema.metadata is None
        _write_table(table, path)
        test_data.append(table)
        frames.append(df)
        paths.append(path)
    # Write _metadata common file
    table_for_metadata = pa.Table.from_pandas(
        df, preserve_index=preserve_index
    )
    pq.write_metadata(table_for_metadata.schema, dirpath / '_metadata')
    dataset = pq.ParquetDataset(dirpath)
    columns = ['uint8', 'strings']
    result = dataset.read_pandas(columns=columns).to_pandas()
    expected = pd.concat([x[columns] for x in frames])
    # the index name survives only when the index was preserved
    expected.index.name = (
        df.index.name if preserve_index is not False else None)
    tm.assert_frame_equal(result, expected)
def _make_example_multifile_dataset(base_path, nfiles=10, file_nrows=5):
    """Write *nfiles* small parquet files under *base_path*; return the paths."""
    written_paths = []
    for seed in range(nfiles):
        frame = _test_dataframe(file_nrows, seed=seed)
        target = base_path / '{}.parquet'.format(seed)
        _write_table(frame, target)
        written_paths.append(target)
    return written_paths
@pytest.mark.pandas
def test_ignore_private_directories(tempdir):
    """Directories with a leading underscore are skipped during discovery."""
    dirpath = tempdir / guid()
    dirpath.mkdir()
    paths = _make_example_multifile_dataset(dirpath, nfiles=10,
                                            file_nrows=5)
    # private directory
    (dirpath / '_impala_staging').mkdir()
    dataset = pq.ParquetDataset(dirpath)
    assert set(map(str, paths)) == set(x.path for x in dataset.pieces)
@pytest.mark.pandas
def test_ignore_hidden_files_dot(tempdir):
    """Files with a leading dot are skipped during dataset discovery."""
    dirpath = tempdir / guid()
    dirpath.mkdir()
    paths = _make_example_multifile_dataset(dirpath, nfiles=10,
                                            file_nrows=5)
    with (dirpath / '.DS_Store').open('wb') as f:
        f.write(b'gibberish')
    with (dirpath / '.private').open('wb') as f:
        f.write(b'gibberish')
    dataset = pq.ParquetDataset(dirpath)
    assert set(map(str, paths)) == set(x.path for x in dataset.pieces)
@pytest.mark.pandas
def test_ignore_hidden_files_underscore(tempdir):
    """Files with a leading underscore are skipped during dataset discovery."""
    dirpath = tempdir / guid()
    dirpath.mkdir()
    paths = _make_example_multifile_dataset(dirpath, nfiles=10,
                                            file_nrows=5)
    with (dirpath / '_committed_123').open('wb') as f:
        f.write(b'abcd')
    with (dirpath / '_started_321').open('wb') as f:
        f.write(b'abcd')
    dataset = pq.ParquetDataset(dirpath)
    assert set(map(str, paths)) == set(x.path for x in dataset.pieces)
@pytest.mark.pandas
def test_multiindex_duplicate_values(tempdir):
    """A MultiIndex with duplicate level values round-trips intact."""
    num_rows = 3
    numbers = list(range(num_rows))
    index = pd.MultiIndex.from_arrays(
        [['foo', 'foo', 'bar'], numbers],
        names=['foobar', 'some_numbers'],
    )
    df = pd.DataFrame({'numbers': numbers}, index=index)
    table = pa.Table.from_pandas(df)
    filename = tempdir / 'dup_multi_index_levels.parquet'
    _write_table(table, filename)
    result_table = _read_table(filename)
    assert table.equals(result_table)
    result_df = result_table.to_pandas()
    tm.assert_frame_equal(result_df, df)
@pytest.mark.pandas
def test_write_error_deletes_incomplete_file(tempdir):
    """ARROW-1285: a failed write must not leave a partial file behind."""
    # ARROW-1285
    df = pd.DataFrame({'a': list('abc'),
                       'b': list(range(1, 4)),
                       'c': np.arange(3, 6).astype('u1'),
                       'd': np.arange(4.0, 7.0, dtype='float64'),
                       'e': [True, False, True],
                       'f': pd.Categorical(list('abc')),
                       'g': pd.date_range('20130101', periods=3),
                       'h': pd.date_range('20130101', periods=3,
                                          tz='US/Eastern'),
                       'i': pd.date_range('20130101', periods=3, freq='ns')})
    pdf = pa.Table.from_pandas(df)
    filename = tempdir / 'tmp_file'
    # the write is expected to fail (ns timestamps in a v1 file);
    # what matters is that the partial output file is removed
    try:
        _write_table(pdf, filename)
    except pa.ArrowException:
        pass
    assert not filename.exists()
@pytest.mark.pandas
def test_noncoerced_nanoseconds_written_without_exception(tempdir):
    """ARROW-1957: v2.0 preserves ns timestamps; lossy coercion still raises."""
    # ARROW-1957: the Parquet version 2.0 writer preserves Arrow
    # nanosecond timestamps by default
    n = 9
    df = pd.DataFrame({'x': range(n)},
                      index=pd.date_range('2017-01-01', freq='1n', periods=n))
    tb = pa.Table.from_pandas(df)
    filename = tempdir / 'written.parquet'
    try:
        pq.write_table(tb, filename, version='2.0')
    except Exception:
        pass
    assert filename.exists()
    recovered_table = pq.read_table(filename)
    assert tb.equals(recovered_table)
    # Loss of data thru coercion (without explicit override) still an error
    filename = tempdir / 'not_written.parquet'
    with pytest.raises(ValueError):
        pq.write_table(tb, filename, coerce_timestamps='ms', version='2.0')
def test_read_non_existent_file(tempdir):
    """Reading a missing path raises, and the error message names the path."""
    path = 'non-existent-file.parquet'
    try:
        pq.read_table(path)
    except Exception as e:
        assert path in e.args[0]
    else:
        # Original version passed silently when no exception was raised;
        # make the missing-error case an explicit failure.
        pytest.fail('pq.read_table should raise for a non-existent file')
def test_read_table_doesnt_warn(datadir):
    """Reading a legacy (v0.7.1) file emits no warnings."""
    with pytest.warns(None) as record:
        pq.read_table(datadir / 'v0.7.1.parquet')
    assert len(record) == 0
def _test_write_to_dataset_with_partitions(base_path,
                                           filesystem=None,
                                           schema=None,
                                           index_name=None):
    """Write a partitioned dataset and verify schema and data round-trip.

    ARROW-1400.  NOTE(review): the *index_name* parameter is currently
    unused in this body — confirm against callers whether it should set
    the frame's index name.
    """
    # ARROW-1400
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              # np.nan: the pd.np alias is deprecated/removed
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df, schema=schema, safe=False,
                                        preserve_index=False)
    pq.write_to_dataset(output_table, base_path, partition_by,
                        filesystem=filesystem)
    metadata_path = os.path.join(base_path, '_common_metadata')
    if filesystem is not None:
        with filesystem.open(metadata_path, 'wb') as f:
            pq.write_metadata(output_table.schema, f)
    else:
        pq.write_metadata(output_table.schema, metadata_path)
    # ARROW-2891: Ensure the output_schema is preserved when writing a
    # partitioned dataset
    dataset = pq.ParquetDataset(base_path,
                                filesystem=filesystem,
                                validate_schema=True)
    # ARROW-2209: Ensure the dataset schema also includes the partition columns
    dataset_cols = set(dataset.schema.to_arrow_schema().names)
    assert dataset_cols == set(output_table.schema.names)
    input_table = dataset.read()
    input_df = input_table.to_pandas()
    # Read data back in and compare with original DataFrame
    # Partitioned columns added to the end of the DataFrame when read
    input_df_cols = input_df.columns.tolist()
    assert partition_by == input_df_cols[-1 * len(partition_by):]
    # Partitioned columns become 'categorical' dtypes
    input_df = input_df[cols]
    for col in partition_by:
        output_df[col] = output_df[col].astype('category')
    assert output_df.equals(input_df)
def _test_write_to_dataset_no_partitions(base_path, filesystem=None):
    """Repeated unpartitioned writes append files; data de-duplicates back.

    ARROW-1400.
    """
    # ARROW-1400
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    cols = output_df.columns.tolist()
    output_table = pa.Table.from_pandas(output_df)
    if filesystem is None:
        filesystem = LocalFileSystem.get_instance()
    # Without partitions, append files to root_path
    n = 5
    for i in range(n):
        pq.write_to_dataset(output_table, base_path,
                            filesystem=filesystem)
    output_files = [file for file in filesystem.ls(base_path)
                    if file.endswith(".parquet")]
    assert len(output_files) == n
    # Deduplicated incoming DataFrame should match
    # original outgoing Dataframe
    input_table = pq.ParquetDataset(base_path,
                                    filesystem=filesystem).read()
    input_df = input_table.to_pandas()
    input_df = input_df.drop_duplicates()
    input_df = input_df[cols]
    assert output_df.equals(input_df)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(tempdir):
    """Partitioned write_to_dataset on the local filesystem."""
    _test_write_to_dataset_with_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_schema(tempdir):
    """Partitioned write honoring an explicitly supplied schema."""
    schema = pa.schema([pa.field('group1', type=pa.string()),
                        pa.field('group2', type=pa.string()),
                        pa.field('num', type=pa.int64()),
                        pa.field('nan', type=pa.int32()),
                        pa.field('date', type=pa.timestamp(unit='us'))])
    _test_write_to_dataset_with_partitions(str(tempdir), schema=schema)
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_index_name(tempdir):
    """Partitioned write with a named index."""
    _test_write_to_dataset_with_partitions(str(tempdir),
                                           index_name='index_name')
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(tempdir):
    """Unpartitioned write_to_dataset on the local filesystem."""
    _test_write_to_dataset_no_partitions(str(tempdir))
@pytest.mark.pandas
def test_write_to_dataset_with_partitions_and_custom_filenames(tempdir):
    """ARROW-3538: partition filenames follow a user-supplied callback."""
    output_df = pd.DataFrame({'group1': list('aaabbbbccc'),
                              'group2': list('eefeffgeee'),
                              'num': list(range(10)),
                              # np.nan: the pd.np alias is deprecated/removed
                              'nan': [np.nan] * 10,
                              'date': np.arange('2017-01-01', '2017-01-11',
                                                dtype='datetime64[D]')})
    partition_by = ['group1', 'group2']
    output_table = pa.Table.from_pandas(output_df)
    path = str(tempdir)
    def partition_filename_callback(keys):
        return "{0}-{1}.parquet".format(*keys)
    pq.write_to_dataset(output_table, path,
                        partition_by, partition_filename_callback)
    dataset = pq.ParquetDataset(path)
    # ARROW-3538: Ensure partition filenames match the given pattern
    # defined in the local function partition_filename_callback
    expected_basenames = [
        'a-e.parquet', 'a-f.parquet',
        'b-e.parquet', 'b-f.parquet',
        'b-g.parquet', 'c-e.parquet'
    ]
    output_basenames = [os.path.basename(p.path) for p in dataset.pieces]
    assert sorted(expected_basenames) == sorted(output_basenames)
@pytest.mark.large_memory
def test_large_table_int32_overflow():
    """Writing a column longer than INT32_MAX rows must not overflow."""
    size = np.iinfo('int32').max + 1
    arr = np.ones(size, dtype='uint8')
    parr = pa.array(arr, type=pa.uint8())
    table = pa.Table.from_arrays([parr], names=['one'])
    f = io.BytesIO()
    _write_table(table, f)
def _simple_table_roundtrip(table, **write_kwargs):
    """Write *table* to an in-memory buffer and read it straight back."""
    sink = pa.BufferOutputStream()
    _write_table(table, sink, **write_kwargs)
    return _read_table(sink.getvalue())
@pytest.mark.large_memory
def test_byte_array_exactly_2gb():
    """ARROW-3762: byte arrays straddling the 2GB boundary round-trip."""
    # Test edge case reported in ARROW-3762
    val = b'x' * (1 << 10)

    base = pa.array([val] * ((1 << 21) - 1))
    cases = [
        [b'x' * 1023],  # 2^31 - 1
        [b'x' * 1024],  # 2^31
        [b'x' * 1025]   # 2^31 + 1
    ]
    for case in cases:
        values = pa.chunked_array([base, pa.array(case)])
        t = pa.table([values], names=['f0'])
        result = _simple_table_roundtrip(t, use_dictionary=False)
        assert t.equals(result)
@pytest.mark.pandas
@pytest.mark.large_memory
def test_binary_array_overflow_to_chunked():
    """ARROW-3762: >2GB binary column reads back split into chunks."""
    # ARROW-3762

    # 2^31 + 1 bytes
    values = [b'x'] + [
        b'x' * (1 << 20)
    ] * 2 * (1 << 10)
    df = pd.DataFrame({'byte_col': values})

    tbl = pa.Table.from_pandas(df, preserve_index=False)
    read_tbl = _simple_table_roundtrip(tbl)

    col0_data = read_tbl[0]
    assert isinstance(col0_data, pa.ChunkedArray)

    # Split up into 2GB chunks
    assert col0_data.num_chunks == 2

    assert tbl.equals(read_tbl)
@pytest.mark.pandas
@pytest.mark.large_memory
def test_list_of_binary_large_cell():
    """ARROW-4688: near-2GB list-of-binary cells round-trip."""
    # ARROW-4688
    data = []

    # TODO(wesm): handle chunked children
    # 2^31 - 1 bytes in a single cell
    # data.append([b'x' * (1 << 20)] * 2047 + [b'x' * ((1 << 20) - 1)])

    # A little under 2GB in cell each containing approximately 10MB each
    data.extend([[b'x' * 1000000] * 10] * 214)

    arr = pa.array(data)
    table = pa.Table.from_arrays([arr], ['chunky_cells'])
    read_table = _simple_table_roundtrip(table)
    assert table.equals(read_table)
@pytest.mark.pandas
def test_index_column_name_duplicate(tempdir):
    """An index sharing its name with a regular column round-trips intact."""
    data = {
        'close': {
            pd.Timestamp('2017-06-30 01:31:00'): 154.99958999999998,
            pd.Timestamp('2017-06-30 01:32:00'): 154.99958999999998,
        },
        'time': {
            pd.Timestamp('2017-06-30 01:31:00'): pd.Timestamp(
                '2017-06-30 01:31:00'
            ),
            pd.Timestamp('2017-06-30 01:32:00'): pd.Timestamp(
                '2017-06-30 01:32:00'
            ),
        }
    }
    path = str(tempdir / 'data.parquet')
    # drop=False keeps 'time' both as index and as a column.
    dfx = pd.DataFrame(data).set_index('time', drop=False)
    tdfx = pa.Table.from_pandas(dfx)
    _write_table(tdfx, path)
    arrow_table = _read_table(path)
    result_df = arrow_table.to_pandas()
    tm.assert_frame_equal(result_df, dfx)
@pytest.mark.pandas
def test_parquet_nested_convenience(tempdir):
    """ARROW-1684: list-typed columns round-trip, incl. column selection."""
    df = pd.DataFrame({
        'a': [[1, 2, 3], None, [4, 5], []],
        'b': [[1.], None, None, [6., 7.]],
    })

    target = str(tempdir / 'nested_convenience.parquet')
    _write_table(pa.Table.from_pandas(df, preserve_index=False), target)

    only_a = pq.read_table(target, columns=['a'])
    tm.assert_frame_equal(only_a.to_pandas(), df[['a']])

    both = pq.read_table(target, columns=['a', 'b'])
    tm.assert_frame_equal(both.to_pandas(), df)
@pytest.mark.pandas
def test_backwards_compatible_index_naming(datadir):
    """A v0.7.1-written file with an unnamed index reads back correctly."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(io.BytesIO(expected_string), sep=r'\s{2,}',
                           index_col=None, header=0, engine='python')
    table = _read_table(datadir / 'v0.7.1.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_named(datadir):
    """A v0.7.1 file with a fully-named MultiIndex reads back correctly."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string), sep=r'\s{2,}',
        index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()

    table = _read_table(datadir / 'v0.7.1.all-named-index.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_index_multi_level_some_named(datadir):
    """A v0.7.1 file with a partially-named MultiIndex reads back correctly."""
    expected_string = b"""\
carat        cut  color  clarity  depth  table  price     x     y     z
 0.23      Ideal      E      SI2   61.5   55.0    326  3.95  3.98  2.43
 0.21    Premium      E      SI1   59.8   61.0    326  3.89  3.84  2.31
 0.23       Good      E      VS1   56.9   65.0    327  4.05  4.07  2.31
 0.29    Premium      I      VS2   62.4   58.0    334  4.20  4.23  2.63
 0.31       Good      J      SI2   63.3   58.0    335  4.34  4.35  2.75
 0.24  Very Good      J     VVS2   62.8   57.0    336  3.94  3.96  2.48
 0.24  Very Good      I     VVS1   62.3   57.0    336  3.95  3.98  2.47
 0.26  Very Good      H      SI1   61.9   55.0    337  4.07  4.11  2.53
 0.22       Fair      E      VS2   65.1   61.0    337  3.87  3.78  2.49
 0.23  Very Good      H      VS1   59.4   61.0    338  4.00  4.05  2.39"""
    expected = pd.read_csv(
        io.BytesIO(expected_string),
        sep=r'\s{2,}', index_col=['cut', 'color', 'clarity'],
        header=0, engine='python'
    ).sort_index()
    # Middle level was written without a name; mirror that in the expectation.
    expected.index = expected.index.set_names(['cut', None, 'clarity'])

    table = _read_table(datadir / 'v0.7.1.some-named-index.parquet')
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_backwards_compatible_column_metadata_handling(datadir):
    """v0.7.1 column metadata is honoured for full and partial reads."""
    expected = pd.DataFrame(
        {'a': [1, 2, 3], 'b': [.1, .2, .3],
         'c': pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')})
    expected.index = pd.MultiIndex.from_arrays(
        [['a', 'b', 'c'],
         pd.date_range("2017-01-01", periods=3, tz='Europe/Brussels')],
        names=['index', None])

    path = datadir / 'v0.7.1.column-metadata-handling.parquet'
    table = _read_table(path)
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected)

    # Selecting a subset drops the (unselected) index levels.
    table = _read_table(path, columns=['a'])
    result = table.to_pandas()
    tm.assert_frame_equal(result, expected[['a']].reset_index(drop=True))
def _make_dataset_for_pickling(tempdir, N=100):
    """Build a small multi-row-group ParquetDataset with a _metadata file.

    Returns the opened ``pq.ParquetDataset`` for use by pickling tests.
    """
    path = tempdir / 'data.parquet'
    fs = LocalFileSystem.get_instance()
    df = pd.DataFrame({
        'index': np.arange(N),
        'values': np.random.randn(N)
    }, columns=['index', 'values'])

    table = pa.Table.from_pandas(df)

    num_groups = 3
    with pq.ParquetWriter(path, table.schema) as writer:
        for i in range(num_groups):
            writer.write_table(table)

    reader = pq.ParquetFile(path)
    assert reader.metadata.num_row_groups == num_groups

    metadata_path = tempdir / '_metadata'
    with fs.open(metadata_path, 'wb') as f:
        pq.write_metadata(table.schema, f)

    dataset = pq.ParquetDataset(tempdir, filesystem=fs)
    assert dataset.metadata_path == str(metadata_path)

    return dataset
@pytest.mark.pandas
@pytest.mark.parametrize('pickler', [
    pytest.param(pickle, id='builtin'),
    pytest.param(pytest.importorskip('cloudpickle'), id='cloudpickle')
])
def test_pickle_dataset(tempdir, datadir, pickler):
    """Datasets, metadata, schemas, columns and pieces all pickle-roundtrip."""
    def is_pickleable(obj):
        # Round-trips through the parametrized pickler and checks equality.
        return obj == pickler.loads(pickler.dumps(obj))

    dataset = _make_dataset_for_pickling(tempdir)

    assert is_pickleable(dataset)
    assert is_pickleable(dataset.metadata)
    assert is_pickleable(dataset.metadata.schema)
    assert len(dataset.metadata.schema)
    for column in dataset.metadata.schema:
        assert is_pickleable(column)

    for piece in dataset.pieces:
        assert is_pickleable(piece)
        metadata = piece.get_metadata()
        assert metadata.num_row_groups
        for i in range(metadata.num_row_groups):
            assert is_pickleable(metadata.row_group(i))
@pytest.mark.pandas
def test_decimal_roundtrip(tempdir):
    """Decimals of every valid (precision, scale) pair survive a roundtrip."""
    n_values = 10
    columns = {}
    for precision in range(1, 39):
        for scale in range(0, precision + 1):
            name = ('dec_precision_{:d}_scale_{:d}'
                    .format(precision, scale))
            # Re-seed per column so the data set is reproducible.
            with util.random_seed(0):
                columns[name] = [
                    util.randdecimal(precision, scale)
                    for _ in range(n_values)
                ]

    expected = pd.DataFrame(columns)
    out_path = str(tempdir / 'decimals.parquet')
    _write_table(pa.Table.from_pandas(expected), out_path)
    result = _read_table(out_path).to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
@pytest.mark.xfail(
    raises=pa.ArrowException, reason='Parquet does not support negative scale'
)
def test_decimal_roundtrip_negative_scale(tempdir):
    """Negative-scale decimals are expected to fail (xfail): unsupported."""
    expected = pd.DataFrame({'decimal_num': [decimal.Decimal('1.23E4')]})
    filename = tempdir / 'decimals.parquet'
    string_filename = str(filename)
    t = pa.Table.from_pandas(expected)
    _write_table(t, string_filename)
    result_table = _read_table(string_filename)
    result = result_table.to_pandas()
    tm.assert_frame_equal(result, expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj(tempdir):
    """ParquetWriter used as a context manager writes all appended tables."""
    df = _test_dataframe(100)
    df['unique_id'] = 0

    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()

    with pq.ParquetWriter(out, arrow_table.schema, version='2.0') as writer:

        frames = []
        for i in range(10):
            df['unique_id'] = i
            arrow_table = pa.Table.from_pandas(df, preserve_index=False)
            writer.write_table(arrow_table)

            frames.append(df.copy())

    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))

    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_parquet_writer_context_obj_with_exception(tempdir):
    """An exception inside the writer context still leaves a readable file."""
    df = _test_dataframe(100)
    df['unique_id'] = 0

    arrow_table = pa.Table.from_pandas(df, preserve_index=False)
    out = pa.BufferOutputStream()
    error_text = 'Artificial Error'

    try:
        with pq.ParquetWriter(out,
                              arrow_table.schema,
                              version='2.0') as writer:

            frames = []
            for i in range(10):
                df['unique_id'] = i
                arrow_table = pa.Table.from_pandas(df, preserve_index=False)
                writer.write_table(arrow_table)
                frames.append(df.copy())
                # Abort mid-stream: __exit__ must still finalize the file.
                if i == 5:
                    raise ValueError(error_text)
    except Exception as e:
        assert str(e) == error_text

    buf = out.getvalue()
    result = _read_table(pa.BufferReader(buf))

    expected = pd.concat(frames, ignore_index=True)
    tm.assert_frame_equal(result.to_pandas(), expected)
@pytest.mark.pandas
def test_zlib_compression_bug():
    """ARROW-3514: gzip write used to fail with 'output buffer too small'."""
    tbl = pa.Table.from_arrays([pa.array(['abc', 'def'])], ['some_col'])
    sink = io.BytesIO()
    pq.write_table(tbl, sink, compression='gzip')

    sink.seek(0)
    roundtrip = pq.read_table(sink)
    tm.assert_frame_equal(roundtrip.to_pandas(), tbl.to_pandas())
@pytest.mark.pandas
def test_merging_parquet_tables_with_different_pandas_metadata(tempdir):
    """ARROW-3728: tables differing only in pandas metadata can be appended.

    Fix: the ParquetWriter was never closed, leaking the open file handle and
    leaving the parquet footer unwritten; use the context manager so the file
    is finalized.
    """
    # schema for the merged file; both inputs must match it sans metadata
    schema = pa.schema([
        pa.field('int', pa.int16()),
        pa.field('float', pa.float32()),
        pa.field('string', pa.string())
    ])
    df1 = pd.DataFrame({
        'int': np.arange(3, dtype=np.uint8),
        'float': np.arange(3, dtype=np.float32),
        'string': ['ABBA', 'EDDA', 'ACDC']
    })
    df2 = pd.DataFrame({
        'int': [4, 5],
        'float': [1.1, None],
        'string': [None, None]
    })
    table1 = pa.Table.from_pandas(df1, schema=schema, preserve_index=False)
    table2 = pa.Table.from_pandas(df2, schema=schema, preserve_index=False)

    # Schemas differ only in (pandas) metadata.
    assert not table1.schema.equals(table2.schema)
    assert table1.schema.equals(table2.schema, check_metadata=False)

    with pq.ParquetWriter(tempdir / 'merged.parquet', schema=schema) as writer:
        writer.write_table(table1)
        writer.write_table(table2)
def test_empty_row_groups(tempdir):
    """ARROW-3020: zero-row row groups are written and read back as-is."""
    # ARROW-3020
    table = pa.Table.from_arrays([pa.array([], type='int32')], ['f0'])

    path = tempdir / 'empty_row_groups.parquet'

    num_groups = 3
    with pq.ParquetWriter(path, table.schema) as writer:
        for i in range(num_groups):
            writer.write_table(table)

    reader = pq.ParquetFile(path)
    assert reader.metadata.num_row_groups == num_groups

    for i in range(num_groups):
        assert reader.read_row_group(i).equals(table)
@pytest.mark.pandas
def test_parquet_writer_with_caller_provided_filesystem():
    """ParquetWriter opens its output through a user-supplied filesystem.

    Fix: the final assertion compared the error message against
    ``str(err_info)`` — pytest's ExceptionInfo wrapper, which includes
    location/traceback text — so it could never equal the plain message.
    Compare against ``str(err_info.value)`` instead.
    """
    out = pa.BufferOutputStream()

    class CustomFS(FileSystem):
        # Records the path/mode it was asked to open and hands back ``out``.
        def __init__(self):
            self.path = None
            self.mode = None

        def open(self, path, mode='rb'):
            self.path = path
            self.mode = mode
            return out

    fs = CustomFS()
    fname = 'expected_fname.parquet'
    df = _test_dataframe(100)
    table = pa.Table.from_pandas(df, preserve_index=False)

    with pq.ParquetWriter(fname, table.schema, filesystem=fs, version='2.0') \
            as writer:
        writer.write_table(table)

    assert fs.path == fname
    assert fs.mode == 'wb'
    assert out.closed

    buf = out.getvalue()
    table_read = _read_table(pa.BufferReader(buf))
    df_read = table_read.to_pandas()
    tm.assert_frame_equal(df_read, df)

    # Should raise ValueError when filesystem is passed with file-like object
    with pytest.raises(ValueError) as err_info:
        pq.ParquetWriter(pa.BufferOutputStream(), table.schema, filesystem=fs)
        expected_msg = ("filesystem passed but where is file-like, so"
                        " there is nothing to open with filesystem.")
        assert str(err_info.value) == expected_msg
def test_writing_empty_lists():
    """ARROW-2591: writing all-empty list columns must not segfault."""
    # ARROW-2591: [Python] Segmentation fault issue in pq.write_table
    arr1 = pa.array([[], []], pa.list_(pa.int32()))
    table = pa.Table.from_arrays([arr1], ['list(int32)'])
    _check_roundtrip(table)
def test_write_nested_zero_length_array_chunk_failure():
    """ARROW-3792: chunked nested columns with a zero-length chunk round-trip."""
    # Bug report in ARROW-3792
    cols = OrderedDict(
        int32=pa.int32(),
        list_string=pa.list_(pa.string())
    )
    data = [[], [OrderedDict(int32=1, list_string=('G',)), ]]

    # This produces a table with a column like
    # <Column name='list_string' type=ListType(list<item: string>)>
    # [
    #   [],
    #   [
    #     [
    #       "G"
    #     ]
    #   ]
    # ]
    #
    # Each column is a ChunkedArray with 2 elements
    my_arrays = [pa.array(batch, type=pa.struct(cols)).flatten()
                 for batch in data]
    my_batches = [pa.RecordBatch.from_arrays(batch, schema=pa.schema(cols))
                  for batch in my_arrays]
    tbl = pa.Table.from_batches(my_batches, pa.schema(cols))
    _check_roundtrip(tbl)
@pytest.mark.pandas
def test_partitioned_dataset(tempdir):
    """ARROW-3208: re-writing a read partitioned dataset must not segfault."""
    # ARROW-3208: Segmentation fault when reading a Parquet partitioned dataset
    # to a Parquet file
    path = tempdir / "ARROW-3208"
    df = pd.DataFrame({
        'one': [-1, 10, 2.5, 100, 1000, 1, 29.2],
        'two': [-1, 10, 2, 100, 1000, 1, 11],
        'three': [0, 0, 0, 0, 0, 0, 0]
    })
    table = pa.Table.from_pandas(df)
    pq.write_to_dataset(table, root_path=str(path),
                        partition_cols=['one', 'two'])
    table = pq.ParquetDataset(path).read()
    pq.write_table(table, path / "output.parquet")
def test_read_column_invalid_index():
    """read_column returns data for valid indices and raises for invalid ones."""
    tbl = pa.table([pa.array([4, 5]), pa.array(["foo", "bar"])],
                   names=['ints', 'strs'])
    sink = pa.BufferOutputStream()
    pq.write_table(tbl, sink)
    pf = pq.ParquetFile(sink.getvalue())

    assert pf.reader.read_column(0).to_pylist() == [4, 5]
    assert pf.reader.read_column(1).to_pylist() == ["foo", "bar"]

    # Out-of-range on either side must raise.
    for bad_index in (-1, 2):
        with pytest.raises((ValueError, IndexError)):
            pf.reader.read_column(bad_index)
@pytest.mark.pandas
def test_direct_read_dictionary():
    """ARROW-3325: read_dictionary=['f0'] yields a dictionary-encoded column."""
    # ARROW-3325
    repeats = 10
    nunique = 5

    data = [
        [tm.rands(10) for i in range(nunique)] * repeats,

    ]
    table = pa.table(data, names=['f0'])

    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()

    result = pq.read_table(pa.BufferReader(contents),
                           read_dictionary=['f0'])

    # Compute dictionary-encoded subfield
    expected = pa.table([table[0].dictionary_encode()], names=['f0'])
    assert result.equals(expected)
@pytest.mark.pandas
def test_dataset_read_dictionary(tempdir):
    """read_dictionary works across a multi-file dataset (chunk order free)."""
    path = tempdir / "ARROW-3325-dataset"
    t1 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
    t2 = pa.table([[tm.rands(10) for i in range(5)] * 10], names=['f0'])
    pq.write_to_dataset(t1, root_path=str(path))
    pq.write_to_dataset(t2, root_path=str(path))

    result = pq.ParquetDataset(path, read_dictionary=['f0']).read()

    # The order of the chunks is non-deterministic
    ex_chunks = [t1[0].chunk(0).dictionary_encode(),
                 t2[0].chunk(0).dictionary_encode()]

    assert result[0].num_chunks == 2
    c0, c1 = result[0].chunk(0), result[0].chunk(1)
    if c0.equals(ex_chunks[0]):
        assert c1.equals(ex_chunks[1])
    else:
        assert c0.equals(ex_chunks[1])
        assert c1.equals(ex_chunks[0])
@pytest.mark.pandas
def test_direct_read_dictionary_subfield():
    """Dictionary-reading a nested list item field ('f0.list.item') works."""
    repeats = 10
    nunique = 5

    data = [
        [[tm.rands(10)] for i in range(nunique)] * repeats,
    ]
    table = pa.table(data, names=['f0'])

    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents),
                           read_dictionary=['f0.list.item'])

    # Rebuild the expected ListArray with dictionary-encoded values.
    arr = pa.array(data[0])
    values_as_dict = arr.values.dictionary_encode()

    inner_indices = values_as_dict.indices.cast('int32')
    new_values = pa.DictionaryArray.from_arrays(inner_indices,
                                                values_as_dict.dictionary)

    offsets = pa.array(range(51), type='int32')
    expected_arr = pa.ListArray.from_arrays(offsets, new_values)
    expected = pa.table([expected_arr], names=['f0'])

    assert result.equals(expected)
    assert result[0].num_chunks == 1
@pytest.mark.pandas
def test_dataset_metadata(tempdir):
    """metadata_collector entries match metadata re-read from the pieces."""
    path = tempdir / "ARROW-1983-dataset"

    # create and write a test dataset
    df = pd.DataFrame({
        'one': [1, 2, 3],
        'two': [-1, -2, -3],
        'three': [[1, 2], [2, 3], [3, 4]],
    })
    table = pa.Table.from_pandas(df)

    metadata_list = []
    pq.write_to_dataset(table, root_path=str(path),
                        partition_cols=['one', 'two'],
                        metadata_collector=metadata_list)

    # open the dataset and collect metadata from pieces:
    dataset = pq.ParquetDataset(path)
    metadata_list2 = [p.get_metadata() for p in dataset.pieces]

    # compare metadata list content:
    assert len(metadata_list) == len(metadata_list2)
    for md, md2 in zip(metadata_list, metadata_list2):
        d = md.to_dict()
        d2 = md2.to_dict()
        # serialized_size is initialized in the reader:
        assert d.pop('serialized_size') == 0
        assert d2.pop('serialized_size') > 0
        assert d == d2
def test_parquet_file_too_small(tempdir):
    """Reading empty or truncated files raises with a size-mentioning error."""
    path = str(tempdir / "test.parquet")
    with pytest.raises(pa.ArrowIOError,
                       match='size is 0 bytes'):
        with open(path, 'wb') as f:
            pass
        pq.read_table(path)

    with pytest.raises(pa.ArrowIOError,
                       match='size is 4 bytes'):
        with open(path, 'wb') as f:
            f.write(b'ffff')
        pq.read_table(path)
@pytest.mark.pandas
def test_categorical_index_survives_roundtrip():
    """ARROW-3652: a CategoricalIndex stays categorical through parquet."""
    # ARROW-3652, addressed by ARROW-3246
    df = pd.DataFrame([['a', 'b'], ['c', 'd']], columns=['c1', 'c2'])
    df['c1'] = df['c1'].astype('category')
    df = df.set_index(['c1'])

    table = pa.Table.from_pandas(df)
    bos = pa.BufferOutputStream()
    pq.write_table(table, bos)
    ref_df = pq.read_pandas(bos.getvalue()).to_pandas()
    assert isinstance(ref_df.index, pd.CategoricalIndex)
    assert ref_df.index.equals(df.index)
@pytest.mark.pandas
def test_categorical_order_survives_roundtrip():
    """ARROW-6302: ordered categoricals keep ordering through parquet."""
    # ARROW-6302
    df = pd.DataFrame({"a": pd.Categorical(
        ["a", "b", "c", "a"], categories=["b", "c", "d"], ordered=True)})

    table = pa.Table.from_pandas(df)
    bos = pa.BufferOutputStream()
    pq.write_table(table, bos)

    contents = bos.getvalue()
    result = pq.read_pandas(contents).to_pandas()

    tm.assert_frame_equal(result, df)
def test_dictionary_array_automatically_read():
    """ARROW-3246: large dictionary columns read back dictionary-encoded."""
    # ARROW-3246

    # Make a large dictionary, a little over 4MB of data
    dict_length = 4000
    dict_values = pa.array([('x' * 1000 + '_{}'.format(i))
                            for i in range(dict_length)])

    num_chunks = 10
    chunk_size = 100
    chunks = []
    for i in range(num_chunks):
        indices = np.random.randint(0, dict_length,
                                    size=chunk_size).astype(np.int32)
        chunks.append(pa.DictionaryArray.from_arrays(pa.array(indices),
                                                     dict_values))

    table = pa.table([pa.chunked_array(chunks)], names=['f0'])

    bio = pa.BufferOutputStream()
    pq.write_table(table, bio)
    contents = bio.getvalue()
    result = pq.read_table(pa.BufferReader(contents))

    assert result.equals(table)

    # The only key in the metadata was the Arrow schema key
    assert result.schema.metadata is None
@pytest.mark.pandas
def test_pandas_categorical_na_type_row_groups():
    """ARROW-5085: all-null categoricals write across row groups cleanly."""
    # ARROW-5085
    df = pd.DataFrame({"col": [None] * 100, "int": [1.0] * 100})
    df_category = df.astype({"col": "category", "int": "category"})
    table = pa.Table.from_pandas(df)
    table_cat = pa.Table.from_pandas(df_category)
    buf = pa.BufferOutputStream()

    # it works
    pq.write_table(table_cat, buf, version="2.0", chunk_size=10)
    result = pq.read_table(buf.getvalue())

    # Result is non-categorical
    assert result[0].equals(table[0])
    assert result[1].equals(table[1])
@pytest.mark.pandas
def test_pandas_categorical_roundtrip():
    """ARROW-5480: categoricals with nulls and unobserved categories survive."""
    # ARROW-5480, this was enabled by ARROW-3246

    # Have one of the categories unobserved and include a null (-1)
    codes = np.array([2, 0, 0, 2, 0, -1, 2], dtype='int32')
    categories = ['foo', 'bar', 'baz']
    df = pd.DataFrame({'x': pd.Categorical.from_codes(
        codes, categories=categories)})

    buf = pa.BufferOutputStream()
    pq.write_table(pa.table(df), buf)

    result = pq.read_table(buf.getvalue()).to_pandas()
    assert result.x.dtype == 'category'
    assert (result.x.cat.categories == categories).all()
    tm.assert_frame_equal(result, df)
@pytest.mark.pandas
def test_multi_dataset_metadata(tempdir):
    """Merged _metadata across multiple files matches the per-file metadata."""
    filenames = ["ARROW-1983-dataset.0", "ARROW-1983-dataset.1"]
    metapath = str(tempdir / "_metadata")

    # create a test dataset
    df = pd.DataFrame({
        'one': [1, 2, 3],
        'two': [-1, -2, -3],
        'three': [[1, 2], [2, 3], [3, 4]],
    })
    table = pa.Table.from_pandas(df)

    # write dataset twice and collect/merge metadata
    _meta = None
    for filename in filenames:
        meta = []
        pq.write_table(table, str(tempdir / filename),
                       metadata_collector=meta)
        meta[0].set_file_path(filename)
        if _meta is None:
            _meta = meta[0]
        else:
            _meta.append_row_groups(meta[0])

    # Write merged metadata-only file
    with open(metapath, "wb") as f:
        _meta.write_metadata_file(f)

    # Read back the metadata
    meta = pq.read_metadata(metapath)
    md = meta.to_dict()
    _md = _meta.to_dict()
    for key in _md:
        if key != 'serialized_size':
            assert _md[key] == md[key]
    assert _md['num_columns'] == 3
    assert _md['num_rows'] == 6
    assert _md['num_row_groups'] == 2
    # serialized_size is only populated by the reader.
    assert _md['serialized_size'] == 0
    assert md['serialized_size'] > 0
@pytest.mark.pandas
def test_filter_before_validate_schema(tempdir):
    """ARROW-4076: partition filters are applied before schema validation."""
    # ARROW-4076 apply filter before schema validation
    # to avoid checking unneeded schemas

    # create partitioned dataset with mismatching schemas which would
    # otherwise raise if first validation all schemas
    dir1 = tempdir / 'A=0'
    dir1.mkdir()
    table1 = pa.Table.from_pandas(pd.DataFrame({'B': [1, 2, 3]}))
    pq.write_table(table1, dir1 / 'data.parquet')

    dir2 = tempdir / 'A=1'
    dir2.mkdir()
    table2 = pa.Table.from_pandas(pd.DataFrame({'B': ['a', 'b', 'c']}))
    pq.write_table(table2, dir2 / 'data.parquet')

    # read single file using filter
    table = pq.read_table(tempdir, filters=[[('A', '==', 0)]])
    assert table.column('B').equals(pa.chunked_array([[1, 2, 3]]))
@pytest.mark.pandas
@pytest.mark.fastparquet
@pytest.mark.filterwarnings("ignore:RangeIndex:DeprecationWarning")
def test_fastparquet_cross_compatibility(tempdir):
    """Files written by Arrow read in fastparquet, and vice versa."""
    fp = pytest.importorskip('fastparquet')

    df = pd.DataFrame(
        {
            "a": list("abc"),
            "b": list(range(1, 4)),
            "c": np.arange(4.0, 7.0, dtype="float64"),
            "d": [True, False, True],
            "e": pd.date_range("20130101", periods=3),
            "f": pd.Categorical(["a", "b", "a"]),
            # fastparquet writes list as BYTE_ARRAY JSON, so no roundtrip
            # "g": [[1, 2], None, [1, 2, 3]],
        }
    )
    table = pa.table(df)

    # Arrow -> fastparquet
    file_arrow = str(tempdir / "cross_compat_arrow.parquet")
    pq.write_table(table, file_arrow, compression=None)

    fp_file = fp.ParquetFile(file_arrow)
    df_fp = fp_file.to_pandas()
    tm.assert_frame_equal(df, df_fp)

    # Fastparquet -> arrow
    file_fastparquet = str(tempdir / "cross_compat_fastparquet.parquet")
    fp.write(file_fastparquet, df)

    table_fp = pq.read_pandas(file_fastparquet)
    # for fastparquet written file, categoricals comes back as strings
    # (no arrow schema in parquet metadata)
    df['f'] = df['f'].astype(object)
    tm.assert_frame_equal(table_fp.to_pandas(), df)
| true | true |
1c32496c17e9581c86d34b7887b8f58526c8d8c6 | 8,188 | py | Python | txaws/client/discover/tests/test_command.py | antisvin/txAWS | 752fb1ac243b56f8eef8b5773faf7dc475e0a2d3 | [
"MIT"
] | null | null | null | txaws/client/discover/tests/test_command.py | antisvin/txAWS | 752fb1ac243b56f8eef8b5773faf7dc475e0a2d3 | [
"MIT"
] | null | null | null | txaws/client/discover/tests/test_command.py | antisvin/txAWS | 752fb1ac243b56f8eef8b5773faf7dc475e0a2d3 | [
"MIT"
] | null | null | null | # Copyright (C) 2010 Jamu Kakar <jkakar@kakar.ca>
# Licenced under the txaws licence available at /LICENSE in the txaws source.
"""Unit tests for L{Command}."""
from cStringIO import StringIO
from twisted.internet.defer import succeed, fail
from twisted.web.error import Error
from txaws.client.discover.command import Command
from txaws.ec2.client import Query
from txaws.testing.base import TXAWSTestCase
class FakeHTTPClient(object):
    """Minimal stand-in for an HTTP client: just records status and URL."""

    def __init__(self, status, url):
        self.status = status
        self.url = url
class CommandTestCase(TXAWSTestCase):
    """Tests for L{Command}: URL construction, output formatting and errors.

    All expected signatures are hard-coded against a fixed timestamp
    (2010-06-04 23:40:00) injected by C{query_factory}.
    """

    def prepare_command(self, response, status, action, parameters={},
                        get_page=None, error=None):
        """Prepare a L{Command} for testing."""
        self.url = None
        self.method = None
        self.error = error
        self.response = response
        self.status = status
        self.output = StringIO()
        self.query = None
        if get_page is None:
            get_page = self.get_page
        self.get_page_function = get_page
        self.command = Command("key", "secret", "endpoint", action, parameters,
                               self.output, self.query_factory)

    def query_factory(self, other_params=None, time_tuple=None,
                      api_version=None, *args, **kwargs):
        """
        Create a query with a hard-coded time to generate a fake response.
        """
        # Deliberately override the caller-supplied time so that the
        # request signatures asserted below are deterministic.
        time_tuple = (2010, 6, 4, 23, 40, 0, 0, 0, 0)
        self.query = Query(other_params, time_tuple, api_version,
                           *args, **kwargs)
        self.query.get_page = self.get_page_function
        return self.query

    def get_page(self, url, method=None, timeout=0):
        """Fake C{get_page} method simulates a successful request."""
        self.url = url
        self.method = method
        self.query.client = FakeHTTPClient(self.status, url)
        return succeed(self.response)

    def get_error_page(self, url, method=None, timeout=0):
        """Fake C{get_page} method simulates an error."""
        self.url = url
        self.method = method
        self.query.client = FakeHTTPClient(self.status, url)
        return fail(self.error or Exception(self.response))

    def test_run(self):
        """
        When a method is invoked its HTTP status code and response text is
        written to the output stream.
        """
        self.prepare_command("The response", 200, "DescribeRegions")

        def check(result):
            url = (
                "http://endpoint?AWSAccessKeyId=key&"
                "Action=DescribeRegions&"
                "Signature=7fyxNidMkL%2B85udGOxqm%2BgM2o1gLyeLG2a0UOmfBOXQ%3D&"
                "SignatureMethod=HmacSHA256&SignatureVersion=2&"
                "Timestamp=2010-06-04T23%3A40%3A00Z&Version=2012-08-15")
            self.assertEqual("GET", self.method)
            self.assertEqual(url, self.url)
            self.assertEqual("URL: %s\n"
                             "\n"
                             "HTTP status code: 200\n"
                             "\n"
                             "The response\n" % url,
                             self.output.getvalue())

        deferred = self.command.run()
        deferred.addCallback(check)
        return deferred

    def test_run_with_parameters(self):
        """Extra method parameters are included in the request."""
        self.prepare_command("The response", 200, "DescribeRegions",
                             {"RegionName.0": "us-west-1"})

        def check(result):
            url = (
                "http://endpoint?AWSAccessKeyId=key&"
                "Action=DescribeRegions&RegionName.0=us-west-1&"
                "Signature=FL4JjDKbWdg531q1KKUPild%2BvyqspA5wxSmOeWXWsJI%3D&"
                "SignatureMethod=HmacSHA256&SignatureVersion=2&"
                "Timestamp=2010-06-04T23%3A40%3A00Z&Version=2012-08-15")
            self.assertEqual("GET", self.method)
            self.assertEqual(url, self.url)
            self.assertEqual("URL: %s\n"
                             "\n"
                             "HTTP status code: 200\n"
                             "\n"
                             "The response\n" % url,
                             self.output.getvalue())

        deferred = self.command.run()
        deferred.addCallback(check)
        return deferred

    def test_run_with_error(self):
        """
        If an error message is returned by the backend cloud, it will be
        written to the output stream.
        """
        self.prepare_command("The error response", 400, "DescribeRegions",
                             {"RegionName.0": "us-west-1"},
                             self.get_error_page)

        def check(result):
            url = (
                "http://endpoint?AWSAccessKeyId=key&"
                "Action=DescribeRegions&RegionName.0=us-west-1&"
                "Signature=FL4JjDKbWdg531q1KKUPild%2BvyqspA5wxSmOeWXWsJI%3D&"
                "SignatureMethod=HmacSHA256&SignatureVersion=2&"
                "Timestamp=2010-06-04T23%3A40%3A00Z&Version=2012-08-15")
            self.assertEqual("GET", self.method)
            self.assertEqual(url, self.url)
            self.assertEqual("URL: %s\n"
                             "\n"
                             "HTTP status code: 400\n"
                             "\n"
                             "The error response\n" % url,
                             self.output.getvalue())

        deferred = self.command.run()
        return self.assertFailure(deferred, Exception).addErrback(check)

    def test_run_with_error_strips_non_response_text(self):
        """
        The builtin L{AWSError} exception adds 'Error message: ' to beginning
        of the text retuned by the backend cloud. This is stripped when the
        message is written to the output stream.
        """
        self.prepare_command("Error Message: The error response", 400,
                             "DescribeRegions", {"RegionName.0": "us-west-1"},
                             self.get_error_page)

        def check(result):
            url = (
                "http://endpoint?AWSAccessKeyId=key&"
                "Action=DescribeRegions&RegionName.0=us-west-1&"
                "Signature=P6C7cQJ7j93uIJyv2dTbpQG3EI7ArGBJT%2FzVH%2BDFhyY%3D&"
                "SignatureMethod=HmacSHA256&SignatureVersion=2&"
                "Timestamp=2010-06-04T23%3A40%3A00Z&Version=2009-11-30")
            self.assertEqual("GET", self.method)
            self.assertEqual(url, self.url)
            self.assertEqual("URL: %s\n"
                             "\n"
                             "HTTP status code: 400\n"
                             "\n"
                             "The error response\n" % url,
                             self.output.getvalue())

        # NOTE(review): check is attached as an *errback*, so if run()
        # unexpectedly succeeds these assertions are silently skipped —
        # TODO confirm whether assertFailure was intended here as well.
        deferred = self.command.run()
        deferred.addErrback(check)
        return deferred

    def test_run_with_error_payload(self):
        """
        If the returned HTTP error contains a payload, it's printed out.
        """
        self.prepare_command("Bad Request", 400,
                             "DescribeRegions", {"RegionName.0": "us-west-1"},
                             self.get_error_page, Error(400, None, "bar"))

        def check(result):
            url = (
                "http://endpoint?AWSAccessKeyId=key&"
                "Action=DescribeRegions&RegionName.0=us-west-1&"
                "Signature=FL4JjDKbWdg531q1KKUPild%2BvyqspA5wxSmOeWXWsJI%3D&"
                "SignatureMethod=HmacSHA256&SignatureVersion=2&"
                "Timestamp=2010-06-04T23%3A40%3A00Z&Version=2012-08-15")
            self.assertEqual("GET", self.method)
            self.assertEqual(url, self.url)
            self.assertEqual("URL: %s\n"
                             "\n"
                             "HTTP status code: 400\n"
                             "\n"
                             "400 Bad Request\n"
                             "\n"
                             "bar\n" % url,
                             self.output.getvalue())

        deferred = self.command.run()
        deferred.addCallback(check)
        return deferred
| 39.941463 | 79 | 0.541402 |
from cStringIO import StringIO
from twisted.internet.defer import succeed, fail
from twisted.web.error import Error
from txaws.client.discover.command import Command
from txaws.ec2.client import Query
from txaws.testing.base import TXAWSTestCase
class FakeHTTPClient(object):
    """Minimal stand-in for an HTTP client: just records status and URL."""
    def __init__(self, status, url):
        self.status = status
        self.url = url
class CommandTestCase(TXAWSTestCase):
    """End-to-end tests for ``Command.run`` against a canned HTTP layer.

    ``Command``, ``StringIO``, ``succeed``, ``fail`` and ``Error`` are
    imported above this chunk (presumably from txaws/twisted; confirm at
    the top of the file).  Every expected URL below embeds an exact
    HMAC-SHA256 signature computed for the frozen timestamp set in
    ``query_factory``, so the request parameters must not change.
    """

    def prepare_command(self, response, status, action, parameters={},
                        get_page=None, error=None):
        """Build a ``Command`` whose ``get_page`` returns canned data.

        NOTE(review): ``parameters={}`` is a mutable default; it is only
        passed through, never mutated here, but ``None`` would be safer.
        """
        self.url = None
        self.method = None
        self.error = error
        self.response = response
        self.status = status
        self.output = StringIO()
        self.query = None
        if get_page is None:
            get_page = self.get_page
        self.get_page_function = get_page
        self.command = Command("key", "secret", "endpoint", action, parameters,
                               self.output, self.query_factory)

    def query_factory(self, other_params=None, time_tuple=None,
                      api_version=None, *args, **kwargs):
        """Create a ``Query`` with a frozen timestamp.

        Freezing time makes the request signature deterministic: every
        expected URL in the tests embeds 2010-06-04T23:40:00Z.
        """
        time_tuple = (2010, 6, 4, 23, 40, 0, 0, 0, 0)
        self.query = Query(other_params, time_tuple, api_version,
                           *args, **kwargs)
        self.query.get_page = self.get_page_function
        return self.query

    def get_page(self, url, method=None, timeout=0):
        """Fake a successful fetch: record the request, return the canned body."""
        self.url = url
        self.method = method
        self.query.client = FakeHTTPClient(self.status, url)
        return succeed(self.response)

    def get_error_page(self, url, method=None, timeout=0):
        """Fake a failed fetch: record the request, fail with the canned error."""
        self.url = url
        self.method = method
        self.query.client = FakeHTTPClient(self.status, url)
        return fail(self.error or Exception(self.response))

    def test_run(self):
        """A successful request prints URL, status code and body."""
        self.prepare_command("The response", 200, "DescribeRegions")

        def check(result):
            url = (
                "http://endpoint?AWSAccessKeyId=key&"
                "Action=DescribeRegions&"
                "Signature=7fyxNidMkL%2B85udGOxqm%2BgM2o1gLyeLG2a0UOmfBOXQ%3D&"
                "SignatureMethod=HmacSHA256&SignatureVersion=2&"
                "Timestamp=2010-06-04T23%3A40%3A00Z&Version=2012-08-15")
            self.assertEqual("GET", self.method)
            self.assertEqual(url, self.url)
            self.assertEqual("URL: %s\n"
                             "\n"
                             "HTTP status code: 200\n"
                             "\n"
                             "The response\n" % url,
                             self.output.getvalue())

        deferred = self.command.run()
        deferred.addCallback(check)
        return deferred

    def test_run_with_parameters(self):
        """Extra action parameters appear in the signed query string."""
        self.prepare_command("The response", 200, "DescribeRegions",
                             {"RegionName.0": "us-west-1"})

        def check(result):
            url = (
                "http://endpoint?AWSAccessKeyId=key&"
                "Action=DescribeRegions&RegionName.0=us-west-1&"
                "Signature=FL4JjDKbWdg531q1KKUPild%2BvyqspA5wxSmOeWXWsJI%3D&"
                "SignatureMethod=HmacSHA256&SignatureVersion=2&"
                "Timestamp=2010-06-04T23%3A40%3A00Z&Version=2012-08-15")
            self.assertEqual("GET", self.method)
            self.assertEqual(url, self.url)
            self.assertEqual("URL: %s\n"
                             "\n"
                             "HTTP status code: 200\n"
                             "\n"
                             "The response\n" % url,
                             self.output.getvalue())

        deferred = self.command.run()
        deferred.addCallback(check)
        return deferred

    def test_run_with_error(self):
        """A failing request still reports URL, status code and error body."""
        self.prepare_command("The error response", 400, "DescribeRegions",
                             {"RegionName.0": "us-west-1"},
                             self.get_error_page)

        def check(result):
            url = (
                "http://endpoint?AWSAccessKeyId=key&"
                "Action=DescribeRegions&RegionName.0=us-west-1&"
                "Signature=FL4JjDKbWdg531q1KKUPild%2BvyqspA5wxSmOeWXWsJI%3D&"
                "SignatureMethod=HmacSHA256&SignatureVersion=2&"
                "Timestamp=2010-06-04T23%3A40%3A00Z&Version=2012-08-15")
            self.assertEqual("GET", self.method)
            self.assertEqual(url, self.url)
            self.assertEqual("URL: %s\n"
                             "\n"
                             "HTTP status code: 400\n"
                             "\n"
                             "The error response\n" % url,
                             self.output.getvalue())

        deferred = self.command.run()
        # assertFailure consumes the expected failure; check runs afterwards.
        return self.assertFailure(deferred, Exception).addErrback(check)

    def test_run_with_error_strips_non_response_text(self):
        """The 'Error Message:' prefix is stripped from the reported body.

        NOTE(review): this expectation uses API version 2009-11-30 (hence a
        different signature) unlike the other tests -- presumably matching
        the version configured elsewhere for this path; confirm.
        """
        self.prepare_command("Error Message: The error response", 400,
                             "DescribeRegions", {"RegionName.0": "us-west-1"},
                             self.get_error_page)

        def check(result):
            url = (
                "http://endpoint?AWSAccessKeyId=key&"
                "Action=DescribeRegions&RegionName.0=us-west-1&"
                "Signature=P6C7cQJ7j93uIJyv2dTbpQG3EI7ArGBJT%2FzVH%2BDFhyY%3D&"
                "SignatureMethod=HmacSHA256&SignatureVersion=2&"
                "Timestamp=2010-06-04T23%3A40%3A00Z&Version=2009-11-30")
            self.assertEqual("GET", self.method)
            self.assertEqual(url, self.url)
            self.assertEqual("URL: %s\n"
                             "\n"
                             "HTTP status code: 400\n"
                             "\n"
                             "The error response\n" % url,
                             self.output.getvalue())

        deferred = self.command.run()
        deferred.addErrback(check)
        return deferred

    def test_run_with_error_payload(self):
        """A structured ``Error`` is rendered with its status line and payload."""
        self.prepare_command("Bad Request", 400,
                             "DescribeRegions", {"RegionName.0": "us-west-1"},
                             self.get_error_page, Error(400, None, "bar"))

        def check(result):
            url = (
                "http://endpoint?AWSAccessKeyId=key&"
                "Action=DescribeRegions&RegionName.0=us-west-1&"
                "Signature=FL4JjDKbWdg531q1KKUPild%2BvyqspA5wxSmOeWXWsJI%3D&"
                "SignatureMethod=HmacSHA256&SignatureVersion=2&"
                "Timestamp=2010-06-04T23%3A40%3A00Z&Version=2012-08-15")
            self.assertEqual("GET", self.method)
            self.assertEqual(url, self.url)
            self.assertEqual("URL: %s\n"
                             "\n"
                             "HTTP status code: 400\n"
                             "\n"
                             "400 Bad Request\n"
                             "\n"
                             "bar\n" % url,
                             self.output.getvalue())

        deferred = self.command.run()
        deferred.addCallback(check)
        return deferred
| true | true |
1c32498b312c0dafb9282c89fa7abd0d5bad2428 | 5,246 | py | Python | src/wip/gesture_sensor.py | dooley-ch/microbit-grove | e25213de74d982b8ab49412e6f8b2dbe205ca932 | [
"MIT"
] | null | null | null | src/wip/gesture_sensor.py | dooley-ch/microbit-grove | e25213de74d982b8ab49412e6f8b2dbe205ca932 | [
"MIT"
] | null | null | null | src/wip/gesture_sensor.py | dooley-ch/microbit-grove | e25213de74d982b8ab49412e6f8b2dbe205ca932 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------------------
# Copyright James A. Dooley 2021.
#
# Distributed under the MIT License.
# (See accompanying file license.md file or copy at http://opensource.org/licenses/MIT)
#
# ------------------------------------------------------------------------------------------
from microbit import sleep, i2c, display, button_b
_DEFAULT_ADDRESS = 0x73
#Register Bank select
_PAJ_BANK_SELECT = 0xEF
#Register Bank 0
_PAJ_SUSPEND = 0x03
_PAJ_INT_FLAG1_MASK = 0x41
_PAJ_INT_FLAG2_MASK = 0x42
_PAJ_INT_FLAG1 = 0x43
_PAJ_INT_FLAG2 = 0x44
_PAJ_STATE = 0x45
_PAJ_PS_HIGH_THRESHOLD = 0x69
_PAJ_PS_LOW_THRESHOLD = 0x6A
_PAJ_PS_APPROACH_STATE = 0x6B
_PAJ_PS_DATA = 0x6C
_PAJ_OBJ_BRIGHTNESS = 0xB0
_PAJ_OBJ_SIZE_L = 0xB1
_PAJ_OBJ_SIZE_H = 0xB2
#Register Bank 1
_PAJ_PS_GAIN = 0x44
_PAJ_IDLE_S1_STEP_L = 0x67
_PAJ_IDLE_S1_STEP_H = 0x68
_PAJ_IDLE_S2_STEP_L = 0x69
_PAJ_IDLE_S2_STEP_H = 0x6A
_PAJ_OPTOS1_TIME_L = 0x6B
_PAJ_OPTOS2_TIME_H = 0x6C
_PAJ_S1TOS2_TIME_L = 0x6D
_PAJ_S1TOS2_TIME_H = 0x6E
_PAJ_EN = 0x72
#Gesture detection interrupt flag
_PAJ_UP = 0x01
_PAJ_DOWN = 0x02
_PAJ_LEFT = 0x04
_PAJ_RIGHT = 0x08
_PAJ_FORWARD = 0x10
_PAJ_BACKWARD = 0x20
_PAJ_CLOCKWISE = 0x40
_PAJ_COUNT_CLOCKWISE = 0x80
_PAJ_WAVE = 0x100
_INITIALIZE_REGISTER = (
(0xEF,0x00),
(0x37,0x07),
(0x38,0x17),
(0x39,0x06),
(0x41,0x00),
(0x42,0x00),
(0x46,0x2D),
(0x47,0x0F),
(0x48,0x3C),
(0x49,0x00),
(0x4A,0x1E),
(0x4C,0x20),
(0x51,0x10),
(0x5E,0x10),
(0x60,0x27),
(0x80,0x42),
(0x81,0x44),
(0x82,0x04),
(0x8B,0x01),
(0x90,0x06),
(0x95,0x0A),
(0x96,0x0C),
(0x97,0x05),
(0x9A,0x14),
(0x9C,0x3F),
(0xA5,0x19),
(0xCC,0x19),
(0xCD,0x0B),
(0xCE,0x13),
(0xCF,0x64),
(0xD0,0x21),
(0xEF,0x01),
(0x02,0x0F),
(0x03,0x10),
(0x04,0x02),
(0x25,0x01),
(0x27,0x39),
(0x28,0x7F),
(0x29,0x08),
(0x3E,0xFF),
(0x5E,0x3D),
(0x65,0x96),
(0x67,0x97),
(0x69,0xCD),
(0x6A,0x01),
(0x6D,0x2C),
(0x6E,0x01),
(0x72,0x01),
(0x73,0x35),
(0x74,0x00),
(0x77,0x01),
)
_INITIALIZE_GESTURE = (
(0xEF,0x00),
(0x41,0x00),
(0x42,0x00),
(0xEF,0x00),
(0x48,0x3C),
(0x49,0x00),
(0x51,0x10),
(0x83,0x20),
(0x9F,0xF9),
(0xEF,0x01),
(0x01,0x1E),
(0x02,0x0F),
(0x03,0x10),
(0x04,0x02),
(0x41,0x40),
(0x43,0x30),
(0x65,0x96),
(0x66,0x00),
(0x67,0x97),
(0x68,0x01),
(0x69,0xCD),
(0x6A,0x01),
(0x6B,0xB0),
(0x6C,0x04),
(0x6D,0x2C),
(0x6E,0x01),
(0x74,0x00),
(0xEF,0x00),
(0x41,0xFF),
(0x42,0x01),
)
class GroveGestureSensor:
    """Driver for the Grove PAJ7620-based gesture sensor over I2C.

    On construction the sensor is probed (register 0x00 must read 0x20),
    loaded with the vendor initialisation table, switched back to register
    bank 0 and put into gesture mode.  Gestures are then polled with
    ``get_gesture()`` / ``get_gesture_string()``.
    """

    # Gesture interrupt-flag value -> human readable name.
    _GESTURE_NAMES = {
        _PAJ_UP: 'Up',
        _PAJ_DOWN: 'Down',
        _PAJ_LEFT: 'Left',
        _PAJ_RIGHT: 'Right',
        _PAJ_FORWARD: 'Forward',
        _PAJ_BACKWARD: 'Backward',
        _PAJ_CLOCKWISE: 'Clockwise',
        _PAJ_COUNT_CLOCKWISE: 'AntiClockwise',
        _PAJ_WAVE: 'Wave',
    }

    def __init__(self):
        self._device_address = _DEFAULT_ADDRESS
        # Sanity-check the part id before configuring anything.
        data = self._read_register(0x00, 1)[0]
        assert data == 0x20, 'Sensor failure'
        for address, value in _INITIALIZE_REGISTER:
            self._write_register(address, value)
        self._write_register(_PAJ_BANK_SELECT, 0)
        for address, value in _INITIALIZE_GESTURE:
            self._write_register(address, value)
        self._gesture = None

    def _write_register(self, address, data):
        """Write one byte to the given sensor register."""
        i2c.write(self._device_address, bytearray([address, data]))

    def _read_register(self, address, num_bytes):
        """Read ``num_bytes`` starting at the given sensor register."""
        i2c.write(self._device_address, bytearray([address]))
        # Bug fix: read back from the device that was just addressed.  The
        # original read from _DEFAULT_ADDRESS regardless of
        # self._device_address, which would break any non-default address.
        return i2c.read(self._device_address, num_bytes)

    def _read_device(self):
        """Latch the 16-bit gesture interrupt flags into ``self._gesture``."""
        lsb = self._read_register(_PAJ_INT_FLAG1, 1)[0]
        msb = self._read_register(_PAJ_INT_FLAG1 + 1, 1)[0]
        self._gesture = (msb << 8) + lsb

    def get_gesture(self):
        """Return the raw gesture interrupt-flag word."""
        self._read_device()
        return self._gesture

    def get_gesture_string(self):
        """Return the detected gesture as a name, or ``'None'``."""
        self._read_device()
        return self._GESTURE_NAMES.get(self._gesture, 'None')
def demo():
    """Print detected gestures continuously until button B is pressed."""
    sensor = GroveGestureSensor()
    display.clear()
    # Show a prompt glyph so the user knows the demo is running.
    display.show('>')
    while True:
        if button_b.was_pressed():
            break
        print("Gesture: {}".format(sensor.get_gesture_string()))
        # Short delay between polls (milliseconds).
        sleep(50)


if __name__ == '__main__':
    demo()
| 24.514019 | 92 | 0.548227 |
from microbit import sleep, i2c, display, button_b
_DEFAULT_ADDRESS = 0x73
_PAJ_BANK_SELECT = 0xEF
_PAJ_SUSPEND = 0x03
_PAJ_INT_FLAG1_MASK = 0x41
_PAJ_INT_FLAG2_MASK = 0x42
_PAJ_INT_FLAG1 = 0x43
_PAJ_INT_FLAG2 = 0x44
_PAJ_STATE = 0x45
_PAJ_PS_HIGH_THRESHOLD = 0x69
_PAJ_PS_LOW_THRESHOLD = 0x6A
_PAJ_PS_APPROACH_STATE = 0x6B
_PAJ_PS_DATA = 0x6C
_PAJ_OBJ_BRIGHTNESS = 0xB0
_PAJ_OBJ_SIZE_L = 0xB1
_PAJ_OBJ_SIZE_H = 0xB2
_PAJ_PS_GAIN = 0x44
_PAJ_IDLE_S1_STEP_L = 0x67
_PAJ_IDLE_S1_STEP_H = 0x68
_PAJ_IDLE_S2_STEP_L = 0x69
_PAJ_IDLE_S2_STEP_H = 0x6A
_PAJ_OPTOS1_TIME_L = 0x6B
_PAJ_OPTOS2_TIME_H = 0x6C
_PAJ_S1TOS2_TIME_L = 0x6D
_PAJ_S1TOS2_TIME_H = 0x6E
_PAJ_EN = 0x72
_PAJ_UP = 0x01
_PAJ_DOWN = 0x02
_PAJ_LEFT = 0x04
_PAJ_RIGHT = 0x08
_PAJ_FORWARD = 0x10
_PAJ_BACKWARD = 0x20
_PAJ_CLOCKWISE = 0x40
_PAJ_COUNT_CLOCKWISE = 0x80
_PAJ_WAVE = 0x100
_INITIALIZE_REGISTER = (
(0xEF,0x00),
(0x37,0x07),
(0x38,0x17),
(0x39,0x06),
(0x41,0x00),
(0x42,0x00),
(0x46,0x2D),
(0x47,0x0F),
(0x48,0x3C),
(0x49,0x00),
(0x4A,0x1E),
(0x4C,0x20),
(0x51,0x10),
(0x5E,0x10),
(0x60,0x27),
(0x80,0x42),
(0x81,0x44),
(0x82,0x04),
(0x8B,0x01),
(0x90,0x06),
(0x95,0x0A),
(0x96,0x0C),
(0x97,0x05),
(0x9A,0x14),
(0x9C,0x3F),
(0xA5,0x19),
(0xCC,0x19),
(0xCD,0x0B),
(0xCE,0x13),
(0xCF,0x64),
(0xD0,0x21),
(0xEF,0x01),
(0x02,0x0F),
(0x03,0x10),
(0x04,0x02),
(0x25,0x01),
(0x27,0x39),
(0x28,0x7F),
(0x29,0x08),
(0x3E,0xFF),
(0x5E,0x3D),
(0x65,0x96),
(0x67,0x97),
(0x69,0xCD),
(0x6A,0x01),
(0x6D,0x2C),
(0x6E,0x01),
(0x72,0x01),
(0x73,0x35),
(0x74,0x00),
(0x77,0x01),
)
_INITIALIZE_GESTURE = (
(0xEF,0x00),
(0x41,0x00),
(0x42,0x00),
(0xEF,0x00),
(0x48,0x3C),
(0x49,0x00),
(0x51,0x10),
(0x83,0x20),
(0x9F,0xF9),
(0xEF,0x01),
(0x01,0x1E),
(0x02,0x0F),
(0x03,0x10),
(0x04,0x02),
(0x41,0x40),
(0x43,0x30),
(0x65,0x96),
(0x66,0x00),
(0x67,0x97),
(0x68,0x01),
(0x69,0xCD),
(0x6A,0x01),
(0x6B,0xB0),
(0x6C,0x04),
(0x6D,0x2C),
(0x6E,0x01),
(0x74,0x00),
(0xEF,0x00),
(0x41,0xFF),
(0x42,0x01),
)
class GroveGestureSensor:
def __init__(self):
self._device_address = _DEFAULT_ADDRESS
data = self._read_register(0x00, 1)[0]
assert data == 0x20, 'Sensor failure'
for i in range(len(_INITIALIZE_REGISTER)):
self._write_register(_INITIALIZE_REGISTER[i][0], _INITIALIZE_REGISTER[i][1])
self._write_register(_PAJ_BANK_SELECT, 0)
for i in range(len(_INITIALIZE_GESTURE)):
self._write_register(_INITIALIZE_GESTURE[i][0], _INITIALIZE_GESTURE[i][1])
self._gesture = None
def _write_register(self, address, data):
i2c.write(self._device_address, bytearray([address, data]))
def _read_register(self, address, num_bytes):
i2c.write(self._device_address, bytearray([address]))
return i2c.read(_DEFAULT_ADDRESS, num_bytes)
def _read_device(self):
lsb = self._read_register(_PAJ_INT_FLAG1, 1)[0]
msb = self._read_register(_PAJ_INT_FLAG1 + 1, 1)[0]
self._gesture = (msb << 8) + lsb
def get_gesture(self):
self._read_device()
return self._gesture
def get_gesture_string(self):
self._read_device()
if self._gesture == _PAJ_UP:
return 'Up'
elif self._gesture == _PAJ_DOWN:
return 'Down'
elif self._gesture == _PAJ_LEFT:
return 'Left'
elif self._gesture == _PAJ_RIGHT:
return 'Right'
elif self._gesture == _PAJ_FORWARD:
return 'Forward'
elif self._gesture == _PAJ_BACKWARD:
return 'Backward'
elif self._gesture == _PAJ_CLOCKWISE:
return 'Clockwise'
elif self._gesture == _PAJ_COUNT_CLOCKWISE:
return 'AntiClockwise'
elif self._gesture == _PAJ_WAVE:
return 'Wave'
return 'None'
def demo():
sensor = GroveGestureSensor()
display.clear()
display.show('>')
while True:
if button_b.was_pressed():
break
print("Gesture: {}".format(sensor.get_gesture_string()))
sleep(50)
if __name__ == '__main__':
demo()
| true | true |
1c3249f151b296b7c4243a282c5dc5d6d62b2a3c | 3,282 | py | Python | core/controllers/practice_sessions.py | alexewu/oppia | 57c3c660ab7974835ec068d7c7f5ce5b5f1f25ae | [
"Apache-2.0"
] | null | null | null | core/controllers/practice_sessions.py | alexewu/oppia | 57c3c660ab7974835ec068d7c7f5ce5b5f1f25ae | [
"Apache-2.0"
] | null | null | null | core/controllers/practice_sessions.py | alexewu/oppia | 57c3c660ab7974835ec068d7c7f5ce5b5f1f25ae | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the practice sessions page."""
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import dependency_registry
from core.domain import interaction_registry
from core.domain import obj_services
from core.domain import topic_services
import feconf
import jinja2
class PracticeSessionsPage(base.BaseHandler):
    """Renders the practice sessions page."""

    @acl_decorators.can_access_topic_viewer_page
    def get(self, topic_name):
        """Handles GET requests.

        Renders the practice-session player template together with the
        HTML/Angular dependencies of every interaction that may appear in
        practice questions.

        Raises:
            PageNotFoundException. The new-structure players are disabled.
        """
        if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
            raise self.PageNotFoundException
        # Topic cannot be None as an exception will be thrown from its decorator
        # if so.
        topic = topic_services.get_topic_by_name(topic_name)

        # Interactions whitelisted for use inside questions.
        interaction_ids = feconf.ALLOWED_QUESTION_INTERACTION_IDS

        interaction_dependency_ids = (
            interaction_registry.Registry.get_deduplicated_dependency_ids(
                interaction_ids))
        dependencies_html, additional_angular_modules = (
            dependency_registry.Registry.get_deps_html_and_angular_modules(
                interaction_dependency_ids))

        interaction_templates = (
            interaction_registry.Registry.get_interaction_html(
                interaction_ids))

        # NOTE(review): jinja2.utils.Markup was removed in Jinja2 3.x; this
        # relies on the Jinja2 version pinned by the project -- confirm
        # before upgrading the dependency.
        self.values.update({
            'DEFAULT_OBJECT_VALUES': obj_services.get_default_object_values(),
            'additional_angular_modules': additional_angular_modules,
            'INTERACTION_SPECS': interaction_registry.Registry.get_all_specs(),
            'interaction_templates': jinja2.utils.Markup(
                interaction_templates),
            'dependencies_html': jinja2.utils.Markup(dependencies_html),
            'topic_name': topic.name,
        })
        self.render_template('dist/practice-session-page.mainpage.html')
class PracticeSessionsPageDataHandler(base.BaseHandler):
    """Fetches relevant data for the practice sessions page."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    @acl_decorators.can_access_topic_viewer_page
    def get(self, topic_name):
        """Handles GET requests.

        Returns a JSON payload with the canonical topic name and the ids
        of all skills belonging to the topic.

        Raises:
            PageNotFoundException. The new-structure players are disabled.
        """
        if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
            raise self.PageNotFoundException
        # Topic cannot be None as an exception will be thrown from its decorator
        # if so.
        topic = topic_services.get_topic_by_name(topic_name)
        # (Removed a dead store that rebound the `topic_name` parameter to
        # `topic.name` without ever using it.)
        self.values.update({
            'topic_name': topic.name,
            'skill_list': topic.get_all_skill_ids()
        })
        self.render_json(self.values)
| 36.466667 | 80 | 0.71816 |
from constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import dependency_registry
from core.domain import interaction_registry
from core.domain import obj_services
from core.domain import topic_services
import feconf
import jinja2
class PracticeSessionsPage(base.BaseHandler):
@acl_decorators.can_access_topic_viewer_page
def get(self, topic_name):
if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
raise self.PageNotFoundException
topic = topic_services.get_topic_by_name(topic_name)
interaction_ids = feconf.ALLOWED_QUESTION_INTERACTION_IDS
interaction_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
dependencies_html, additional_angular_modules = (
dependency_registry.Registry.get_deps_html_and_angular_modules(
interaction_dependency_ids))
interaction_templates = (
interaction_registry.Registry.get_interaction_html(
interaction_ids))
self.values.update({
'DEFAULT_OBJECT_VALUES': obj_services.get_default_object_values(),
'additional_angular_modules': additional_angular_modules,
'INTERACTION_SPECS': interaction_registry.Registry.get_all_specs(),
'interaction_templates': jinja2.utils.Markup(
interaction_templates),
'dependencies_html': jinja2.utils.Markup(dependencies_html),
'topic_name': topic.name,
})
self.render_template('dist/practice-session-page.mainpage.html')
class PracticeSessionsPageDataHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_access_topic_viewer_page
def get(self, topic_name):
if not constants.ENABLE_NEW_STRUCTURE_PLAYERS:
raise self.PageNotFoundException
topic = topic_services.get_topic_by_name(topic_name)
skills_of_topic = topic.get_all_skill_ids()
topic_name = topic.name
self.values.update({
'topic_name': topic.name,
'skill_list': skills_of_topic
})
self.render_json(self.values)
| true | true |
1c324a0f97c8047fe482e12b573d06c176522100 | 1,319 | py | Python | reconcile/test/test_aws_iam_keys.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | reconcile/test/test_aws_iam_keys.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | reconcile/test/test_aws_iam_keys.py | bhushanthakur93/qontract-reconcile | fd8eea9f92d353224113955d08e3592864e37df8 | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase
import reconcile.aws_iam_keys as integ
class TestSupportFunctions(TestCase):
    """Unit tests for the account-filtering helpers in aws_iam_keys."""

    def test_filter_accounts_with_account_name(self):
        """Only the account matching the requested name survives."""
        account_a = {"name": "a", "deleteKeys": ["AKIA"]}
        account_b = {"name": "b", "deleteKeys": ["AKIA"]}
        result = integ.filter_accounts(
            [account_a, account_b], account_a["name"])
        self.assertEqual([account_a], result)

    def test_filter_accounts_without_account_name(self):
        """With no name given, all accounts with delete keys are kept."""
        account_a = {"name": "a", "deleteKeys": ["AKIA"]}
        account_b = {"name": "b", "deleteKeys": ["AKIA"]}
        result = integ.filter_accounts([account_a, account_b], None)
        self.assertEqual([account_a, account_b], result)

    def test_filter_accounts_without_delete_keys(self):
        """Accounts lacking a deleteKeys entry are dropped."""
        account_a = {"name": "a", "deleteKeys": ["AKIA"]}
        account_b = {"name": "b"}
        result = integ.filter_accounts([account_a, account_b], None)
        self.assertEqual([account_a], result)

    def test_get_keys_to_delete(self):
        """Only accounts with a non-empty deleteKeys list are mapped."""
        account_a = {"name": "a", "deleteKeys": ["k1", "k2"]}
        account_b = {"name": "b", "deleteKeys": None}
        account_c = {"name": "c", "deleteKeys": []}
        result = integ.get_keys_to_delete([account_a, account_b, account_c])
        self.assertEqual({account_a["name"]: account_a["deleteKeys"]}, result)
| 37.685714 | 61 | 0.595906 | from unittest import TestCase
import reconcile.aws_iam_keys as integ
class TestSupportFunctions(TestCase):
def test_filter_accounts_with_account_name(self):
a = {"name": "a", "deleteKeys": ["AKIA"]}
b = {"name": "b", "deleteKeys": ["AKIA"]}
accounts = [a, b]
filtered = integ.filter_accounts(accounts, a["name"])
self.assertEqual(filtered, [a])
def test_filter_accounts_without_account_name(self):
a = {"name": "a", "deleteKeys": ["AKIA"]}
b = {"name": "b", "deleteKeys": ["AKIA"]}
accounts = [a, b]
filtered = integ.filter_accounts(accounts, None)
self.assertEqual(filtered, accounts)
def test_filter_accounts_without_delete_keys(self):
a = {"name": "a", "deleteKeys": ["AKIA"]}
b = {"name": "b"}
accounts = [a, b]
filtered = integ.filter_accounts(accounts, None)
self.assertEqual(filtered, [a])
def test_get_keys_to_delete(self):
a = {"name": "a", "deleteKeys": ["k1", "k2"]}
b = {"name": "b", "deleteKeys": None}
c = {"name": "c", "deleteKeys": []}
accounts = [a, b, c]
expected_result = {a["name"]: a["deleteKeys"]}
keys_to_delete = integ.get_keys_to_delete(accounts)
self.assertEqual(keys_to_delete, expected_result)
| true | true |
1c324b9156101cf95bb2610012f39493cc8a3eb8 | 4,633 | py | Python | fastapi/encoders.py | facundojmaero/fastapi | a6897963d5ff2c836313c3b69fc6062051c07a63 | [
"MIT"
] | 1 | 2021-05-28T11:08:43.000Z | 2021-05-28T11:08:43.000Z | fastapi/encoders.py | ycd/fastapi | 848e7f27ed8018e280d40e223a99b9d9e3689734 | [
"MIT"
] | null | null | null | fastapi/encoders.py | ycd/fastapi | 848e7f27ed8018e280d40e223a99b9d9e3689734 | [
"MIT"
] | null | null | null | from collections import defaultdict
from enum import Enum
from pathlib import PurePath
from types import GeneratorType
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
from pydantic import BaseModel
from pydantic.json import ENCODERS_BY_TYPE
SetIntStr = Set[Union[int, str]]
DictIntStrAny = Dict[Union[int, str], Any]
def generate_encoders_by_class_tuples(
    type_encoder_map: Dict[Any, Callable]
) -> Dict[Callable, Tuple]:
    """Invert a ``type -> encoder`` mapping into ``encoder -> (types...)``.

    Types sharing an encoder are grouped into one tuple, preserving the
    insertion order of ``type_encoder_map``.  The result is a
    ``defaultdict`` whose missing entries yield the empty tuple.
    """
    grouped: Dict[Callable, Tuple] = defaultdict(tuple)
    for mapped_type, encoder_fn in type_encoder_map.items():
        grouped[encoder_fn] = grouped[encoder_fn] + (mapped_type,)
    return grouped
# Inverse index computed once at import time: encoder function -> tuple of
# the types it serves.  Used as an isinstance() fallback in jsonable_encoder.
encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)
def jsonable_encoder(
    obj: Any,
    include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
    exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
    by_alias: bool = True,
    exclude_unset: bool = False,
    exclude_defaults: bool = False,
    exclude_none: bool = False,
    custom_encoder: dict = {},
) -> Any:
    """Recursively convert *obj* into a structure ``json.dumps`` accepts.

    Args:
        obj: any object -- pydantic models, dicts, sequences, enums,
            paths, primitives, or anything with a known encoder.
        include / exclude: top-level key filters applied to models and
            plain dicts; ``include`` is a whitelist, ``exclude`` is a
            blacklist and always wins.
        by_alias / exclude_unset / exclude_defaults / exclude_none:
            forwarded to pydantic's ``.dict()``; ``exclude_none`` also
            drops ``None`` values from plain dicts.
        custom_encoder: ``{type: callable}`` overrides tried before the
            built-in encoder tables.  (Mutable default is safe here: the
            dict is only read, never mutated.)

    Raises:
        ValueError: if the object can be neither encoded nor coerced via
            ``dict()`` / ``vars()``.
    """
    if include is not None and not isinstance(include, set):
        include = set(include)
    if exclude is not None and not isinstance(exclude, set):
        exclude = set(exclude)

    if isinstance(obj, BaseModel):
        # Copy the model's configured encoders before merging the caller's.
        # The previous code updated obj.__config__.json_encoders in place,
        # leaking custom encoders into every later call for that model class.
        encoder = dict(getattr(obj.__config__, "json_encoders", {}))
        if custom_encoder:
            encoder.update(custom_encoder)
        obj_dict = obj.dict(
            include=include,
            exclude=exclude,
            by_alias=by_alias,
            exclude_unset=exclude_unset,
            exclude_none=exclude_none,
            exclude_defaults=exclude_defaults,
        )
        if "__root__" in obj_dict:
            obj_dict = obj_dict["__root__"]
        return jsonable_encoder(
            obj_dict,
            exclude_none=exclude_none,
            exclude_defaults=exclude_defaults,
            custom_encoder=encoder,
        )
    if isinstance(obj, Enum):
        return obj.value
    if isinstance(obj, PurePath):
        return str(obj)
    if isinstance(obj, (str, int, float, type(None))):
        return obj
    if isinstance(obj, dict):
        # Fix: make `include` a strict whitelist and let `exclude` win.
        # The old boolean chain kept keys outside `include` whenever
        # `exclude` was empty, and kept included keys even when they were
        # explicitly excluded.
        allowed_keys = set(obj.keys())
        if include is not None:
            allowed_keys &= include
        if exclude is not None:
            allowed_keys -= exclude
        encoded_dict = {}
        for key, value in obj.items():
            if key not in allowed_keys:
                continue
            if value is None and exclude_none:
                continue
            encoded_key = jsonable_encoder(
                key,
                by_alias=by_alias,
                exclude_unset=exclude_unset,
                exclude_none=exclude_none,
                custom_encoder=custom_encoder,
            )
            encoded_value = jsonable_encoder(
                value,
                by_alias=by_alias,
                exclude_unset=exclude_unset,
                exclude_none=exclude_none,
                custom_encoder=custom_encoder,
            )
            encoded_dict[encoded_key] = encoded_value
        return encoded_dict
    if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
        encoded_list = []
        for item in obj:
            encoded_list.append(
                jsonable_encoder(
                    item,
                    include=include,
                    exclude=exclude,
                    by_alias=by_alias,
                    exclude_unset=exclude_unset,
                    exclude_defaults=exclude_defaults,
                    exclude_none=exclude_none,
                    custom_encoder=custom_encoder,
                )
            )
        return encoded_list

    # Caller-supplied encoders take precedence: exact type match first,
    # then an isinstance() scan for subclasses.
    if custom_encoder:
        if type(obj) in custom_encoder:
            return custom_encoder[type(obj)](obj)
        else:
            for encoder_type, encoder in custom_encoder.items():
                if isinstance(obj, encoder_type):
                    return encoder(obj)

    if type(obj) in ENCODERS_BY_TYPE:
        return ENCODERS_BY_TYPE[type(obj)](obj)
    for encoder, classes_tuple in encoders_by_class_tuples.items():
        if isinstance(obj, classes_tuple):
            return encoder(obj)

    # Last resort: coerce to a mapping and recurse; collect both failures
    # so the raised error shows why neither dict() nor vars() worked.
    errors: List[Exception] = []
    try:
        data = dict(obj)
    except Exception as e:
        errors.append(e)
        try:
            data = vars(obj)
        except Exception as e:
            errors.append(e)
            raise ValueError(errors)
    return jsonable_encoder(
        data,
        by_alias=by_alias,
        exclude_unset=exclude_unset,
        exclude_defaults=exclude_defaults,
        exclude_none=exclude_none,
        custom_encoder=custom_encoder,
    )
| 33.817518 | 81 | 0.599611 | from collections import defaultdict
from enum import Enum
from pathlib import PurePath
from types import GeneratorType
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
from pydantic import BaseModel
from pydantic.json import ENCODERS_BY_TYPE
SetIntStr = Set[Union[int, str]]
DictIntStrAny = Dict[Union[int, str], Any]
def generate_encoders_by_class_tuples(
type_encoder_map: Dict[Any, Callable]
) -> Dict[Callable, Tuple]:
encoders_by_class_tuples: Dict[Callable, Tuple] = defaultdict(tuple)
for type_, encoder in type_encoder_map.items():
encoders_by_class_tuples[encoder] += (type_,)
return encoders_by_class_tuples
encoders_by_class_tuples = generate_encoders_by_class_tuples(ENCODERS_BY_TYPE)
def jsonable_encoder(
obj: Any,
include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
by_alias: bool = True,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
custom_encoder: dict = {},
) -> Any:
if include is not None and not isinstance(include, set):
include = set(include)
if exclude is not None and not isinstance(exclude, set):
exclude = set(exclude)
if isinstance(obj, BaseModel):
encoder = getattr(obj.__config__, "json_encoders", {})
if custom_encoder:
encoder.update(custom_encoder)
obj_dict = obj.dict(
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_none=exclude_none,
exclude_defaults=exclude_defaults,
)
if "__root__" in obj_dict:
obj_dict = obj_dict["__root__"]
return jsonable_encoder(
obj_dict,
exclude_none=exclude_none,
exclude_defaults=exclude_defaults,
custom_encoder=encoder,
)
if isinstance(obj, Enum):
return obj.value
if isinstance(obj, PurePath):
return str(obj)
if isinstance(obj, (str, int, float, type(None))):
return obj
if isinstance(obj, dict):
encoded_dict = {}
for key, value in obj.items():
if (value is not None or not exclude_none) and (
(include and key in include) or not exclude or key not in exclude
):
encoded_key = jsonable_encoder(
key,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_none=exclude_none,
custom_encoder=custom_encoder,
)
encoded_value = jsonable_encoder(
value,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_none=exclude_none,
custom_encoder=custom_encoder,
)
encoded_dict[encoded_key] = encoded_value
return encoded_dict
if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
encoded_list = []
for item in obj:
encoded_list.append(
jsonable_encoder(
item,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
custom_encoder=custom_encoder,
)
)
return encoded_list
if custom_encoder:
if type(obj) in custom_encoder:
return custom_encoder[type(obj)](obj)
else:
for encoder_type, encoder in custom_encoder.items():
if isinstance(obj, encoder_type):
return encoder(obj)
if type(obj) in ENCODERS_BY_TYPE:
return ENCODERS_BY_TYPE[type(obj)](obj)
for encoder, classes_tuple in encoders_by_class_tuples.items():
if isinstance(obj, classes_tuple):
return encoder(obj)
errors: List[Exception] = []
try:
data = dict(obj)
except Exception as e:
errors.append(e)
try:
data = vars(obj)
except Exception as e:
errors.append(e)
raise ValueError(errors)
return jsonable_encoder(
data,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
custom_encoder=custom_encoder,
)
| true | true |
1c324c21a8f61bee306884a09b43f1be586eddb5 | 1,721 | py | Python | apps/challenges/migrations/0028_zip_configuration_models_for_challenge_creation.py | ChrsMark/EvalAI | d70163a2465a5d69e818a342ae75f6b0a8eb4cea | [
"BSD-3-Clause"
] | 3 | 2019-02-24T10:57:09.000Z | 2019-02-24T16:49:32.000Z | apps/challenges/migrations/0028_zip_configuration_models_for_challenge_creation.py | ChrsMark/EvalAI | d70163a2465a5d69e818a342ae75f6b0a8eb4cea | [
"BSD-3-Clause"
] | 4 | 2021-06-08T23:45:35.000Z | 2022-01-13T03:32:04.000Z | apps/challenges/migrations/0028_zip_configuration_models_for_challenge_creation.py | ChrsMark/EvalAI | d70163a2465a5d69e818a342ae75f6b0a8eb4cea | [
"BSD-3-Clause"
] | 1 | 2020-01-15T17:27:02.000Z | 2020-01-15T17:27:02.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-06-11 18:40
from __future__ import unicode_literals
import base.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.10.2, 2017-06-11).

    Adds the ``ChallengeConfiguration`` model, stored in table
    ``challenge_zip_configuration`` -- presumably backing challenge
    creation from an uploaded zip file, given the ``zip_configuration``
    upload field and ``is_created`` flag.  Do not edit the operations:
    Django's migration state depends on them staying byte-stable.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('challenges', '0027_adds_unique_to_codename_dataset_split'),
    ]

    operations = [
        migrations.CreateModel(
            name='ChallengeConfiguration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('zip_configuration', models.FileField(upload_to=base.utils.RandomFileName('zip_configuration_files/challenge_zip'))),
                ('is_created', models.BooleanField(default=False)),
                ('stdout_file', models.FileField(blank=True, null=True, upload_to=base.utils.RandomFileName('zip_configuration_files/challenge_zip'))),
                ('stderr_file', models.FileField(blank=True, null=True, upload_to=base.utils.RandomFileName('zip_configuration_files/challenge_zip'))),
                ('challenge', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='challenges.Challenge')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'challenge_zip_configuration',
            },
        ),
    ]
| 46.513514 | 151 | 0.669959 |
from __future__ import unicode_literals
import base.utils
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('challenges', '0027_adds_unique_to_codename_dataset_split'),
]
operations = [
migrations.CreateModel(
name='ChallengeConfiguration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('zip_configuration', models.FileField(upload_to=base.utils.RandomFileName('zip_configuration_files/challenge_zip'))),
('is_created', models.BooleanField(default=False)),
('stdout_file', models.FileField(blank=True, null=True, upload_to=base.utils.RandomFileName('zip_configuration_files/challenge_zip'))),
('stderr_file', models.FileField(blank=True, null=True, upload_to=base.utils.RandomFileName('zip_configuration_files/challenge_zip'))),
('challenge', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='challenges.Challenge')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'challenge_zip_configuration',
},
),
]
| true | true |
1c324e592c8d6f9d0243b4c45d7a67cd6472be33 | 1,771 | py | Python | lektor_markdown_header_anchors.py | emmapeel2/lektor-markdown-header-anchors | cba9fcbd3b5e5644bb1766d041a31a803fc9d09c | [
"BSD-3-Clause"
] | 14 | 2015-12-24T23:29:13.000Z | 2019-12-15T23:28:53.000Z | lektor_markdown_header_anchors.py | emmapeel2/lektor-markdown-header-anchors | cba9fcbd3b5e5644bb1766d041a31a803fc9d09c | [
"BSD-3-Clause"
] | 11 | 2016-01-26T17:21:56.000Z | 2020-03-29T19:39:02.000Z | lektor_markdown_header_anchors.py | emmapeel2/lektor-markdown-header-anchors | cba9fcbd3b5e5644bb1766d041a31a803fc9d09c | [
"BSD-3-Clause"
] | 12 | 2016-01-12T19:00:22.000Z | 2021-03-05T13:18:52.000Z | from lektor.pluginsystem import Plugin
import uuid
from lektor.utils import slugify
from markupsafe import Markup
from collections import namedtuple
# One node of the table of contents: `anchor` (the header's id slug),
# `title` (Markup of the rendered header text) and `children` (a list of
# nested TocEntry nodes).
TocEntry = namedtuple('TocEntry', ['anchor', 'title', 'children'])
class MarkdownHeaderAnchorsPlugin(Plugin):
    """Adds id anchors to rendered markdown headers and builds a nested
    table of contents in the markdown meta under ``toc``."""

    name = 'Markdown Header Anchors'
    description = u'Lektor plugin that adds anchors and table of contents to markdown headers.'

    def on_markdown_config(self, config, **extra):
        """Install a renderer mixin whose ``header`` hook wraps each
        heading in ``<hN id="...">`` and records (level, anchor, title)."""
        class HeaderAnchorMixin(object):
            # Note: `renderer` plays the role of `self` for the mixin; the
            # plugin instance is captured via the enclosing closure.
            def header(renderer, text, level, raw):
                # 'random' anchors use a short uuid hex; the default is a
                # slug of the heading's raw text.
                if self.get_config().get('anchor-type') == "random":
                    anchor = uuid.uuid4().hex[:6]
                else:
                    anchor = slugify(raw)
                renderer.meta['toc'].append((level, anchor, Markup(text)))
                return '<h%d id="%s">%s</h%d>' % (level, anchor, text, level)
        config.renderer_mixins.append(HeaderAnchorMixin)

    def on_markdown_meta_init(self, meta, **extra):
        """Start each render with an empty flat toc accumulator."""
        meta['toc'] = []

    def on_markdown_meta_postprocess(self, meta, **extra):
        """Nest the flat (level, anchor, title) list into TocEntry trees.

        A stack of children-lists tracks the current nesting; descending
        one heading level pushes the previous entry's children list,
        ascending pops back out.
        """
        prev_level = None
        toc = []
        stack = [toc]
        for level, anchor, title in meta['toc']:
            if prev_level is None:
                prev_level = level
            elif prev_level == level - 1:
                # Exactly one level deeper: descend into the children of
                # the entry just appended (index 2 == TocEntry.children).
                stack.append(stack[-1][-1][2])
                prev_level = level
            elif prev_level > level:
                while prev_level > level:
                    # Defensive: never pop the root list, even for
                    # inconsistent heading sequences.
                    if len(stack) > 1:
                        stack.pop()
                    prev_level -= 1
            # NOTE(review): a downward jump of more than one level
            # (e.g. h1 -> h3) leaves prev_level unchanged, so such
            # headings are flattened as siblings -- presumably deliberate
            # leniency toward malformed documents; confirm before changing.
            stack[-1].append(TocEntry(anchor, title, []))
        meta['toc'] = toc
| 35.42 | 95 | 0.561265 | from lektor.pluginsystem import Plugin
import uuid
from lektor.utils import slugify
from markupsafe import Markup
from collections import namedtuple
TocEntry = namedtuple('TocEntry', ['anchor', 'title', 'children'])
class MarkdownHeaderAnchorsPlugin(Plugin):
name = 'Markdown Header Anchors'
description = u'Lektor plugin that adds anchors and table of contents to markdown headers.'
def on_markdown_config(self, config, **extra):
class HeaderAnchorMixin(object):
def header(renderer, text, level, raw):
if self.get_config().get('anchor-type') == "random":
anchor = uuid.uuid4().hex[:6]
else:
anchor = slugify(raw)
renderer.meta['toc'].append((level, anchor, Markup(text)))
return '<h%d id="%s">%s</h%d>' % (level, anchor, text, level)
config.renderer_mixins.append(HeaderAnchorMixin)
def on_markdown_meta_init(self, meta, **extra):
meta['toc'] = []
def on_markdown_meta_postprocess(self, meta, **extra):
prev_level = None
toc = []
stack = [toc]
for level, anchor, title in meta['toc']:
if prev_level is None:
prev_level = level
elif prev_level == level - 1:
stack.append(stack[-1][-1][2])
prev_level = level
elif prev_level > level:
while prev_level > level:
if len(stack) > 1:
stack.pop()
prev_level -= 1
stack[-1].append(TocEntry(anchor, title, []))
meta['toc'] = toc
| true | true |
1c324eca209fb748e1712badce0c2051a7528294 | 135,834 | py | Python | mne/viz/_brain/_brain.py | dddd1007/mne-python | 844d53c866bbea932dd6c89ab444bb7f882f0b6f | [
"BSD-3-Clause"
] | null | null | null | mne/viz/_brain/_brain.py | dddd1007/mne-python | 844d53c866bbea932dd6c89ab444bb7f882f0b6f | [
"BSD-3-Clause"
] | null | null | null | mne/viz/_brain/_brain.py | dddd1007/mne-python | 844d53c866bbea932dd6c89ab444bb7f882f0b6f | [
"BSD-3-Clause"
] | null | null | null | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Oleh Kozynets <ok7mailbox@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# jona-sassenhagen <jona.sassenhagen@gmail.com>
# Joan Massich <mailsik@gmail.com>
#
# License: Simplified BSD
import contextlib
from functools import partial
from io import BytesIO
import os
import os.path as op
import sys
import time
import copy
import traceback
import warnings
import numpy as np
from collections import OrderedDict
from .colormap import calculate_lut
from .surface import _Surface
from .view import views_dicts, _lh_views_dict
from .callback import (ShowView, TimeCallBack, SmartCallBack,
UpdateLUT, UpdateColorbarScale)
from ..utils import (_show_help_fig, _get_color_list, concatenate_images,
_generate_default_filename, _save_ndarray_img)
from .._3d import _process_clim, _handle_time, _check_views
from ...externals.decorator import decorator
from ...defaults import _handle_default
from ...surface import mesh_edges
from ...source_space import SourceSpaces, vertex_to_mni, read_talxfm
from ...transforms import apply_trans, invert_transform
from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type,
use_log_level, Bunch, _ReuseCycle, warn,
get_subjects_dir)
# Camera step applied per arrow-key press (azimuth/elevation), in degrees.
_ARROW_MOVE = 10  # degrees per press
@decorator
def safe_event(fun, *args, **kwargs):
    """Run ``fun`` but never let an exception escape.

    PyQt5 aborts the application when an event handler raises, so the
    traceback is printed to stderr instead and ``None`` is returned.
    """
    result = None
    try:
        result = fun(*args, **kwargs)
    except Exception:
        traceback.print_exc(file=sys.stderr)
    return result
class _Overlay(object):
    """One scalar layer on a mesh plus everything needed to colorize it.

    Holds per-vertex scalars together with the colormap, value range and
    opacity used by :meth:`to_colors`.
    """

    def __init__(self, scalars, colormap, rng, opacity, name):
        # scalars : per-vertex values to be color mapped
        # colormap : str (colormap name) or array of 0-255 colors
        # rng : (vmin, vmax) normalization range -- must not be None
        # opacity : float | None, global alpha multiplier
        # name : str, used for logging only
        self._scalars = scalars
        self._colormap = colormap
        assert rng is not None
        self._rng = rng
        self._opacity = opacity
        self._name = name

    def to_colors(self):
        """Map the stored scalars to an (n, 4) RGBA float array."""
        from .._3d import _get_cmap
        from matplotlib.colors import ListedColormap
        if isinstance(self._colormap, str):
            kind = self._colormap
            cmap = _get_cmap(self._colormap)
        else:
            # array colormap: convert 0-255 entries to the 0-1 range
            # expected by ListedColormap
            cmap = ListedColormap(self._colormap / 255.)
            kind = str(type(self._colormap))
        logger.debug(
            f'Color mapping {repr(self._name)} with {kind} '
            f'colormap and range {self._rng}')
        rng = self._rng
        assert rng is not None
        # normalize into [0, 1] before applying the colormap
        scalars = _norm(self._scalars, rng)
        colors = cmap(scalars)
        if self._opacity is not None:
            colors[:, 3] *= self._opacity
        return colors
def _norm(x, rng):
if rng[0] == rng[1]:
factor = 1 if rng[0] == 0 else 1e-6 * rng[0]
else:
factor = rng[1] - rng[0]
return (x - rng[0]) / factor
class _LayeredMesh(object):
    """A brain surface mesh with a stack of alpha-composited color overlays.

    Overlays (curvature, data, labels, ...) are kept in insertion order and
    blended into a single RGBA array that is uploaded as the mesh's point
    scalars.
    """

    def __init__(self, renderer, vertices, triangles, normals):
        self._renderer = renderer
        self._vertices = vertices
        self._triangles = triangles
        self._normals = normals
        self._polydata = None
        self._actor = None
        self._is_mapped = False  # True once the mesh exists in the renderer
        self._cache = None  # composited RGBA colors of all overlays
        self._overlays = OrderedDict()
        self._default_scalars = np.ones(vertices.shape)
        self._default_scalars_name = 'Data'

    def map(self):
        """Create the actor/polydata for this mesh in the renderer."""
        kwargs = {
            "color": None,
            "pickable": True,
            "rgba": True,
        }
        mesh_data = self._renderer.mesh(
            x=self._vertices[:, 0],
            y=self._vertices[:, 1],
            z=self._vertices[:, 2],
            triangles=self._triangles,
            normals=self._normals,
            scalars=self._default_scalars,
            **kwargs
        )
        self._actor, self._polydata = mesh_data
        self._is_mapped = True

    def _compute_over(self, B, A):
        """Composite RGBA array ``A`` over ``B`` ("over" alpha blending)."""
        assert A.ndim == B.ndim == 2
        assert A.shape[1] == B.shape[1] == 4
        A_w = A[:, 3:]  # front-layer alpha (implicitly * 1)
        B_w = B[:, 3:] * (1 - A_w)  # back layer weighted by remaining alpha
        C = A.copy()
        C[:, :3] *= A_w
        C[:, :3] += B[:, :3] * B_w
        C[:, 3:] += B_w
        # NOTE(review): C[:, 3:] can be 0 when both alphas are 0, which
        # would yield NaN here -- presumably the base (curvature) layer is
        # always (near-)opaque; verify upstream.
        C[:, :3] /= C[:, 3:]
        return np.clip(C, 0, 1, out=C)

    def _compose_overlays(self):
        """Blend all overlays (in insertion order) into one RGBA array."""
        B = None
        for overlay in self._overlays.values():
            A = overlay.to_colors()
            if B is None:
                B = A
            else:
                B = self._compute_over(B, A)
        return B

    def add_overlay(self, scalars, colormap, rng, opacity, name):
        """Add a named overlay and blend it into the cached colors."""
        overlay = _Overlay(
            scalars=scalars,
            colormap=colormap,
            rng=rng,
            opacity=opacity,
            name=name,
        )
        self._overlays[name] = overlay
        colors = overlay.to_colors()

        # save colors in cache (incremental blend; full recompose happens
        # in update())
        if self._cache is None:
            self._cache = colors
        else:
            self._cache = self._compute_over(self._cache, colors)

        # update the texture
        self._update()

    def remove_overlay(self, names):
        """Remove the overlay(s) named in ``names`` and recompose."""
        if not isinstance(names, list):
            names = [names]
        for name in names:
            if name in self._overlays:
                del self._overlays[name]
        self.update()

    def _update(self):
        """Push the cached RGBA colors to the mesh scalars (if mapped)."""
        if self._cache is None or self._renderer is None:
            return
        self._renderer._set_mesh_scalars(
            mesh=self._polydata,
            scalars=self._cache,
            name=self._default_scalars_name,
        )

    def update(self):
        """Recompose all overlays from scratch and refresh the mesh."""
        self._cache = self._compose_overlays()
        self._update()

    def _clean(self):
        """Drop VTK references so the mesh can be garbage collected."""
        mapper = self._actor.GetMapper()
        mapper.SetLookupTable(None)
        self._actor.SetMapper(None)
        self._actor = None
        self._polydata = None
        self._renderer = None

    def update_overlay(self, name, scalars=None, colormap=None,
                       opacity=None, rng=None):
        """Update any subset of an overlay's properties, then recompose."""
        overlay = self._overlays.get(name, None)
        if overlay is None:
            return
        if scalars is not None:
            overlay._scalars = scalars
        if colormap is not None:
            overlay._colormap = colormap
        if opacity is not None:
            overlay._opacity = opacity
        if rng is not None:
            overlay._rng = rng
        self.update()
@fill_doc
class Brain(object):
"""Class for visualizing a brain.
.. warning::
The API for this class is not currently complete. We suggest using
:meth:`mne.viz.plot_source_estimates` with the PyVista backend
enabled to obtain a ``Brain`` instance.
Parameters
----------
subject_id : str
Subject name in Freesurfer subjects dir.
hemi : str
Hemisphere id (ie 'lh', 'rh', 'both', or 'split'). In the case
of 'both', both hemispheres are shown in the same window.
In the case of 'split' hemispheres are displayed side-by-side
in different viewing panes.
surf : str
FreeSurfer surface mesh name (ie 'white', 'inflated', etc.).
title : str
Title for the window.
cortex : str or None
Specifies how the cortical surface is rendered.
The name of one of the preset cortex styles can be:
``'classic'`` (default), ``'high_contrast'``,
``'low_contrast'``, or ``'bone'`` or a valid color name.
Setting this to ``None`` is equivalent to ``(0.5, 0.5, 0.5)``.
alpha : float in [0, 1]
Alpha level to control opacity of the cortical surface.
size : int | array-like, shape (2,)
The size of the window, in pixels. can be one number to specify
a square window, or a length-2 sequence to specify (width, height).
background : tuple(int, int, int)
The color definition of the background: (red, green, blue).
foreground : matplotlib color
Color of the foreground (will be used for colorbars and text).
None (default) will use black or white depending on the value
of ``background``.
figure : list of Figure | None | int
If None (default), a new window will be created with the appropriate
views. For single view plots, the figure can be specified as int to
retrieve the corresponding Mayavi window.
subjects_dir : str | None
If not None, this directory will be used as the subjects directory
instead of the value set using the SUBJECTS_DIR environment
variable.
views : list | str
The views to use.
offset : bool | str
If True, shifts the right- or left-most x coordinate of the left and
right surfaces, respectively, to be at zero. This is useful for viewing
inflated surface where hemispheres typically overlap. Can be "auto"
(default) use True with inflated surfaces and False otherwise
(Default: 'auto'). Only used when ``hemi='both'``.
.. versionchanged:: 0.23
Default changed to "auto".
show_toolbar : bool
If True, toolbars will be shown for each view.
offscreen : bool
If True, rendering will be done offscreen (not shown). Useful
mostly for generating images or screenshots, but can be buggy.
Use at your own risk.
interaction : str
Can be "trackball" (default) or "terrain", i.e. a turntable-style
camera.
units : str
Can be 'm' or 'mm' (default).
%(view_layout)s
silhouette : dict | bool
As a dict, it contains the ``color``, ``linewidth``, ``alpha`` opacity
and ``decimate`` (level of decimation between 0 and 1 or None) of the
brain's silhouette to display. If True, the default values are used
and if False, no silhouette will be displayed. Defaults to False.
theme : str | path-like
Can be "auto" (default), "light", or "dark" or a path-like to a
custom stylesheet. For Dark-Mode and automatic Dark-Mode-Detection,
:mod:`qdarkstyle` respectively and `darkdetect
<https://github.com/albertosottile/darkdetect>`__ is required.
show : bool
Display the window as soon as it is ready. Defaults to True.
Attributes
----------
geo : dict
A dictionary of pysurfer.Surface objects for each hemisphere.
overlays : dict
The overlays.
Notes
-----
This table shows the capabilities of each Brain backend ("✓" for full
support, and "-" for partial support):
.. table::
:widths: auto
+---------------------------+--------------+---------------+
| 3D function: | surfer.Brain | mne.viz.Brain |
+===========================+==============+===============+
| add_annotation | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_data | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_foci | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_label | ✓ | ✓ |
+---------------------------+--------------+---------------+
| add_text | ✓ | ✓ |
+---------------------------+--------------+---------------+
| close | ✓ | ✓ |
+---------------------------+--------------+---------------+
| data | ✓ | ✓ |
+---------------------------+--------------+---------------+
| foci | ✓ | |
+---------------------------+--------------+---------------+
| labels | ✓ | ✓ |
+---------------------------+--------------+---------------+
| remove_foci | ✓ | |
+---------------------------+--------------+---------------+
| remove_labels | ✓ | ✓ |
+---------------------------+--------------+---------------+
| remove_annotations | - | ✓ |
+---------------------------+--------------+---------------+
| scale_data_colormap | ✓ | |
+---------------------------+--------------+---------------+
| save_image | ✓ | ✓ |
+---------------------------+--------------+---------------+
| save_movie | ✓ | ✓ |
+---------------------------+--------------+---------------+
| screenshot | ✓ | ✓ |
+---------------------------+--------------+---------------+
| show_view | ✓ | ✓ |
+---------------------------+--------------+---------------+
| TimeViewer | ✓ | ✓ |
+---------------------------+--------------+---------------+
| enable_depth_peeling | | ✓ |
+---------------------------+--------------+---------------+
| get_picked_points | | ✓ |
+---------------------------+--------------+---------------+
| add_data(volume) | | ✓ |
+---------------------------+--------------+---------------+
| view_layout | | ✓ |
+---------------------------+--------------+---------------+
| flatmaps | | ✓ |
+---------------------------+--------------+---------------+
| vertex picking | | ✓ |
+---------------------------+--------------+---------------+
| label picking | | ✓ |
+---------------------------+--------------+---------------+
"""
    def __init__(self, subject_id, hemi, surf, title=None,
                 cortex="classic", alpha=1.0, size=800, background="black",
                 foreground=None, figure=None, subjects_dir=None,
                 views='auto', offset='auto', show_toolbar=False,
                 offscreen=False, interaction='trackball', units='mm',
                 view_layout='vertical', silhouette=False, theme='auto',
                 show=True):
        # Parameters are documented in the class docstring.
        from ..backends.renderer import backend, _get_renderer
        from .._3d import _get_cmap
        from matplotlib.colors import colorConverter

        if hemi in ('both', 'split'):
            self._hemis = ('lh', 'rh')
        elif hemi in ('lh', 'rh'):
            self._hemis = (hemi, )
        else:
            raise KeyError('hemi has to be either "lh", "rh", "split", '
                           'or "both"')
        self._view_layout = _check_option('view_layout', view_layout,
                                          ('vertical', 'horizontal'))

        if figure is not None and not isinstance(figure, int):
            backend._check_3d_figure(figure)
        if title is None:
            self._title = subject_id
        else:
            self._title = title
        self._interaction = 'trackball'

        if isinstance(background, str):
            background = colorConverter.to_rgb(background)
        self._bg_color = background
        if foreground is None:
            # pick white/black for contrast against the background
            foreground = 'w' if sum(self._bg_color) < 2 else 'k'
        if isinstance(foreground, str):
            foreground = colorConverter.to_rgb(foreground)
        self._fg_color = foreground

        if isinstance(views, str):
            views = [views]
        views = _check_views(surf, views, hemi)
        # subplot grid: one row per view, one column per hemisphere pane
        col_dict = dict(lh=1, rh=1, both=1, split=2)
        shape = (len(views), col_dict[hemi])
        if self._view_layout == 'horizontal':
            shape = shape[::-1]
        self._subplot_shape = shape

        size = tuple(np.atleast_1d(size).round(0).astype(int).flat)
        if len(size) not in (1, 2):
            raise ValueError('"size" parameter must be an int or length-2 '
                             'sequence of ints.')
        size = size if len(size) == 2 else size * 2  # 1-tuple to 2-tuple

        subjects_dir = get_subjects_dir(subjects_dir)

        self.theme = theme

        self.time_viewer = False
        self._hemi = hemi
        self._units = units
        self._alpha = float(alpha)
        self._subject_id = subject_id
        self._subjects_dir = subjects_dir
        self._views = views
        self._times = None
        self._vertex_to_label_id = dict()
        self._annotation_labels = dict()
        self._labels = {'lh': list(), 'rh': list()}
        self._unnamed_label_id = 0  # can only grow
        self._annots = {'lh': list(), 'rh': list()}
        self._layered_meshes = {}
        self._elevation_rng = [15, 165]  # range of motion of camera on theta
        self._lut_locked = None

        # default values for silhouette
        self._silhouette = {
            'color': self._bg_color,
            'line_width': 2,
            'alpha': alpha,
            'decimate': 0.9,
        }
        _validate_type(silhouette, (dict, bool), 'silhouette')
        if isinstance(silhouette, dict):
            self._silhouette.update(silhouette)
            self.silhouette = True
        else:
            self.silhouette = silhouette
        self._scalar_bar = None
        # for now only one time label can be added
        # since it is the same for all figures
        self._time_label_added = False
        # array of data used by TimeViewer
        self._data = {}
        self.geo = {}
        self.set_time_interpolation('nearest')

        geo_kwargs = self._cortex_colormap(cortex)
        # evaluate at the midpoint of the used colormap
        val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin'])
        self._brain_color = _get_cmap(geo_kwargs['colormap'])(val)

        # load geometry for one or both hemispheres as necessary
        _validate_type(offset, (str, bool), 'offset')
        if isinstance(offset, str):
            _check_option('offset', offset, ('auto',), extra='when str')
            offset = (surf in ('inflated', 'flat'))
        offset = None if (not offset or hemi != 'both') else 0.0
        logger.debug(f'Hemi offset: {offset}')

        self._renderer = _get_renderer(name=self._title, size=size,
                                       bgcolor=background,
                                       shape=shape,
                                       fig=figure)
        self._renderer._window_close_connect(self._clean)
        self._renderer._window_set_theme(theme)
        self.plotter = self._renderer.plotter

        self._setup_canonical_rotation()
        for h in self._hemis:
            # Initialize a Surface object as the geometry
            geo = _Surface(subject_id, h, surf, subjects_dir, offset,
                           units=self._units, x_dir=self._rigid[0, :3])
            # Load in the geometry and curvature
            geo.load_geometry()
            geo.load_curvature()
            self.geo[h] = geo
            for ri, ci, v in self._iter_views(h):
                self._renderer.subplot(ri, ci)
                if self._layered_meshes.get(h) is None:
                    mesh = _LayeredMesh(
                        renderer=self._renderer,
                        vertices=self.geo[h].coords,
                        triangles=self.geo[h].faces,
                        normals=self.geo[h].nn,
                    )
                    mesh.map()  # send to GPU
                    # binarized curvature forms the base layer of the stack
                    mesh.add_overlay(
                        scalars=self.geo[h].bin_curv,
                        colormap=geo_kwargs["colormap"],
                        rng=[geo_kwargs["vmin"], geo_kwargs["vmax"]],
                        opacity=alpha,
                        name='curv',
                    )
                    self._layered_meshes[h] = mesh
                    # add metadata to the mesh for picking
                    mesh._polydata._hemi = h
                else:
                    # additional subplots reuse the existing actor
                    actor = self._layered_meshes[h]._actor
                    self._renderer.plotter.add_actor(actor)
                if self.silhouette:
                    mesh = self._layered_meshes[h]
                    self._renderer._silhouette(
                        mesh=mesh._polydata,
                        color=self._silhouette["color"],
                        line_width=self._silhouette["line_width"],
                        alpha=self._silhouette["alpha"],
                        decimate=self._silhouette["decimate"],
                    )
                self._renderer.set_camera(**views_dicts[h][v])

        self.interaction = interaction
        self._closed = False
        if show:
            self.show()
        # update the views once the geometry is all set
        for h in self._hemis:
            for ri, ci, v in self._iter_views(h):
                self.show_view(v, row=ri, col=ci, hemi=h)

        if surf == 'flat':
            self._renderer.set_interaction("rubber_band_2d")
    def _setup_canonical_rotation(self):
        """Compute a rigid transform aligning this subject with Talairach axes.

        The 4x4 result is stored in ``self._rigid``; it stays the identity
        when the Talairach transform cannot be read.
        """
        from ...coreg import fit_matched_points, _trans_from_params
        self._rigid = np.eye(4)
        try:
            xfm = read_talxfm(self._subject_id, self._subjects_dir)
        except Exception:
            # no transform available (e.g. missing MRI): keep the identity
            return
        # XYZ+origin + halfway
        pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5])
        pts_subj = apply_trans(invert_transform(xfm), pts_tal)
        # we fit with scaling enabled, but then discard it (we just need
        # the rigid-body components)
        params = fit_matched_points(pts_subj, pts_tal, scale=3, out='params')
        self._rigid[:] = _trans_from_params((True, True, False), params[:6])
    def setup_time_viewer(self, time_viewer=True, show_traces=True):
        """Configure the time viewer parameters.

        Parameters
        ----------
        time_viewer : bool
            If True, enable widgets interaction. Defaults to True.
        show_traces : bool
            If True, enable visualization of time traces. Defaults to True.

        Notes
        -----
        The keyboard shortcuts are the following:

        '?': Display help window
        'i': Toggle interface
        's': Apply auto-scaling
        'r': Restore original clim
        'c': Clear all traces
        'n': Shift the time forward by the playback speed
        'b': Shift the time backward by the playback speed
        'Space': Start/Pause playback
        'Up': Decrease camera elevation angle
        'Down': Increase camera elevation angle
        'Left': Decrease camera azimuth angle
        'Right': Increase camera azimuth angle
        """
        if self.time_viewer:
            return  # already configured
        if not self._data:
            raise ValueError("No data to visualize. See ``add_data``.")
        self.time_viewer = time_viewer
        self.orientation = list(_lh_views_dict.keys())
        self.default_smoothing_range = [0, 15]

        # Default configuration
        self.playback = False
        self.visibility = False
        self.refresh_rate_ms = max(int(round(1000. / 60.)), 1)
        self.default_scaling_range = [0.2, 2.0]
        self.default_playback_speed_range = [0.01, 1]
        self.default_playback_speed_value = 0.01
        self.default_status_bar_msg = "Press ? for help"
        self.default_label_extract_modes = {
            "stc": ["mean", "max"],
            "src": ["mean_flip", "pca_flip", "auto"],
        }
        self.default_trace_modes = ('vertex', 'label')
        self.annot = None
        self.label_extract_mode = None
        all_keys = ('lh', 'rh', 'vol')
        self.act_data_smooth = {key: (None, None) for key in all_keys}
        self.color_list = _get_color_list()
        # remove grey for better contrast on the brain
        self.color_list.remove("#7f7f7f")
        self.color_cycle = _ReuseCycle(self.color_list)
        self.mpl_canvas = None
        self.help_canvas = None
        self.rms = None
        self.picked_patches = {key: list() for key in all_keys}
        self.picked_points = {key: list() for key in all_keys}
        self.pick_table = dict()
        self._spheres = list()
        self._mouse_no_mvt = -1
        self.callbacks = dict()
        self.widgets = dict()
        self.keys = ('fmin', 'fmid', 'fmax')

        # Derived parameters:
        self.playback_speed = self.default_playback_speed_value
        _validate_type(show_traces, (bool, str, 'numeric'), 'show_traces')
        self.interactor_fraction = 0.25
        if isinstance(show_traces, str):
            # string values select the trace mode / canvas placement
            self.show_traces = True
            self.separate_canvas = False
            self.traces_mode = 'vertex'
            if show_traces == 'separate':
                self.separate_canvas = True
            elif show_traces == 'label':
                self.traces_mode = 'label'
            else:
                assert show_traces == 'vertex'  # guaranteed above
        else:
            if isinstance(show_traces, bool):
                self.show_traces = show_traces
            else:
                # numeric value = fraction of the window used by the canvas
                show_traces = float(show_traces)
                if not 0 < show_traces < 1:
                    raise ValueError(
                        'show traces, if numeric, must be between 0 and 1, '
                        f'got {show_traces}')
                self.show_traces = True
                self.interactor_fraction = show_traces
            self.traces_mode = 'vertex'
            self.separate_canvas = False
        del show_traces

        self._configure_time_label()
        self._configure_scalar_bar()
        self._configure_shortcuts()
        self._configure_picking()
        self._configure_tool_bar()
        self._configure_dock()
        self._configure_menu()
        self._configure_status_bar()
        self._configure_playback()
        self._configure_help()
        # show everything at the end
        self.toggle_interface()
        self._renderer.show()

        # sizes could change, update views
        for hemi in ('lh', 'rh'):
            for ri, ci, v in self._iter_views(hemi):
                self.show_view(view=v, row=ri, col=ci)
        self._renderer._process_events()

        self._renderer._update()
        # finally, show the MplCanvas
        if self.show_traces:
            self.mpl_canvas.show()
    @safe_event
    def _clean(self):
        """Break reference cycles and release GUI/VTK resources on close."""
        # resolve the reference cycle
        self.clear_glyphs()
        self.remove_annotations()
        # clear init actors
        for hemi in self._hemis:
            self._layered_meshes[hemi]._clean()
        self._clear_callbacks()
        self._clear_widgets()
        if getattr(self, 'mpl_canvas', None) is not None:
            self.mpl_canvas.clear()
        if getattr(self, 'act_data_smooth', None) is not None:
            for key in list(self.act_data_smooth.keys()):
                self.act_data_smooth[key] = None
        # XXX this should be done in PyVista
        for renderer in self._renderer._all_renderers:
            renderer.RemoveAllLights()
        # app_window cannot be set to None because it is used in __del__
        for key in ('lighting', 'interactor', '_RenderWindow'):
            setattr(self.plotter, key, None)
        # Qt LeaveEvent requires _Iren so we use _FakeIren instead of None
        # to resolve the ref to vtkGenericRenderWindowInteractor
        self.plotter._Iren = _FakeIren()
        if getattr(self.plotter, 'picker', None) is not None:
            self.plotter.picker = None
        # XXX end PyVista
        # drop every remaining heavyweight reference at once
        for key in ('plotter', 'window', 'dock', 'tool_bar', 'menu_bar',
                    'interactor', 'mpl_canvas', 'time_actor',
                    'picked_renderer', 'act_data_smooth', '_scalar_bar',
                    'actions', 'widgets', 'geo', '_data'):
            setattr(self, key, None)
def toggle_interface(self, value=None):
"""Toggle the interface.
Parameters
----------
value : bool | None
If True, the widgets are shown and if False, they
are hidden. If None, the state of the widgets is
toggled. Defaults to None.
"""
if value is None:
self.visibility = not self.visibility
else:
self.visibility = value
# update tool bar and dock
with self._renderer._window_ensure_minimum_sizes():
if self.visibility:
self._renderer._dock_show()
self._renderer._tool_bar_update_button_icon(
name="visibility", icon_name="visibility_on")
else:
self._renderer._dock_hide()
self._renderer._tool_bar_update_button_icon(
name="visibility", icon_name="visibility_off")
self._renderer._update()
    def apply_auto_scaling(self):
        """Detect automatically fitting scaling parameters."""
        # delegate to the shared (re)scaling routine
        self._update_auto_scaling()

    def restore_user_scaling(self):
        """Restore original scaling parameters."""
        # same routine, but asked to restore the user-provided limits
        self._update_auto_scaling(restore=True)
def toggle_playback(self, value=None):
"""Toggle time playback.
Parameters
----------
value : bool | None
If True, automatic time playback is enabled and if False,
it's disabled. If None, the state of time playback is toggled.
Defaults to None.
"""
if value is None:
self.playback = not self.playback
else:
self.playback = value
# update tool bar icon
if self.playback:
self._renderer._tool_bar_update_button_icon(
name="play", icon_name="pause")
else:
self._renderer._tool_bar_update_button_icon(
name="play", icon_name="play")
if self.playback:
time_data = self._data['time']
max_time = np.max(time_data)
if self._current_time == max_time: # start over
self.set_time_point(0) # first index
self._last_tick = time.time()
    def reset(self):
        """Reset view and time step."""
        self.reset_view()
        max_time = len(self._data['time']) - 1
        if max_time > 0:
            # jump back to the initial time index and sync the slider
            self.callbacks["time"](
                self._data["initial_time_idx"],
                update_widget=True,
            )
        self._renderer._update()

    def set_playback_speed(self, speed):
        """Set the time playback speed.

        Parameters
        ----------
        speed : float
            The speed of the playback.
        """
        self.playback_speed = speed

    @safe_event
    def _play(self):
        # timer tick: advance playback; stop playback if advancing raises
        if self.playback:
            try:
                self._advance()
            except Exception:
                self.toggle_playback(value=False)
                raise

    def _advance(self):
        """Advance the current time by the wall-clock delta since last tick."""
        this_time = time.time()
        delta = this_time - self._last_tick
        self._last_tick = time.time()
        time_data = self._data['time']
        times = np.arange(self._n_times)
        time_shift = delta * self.playback_speed
        max_time = np.max(time_data)
        time_point = min(self._current_time + time_shift, max_time)
        # always use linear here -- this does not determine the data
        # interpolation mode, it just finds where we are (in time) in
        # terms of the time indices
        idx = np.interp(time_point, time_data, times)
        self.callbacks["time"](idx, update_widget=True)
        if time_point == max_time:
            # reached the end of the data: stop playing
            self.toggle_playback(value=False)
    def _configure_time_label(self):
        """Center and bold the time text actor created by ``add_data``."""
        self.time_actor = self._data.get('time_actor')
        if self.time_actor is not None:
            self.time_actor.SetPosition(0.5, 0.03)
            self.time_actor.GetTextProperty().SetJustificationToCentered()
            self.time_actor.GetTextProperty().BoldOn()

    def _configure_scalar_bar(self):
        """Place the colorbar vertically along the left edge of the view."""
        if self._scalar_bar is not None:
            self._scalar_bar.SetOrientationToVertical()
            self._scalar_bar.SetHeight(0.6)
            self._scalar_bar.SetWidth(0.05)
            self._scalar_bar.SetPosition(0.02, 0.2)
    def _configure_dock_time_widget(self, layout=None):
        """Add the min/current/max time labels under the time slider."""
        len_time = len(self._data['time']) - 1
        if len_time < 1:
            return  # single time point: nothing to label
        layout = self._renderer.dock_layout if layout is None else layout
        hlayout = self._renderer._dock_add_layout(vertical=False)
        # placeholder values are overwritten with the real times below
        self.widgets["min_time"] = self._renderer._dock_add_label(
            value="-", layout=hlayout)
        self._renderer._dock_add_stretch(hlayout)
        self.widgets["current_time"] = self._renderer._dock_add_label(
            value="x", layout=hlayout)
        self._renderer._dock_add_stretch(hlayout)
        self.widgets["max_time"] = self._renderer._dock_add_label(
            value="+", layout=hlayout)
        self._renderer._layout_add_widget(layout, hlayout)
        min_time = float(self._data['time'][0])
        max_time = float(self._data['time'][-1])
        self.widgets["min_time"].set_value(f"{min_time: .3f}")
        self.widgets["max_time"].set_value(f"{max_time: .3f}")
        self.widgets["current_time"].set_value(f"{self._current_time: .3f}")

    def _configure_dock_playback_widget(self, name):
        """Build the playback group box: time slider, labels, speed spin box."""
        layout = self._renderer._dock_add_group_box(name)
        len_time = len(self._data['time']) - 1

        # Time widget
        if len_time < 1:
            self.callbacks["time"] = None
            self.widgets["time"] = None
        else:
            self.callbacks["time"] = TimeCallBack(
                brain=self,
                callback=self.plot_time_line,
            )
            self.widgets["time"] = self._renderer._dock_add_slider(
                name="Time (s)",
                value=self._data['time_idx'],
                rng=[0, len_time],
                double=True,
                callback=self.callbacks["time"],
                compact=False,
                layout=layout,
            )
            self.callbacks["time"].widget = self.widgets["time"]

        # Time labels
        if len_time < 1:
            self.widgets["min_time"] = None
            self.widgets["max_time"] = None
            self.widgets["current_time"] = None
        else:
            self._configure_dock_time_widget(layout)
            self.callbacks["time"].label = self.widgets["current_time"]

        # Playback speed widget
        if len_time < 1:
            self.callbacks["playback_speed"] = None
            self.widgets["playback_speed"] = None
        else:
            self.callbacks["playback_speed"] = SmartCallBack(
                callback=self.set_playback_speed,
            )
            self.widgets["playback_speed"] = self._renderer._dock_add_spin_box(
                name="Speed",
                value=self.default_playback_speed_value,
                rng=self.default_playback_speed_range,
                callback=self.callbacks["playback_speed"],
                layout=layout,
            )
            self.callbacks["playback_speed"].widget = \
                self.widgets["playback_speed"]

        # Time label
        current_time = self._current_time
        assert current_time is not None  # should never be the case, float
        time_label = self._data['time_label']
        if callable(time_label):
            current_time = time_label(current_time)
        else:
            current_time = time_label
        if self.time_actor is not None:
            self.time_actor.SetInput(current_time)
        del current_time
    def _configure_dock_orientation_widget(self, name):
        """Build the orientation group box (renderer and view selectors)."""
        layout = self._renderer._dock_add_group_box(name)
        # Renderer widget -- only shown when there is more than one subplot
        rends = [str(i) for i in range(len(self._renderer._all_renderers))]
        if len(rends) > 1:
            def select_renderer(idx):
                # make the chosen subplot the active one
                idx = int(idx)
                loc = self._renderer._index_to_loc(idx)
                self.plotter.subplot(*loc)

            self.callbacks["renderer"] = SmartCallBack(
                callback=select_renderer,
            )
            self.widgets["renderer"] = self._renderer._dock_add_combo_box(
                name="Renderer",
                value="0",
                rng=rends,
                callback=self.callbacks["renderer"],
                layout=layout,
            )
            self.callbacks["renderer"].widget = \
                self.widgets["renderer"]

        # Use 'lh' as a reference for orientation for 'both'
        if self._hemi == 'both':
            hemis_ref = ['lh']
        else:
            hemis_ref = self._hemis
        orientation_data = [None] * len(rends)
        for hemi in hemis_ref:
            for ri, ci, view in self._iter_views(hemi):
                idx = self._renderer._loc_to_index((ri, ci))
                if view == 'flat':
                    # flat maps have a fixed camera: no orientation entry
                    _data = None
                else:
                    _data = dict(default=view, hemi=hemi, row=ri, col=ci)
                orientation_data[idx] = _data
        self.callbacks["orientation"] = ShowView(
            brain=self,
            data=orientation_data,
        )
        self.widgets["orientation"] = self._renderer._dock_add_combo_box(
            name=None,
            value=self.orientation[0],
            rng=self.orientation,
            callback=self.callbacks["orientation"],
            layout=layout,
        )

    def _configure_dock_colormap_widget(self, name):
        """Build the color-limit group box (fmin/fmid/fmax + rescale row)."""
        layout = self._renderer._dock_add_group_box(name)
        self._renderer._dock_add_label(
            value="min / mid / max",
            align=True,
            layout=layout,
        )
        up = UpdateLUT(brain=self)
        for key in self.keys:
            hlayout = self._renderer._dock_add_layout(vertical=False)
            rng = _get_range(self)
            # bind ``key`` as a default to avoid the late-binding closure trap
            self.callbacks[key] = lambda value, key=key: up(**{key: value})
            self.widgets[key] = self._renderer._dock_add_slider(
                name=None,
                value=self._data[key],
                rng=rng,
                callback=self.callbacks[key],
                double=True,
                layout=hlayout,
            )
            self.widgets[f"entry_{key}"] = self._renderer._dock_add_spin_box(
                name=None,
                value=self._data[key],
                callback=self.callbacks[key],
                rng=rng,
                layout=hlayout,
            )
            up.widgets[key] = [self.widgets[key], self.widgets[f"entry_{key}"]]
            self._renderer._layout_add_widget(layout, hlayout)

        # reset / minus / plus
        hlayout = self._renderer._dock_add_layout(vertical=False)
        self._renderer._dock_add_label(
            value="Rescale",
            align=True,
            layout=hlayout,
        )
        self.widgets["reset"] = self._renderer._dock_add_button(
            name="↺",
            callback=self.restore_user_scaling,
            layout=hlayout,
        )
        for key, char, val in (("fminus", "➖", 1.2 ** -0.25),
                               ("fplus", "➕", 1.2 ** 0.25)):
            self.callbacks[key] = UpdateColorbarScale(
                brain=self,
                factor=val,
            )
            self.widgets[key] = self._renderer._dock_add_button(
                name=char,
                callback=self.callbacks[key],
                layout=hlayout,
            )
        self._renderer._layout_add_widget(layout, hlayout)

        # register colorbar slider representations
        widgets = {key: self.widgets[key] for key in self.keys}
        for name in ("fmin", "fmid", "fmax", "fminus", "fplus"):
            self.callbacks[name].widgets = widgets
def _configure_dock_trace_widget(self, name):
    """Add the dock widgets controlling trace mode (annotation/extract).

    Parameters
    ----------
    name : str
        Title of the dock group box.
    """
    if not self.show_traces:
        return
    # do not show trace mode for volumes
    if (self._data.get('src', None) is not None and
            self._data['src'].kind == 'volume'):
        self._configure_vertex_time_course()
        return
    layout = self._renderer._dock_add_group_box(name)
    # setup candidate annots
    def _set_annot(annot):
        # Switching annotation clears all picked glyphs/labels first, then
        # re-enters either vertex or label trace mode.
        self.clear_glyphs()
        self.remove_labels()
        self.remove_annotations()
        self.annot = annot
        if annot == 'None':
            self.traces_mode = 'vertex'
            self._configure_vertex_time_course()
        else:
            self.traces_mode = 'label'
            self._configure_label_time_course()
        self._renderer._update()
    # setup label extraction parameters
    def _set_label_mode(mode):
        if self.traces_mode != 'label':
            return
        # Remember which patches were picked so they can be re-added with
        # the new extraction mode after clearing.
        glyphs = copy.deepcopy(self.picked_patches)
        self.label_extract_mode = mode
        self.clear_glyphs()
        for hemi in self._hemis:
            for label_id in glyphs[hemi]:
                label = self._annotation_labels[hemi][label_id]
                vertex_id = label.vertices[0]
                self._add_label_glyph(hemi, None, vertex_id)
        self.mpl_canvas.axes.relim()
        self.mpl_canvas.axes.autoscale_view()
        self.mpl_canvas.update_plot()
        self._renderer._update()
    from ...source_estimate import _get_allowed_label_modes
    from ...label import _read_annot_cands
    dir_name = op.join(self._subjects_dir, self._subject_id, 'label')
    cands = _read_annot_cands(dir_name, raise_error=False)
    # 'None' means vertex mode (no annotation-based traces)
    cands = cands + ['None']
    self.annot = cands[0]
    stc = self._data["stc"]
    modes = _get_allowed_label_modes(stc)
    if self._data["src"] is None:
        modes = [m for m in modes if m not in
                 self.default_label_extract_modes["src"]]
    self.label_extract_mode = modes[-1]
    if self.traces_mode == 'vertex':
        _set_annot('None')
    else:
        _set_annot(self.annot)
    self.widgets["annotation"] = self._renderer._dock_add_combo_box(
        name="Annotation",
        value=self.annot,
        rng=cands,
        callback=_set_annot,
        layout=layout,
    )
    self.widgets["extract_mode"] = self._renderer._dock_add_combo_box(
        name="Extract mode",
        value=self.label_extract_mode,
        rng=modes,
        callback=_set_label_mode,
        layout=layout,
    )
def _configure_dock(self):
    """Initialize the dock and populate it with all control widgets."""
    self._renderer._dock_initialize()
    self._configure_dock_playback_widget(name="Playback")
    self._configure_dock_orientation_widget(name="Orientation")
    self._configure_dock_colormap_widget(name="Color Limits")
    self._configure_dock_trace_widget(name="Trace")
    # Smoothing widget
    self.callbacks["smoothing"] = SmartCallBack(
        callback=self.set_data_smoothing,
    )
    self.widgets["smoothing"] = self._renderer._dock_add_spin_box(
        name="Smoothing",
        value=self._data['smoothing_steps'],
        rng=self.default_smoothing_range,
        callback=self.callbacks["smoothing"],
        double=False
    )
    # let the callback update its own widget when triggered externally
    self.callbacks["smoothing"].widget = \
        self.widgets["smoothing"]
    self._renderer._dock_finalize()
def _configure_playback(self):
    """Wire the renderer's playback timer to the time/play widgets."""
    self._renderer._playback_initialize(
        func=self._play,
        timeout=self.refresh_rate_ms,
        value=self._data['time_idx'],
        rng=[0, len(self._data['time']) - 1],
        time_widget=self.widgets["time"],
        play_widget=self.widgets["play"],
    )
def _configure_mplcanvas(self):
    """Create the matplotlib canvas used for time-course traces."""
    # Get the fractional components for the brain and mpl
    self.mpl_canvas = self._renderer._window_get_mplcanvas(
        brain=self,
        interactor_fraction=self.interactor_fraction,
        show_traces=self.show_traces,
        separate_canvas=self.separate_canvas
    )
    xlim = [np.min(self._data['time']),
            np.max(self._data['time'])]
    with warnings.catch_warnings():
        # mpl may warn when limits are degenerate (e.g. single time point)
        warnings.filterwarnings("ignore", category=UserWarning)
        self.mpl_canvas.axes.set(xlim=xlim)
    if not self.separate_canvas:
        self._renderer._window_adjust_mplcanvas_layout()
    self.mpl_canvas.set_color(
        bg_color=self._bg_color,
        fg_color=self._fg_color,
    )
def _configure_vertex_time_course(self):
    """Set up vertex-mode traces: RMS line, time line, and default picks."""
    if not self.show_traces:
        return
    if self.mpl_canvas is None:
        self._configure_mplcanvas()
    else:
        self.clear_glyphs()
    # plot RMS of the activation
    y = np.concatenate(list(v[0] for v in self.act_data_smooth.values()
                            if v[0] is not None))
    rms = np.linalg.norm(y, axis=0) / np.sqrt(len(y))
    del y
    self.rms, = self.mpl_canvas.axes.plot(
        self._data['time'], rms,
        lw=3, label='RMS', zorder=3, color=self._fg_color,
        alpha=0.5, ls=':')
    # now plot the time line
    self.plot_time_line()
    # then the picked points
    for idx, hemi in enumerate(['lh', 'rh', 'vol']):
        act_data = self.act_data_smooth.get(hemi, [None])[0]
        if act_data is None:
            continue
        hemi_data = self._data[hemi]
        vertices = hemi_data['vertices']
        # simulate a picked renderer
        if self._hemi in ('both', 'rh') or hemi == 'vol':
            idx = 0
        self.picked_renderer = self._renderer._all_renderers[idx]
        # initialize the default point
        if self._data['initial_time'] is not None:
            # pick at that time
            use_data = act_data[
                :, [np.round(self._data['time_idx']).astype(int)]]
        else:
            use_data = act_data
        # pick the vertex with the largest absolute activation
        ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None),
                               use_data.shape)
        if hemi == 'vol':
            mesh = hemi_data['grid']
        else:
            mesh = self._layered_meshes[hemi]._polydata
        vertex_id = vertices[ind[0]]
        self._add_vertex_glyph(hemi, mesh, vertex_id)
def _configure_picking(self):
    """Precompute per-hemi (data, smoothing matrix) and hook mouse events."""
    # get data for each hemi
    from scipy import sparse
    for idx, hemi in enumerate(['vol', 'lh', 'rh']):
        hemi_data = self._data.get(hemi)
        if hemi_data is not None:
            act_data = hemi_data['array']
            if act_data.ndim == 3:
                # vector-valued data: reduce to per-vertex magnitude
                act_data = np.linalg.norm(act_data, axis=1)
            smooth_mat = hemi_data.get('smooth_mat')
            vertices = hemi_data['vertices']
            if hemi == 'vol':
                assert smooth_mat is None
                # identity-like scatter matrix mapping vertex ids to rows
                smooth_mat = sparse.csr_matrix(
                    (np.ones(len(vertices)),
                     (vertices, np.arange(len(vertices)))))
            self.act_data_smooth[hemi] = (act_data, smooth_mat)
    self._renderer._update_picking_callback(
        self._on_mouse_move,
        self._on_button_press,
        self._on_button_release,
        self._on_pick
    )
def _configure_tool_bar(self):
self._renderer._tool_bar_load_icons()
self._renderer._tool_bar_set_theme(self.theme)
self._renderer._tool_bar_initialize(name="Toolbar")
self._renderer._tool_bar_add_file_button(
name="screenshot",
desc="Take a screenshot",
func=self.save_image,
)
self._renderer._tool_bar_add_file_button(
name="movie",
desc="Save movie...",
func=self.save_movie,
shortcut="ctrl+shift+s",
)
self._renderer._tool_bar_add_button(
name="visibility",
desc="Toggle Controls",
func=self.toggle_interface,
icon_name="visibility_on"
)
self.widgets["play"] = self._renderer._tool_bar_add_play_button(
name="play",
desc="Play/Pause",
func=self.toggle_playback,
shortcut=" ",
)
self._renderer._tool_bar_add_button(
name="reset",
desc="Reset",
func=self.reset,
)
self._renderer._tool_bar_add_button(
name="scale",
desc="Auto-Scale",
func=self.apply_auto_scaling,
)
self._renderer._tool_bar_add_button(
name="clear",
desc="Clear traces",
func=self.clear_glyphs,
)
self._renderer._tool_bar_add_spacer()
self._renderer._tool_bar_add_button(
name="help",
desc="Help",
func=self.help,
shortcut="?",
)
def _shift_time(self, op):
self.callbacks["time"](
value=(op(self._current_time, self.playback_speed)),
time_as_index=False,
update_widget=True,
)
def _rotate_azimuth(self, value):
azimuth = (self._renderer.figure._azimuth + value) % 360
self._renderer.set_camera(azimuth=azimuth, reset_camera=False)
def _rotate_elevation(self, value):
elevation = np.clip(
self._renderer.figure._elevation + value,
self._elevation_rng[0],
self._elevation_rng[1],
)
self._renderer.set_camera(elevation=elevation, reset_camera=False)
def _configure_shortcuts(self):
    """Install the Brain-specific keyboard shortcuts on the plotter."""
    # First, we remove the default bindings:
    self._clear_callbacks()
    # Then, we add our own:
    self.plotter.add_key_event("i", self.toggle_interface)
    self.plotter.add_key_event("s", self.apply_auto_scaling)
    self.plotter.add_key_event("r", self.restore_user_scaling)
    self.plotter.add_key_event("c", self.clear_glyphs)
    # n/b shift time forward/backward by the playback speed
    self.plotter.add_key_event("n", partial(self._shift_time,
                                            op=lambda x, y: x + y))
    self.plotter.add_key_event("b", partial(self._shift_time,
                                            op=lambda x, y: x - y))
    # arrow keys rotate the camera by a fixed step (_ARROW_MOVE degrees)
    for key, func, sign in (("Left", self._rotate_azimuth, 1),
                            ("Right", self._rotate_azimuth, -1),
                            ("Up", self._rotate_elevation, 1),
                            ("Down", self._rotate_elevation, -1)):
        self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE))
def _configure_menu(self):
    """Create the menu bar with a single Help submenu/entry."""
    self._renderer._menu_initialize()
    self._renderer._menu_add_submenu(
        name="help",
        desc="Help",
    )
    self._renderer._menu_add_button(
        menu_name="help",
        name="help",
        desc="Show MNE key bindings\t?",
        func=self.help,
    )
def _configure_status_bar(self):
    """Create the status bar with a message label and a progress bar."""
    self._renderer._status_bar_initialize()
    self.status_msg = self._renderer._status_bar_add_label(
        self.default_status_bar_msg, stretch=1)
    self.status_progress = self._renderer._status_bar_add_progress_bar()
    # the progress bar stays hidden until a long-running task shows it
    if self.status_progress is not None:
        self.status_progress.hide()
def _on_mouse_move(self, vtk_picker, event):
if self._mouse_no_mvt:
self._mouse_no_mvt -= 1
def _on_button_press(self, vtk_picker, event):
    """Arm the click detector on mouse-button press."""
    # If fewer than 2 mouse-move events arrive before release, the
    # gesture is treated as a click and triggers a pick.
    self._mouse_no_mvt = 2
def _on_button_release(self, vtk_picker, event):
    """Trigger a pick on mouse release if the gesture was a click."""
    if self._mouse_no_mvt > 0:
        x, y = vtk_picker.GetEventPosition()
        # programmatically detect the picked renderer
        try:
            # pyvista<0.30.0
            self.picked_renderer = \
                self.plotter.iren.FindPokedRenderer(x, y)
        except AttributeError:
            # pyvista>=0.30.0
            self.picked_renderer = \
                self.plotter.iren.interactor.FindPokedRenderer(x, y)
        # trigger the pick
        self.plotter.picker.Pick(x, y, 0, self.picked_renderer)
    # disarm the click detector for the next gesture
    self._mouse_no_mvt = 0
def _on_pick(self, vtk_picker, event):
    """Handle a pick event: toggle a glyph or add a vertex/label trace."""
    if not self.show_traces:
        return
    # vtk_picker is a vtkCellPicker
    cell_id = vtk_picker.GetCellId()
    mesh = vtk_picker.GetDataSet()
    if mesh is None or cell_id == -1 or not self._mouse_no_mvt:
        return  # don't pick
    # 1) Check to see if there are any spheres along the ray
    if len(self._spheres):
        collection = vtk_picker.GetProp3Ds()
        found_sphere = None
        for ii in range(collection.GetNumberOfItems()):
            actor = collection.GetItemAsObject(ii)
            for sphere in self._spheres:
                if any(a is actor for a in sphere._actors):
                    found_sphere = sphere
                    break
            if found_sphere is not None:
                break
        if found_sphere is not None:
            assert found_sphere._is_glyph
            mesh = found_sphere
    # 2) Remove sphere if it's what we have
    if hasattr(mesh, "_is_glyph"):
        self._remove_vertex_glyph(mesh)
        return
    # 3) Otherwise, pick the objects in the scene
    try:
        hemi = mesh._hemi
    except AttributeError:  # volume
        hemi = 'vol'
    else:
        assert hemi in ('lh', 'rh')
    if self.act_data_smooth[hemi][0] is None:  # no data to add for hemi
        return
    pos = np.array(vtk_picker.GetPickPosition())
    if hemi == 'vol':
        # VTK will give us the point closest to the viewer in the vol.
        # We want to pick the point with the maximum value along the
        # camera-to-click array, which fortunately we can get "just"
        # by inspecting the points that are sufficiently close to the
        # ray.
        grid = mesh = self._data[hemi]['grid']
        vertices = self._data[hemi]['vertices']
        coords = self._data[hemi]['grid_coords'][vertices]
        scalars = grid.cell_arrays['values'][vertices]
        spacing = np.array(grid.GetSpacing())
        max_dist = np.linalg.norm(spacing) / 2.
        origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition()
        ori = pos - origin
        ori /= np.linalg.norm(ori)
        # the magic formula: distance from a ray to a given point
        dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1)
        assert dists.shape == (len(coords),)
        mask = dists <= max_dist
        idx = np.where(mask)[0]
        if len(idx) == 0:
            return  # weird point on edge of volume?
        # useful for debugging the ray by mapping it into the volume:
        # dists = dists - dists.min()
        # dists = (1. - dists / dists.max()) * self._cmap_range[1]
        # grid.cell_arrays['values'][vertices] = dists * mask
        # among candidates near the ray, take the strongest activation
        idx = idx[np.argmax(np.abs(scalars[idx]))]
        vertex_id = vertices[idx]
        # Naive way: convert pos directly to idx; i.e., apply mri_src_t
        # shape = self._data[hemi]['grid_shape']
        # taking into account the cell vs point difference (spacing/2)
        # shift = np.array(grid.GetOrigin()) + spacing / 2.
        # ijk = np.round((pos - shift) / spacing).astype(int)
        # vertex_id = np.ravel_multi_index(ijk, shape, order='F')
    else:
        # surface: choose the cell point closest to the pick position
        vtk_cell = mesh.GetCell(cell_id)
        cell = [vtk_cell.GetPointId(point_id) for point_id
                in range(vtk_cell.GetNumberOfPoints())]
        vertices = mesh.points[cell]
        idx = np.argmin(abs(vertices - pos), axis=0)
        vertex_id = cell[idx[0]]
    if self.traces_mode == 'label':
        self._add_label_glyph(hemi, mesh, vertex_id)
    else:
        self._add_vertex_glyph(hemi, mesh, vertex_id)
def _add_label_glyph(self, hemi, mesh, vertex_id):
if hemi == 'vol':
return
label_id = self._vertex_to_label_id[hemi][vertex_id]
label = self._annotation_labels[hemi][label_id]
# remove the patch if already picked
if label_id in self.picked_patches[hemi]:
self._remove_label_glyph(hemi, label_id)
return
if hemi == label.hemi:
self.add_label(label, borders=True, reset_camera=False)
self.picked_patches[hemi].append(label_id)
def _remove_label_glyph(self, hemi, label_id):
    """Remove a picked label patch and its associated trace line."""
    label = self._annotation_labels[hemi][label_id]
    # drop the matplotlib trace and return its color to the cycle
    label._line.remove()
    self.color_cycle.restore(label._color)
    self.mpl_canvas.update_plot()
    # remove the border overlay from the brain mesh
    self._layered_meshes[hemi].remove_overlay(label.name)
    self.picked_patches[hemi].remove(label_id)
def _add_vertex_glyph(self, hemi, mesh, vertex_id):
    """Add a picked-vertex sphere glyph and its time-course trace.

    Returns the last sphere added (one per view the hemi appears in).
    """
    if vertex_id in self.picked_points[hemi]:
        return
    # skip if the wrong hemi is selected
    if self.act_data_smooth[hemi][0] is None:
        return
    color = next(self.color_cycle)
    line = self.plot_time_course(hemi, vertex_id, color)
    if hemi == 'vol':
        # locate the voxel center by averaging its corner points
        ijk = np.unravel_index(
            vertex_id, np.array(mesh.GetDimensions()) - 1, order='F')
        # should just be GetCentroid(center), but apparently it's VTK9+:
        # center = np.empty(3)
        # voxel.GetCentroid(center)
        voxel = mesh.GetCell(*ijk)
        pts = voxel.GetPoints()
        n_pts = pts.GetNumberOfPoints()
        center = np.empty((n_pts, 3))
        for ii in range(pts.GetNumberOfPoints()):
            pts.GetPoint(ii, center[ii])
        center = np.mean(center, axis=0)
    else:
        center = mesh.GetPoints().GetPoint(vertex_id)
    del mesh
    # from the picked renderer to the subplot coords
    try:
        lst = self._renderer._all_renderers._renderers
    except AttributeError:
        lst = self._renderer._all_renderers
    rindex = lst.index(self.picked_renderer)
    row, col = self._renderer._index_to_loc(rindex)
    actors = list()
    spheres = list()
    for ri, ci, _ in self._iter_views(hemi):
        self.plotter.subplot(ri, ci)
        # Using _sphere() instead of renderer.sphere() for 2 reasons:
        # 1) renderer.sphere() fails on Windows in a scenario where a lot
        # of picking requests are done in a short span of time (could be
        # mitigated with synchronization/delay?)
        # 2) the glyph filter is used in renderer.sphere() but only one
        # sphere is required in this function.
        actor, sphere = self._renderer._sphere(
            center=np.array(center),
            color=color,
            radius=4.0,
        )
        actors.append(actor)
        spheres.append(sphere)
    # add metadata for picking
    for sphere in spheres:
        sphere._is_glyph = True
        sphere._hemi = hemi
        sphere._line = line
        sphere._actors = actors
        sphere._color = color
        sphere._vertex_id = vertex_id
    self.picked_points[hemi].append(vertex_id)
    self._spheres.extend(spheres)
    self.pick_table[vertex_id] = spheres
    return sphere
def _remove_vertex_glyph(self, mesh, render=True):
    """Remove a picked-vertex sphere glyph and its trace line.

    Parameters
    ----------
    mesh : object
        A sphere glyph previously created by ``_add_vertex_glyph``.
    render : bool
        Whether to re-render after removing the actors.
    """
    vertex_id = mesh._vertex_id
    if vertex_id not in self.pick_table:
        return
    hemi = mesh._hemi
    color = mesh._color
    spheres = self.pick_table[vertex_id]
    # all spheres for a vertex share one trace line; remove it once
    spheres[0]._line.remove()
    self.mpl_canvas.update_plot()
    self.picked_points[hemi].remove(vertex_id)
    with warnings.catch_warnings(record=True):
        # We intentionally ignore these in case we have traversed the
        # entire color cycle
        warnings.simplefilter('ignore')
        self.color_cycle.restore(color)
    for sphere in spheres:
        # remove all actors
        self.plotter.remove_actor(sphere._actors, render=render)
        sphere._actors = None
        self._spheres.pop(self._spheres.index(sphere))
    self.pick_table.pop(vertex_id)
def clear_glyphs(self):
    """Clear the picking glyphs."""
    if not self.time_viewer:
        return
    for sphere in list(self._spheres):  # will remove itself, so copy
        self._remove_vertex_glyph(sphere, render=False)
    # sanity checks: all bookkeeping structures must be empty now
    assert sum(len(v) for v in self.picked_points.values()) == 0
    assert len(self.pick_table) == 0
    assert len(self._spheres) == 0
    for hemi in self._hemis:
        for label_id in list(self.picked_patches[hemi]):
            self._remove_label_glyph(hemi, label_id)
    assert sum(len(v) for v in self.picked_patches.values()) == 0
    # also drop the RMS line if it was drawn
    if self.rms is not None:
        self.rms.remove()
        self.rms = None
    self._renderer._update()
def plot_time_course(self, hemi, vertex_id, color):
    """Plot the vertex time course.

    Parameters
    ----------
    hemi : str
        The hemisphere id of the vertex.
    vertex_id : int
        The vertex identifier in the mesh.
    color : matplotlib color
        The color of the time course.

    Returns
    -------
    line : matplotlib object
        The time line object.
    """
    if self.mpl_canvas is None:
        return
    time = self._data['time'].copy()  # avoid circular ref
    mni = None
    if hemi == 'vol':
        hemi_str = 'V'
        # map voxel ijk -> MNI via Talairach transform
        xfm = read_talxfm(
            self._subject_id, self._subjects_dir)
        if self._units == 'mm':
            xfm['trans'][:3, 3] *= 1000.
        ijk = np.unravel_index(
            vertex_id, self._data[hemi]['grid_shape'], order='F')
        src_mri_t = self._data[hemi]['grid_src_mri_t']
        mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk)
    else:
        hemi_str = 'L' if hemi == 'lh' else 'R'
        try:
            mni = vertex_to_mni(
                vertices=vertex_id,
                hemis=0 if hemi == 'lh' else 1,
                subject=self._subject_id,
                subjects_dir=self._subjects_dir
            )
        except Exception:
            # MNI coordinates are informational only; fall back gracefully
            mni = None
    if mni is not None:
        mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni)
    else:
        mni = ''
    label = "{}:{}{}".format(hemi_str, str(vertex_id).ljust(6), mni)
    act_data, smooth = self.act_data_smooth[hemi]
    if smooth is not None:
        # project through the smoothing matrix to get the vertex trace
        act_data = smooth[vertex_id].dot(act_data)[0]
    else:
        act_data = act_data[vertex_id].copy()
    line = self.mpl_canvas.plot(
        time,
        act_data,
        label=label,
        lw=1.,
        color=color,
        zorder=4,
    )
    return line
def plot_time_line(self):
    """Add the time line to the MPL widget."""
    if self.mpl_canvas is None:
        return
    # only the plain boolean True enables the time line
    if not (isinstance(self.show_traces, bool) and self.show_traces):
        return
    current_time = self._current_time
    # lazily create the line the first time, then just move it
    if not hasattr(self, "time_line"):
        self.time_line = self.mpl_canvas.plot_time_line(
            x=current_time,
            label='time',
            color=self._fg_color,
            lw=1,
        )
    self.time_line.set_xdata(current_time)
    self.mpl_canvas.update_plot()
def _configure_help(self):
    """Build the keyboard-shortcut help canvas shown by ``help()``."""
    shortcut_descriptions = [
        ('?', 'Display help window'),
        ('i', 'Toggle interface'),
        ('s', 'Apply auto-scaling'),
        ('r', 'Restore original clim'),
        ('c', 'Clear all traces'),
        ('n', 'Shift the time forward by the playback speed'),
        ('b', 'Shift the time backward by the playback speed'),
        ('Space', 'Start/Pause playback'),
        ('Up', 'Decrease camera elevation angle'),
        ('Down', 'Increase camera elevation angle'),
        ('Left', 'Decrease camera azimuth angle'),
        ('Right', 'Increase camera azimuth angle'),
    ]
    keys, descriptions = zip(*shortcut_descriptions)
    self.help_canvas = self._renderer._window_get_simple_canvas(
        width=5, height=2, dpi=80)
    # render the two columns (key, description) into the canvas
    _show_help_fig(
        col1='\n'.join(keys),
        col2='\n'.join(descriptions),
        fig_help=self.help_canvas.fig,
        ax=self.help_canvas.axes,
        show=False,
    )
def help(self):
    """Display the help window."""
    # the canvas is built once in _configure_help; here we just show it
    self.help_canvas.show()
def _clear_callbacks(self):
if not hasattr(self, 'callbacks'):
return
for callback in self.callbacks.values():
if callback is not None:
for key in ('plotter', 'brain', 'callback',
'widget', 'widgets'):
setattr(callback, key, None)
self.callbacks.clear()
# Remove the default key binding
if getattr(self, "iren", None) is not None:
self.plotter.iren.clear_key_event_callbacks()
def _clear_widgets(self):
if not hasattr(self, 'widgets'):
return
for widget in self.widgets.values():
if widget is not None:
for key in ('triggered', 'valueChanged'):
setattr(widget, key, None)
self.widgets.clear()
@property
def interaction(self):
    """The interaction style."""
    # set via the ``interaction`` setter; 'trackball' or 'terrain'
    return self._interaction
@interaction.setter
def interaction(self, interaction):
    """Set the interaction style."""
    _validate_type(interaction, str, 'interaction')
    _check_option('interaction', interaction, ('trackball', 'terrain'))
    # apply the style to every subplot in the window
    for ri, ci, _ in self._iter_views('vol'):  # will traverse all
        self._renderer.subplot(ri, ci)
        self._renderer.set_interaction(interaction)
def _cortex_colormap(self, cortex):
"""Return the colormap corresponding to the cortex."""
colormap_map = dict(classic=dict(colormap="Greys",
vmin=-1, vmax=2),
high_contrast=dict(colormap="Greys",
vmin=-.1, vmax=1.3),
low_contrast=dict(colormap="Greys",
vmin=-5, vmax=5),
bone=dict(colormap="bone_r",
vmin=-.2, vmax=2),
)
return colormap_map[cortex]
@verbose
def add_data(self, array, fmin=None, fmid=None, fmax=None,
             thresh=None, center=None, transparent=False, colormap="auto",
             alpha=1, vertices=None, smoothing_steps=None, time=None,
             time_label="auto", colorbar=True,
             hemi=None, remove_existing=None, time_label_size=None,
             initial_time=None, scale_factor=None, vector_alpha=None,
             clim=None, src=None, volume_options=0.4, colorbar_kwargs=None,
             verbose=None):
    """Display data from a numpy array on the surface or volume.

    This provides a similar interface to
    :meth:`surfer.Brain.add_overlay`, but it displays
    it with a single colormap. It offers more flexibility over the
    colormap, and provides a way to display four-dimensional data
    (i.e., a timecourse) or five-dimensional data (i.e., a
    vector-valued timecourse).

    .. note:: ``fmin`` sets the low end of the colormap, and is separate
              from thresh (this is a different convention from
              :meth:`surfer.Brain.add_overlay`).

    Parameters
    ----------
    array : numpy array, shape (n_vertices[, 3][, n_times])
        Data array. For the data to be understood as vector-valued
        (3 values per vertex corresponding to X/Y/Z surface RAS),
        then ``array`` must be have all 3 dimensions.
        If vectors with no time dimension are desired, consider using a
        singleton (e.g., ``np.newaxis``) to create a "time" dimension
        and pass ``time_label=None`` (vector values are not supported).
    %(fmin_fmid_fmax)s
    %(thresh)s
    %(center)s
    %(transparent)s
    colormap : str, list of color, or array
        Name of matplotlib colormap to use, a list of matplotlib colors,
        or a custom look up table (an n x 4 array coded with RBGA values
        between 0 and 255), the default "auto" chooses a default divergent
        colormap, if "center" is given (currently "icefire"), otherwise a
        default sequential colormap (currently "rocket").
    alpha : float in [0, 1]
        Alpha level to control opacity of the overlay.
    vertices : numpy array
        Vertices for which the data is defined (needed if
        ``len(data) < nvtx``).
    smoothing_steps : int or None
        Number of smoothing steps (smoothing is used if len(data) < nvtx)
        The value 'nearest' can be used too. None (default) will use as
        many as necessary to fill the surface.
    time : numpy array
        Time points in the data array (if data is 2D or 3D).
    %(time_label)s
    colorbar : bool
        Whether to add a colorbar to the figure. Can also be a tuple
        to give the (row, col) index of where to put the colorbar.
    hemi : str | None
        If None, it is assumed to belong to the hemisphere being
        shown. If two hemispheres are being shown, an error will
        be thrown.
    remove_existing : bool
        Not supported yet.
        Remove surface added by previous "add_data" call. Useful for
        conserving memory when displaying different data in a loop.
    time_label_size : int
        Font size of the time label (default 14).
    initial_time : float | None
        Time initially shown in the plot. ``None`` to use the first time
        sample (default).
    scale_factor : float | None (default)
        The scale factor to use when displaying glyphs for vector-valued
        data.
    vector_alpha : float | None
        Alpha level to control opacity of the arrows. Only used for
        vector-valued data. If None (default), ``alpha`` is used.
    clim : dict
        Original clim arguments.
    %(src_volume_options)s
    colorbar_kwargs : dict | None
        Options to pass to :meth:`pyvista.BasePlotter.add_scalar_bar`
        (e.g., ``dict(title_font_size=10)``).
    %(verbose)s

    Notes
    -----
    If the data is defined for a subset of vertices (specified
    by the "vertices" parameter), a smoothing method is used to interpolate
    the data onto the high resolution surface. If the data is defined for
    subsampled version of the surface, smoothing_steps can be set to None,
    in which case only as many smoothing steps are applied until the whole
    surface is filled with non-zeros.

    Due to a Mayavi (or VTK) alpha rendering bug, ``vector_alpha`` is
    clamped to be strictly < 1.
    """
    _validate_type(transparent, bool, 'transparent')
    _validate_type(vector_alpha, ('numeric', None), 'vector_alpha')
    _validate_type(scale_factor, ('numeric', None), 'scale_factor')
    # those parameters are not supported yet, only None is allowed
    _check_option('thresh', thresh, [None])
    _check_option('remove_existing', remove_existing, [None])
    _validate_type(time_label_size, (None, 'numeric'), 'time_label_size')
    if time_label_size is not None:
        time_label_size = float(time_label_size)
        if time_label_size < 0:
            raise ValueError('time_label_size must be positive, got '
                             f'{time_label_size}')
    hemi = self._check_hemi(hemi, extras=['vol'])
    stc, array, vertices = self._check_stc(hemi, array, vertices)
    array = np.asarray(array)
    vector_alpha = alpha if vector_alpha is None else vector_alpha
    self._data['vector_alpha'] = vector_alpha
    self._data['scale_factor'] = scale_factor
    # Create time array and add label if > 1D
    if array.ndim <= 1:
        time_idx = 0
    else:
        # check time array
        if time is None:
            time = np.arange(array.shape[-1])
        else:
            time = np.asarray(time)
            if time.shape != (array.shape[-1],):
                raise ValueError('time has shape %s, but need shape %s '
                                 '(array.shape[-1])' %
                                 (time.shape, (array.shape[-1],)))
        self._data["time"] = time
        # times must be consistent across successive add_data calls
        if self._n_times is None:
            self._times = time
        elif len(time) != self._n_times:
            raise ValueError("New n_times is different from previous "
                             "n_times")
        elif not np.array_equal(time, self._times):
            raise ValueError("Not all time values are consistent with "
                             "previously set times.")
        # initial time
        if initial_time is None:
            time_idx = 0
        else:
            time_idx = self._to_time_index(initial_time)
    # time label
    time_label, _ = _handle_time(time_label, 's', time)
    y_txt = 0.05 + 0.1 * bool(colorbar)
    if array.ndim == 3:
        if array.shape[1] != 3:
            raise ValueError('If array has 3 dimensions, array.shape[1] '
                             'must equal 3, got %s' % (array.shape[1],))
    fmin, fmid, fmax = _update_limits(
        fmin, fmid, fmax, center, array
    )
    if colormap == 'auto':
        colormap = 'mne' if center is not None else 'hot'
    if smoothing_steps is None:
        smoothing_steps = 7
    elif smoothing_steps == 'nearest':
        smoothing_steps = 0
    elif isinstance(smoothing_steps, int):
        if smoothing_steps < 0:
            raise ValueError('Expected value of `smoothing_steps` is'
                             ' positive but {} was given.'.format(
                                 smoothing_steps))
    else:
        raise TypeError('Expected type of `smoothing_steps` is int or'
                        ' NoneType but {} was given.'.format(
                            type(smoothing_steps)))
    self._data['stc'] = stc
    self._data['src'] = src
    self._data['smoothing_steps'] = smoothing_steps
    self._data['clim'] = clim
    self._data['time'] = time
    self._data['initial_time'] = initial_time
    self._data['time_label'] = time_label
    self._data['initial_time_idx'] = time_idx
    self._data['time_idx'] = time_idx
    self._data['transparent'] = transparent
    # data specific for a hemi
    self._data[hemi] = dict()
    self._data[hemi]['glyph_dataset'] = None
    self._data[hemi]['glyph_mapper'] = None
    self._data[hemi]['glyph_actor'] = None
    self._data[hemi]['array'] = array
    self._data[hemi]['vertices'] = vertices
    self._data['alpha'] = alpha
    self._data['colormap'] = colormap
    self._data['center'] = center
    self._data['fmin'] = fmin
    self._data['fmid'] = fmid
    self._data['fmax'] = fmax
    self.update_lut()
    # 1) add the surfaces first
    actor = None
    for ri, ci, _ in self._iter_views(hemi):
        self._renderer.subplot(ri, ci)
        if hemi in ('lh', 'rh'):
            actor = self._layered_meshes[hemi]._actor
        else:
            src_vol = src[2:] if src.kind == 'mixed' else src
            actor, _ = self._add_volume_data(hemi, src_vol, volume_options)
    assert actor is not None  # should have added one
    # 2) update time and smoothing properties
    # set_data_smoothing calls "set_time_point" for us, which will set
    # _current_time
    self.set_time_interpolation(self.time_interpolation)
    self.set_data_smoothing(self._data['smoothing_steps'])
    # 3) add the other actors
    if colorbar is True:
        # bottom left by default
        colorbar = (self._subplot_shape[0] - 1, 0)
    for ri, ci, v in self._iter_views(hemi):
        self._renderer.subplot(ri, ci)
        # Add the time label to the bottommost view
        do = (ri, ci) == colorbar
        if not self._time_label_added and time_label is not None and do:
            time_actor = self._renderer.text2d(
                x_window=0.95, y_window=y_txt,
                color=self._fg_color,
                size=time_label_size,
                text=time_label(self._current_time),
                justification='right'
            )
            self._data['time_actor'] = time_actor
            self._time_label_added = True
        if colorbar and self._scalar_bar is None and do:
            kwargs = dict(source=actor, n_labels=8, color=self._fg_color,
                          bgcolor=self._brain_color[:3])
            kwargs.update(colorbar_kwargs or {})
            self._scalar_bar = self._renderer.scalarbar(**kwargs)
        self._renderer.set_camera(**views_dicts[hemi][v])
    # 4) update the scalar bar and opacity
    self.update_lut(alpha=alpha)
def _iter_views(self, hemi):
# which rows and columns each type of visual needs to be added to
if self._hemi == 'split':
hemi_dict = dict(lh=[0], rh=[1], vol=[0, 1])
else:
hemi_dict = dict(lh=[0], rh=[0], vol=[0])
for vi, view in enumerate(self._views):
if self._hemi == 'split':
view_dict = dict(lh=[vi], rh=[vi], vol=[vi, vi])
else:
view_dict = dict(lh=[vi], rh=[vi], vol=[vi])
if self._view_layout == 'vertical':
rows = view_dict # views are rows
cols = hemi_dict # hemis are columns
else:
rows = hemi_dict # hemis are rows
cols = view_dict # views are columns
for ri, ci in zip(rows[hemi], cols[hemi]):
yield ri, ci, view
def remove_labels(self):
    """Remove all the ROI labels from the image."""
    for hemi in self._hemis:
        mesh = self._layered_meshes[hemi]
        # pop from the front so overlays are removed in insertion order
        while self._labels[hemi]:
            label = self._labels[hemi].pop(0)
            mesh.remove_overlay(label.name)
    self._renderer._update()
def remove_annotations(self):
    """Remove all annotations from the image."""
    for hemi in self._hemis:
        annots = self._annots[hemi]
        # the whole annotation list is passed as the overlay handle
        self._layered_meshes[hemi].remove_overlay(annots)
        annots.clear()
    self._renderer._update()
def _add_volume_data(self, hemi, src, volume_options):
    """Create and add the volume rendering actors for a volume source space.

    Returns a tuple ``(actor_pos, actor_neg)``; ``actor_neg`` is None
    when there is no negative volume (e.g. strictly positive data).
    """
    _validate_type(src, SourceSpaces, 'src')
    _check_option('src.kind', src.kind, ('volume',))
    _validate_type(
        volume_options, (dict, 'numeric', None), 'volume_options')
    assert hemi == 'vol'
    if not isinstance(volume_options, dict):
        # a bare number is interpreted as the resolution
        volume_options = dict(
            resolution=float(volume_options) if volume_options is not None
            else None)
    volume_options = _handle_default('volume_options', volume_options)
    allowed_types = (
        ['resolution', (None, 'numeric')],
        ['blending', (str,)],
        ['alpha', ('numeric', None)],
        ['surface_alpha', (None, 'numeric')],
        ['silhouette_alpha', (None, 'numeric')],
        ['silhouette_linewidth', ('numeric',)],
    )
    for key, types in allowed_types:
        _validate_type(volume_options[key], types,
                       f'volume_options[{repr(key)}]')
    extra_keys = set(volume_options) - set(a[0] for a in allowed_types)
    if len(extra_keys):
        raise ValueError(
            f'volume_options got unknown keys {sorted(extra_keys)}')
    blending = _check_option('volume_options["blending"]',
                             volume_options['blending'],
                             ('composite', 'mip'))
    alpha = volume_options['alpha']
    if alpha is None:
        # vector data defaults to a more transparent rendering
        alpha = 0.4 if self._data[hemi]['array'].ndim == 3 else 1.
    alpha = np.clip(float(alpha), 0., 1.)
    resolution = volume_options['resolution']
    surface_alpha = volume_options['surface_alpha']
    if surface_alpha is None:
        surface_alpha = min(alpha / 2., 0.1)
    silhouette_alpha = volume_options['silhouette_alpha']
    if silhouette_alpha is None:
        silhouette_alpha = surface_alpha / 4.
    silhouette_linewidth = volume_options['silhouette_linewidth']
    del volume_options
    volume_pos = self._data[hemi].get('grid_volume_pos')
    volume_neg = self._data[hemi].get('grid_volume_neg')
    center = self._data['center']
    if volume_pos is None:
        # first call: build the grid geometry and the volume actors
        xyz = np.meshgrid(
            *[np.arange(s) for s in src[0]['shape']], indexing='ij')
        dimensions = np.array(src[0]['shape'], int)
        mult = 1000 if self._units == 'mm' else 1
        src_mri_t = src[0]['src_mri_t']['trans'].copy()
        src_mri_t[:3] *= mult
        if resolution is not None:
            resolution = resolution * mult / 1000.  # to mm
        del src, mult
        coords = np.array([c.ravel(order='F') for c in xyz]).T
        coords = apply_trans(src_mri_t, coords)
        self.geo[hemi] = Bunch(coords=coords)
        vertices = self._data[hemi]['vertices']
        assert self._data[hemi]['array'].shape[0] == len(vertices)
        # MNE constructs the source space on a uniform grid in MRI space,
        # but mne coreg can change it to be non-uniform, so we need to
        # use all three elements here
        assert np.allclose(
            src_mri_t[:3, :3], np.diag(np.diag(src_mri_t)[:3]))
        spacing = np.diag(src_mri_t)[:3]
        origin = src_mri_t[:3, 3] - spacing / 2.
        scalars = np.zeros(np.prod(dimensions))
        scalars[vertices] = 1.  # for the outer mesh
        grid, grid_mesh, volume_pos, volume_neg = \
            self._renderer._volume(dimensions, origin, spacing, scalars,
                                   surface_alpha, resolution, blending,
                                   center)
        self._data[hemi]['alpha'] = alpha  # incorrectly set earlier
        self._data[hemi]['grid'] = grid
        self._data[hemi]['grid_mesh'] = grid_mesh
        self._data[hemi]['grid_coords'] = coords
        self._data[hemi]['grid_src_mri_t'] = src_mri_t
        self._data[hemi]['grid_shape'] = dimensions
        self._data[hemi]['grid_volume_pos'] = volume_pos
        self._data[hemi]['grid_volume_neg'] = volume_neg
    actor_pos, _ = self._renderer.plotter.add_actor(
        volume_pos, reset_camera=False, name=None, culling=False)
    if volume_neg is not None:
        actor_neg, _ = self._renderer.plotter.add_actor(
            volume_neg, reset_camera=False, name=None, culling=False)
    else:
        actor_neg = None
    grid_mesh = self._data[hemi]['grid_mesh']
    if grid_mesh is not None:
        # semi-transparent outer surface of the volume (not pickable)
        _, prop = self._renderer.plotter.add_actor(
            grid_mesh, reset_camera=False, name=None, culling=False,
            pickable=False)
        prop.SetColor(*self._brain_color[:3])
        prop.SetOpacity(surface_alpha)
        if silhouette_alpha > 0 and silhouette_linewidth > 0:
            for ri, ci, v in self._iter_views('vol'):
                self._renderer.subplot(ri, ci)
                self._renderer._silhouette(
                    mesh=grid_mesh.GetInput(),
                    color=self._brain_color[:3],
                    line_width=silhouette_linewidth,
                    alpha=silhouette_alpha,
                )
    return actor_pos, actor_neg
    def add_label(self, label, color=None, alpha=1, scalar_thresh=None,
                  borders=False, hemi=None, subdir=None,
                  reset_camera=True):
        """Add an ROI label to the image.
        Parameters
        ----------
        label : str | instance of Label
            Label filepath or name. Can also be an instance of
            an object with attributes "hemi", "vertices", "name", and
            optionally "color" and "values" (if scalar_thresh is not None).
        color : matplotlib-style color | None
            Anything matplotlib accepts: string, RGB, hex, etc. (default
            "crimson").
        alpha : float in [0, 1]
            Alpha level to control opacity.
        scalar_thresh : None | float
            Threshold the label ids using this value in the label
            file's scalar field (i.e. label only vertices with
            scalar >= thresh).
        borders : bool | int
            Show only label borders. If int, specify the number of steps
            (away from the true border) along the cortical mesh to include
            as part of the border definition.
        hemi : str | None
            If None, it is assumed to belong to the hemipshere being
            shown.
        subdir : None | str
            If a label is specified as name, subdir can be used to indicate
            that the label file is in a sub-directory of the subject's
            label directory rather than in the label directory itself (e.g.
            for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label``
            ``brain.add_label('cuneus', subdir='aparc')``).
        reset_camera : bool
            If True, reset the camera view after adding the label. Defaults
            to True.
        Notes
        -----
        To remove previously added labels, run Brain.remove_labels().
        """
        from matplotlib.colors import colorConverter
        from ...label import read_label
        # `label` may be a filename/name (str) or a Label-like object that
        # already carries its own hemi/vertices/values.
        if isinstance(label, str):
            if color is None:
                color = "crimson"
            if os.path.isfile(label):
                # direct path to a .label file
                filepath = label
                label = read_label(filepath)
                hemi = label.hemi
                label_name = os.path.basename(filepath).split('.')[1]
            else:
                # a label name: build "<hemi>.<name>.label" under the
                # subject's label directory (optionally inside `subdir`)
                hemi = self._check_hemi(hemi)
                label_name = label
                label_fname = ".".join([hemi, label_name, 'label'])
                if subdir is None:
                    filepath = op.join(self._subjects_dir, self._subject_id,
                                       'label', label_fname)
                else:
                    filepath = op.join(self._subjects_dir, self._subject_id,
                                       'label', subdir, label_fname)
                if not os.path.exists(filepath):
                    raise ValueError('Label file %s does not exist'
                                     % filepath)
                label = read_label(filepath)
            ids = label.vertices
            scalars = label.values
        else:
            # try to extract parameters from label instance
            try:
                hemi = label.hemi
                ids = label.vertices
                if label.name is None:
                    label.name = 'unnamed' + str(self._unnamed_label_id)
                    self._unnamed_label_id += 1
                label_name = str(label.name)
                if color is None:
                    if hasattr(label, 'color') and label.color is not None:
                        color = label.color
                    else:
                        color = "crimson"
                if scalar_thresh is not None:
                    scalars = label.values
            except Exception:
                raise ValueError('Label was not a filename (str), and could '
                                 'not be understood as a class. The class '
                                 'must have attributes "hemi", "vertices", '
                                 '"name", and (if scalar_thresh is not None)'
                                 '"values"')
        hemi = self._check_hemi(hemi)
        if scalar_thresh is not None:
            # keep only suprathreshold vertices
            ids = ids[scalars >= scalar_thresh]
        # binary overlay: 1 on label vertices, 0 elsewhere
        scalars = np.zeros(self.geo[hemi].coords.shape[0])
        scalars[ids] = 1
        if self.time_viewer and self.show_traces \
                and self.traces_mode == 'label':
            # label-traces mode: also plot this label's time course
            stc = self._data["stc"]
            src = self._data["src"]
            tc = stc.extract_label_time_course(label, src=src,
                                               mode=self.label_extract_mode)
            tc = tc[0] if tc.ndim == 2 else tc[0, 0, :]
            color = next(self.color_cycle)
            line = self.mpl_canvas.plot(
                self._data['time'], tc, label=label_name,
                color=color)
        else:
            line = None
        orig_color = color
        color = colorConverter.to_rgba(color, alpha)
        # two-entry color table: transparent background + label color
        cmap = np.array([(0, 0, 0, 0,), color])
        ctable = np.round(cmap * 255).astype(np.uint8)
        for ri, ci, v in self._iter_views(hemi):
            self._renderer.subplot(ri, ci)
            if borders:
                # restrict to border vertices: edges whose two endpoints
                # carry different scalar values are on the label boundary
                n_vertices = scalars.size
                edges = mesh_edges(self.geo[hemi].faces)
                edges = edges.tocoo()
                border_edges = scalars[edges.row] != scalars[edges.col]
                show = np.zeros(n_vertices, dtype=np.int64)
                keep_idx = np.unique(edges.row[border_edges])
                # NOTE(review): bool is a subclass of int, so borders=True
                # also runs one dilation step below — confirm intended
                if isinstance(borders, int):
                    for _ in range(borders):
                        keep_idx = np.in1d(
                            self.geo[hemi].faces.ravel(), keep_idx)
                        keep_idx.shape = self.geo[hemi].faces.shape
                        keep_idx = self.geo[hemi].faces[np.any(
                            keep_idx, axis=1)]
                        keep_idx = np.unique(keep_idx)
                show[keep_idx] = 1
                scalars *= show
            mesh = self._layered_meshes[hemi]
            mesh.add_overlay(
                scalars=scalars,
                colormap=ctable,
                rng=[np.min(scalars), np.max(scalars)],
                opacity=alpha,
                name=label_name,
            )
            if reset_camera:
                self._renderer.set_camera(**views_dicts[hemi][v])
            if self.time_viewer and self.show_traces \
                    and self.traces_mode == 'label':
                label._color = orig_color
                label._line = line
        self._labels[hemi].append(label)
        self._renderer._update()
def add_foci(self, coords, coords_as_verts=False, map_surface=None,
scale_factor=1, color="white", alpha=1, name=None,
hemi=None, resolution=50):
"""Add spherical foci, possibly mapping to displayed surf.
The foci spheres can be displayed at the coordinates given, or
mapped through a surface geometry. In other words, coordinates
from a volume-based analysis in MNI space can be displayed on an
inflated average surface by finding the closest vertex on the
white surface and mapping to that vertex on the inflated mesh.
Parameters
----------
coords : ndarray, shape (n_coords, 3)
Coordinates in stereotaxic space (default) or array of
vertex ids (with ``coord_as_verts=True``).
coords_as_verts : bool
Whether the coords parameter should be interpreted as vertex ids.
map_surface : None
Surface to map coordinates through, or None to use raw coords.
scale_factor : float
Controls the size of the foci spheres (relative to 1cm).
color : matplotlib color code
HTML name, RBG tuple, or hex code.
alpha : float in [0, 1]
Opacity of focus gylphs.
name : str
Internal name to use.
hemi : str | None
If None, it is assumed to belong to the hemipshere being
shown. If two hemispheres are being shown, an error will
be thrown.
resolution : int
The resolution of the spheres.
"""
from matplotlib.colors import colorConverter
hemi = self._check_hemi(hemi, extras=['vol'])
# those parameters are not supported yet, only None is allowed
_check_option('map_surface', map_surface, [None])
# Figure out how to interpret the first parameter
if coords_as_verts:
coords = self.geo[hemi].coords[coords]
# Convert the color code
if not isinstance(color, tuple):
color = colorConverter.to_rgb(color)
if self._units == 'm':
scale_factor = scale_factor / 1000.
for ri, ci, v in self._iter_views(hemi):
self._renderer.subplot(ri, ci)
self._renderer.sphere(center=coords, color=color,
scale=(10. * scale_factor),
opacity=alpha, resolution=resolution)
self._renderer.set_camera(**views_dicts[hemi][v])
    def add_text(self, x, y, text, name=None, color=None, opacity=1.0,
                 row=-1, col=-1, font_size=None, justification=None):
        """Add a text to the visualization.
        Parameters
        ----------
        x : float
            X coordinate.
        y : float
            Y coordinate.
        text : str
            Text to add.
        name : str
            Name of the text (text label can be updated using update_text()).
        color : tuple
            Color of the text. Default is the foreground color set during
            initialization (default is black or white depending on the
            background color).
        opacity : float
            Opacity of the text (default 1.0).
        row : int
            Row index of which brain to use.
        col : int
            Column index of which brain to use.
        font_size : float | None
            The font size to use.
        justification : str | None
            The text justification.
        """
        # XXX: support `name` should be added when update_text/remove_text
        # are implemented
        # _check_option('name', name, [None])
        # NOTE(review): `opacity`, `row` and `col` are accepted but not
        # forwarded to the renderer here — confirm whether intentional.
        self._renderer.text2d(x_window=x, y_window=y, text=text, color=color,
                              size=font_size, justification=justification)
    def _configure_label_time_course(self):
        """Switch traces to label mode: annotation overlay + label lookup."""
        from ...label import read_labels_from_annot
        if not self.show_traces:
            return
        if self.mpl_canvas is None:
            self._configure_mplcanvas()
        else:
            self.clear_glyphs()
        self.traces_mode = 'label'
        # show the parcellation so labels can be picked on the surface
        self.add_annotation(self.annot, color="w", alpha=0.75)
        # now plot the time line
        self.plot_time_line()
        self.mpl_canvas.update_plot()
        for hemi in self._hemis:
            labels = read_labels_from_annot(
                subject=self._subject_id,
                parc=self.annot,
                hemi=hemi,
                subjects_dir=self._subjects_dir
            )
            # map every vertex to the index of its label (-1 = unlabeled)
            self._vertex_to_label_id[hemi] = np.full(
                self.geo[hemi].coords.shape[0], -1)
            self._annotation_labels[hemi] = labels
            for idx, label in enumerate(labels):
                self._vertex_to_label_id[hemi][label.vertices] = idx
def add_annotation(self, annot, borders=True, alpha=1, hemi=None,
remove_existing=True, color=None, **kwargs):
"""Add an annotation file.
Parameters
----------
annot : str | tuple
Either path to annotation file or annotation name. Alternatively,
the annotation can be specified as a ``(labels, ctab)`` tuple per
hemisphere, i.e. ``annot=(labels, ctab)`` for a single hemisphere
or ``annot=((lh_labels, lh_ctab), (rh_labels, rh_ctab))`` for both
hemispheres. ``labels`` and ``ctab`` should be arrays as returned
by :func:`nibabel.freesurfer.io.read_annot`.
borders : bool | int
Show only label borders. If int, specify the number of steps
(away from the true border) along the cortical mesh to include
as part of the border definition.
alpha : float in [0, 1]
Alpha level to control opacity.
hemi : str | None
If None, it is assumed to belong to the hemipshere being
shown. If two hemispheres are being shown, data must exist
for both hemispheres.
remove_existing : bool
If True (default), remove old annotations.
color : matplotlib-style color code
If used, show all annotations in the same (specified) color.
Probably useful only when showing annotation borders.
**kwargs : dict
These are passed to the underlying
``mayavi.mlab.pipeline.surface`` call.
"""
from ...label import _read_annot
hemis = self._check_hemis(hemi)
# Figure out where the data is coming from
if isinstance(annot, str):
if os.path.isfile(annot):
filepath = annot
path = os.path.split(filepath)[0]
file_hemi, annot = os.path.basename(filepath).split('.')[:2]
if len(hemis) > 1:
if annot[:2] == 'lh.':
filepaths = [filepath, op.join(path, 'rh' + annot[2:])]
elif annot[:2] == 'rh.':
filepaths = [op.join(path, 'lh' + annot[2:], filepath)]
else:
raise RuntimeError('To add both hemispheres '
'simultaneously, filename must '
'begin with "lh." or "rh."')
else:
filepaths = [filepath]
else:
filepaths = []
for hemi in hemis:
filepath = op.join(self._subjects_dir,
self._subject_id,
'label',
".".join([hemi, annot, 'annot']))
if not os.path.exists(filepath):
raise ValueError('Annotation file %s does not exist'
% filepath)
filepaths += [filepath]
annots = []
for hemi, filepath in zip(hemis, filepaths):
# Read in the data
labels, cmap, _ = _read_annot(filepath)
annots.append((labels, cmap))
else:
annots = [annot] if len(hemis) == 1 else annot
annot = 'annotation'
for hemi, (labels, cmap) in zip(hemis, annots):
# Maybe zero-out the non-border vertices
self._to_borders(labels, hemi, borders)
# Handle null labels properly
cmap[:, 3] = 255
bgcolor = np.round(np.array(self._brain_color) * 255).astype(int)
bgcolor[-1] = 0
cmap[cmap[:, 4] < 0, 4] += 2 ** 24 # wrap to positive
cmap[cmap[:, 4] <= 0, :4] = bgcolor
if np.any(labels == 0) and not np.any(cmap[:, -1] <= 0):
cmap = np.vstack((cmap, np.concatenate([bgcolor, [0]])))
# Set label ids sensibly
order = np.argsort(cmap[:, -1])
cmap = cmap[order]
ids = np.searchsorted(cmap[:, -1], labels)
cmap = cmap[:, :4]
# Set the alpha level
alpha_vec = cmap[:, 3]
alpha_vec[alpha_vec > 0] = alpha * 255
# Override the cmap when a single color is used
if color is not None:
from matplotlib.colors import colorConverter
rgb = np.round(np.multiply(colorConverter.to_rgb(color), 255))
cmap[:, :3] = rgb.astype(cmap.dtype)
ctable = cmap.astype(np.float64)
for ri, ci, _ in self._iter_views(hemi):
self._renderer.subplot(ri, ci)
mesh = self._layered_meshes[hemi]
mesh.add_overlay(
scalars=ids,
colormap=ctable,
rng=[np.min(ids), np.max(ids)],
opacity=alpha,
name=annot,
)
self._annots[hemi].append(annot)
if not self.time_viewer or self.traces_mode == 'vertex':
self._renderer._set_colormap_range(
mesh._actor, cmap.astype(np.uint8), None)
self._renderer._update()
    def close(self):
        """Close all figures and cleanup data structure."""
        # record the closed state before tearing down the renderer
        self._closed = True
        self._renderer.close()
    def show(self):
        """Display the window."""
        # delegate to the renderer backend
        self._renderer.show()
def show_view(self, view=None, roll=None, distance=None, row=0, col=0,
hemi=None, align=True):
"""Orient camera to display view.
Parameters
----------
view : str | dict
String view, or a dict with azimuth and elevation.
roll : float | None
The roll.
distance : float | None
The distance.
row : int
The row to set.
col : int
The column to set.
hemi : str
Which hemi to use for string lookup (when in "both" mode).
align : bool
If True, consider view arguments relative to canonical MRI
directions (closest to MNI for the subject) rather than native MRI
space. This helps when MRIs are not in standard orientation (e.g.,
have large rotations).
"""
hemi = self._hemi if hemi is None else hemi
if hemi == 'split':
if (self._view_layout == 'vertical' and col == 1 or
self._view_layout == 'horizontal' and row == 1):
hemi = 'rh'
else:
hemi = 'lh'
if isinstance(view, str):
view = views_dicts[hemi].get(view)
view = view.copy()
if roll is not None:
view.update(roll=roll)
if distance is not None:
view.update(distance=distance)
self._renderer.subplot(row, col)
xfm = self._rigid if align else None
self._renderer.set_camera(**view, reset_camera=False, rigid=xfm)
self._renderer._update()
    def reset_view(self):
        """Reset the camera."""
        # restore the default camera for every view of every hemisphere
        for h in self._hemis:
            for ri, ci, v in self._iter_views(h):
                self._renderer.subplot(ri, ci)
                self._renderer.set_camera(**views_dicts[h][v],
                                          reset_camera=False)
def save_image(self, filename=None, mode='rgb'):
"""Save view from all panels to disk.
Parameters
----------
filename : str
Path to new image file.
mode : str
Either 'rgb' or 'rgba' for values to return.
"""
if filename is None:
filename = _generate_default_filename(".png")
_save_ndarray_img(
filename, self.screenshot(mode=mode, time_viewer=True))
    @fill_doc
    def screenshot(self, mode='rgb', time_viewer=False):
        """Generate a screenshot of current view.
        Parameters
        ----------
        mode : str
            Either 'rgb' or 'rgba' for values to return.
        %(brain_screenshot_time_viewer)s
        Returns
        -------
        screenshot : array
            Image pixel values.
        """
        img = self._renderer.screenshot(mode)
        logger.debug(f'Got screenshot of size {img.shape}')
        # optionally append the matplotlib traces canvas below the 3D view
        if time_viewer and self.time_viewer and \
                self.show_traces and \
                not self.separate_canvas:
            from matplotlib.image import imread
            canvas = self.mpl_canvas.fig.canvas
            canvas.draw_idle()
            fig = self.mpl_canvas.fig
            with BytesIO() as output:
                # Need to pass dpi here so it uses the physical (HiDPI) DPI
                # rather than logical DPI when saving in most cases.
                # But when matplotlib uses HiDPI and VTK doesn't
                # (e.g., macOS w/Qt 5.14+ and VTK9) then things won't work,
                # so let's just calculate the DPI we need to get
                # the correct size output based on the widths being equal
                size_in = fig.get_size_inches()
                dpi = fig.get_dpi()
                want_size = tuple(x * dpi for x in size_in)
                n_pix = want_size[0] * want_size[1]
                logger.debug(
                    f'Saving figure of size {size_in} @ {dpi} DPI '
                    f'({want_size} = {n_pix} pixels)')
                # Sometimes there can be off-by-one errors here (e.g.,
                # if in mpl int() rather than int(round()) is used to
                # compute the number of pixels) so rather than use "raw"
                # format and try to reshape ourselves, just write to PNG
                # and read it, which has the dimensions encoded for us.
                fig.savefig(output, dpi=dpi, format='png',
                            facecolor=self._bg_color, edgecolor='none')
                output.seek(0)
                trace_img = imread(output, format='png')[:, :, :3]
                trace_img = np.clip(
                    np.round(trace_img * 255), 0, 255).astype(np.uint8)
            bgcolor = np.array(self._brain_color[:3]) / 255
            img = concatenate_images([img, trace_img], bgcolor=bgcolor)
        return img
@contextlib.contextmanager
def _no_lut_update(self, why):
orig = self._lut_locked
self._lut_locked = why
try:
yield
finally:
self._lut_locked = orig
    @fill_doc
    def update_lut(self, fmin=None, fmid=None, fmax=None, alpha=None):
        """Update color map.
        Parameters
        ----------
        %(fmin_fmid_fmax)s
        alpha : float | None
            Alpha to use in the update.
        """
        args = f'{fmin}, {fmid}, {fmax}, {alpha}'
        # while locked (see _no_lut_update) requests are dropped silently
        if self._lut_locked is not None:
            logger.debug(f'LUT update postponed with {args}')
            return
        logger.debug(f'Updating LUT with {args}')
        center = self._data['center']
        colormap = self._data['colormap']
        transparent = self._data['transparent']
        lims = {key: self._data[key] for key in ('fmin', 'fmid', 'fmax')}
        _update_monotonic(lims, fmin=fmin, fmid=fmid, fmax=fmax)
        assert all(val is not None for val in lims.values())
        self._data.update(lims)
        # rebuild the uint8 RGBA color table for the new limits
        self._data['ctable'] = np.round(
            calculate_lut(colormap, alpha=1., center=center,
                          transparent=transparent, **lims) *
            255).astype(np.uint8)
        # update our values
        rng = self._cmap_range
        ctable = self._data['ctable']
        for hemi in ['lh', 'rh', 'vol']:
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                # surface overlay
                if hemi in self._layered_meshes:
                    mesh = self._layered_meshes[hemi]
                    mesh.update_overlay(name='data',
                                        colormap=self._data['ctable'],
                                        opacity=alpha,
                                        rng=rng)
                    self._renderer._set_colormap_range(
                        mesh._actor, ctable, self._scalar_bar, rng,
                        self._brain_color)
                # volume rendering (positive/negative lobes)
                grid_volume_pos = hemi_data.get('grid_volume_pos')
                grid_volume_neg = hemi_data.get('grid_volume_neg')
                for grid_volume in (grid_volume_pos, grid_volume_neg):
                    if grid_volume is not None:
                        self._renderer._set_volume_range(
                            grid_volume, ctable, hemi_data['alpha'],
                            self._scalar_bar, rng)
                # vector glyphs
                glyph_actor = hemi_data.get('glyph_actor')
                if glyph_actor is not None:
                    for glyph_actor_ in glyph_actor:
                        self._renderer._set_colormap_range(
                            glyph_actor_, ctable, self._scalar_bar, rng)
        if self.time_viewer:
            # sync the GUI widgets without re-triggering this update
            with self._no_lut_update(f'update_lut {args}'):
                for key in ('fmin', 'fmid', 'fmax'):
                    self.callbacks[key](lims[key])
        self._renderer._update()
    def set_data_smoothing(self, n_steps):
        """Set the number of smoothing steps.
        Parameters
        ----------
        n_steps : int
            Number of smoothing steps.
        """
        from scipy import sparse
        from ...morph import _hemi_morph
        for hemi in ['lh', 'rh']:
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                # full-resolution data needs no spatial smoothing
                if len(hemi_data['array']) >= self.geo[hemi].x.shape[0]:
                    continue
                vertices = hemi_data['vertices']
                if vertices is None:
                    raise ValueError(
                        'len(data) < nvtx (%s < %s): the vertices '
                        'parameter must not be None'
                        % (len(hemi_data), self.geo[hemi].x.shape[0]))
                # n_steps == 0 means nearest-neighbor assignment
                morph_n_steps = 'nearest' if n_steps == 0 else n_steps
                maps = sparse.eye(len(self.geo[hemi].coords), format='csr')
                with use_log_level(False):
                    # sparse matrix spreading data from the source vertices
                    # to all mesh vertices
                    smooth_mat = _hemi_morph(
                        self.geo[hemi].orig_faces,
                        np.arange(len(self.geo[hemi].coords)),
                        vertices, morph_n_steps, maps, warn=False)
                self._data[hemi]['smooth_mat'] = smooth_mat
        # re-render the current time point with the new smoothing
        self.set_time_point(self._data['time_idx'])
        self._data['smoothing_steps'] = n_steps
@property
def _n_times(self):
return len(self._times) if self._times is not None else None
    @property
    def time_interpolation(self):
        """The interpolation mode."""
        # read-only; change via set_time_interpolation()
        return self._time_interpolation
    @fill_doc
    def set_time_interpolation(self, interpolation):
        """Set the interpolation mode.
        Parameters
        ----------
        %(brain_time_interpolation)s
        """
        self._time_interpolation = _check_option(
            'interpolation',
            interpolation,
            ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic')
        )
        # rebuild the per-hemisphere interpolators over the time axis
        self._time_interp_funcs = dict()
        self._time_interp_inv = None
        if self._times is not None:
            idx = np.arange(self._n_times)
            for hemi in ['lh', 'rh', 'vol']:
                hemi_data = self._data.get(hemi)
                if hemi_data is not None:
                    array = hemi_data['array']
                    self._time_interp_funcs[hemi] = _safe_interp1d(
                        idx, array, self._time_interpolation, axis=-1,
                        assume_sorted=True)
            # maps (possibly fractional) time indices back to seconds
            self._time_interp_inv = _safe_interp1d(idx, self._times)
    def set_time_point(self, time_idx):
        """Set the time point shown (can be a float to interpolate).
        Parameters
        ----------
        time_idx : int | float
            The time index to use. Can be a float to use interpolation
            between indices.
        """
        self._current_act_data = dict()
        time_actor = self._data.get('time_actor', None)
        time_label = self._data.get('time_label', None)
        for hemi in ['lh', 'rh', 'vol']:
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                array = hemi_data['array']
                # interpolate in time
                vectors = None
                if array.ndim == 1:
                    # static data: no time axis
                    act_data = array
                    self._current_time = 0
                else:
                    act_data = self._time_interp_funcs[hemi](time_idx)
                    self._current_time = self._time_interp_inv(time_idx)
                    if array.ndim == 3:
                        # vector data: show the norm, keep the vectors
                        # for the quiver glyphs
                        vectors = act_data
                        act_data = np.linalg.norm(act_data, axis=1)
                    # NOTE(review): _current_time was already set above —
                    # this repeat is redundant but harmless
                    self._current_time = self._time_interp_inv(time_idx)
                self._current_act_data[hemi] = act_data
                if time_actor is not None and time_label is not None:
                    time_actor.SetInput(time_label(self._current_time))
                # update the volume interpolation
                grid = hemi_data.get('grid')
                if grid is not None:
                    vertices = self._data['vol']['vertices']
                    values = self._current_act_data['vol']
                    rng = self._cmap_range
                    fill = 0 if self._data['center'] is not None else rng[0]
                    grid.cell_arrays['values'].fill(fill)
                    # XXX for sided data, we probably actually need two
                    # volumes as composite/MIP needs to look at two
                    # extremes... for now just use abs. Eventually we can add
                    # two volumes if we want.
                    grid.cell_arrays['values'][vertices] = values
                # interpolate in space
                smooth_mat = hemi_data.get('smooth_mat')
                if smooth_mat is not None:
                    act_data = smooth_mat.dot(act_data)
                # update the mesh scalar values
                if hemi in self._layered_meshes:
                    mesh = self._layered_meshes[hemi]
                    if 'data' in mesh._overlays:
                        mesh.update_overlay(name='data', scalars=act_data)
                    else:
                        mesh.add_overlay(
                            scalars=act_data,
                            colormap=self._data['ctable'],
                            rng=self._cmap_range,
                            opacity=None,
                            name='data',
                        )
                # update the glyphs
                if vectors is not None:
                    self._update_glyphs(hemi, vectors)
        self._data['time_idx'] = time_idx
        self._renderer._update()
def set_time(self, time):
"""Set the time to display (in seconds).
Parameters
----------
time : float
The time to show, in seconds.
"""
if self._times is None:
raise ValueError(
'Cannot set time when brain has no defined times.')
elif min(self._times) <= time <= max(self._times):
self.set_time_point(np.interp(float(time), self._times,
np.arange(self._n_times)))
else:
raise ValueError(
f'Requested time ({time} s) is outside the range of '
f'available times ({min(self._times)}-{max(self._times)} s).')
    def _update_glyphs(self, hemi, vectors):
        """Create or update the vector-field quiver glyphs for `hemi`."""
        hemi_data = self._data.get(hemi)
        assert hemi_data is not None
        vertices = hemi_data['vertices']
        vector_alpha = self._data['vector_alpha']
        scale_factor = self._data['scale_factor']
        vertices = slice(None) if vertices is None else vertices
        x, y, z = np.array(self.geo[hemi].coords)[vertices].T
        if hemi_data['glyph_actor'] is None:
            # first call: actors still need to be created and added
            add = True
            hemi_data['glyph_actor'] = list()
        else:
            add = False
        count = 0
        for ri, ci, _ in self._iter_views(hemi):
            self._renderer.subplot(ri, ci)
            if hemi_data['glyph_dataset'] is None:
                # build the quiver pipeline once
                glyph_mapper, glyph_dataset = self._renderer.quiver3d(
                    x, y, z,
                    vectors[:, 0], vectors[:, 1], vectors[:, 2],
                    color=None,
                    mode='2darrow',
                    scale_mode='vector',
                    scale=scale_factor,
                    opacity=vector_alpha,
                    name=str(hemi) + "_glyph"
                )
                hemi_data['glyph_dataset'] = glyph_dataset
                hemi_data['glyph_mapper'] = glyph_mapper
            else:
                # subsequent calls: update the vector data in place
                glyph_dataset = hemi_data['glyph_dataset']
                glyph_dataset.point_arrays['vec'] = vectors
                glyph_mapper = hemi_data['glyph_mapper']
            if add:
                glyph_actor = self._renderer._actor(glyph_mapper)
                prop = glyph_actor.GetProperty()
                prop.SetLineWidth(2.)
                prop.SetOpacity(vector_alpha)
                self._renderer.plotter.add_actor(glyph_actor)
                hemi_data['glyph_actor'].append(glyph_actor)
            else:
                glyph_actor = hemi_data['glyph_actor'][count]
            count += 1
            self._renderer._set_colormap_range(
                actor=glyph_actor,
                ctable=self._data['ctable'],
                scalar_bar=None,
                rng=self._cmap_range,
            )
@property
def _cmap_range(self):
dt_max = self._data['fmax']
if self._data['center'] is None:
dt_min = self._data['fmin']
else:
dt_min = -1 * dt_max
rng = [dt_min, dt_max]
return rng
def _update_fscale(self, fscale):
"""Scale the colorbar points."""
fmin = self._data['fmin'] * fscale
fmid = self._data['fmid'] * fscale
fmax = self._data['fmax'] * fscale
self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
    def _update_auto_scaling(self, restore=False):
        """Recompute colormap limits from the currently displayed data."""
        user_clim = self._data['clim']
        if user_clim is not None and 'lims' in user_clim:
            allow_pos_lims = False
        else:
            allow_pos_lims = True
        # restore=True re-applies the user's clim; otherwise auto-scale
        if user_clim is not None and restore:
            clim = user_clim
        else:
            clim = 'auto'
        colormap = self._data['colormap']
        transparent = self._data['transparent']
        mapdata = _process_clim(
            clim, colormap, transparent,
            np.concatenate(list(self._current_act_data.values())),
            allow_pos_lims)
        # diverging data gets a symmetric map centered at zero
        diverging = 'pos_lims' in mapdata['clim']
        colormap = mapdata['colormap']
        scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims']
        transparent = mapdata['transparent']
        del mapdata
        fmin, fmid, fmax = scale_pts
        center = 0. if diverging else None
        self._data['center'] = center
        self._data['colormap'] = colormap
        self._data['transparent'] = transparent
        self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
def _to_time_index(self, value):
"""Return the interpolated time index of the given time value."""
time = self._data['time']
value = np.interp(value, time, np.arange(len(time)))
return value
    @property
    def data(self):
        """Data used by time viewer and color bar widgets."""
        # read-only access to the internal data dict
        return self._data
    @property
    def labels(self):
        """The labels added so far, keyed by hemisphere."""
        return self._labels
    @property
    def views(self):
        """The displayed views."""
        return self._views
    @property
    def hemis(self):
        """The displayed hemispheres."""
        return self._hemis
def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None,
framerate=24, interpolation=None, codec=None,
bitrate=None, callback=None, time_viewer=False, **kwargs):
import imageio
with self._renderer._disabled_interaction():
images = self._make_movie_frames(
time_dilation, tmin, tmax, framerate, interpolation, callback,
time_viewer)
# find imageio FFMPEG parameters
if 'fps' not in kwargs:
kwargs['fps'] = framerate
if codec is not None:
kwargs['codec'] = codec
if bitrate is not None:
kwargs['bitrate'] = bitrate
imageio.mimwrite(filename, images, **kwargs)
    def _save_movie_tv(self, filename, time_dilation=4., tmin=None, tmax=None,
                       framerate=24, interpolation=None, codec=None,
                       bitrate=None, callback=None, time_viewer=False,
                       **kwargs):
        """Save a movie with time-viewer status-bar feedback and busy cursor.

        NOTE(review): only ``filename`` and ``**kwargs`` are forwarded to
        ``_save_movie``; ``tmin``/``tmax``/``framerate``/``interpolation``/
        ``codec``/``bitrate`` captured here are not passed on, and
        ``time_dilation`` is overridden by the playback speed — confirm
        this is intended.
        """
        def frame_callback(frame, n_frames):
            # progress reporting in the window's status bar
            if frame == n_frames:
                # On the ImageIO step
                self.status_msg.set_value(
                    "Saving with ImageIO: %s"
                    % filename
                )
                self.status_msg.show()
                self.status_progress.hide()
                self._renderer._status_bar_update()
            else:
                self.status_msg.set_value(
                    "Rendering images (frame %d / %d) ..."
                    % (frame + 1, n_frames)
                )
                self.status_msg.show()
                self.status_progress.show()
                self.status_progress.set_range([0, n_frames - 1])
                self.status_progress.set_value(frame)
                self.status_progress.update()
                self.status_msg.update()
                self._renderer._status_bar_update()
        # set cursor to busy
        default_cursor = self._renderer._window_get_cursor()
        self._renderer._window_set_cursor(
            self._renderer._window_new_cursor("WaitCursor"))
        try:
            self._save_movie(
                filename=filename,
                time_dilation=(1. / self.playback_speed),
                callback=frame_callback,
                **kwargs
            )
        except (Exception, KeyboardInterrupt):
            warn('Movie saving aborted:\n' + traceback.format_exc())
        finally:
            self._renderer._window_set_cursor(default_cursor)
@fill_doc
def save_movie(self, filename=None, time_dilation=4., tmin=None, tmax=None,
framerate=24, interpolation=None, codec=None,
bitrate=None, callback=None, time_viewer=False, **kwargs):
"""Save a movie (for data with a time axis).
The movie is created through the :mod:`imageio` module. The format is
determined by the extension, and additional options can be specified
through keyword arguments that depend on the format. For available
formats and corresponding parameters see the imageio documentation:
http://imageio.readthedocs.io/en/latest/formats.html#multiple-images
.. Warning::
This method assumes that time is specified in seconds when adding
data. If time is specified in milliseconds this will result in
movies 1000 times longer than expected.
Parameters
----------
filename : str
Path at which to save the movie. The extension determines the
format (e.g., ``'*.mov'``, ``'*.gif'``, ...; see the :mod:`imageio`
documentation for available formats).
time_dilation : float
Factor by which to stretch time (default 4). For example, an epoch
from -100 to 600 ms lasts 700 ms. With ``time_dilation=4`` this
would result in a 2.8 s long movie.
tmin : float
First time point to include (default: all data).
tmax : float
Last time point to include (default: all data).
framerate : float
Framerate of the movie (frames per second, default 24).
%(brain_time_interpolation)s
If None, it uses the current ``brain.interpolation``,
which defaults to ``'nearest'``. Defaults to None.
codec : str | None
The codec to use.
bitrate : float | None
The bitrate to use.
callback : callable | None
A function to call on each iteration. Useful for status message
updates. It will be passed keyword arguments ``frame`` and
``n_frames``.
%(brain_screenshot_time_viewer)s
**kwargs : dict
Specify additional options for :mod:`imageio`.
Returns
-------
dialog : object
The opened dialog is returned for testing purpose only.
"""
if filename is None:
filename = _generate_default_filename(".mp4")
func = self._save_movie_tv if self.time_viewer else self._save_movie
func(filename, time_dilation, tmin, tmax,
framerate, interpolation, codec,
bitrate, callback, time_viewer, **kwargs)
def _make_movie_frames(self, time_dilation, tmin, tmax, framerate,
interpolation, callback, time_viewer):
from math import floor
# find tmin
if tmin is None:
tmin = self._times[0]
elif tmin < self._times[0]:
raise ValueError("tmin=%r is smaller than the first time point "
"(%r)" % (tmin, self._times[0]))
# find indexes at which to create frames
if tmax is None:
tmax = self._times[-1]
elif tmax > self._times[-1]:
raise ValueError("tmax=%r is greater than the latest time point "
"(%r)" % (tmax, self._times[-1]))
n_frames = floor((tmax - tmin) * time_dilation * framerate)
times = np.arange(n_frames, dtype=float)
times /= framerate * time_dilation
times += tmin
time_idx = np.interp(times, self._times, np.arange(self._n_times))
n_times = len(time_idx)
if n_times == 0:
raise ValueError("No time points selected")
logger.debug("Save movie for time points/samples\n%s\n%s"
% (times, time_idx))
# Sometimes the first screenshot is rendered with a different
# resolution on OS X
self.screenshot(time_viewer=time_viewer)
old_mode = self.time_interpolation
if interpolation is not None:
self.set_time_interpolation(interpolation)
try:
images = [
self.screenshot(time_viewer=time_viewer)
for _ in self._iter_time(time_idx, callback)]
finally:
self.set_time_interpolation(old_mode)
if callback is not None:
callback(frame=len(time_idx), n_frames=len(time_idx))
return images
def _iter_time(self, time_idx, callback):
"""Iterate through time points, then reset to current time.
Parameters
----------
time_idx : array_like
Time point indexes through which to iterate.
callback : callable | None
Callback to call before yielding each frame.
Yields
------
idx : int | float
Current index.
Notes
-----
Used by movie and image sequence saving functions.
"""
if self.time_viewer:
func = partial(self.callbacks["time"],
update_widget=True)
else:
func = self.set_time_point
current_time_idx = self._data["time_idx"]
for ii, idx in enumerate(time_idx):
func(idx)
if callback is not None:
callback(frame=ii, n_frames=len(time_idx))
yield idx
# Restore original time index
func(current_time_idx)
    def _check_stc(self, hemi, array, vertices):
        """Resolve (stc, array, vertices) from a SourceEstimate or array."""
        from ...source_estimate import (
            _BaseSourceEstimate, _BaseSurfaceSourceEstimate,
            _BaseMixedSourceEstimate, _BaseVolSourceEstimate
        )
        if isinstance(array, _BaseSourceEstimate):
            stc = array
            stc_surf = stc_vol = None
            if isinstance(stc, _BaseSurfaceSourceEstimate):
                stc_surf = stc
            elif isinstance(stc, _BaseMixedSourceEstimate):
                # mixed estimates have both parts; select the one that
                # matches the requested hemi
                stc_surf = stc.surface() if hemi != 'vol' else None
                stc_vol = stc.volume() if hemi == 'vol' else None
            elif isinstance(stc, _BaseVolSourceEstimate):
                stc_vol = stc if hemi == 'vol' else None
            else:
                raise TypeError("stc not supported")
            if stc_surf is None and stc_vol is None:
                raise ValueError("No data to be added")
            if stc_surf is not None:
                # e.g. 'lh' -> stc.lh_data, with matching vertices
                array = getattr(stc_surf, hemi + '_data')
                vertices = stc_surf.vertices[0 if hemi == 'lh' else 1]
            if stc_vol is not None:
                array = stc_vol.data
                vertices = np.concatenate(stc_vol.vertices)
        else:
            # a plain array: pass array/vertices through unchanged
            stc = None
        return stc, array, vertices
def _check_hemi(self, hemi, extras=()):
"""Check for safe single-hemi input, returns str."""
if hemi is None:
if self._hemi not in ['lh', 'rh']:
raise ValueError('hemi must not be None when both '
'hemispheres are displayed')
else:
hemi = self._hemi
elif hemi not in ['lh', 'rh'] + list(extras):
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' +
extra + ", got " + str(hemi))
return hemi
def _check_hemis(self, hemi):
"""Check for safe dual or single-hemi input, returns list."""
if hemi is None:
if self._hemi not in ['lh', 'rh']:
hemi = ['lh', 'rh']
else:
hemi = [self._hemi]
elif hemi not in ['lh', 'rh']:
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' + extra)
else:
hemi = [hemi]
return hemi
def _to_borders(self, label, hemi, borders, restrict_idx=None):
"""Convert a label/parc to borders."""
if not isinstance(borders, (bool, int)) or borders < 0:
raise ValueError('borders must be a bool or positive integer')
if borders:
n_vertices = label.size
edges = mesh_edges(self.geo[hemi].orig_faces)
edges = edges.tocoo()
border_edges = label[edges.row] != label[edges.col]
show = np.zeros(n_vertices, dtype=np.int64)
keep_idx = np.unique(edges.row[border_edges])
if isinstance(borders, int):
for _ in range(borders):
keep_idx = np.in1d(
self.geo[hemi].orig_faces.ravel(), keep_idx)
keep_idx.shape = self.geo[hemi].orig_faces.shape
keep_idx = self.geo[hemi].orig_faces[
np.any(keep_idx, axis=1)]
keep_idx = np.unique(keep_idx)
if restrict_idx is not None:
keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)]
show[keep_idx] = 1
label *= show
    def enable_depth_peeling(self):
        """Enable depth peeling.

        Simply delegates to the renderer backend (depth peeling is a VTK
        technique for correctly rendering overlapping translucent geometry).
        """
        self._renderer.enable_depth_peeling()
def get_picked_points(self):
"""Return the vertices of the picked points.
Returns
-------
points : list of int | None
The vertices picked by the time viewer.
"""
if hasattr(self, "time_viewer"):
return self.picked_points
    def __hash__(self):
        """Hash the object."""
        # Instances are mutable GUI objects; hashing is deliberately
        # unsupported rather than silently identity-based.
        raise NotImplementedError
def _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False):
"""Work around interp1d not liking singleton dimensions."""
from scipy.interpolate import interp1d
if y.shape[axis] == 1:
def func(x):
return np.take(y, np.zeros(np.asarray(x).shape, int), axis=axis)
return func
else:
return interp1d(x, y, kind, axis=axis, assume_sorted=assume_sorted)
def _update_limits(fmin, fmid, fmax, center, array):
if center is None:
if fmin is None:
fmin = array.min() if array.size > 0 else 0
if fmax is None:
fmax = array.max() if array.size > 0 else 1
else:
if fmin is None:
fmin = 0
if fmax is None:
fmax = np.abs(center - array).max() if array.size > 0 else 1
if fmid is None:
fmid = (fmin + fmax) / 2.
if fmin >= fmid:
raise RuntimeError('min must be < mid, got %0.4g >= %0.4g'
% (fmin, fmid))
if fmid >= fmax:
raise RuntimeError('mid must be < max, got %0.4g >= %0.4g'
% (fmid, fmax))
return fmin, fmid, fmax
def _update_monotonic(lims, fmin, fmid, fmax):
    """Update ``lims`` in place so fmin <= fmid <= fmax stays true.

    For each limit that is not None, set it and "bump" the other two limits
    toward it when they would violate monotonicity, logging each bump.
    """
    if fmin is not None:
        lims['fmin'] = fmin
        # Raise the upper limits up to the new fmin if needed.
        if lims['fmax'] < fmin:
            logger.debug(f'    Bumping fmax = {lims["fmax"]} to {fmin}')
            lims['fmax'] = fmin
        if lims['fmid'] < fmin:
            logger.debug(f'    Bumping fmid = {lims["fmid"]} to {fmin}')
            lims['fmid'] = fmin
    assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
    if fmid is not None:
        lims['fmid'] = fmid
        # Squeeze the outer limits around the new fmid if needed.
        if lims['fmin'] > fmid:
            logger.debug(f'    Bumping fmin = {lims["fmin"]} to {fmid}')
            lims['fmin'] = fmid
        if lims['fmax'] < fmid:
            logger.debug(f'    Bumping fmax = {lims["fmax"]} to {fmid}')
            lims['fmax'] = fmid
    assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
    if fmax is not None:
        lims['fmax'] = fmax
        # Lower the lower limits down to the new fmax if needed.
        if lims['fmin'] > fmax:
            logger.debug(f'    Bumping fmin = {lims["fmin"]} to {fmax}')
            lims['fmin'] = fmax
        if lims['fmid'] > fmax:
            logger.debug(f'    Bumping fmid = {lims["fmid"]} to {fmax}')
            lims['fmid'] = fmax
    assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
def _get_range(brain):
val = np.abs(np.concatenate(list(brain._current_act_data.values())))
return [np.min(val), np.max(val)]
class _FakeIren():
    """Inert stand-in for the VTK interactor, installed during cleanup.

    ``Brain._clean`` replaces ``plotter._Iren`` with an instance of this
    class so that any events delivered after the window is torn down
    become harmless no-ops.
    """
    def EnterEvent(self):
        pass
    def MouseMoveEvent(self):
        pass
    def LeaveEvent(self):
        pass
    def SetEventInformation(self, *args, **kwargs):
        pass
    def CharEvent(self):
        pass
    def KeyPressEvent(self, *args, **kwargs):
        pass
    def KeyReleaseEvent(self, *args, **kwargs):
        pass
| 39.93943 | 79 | 0.540483 |
import contextlib
from functools import partial
from io import BytesIO
import os
import os.path as op
import sys
import time
import copy
import traceback
import warnings
import numpy as np
from collections import OrderedDict
from .colormap import calculate_lut
from .surface import _Surface
from .view import views_dicts, _lh_views_dict
from .callback import (ShowView, TimeCallBack, SmartCallBack,
UpdateLUT, UpdateColorbarScale)
from ..utils import (_show_help_fig, _get_color_list, concatenate_images,
_generate_default_filename, _save_ndarray_img)
from .._3d import _process_clim, _handle_time, _check_views
from ...externals.decorator import decorator
from ...defaults import _handle_default
from ...surface import mesh_edges
from ...source_space import SourceSpaces, vertex_to_mni, read_talxfm
from ...transforms import apply_trans, invert_transform
from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type,
use_log_level, Bunch, _ReuseCycle, warn,
get_subjects_dir)
# Camera rotation step applied per arrow-key press (azimuth wraps at 360,
# so this is presumably in degrees).
_ARROW_MOVE = 10
@decorator
def safe_event(fun, *args, **kwargs):
    """Decorator: run ``fun`` but print any exception instead of raising.

    Applied to GUI callbacks (e.g. ``Brain._clean`` and ``Brain._play``)
    so an error cannot propagate into the event loop.
    """
    try:
        return fun(*args, **kwargs)
    except Exception:
        traceback.print_exc(file=sys.stderr)
class _Overlay(object):
    """One scalar layer of a mesh: values plus colormap, range, opacity."""
    def __init__(self, scalars, colormap, rng, opacity, name):
        self._scalars = scalars
        # Either a colormap name (str) or an RGBA lookup table
        # (divided by 255. below, so presumably 0-255 values).
        self._colormap = colormap
        assert rng is not None
        self._rng = rng  # (vmin, vmax) used to normalize the scalars
        self._opacity = opacity  # global alpha multiplier, or None
        self._name = name  # used for debug logging only
    def to_colors(self):
        """Map the scalars through the colormap to an RGBA array."""
        from .._3d import _get_cmap
        from matplotlib.colors import ListedColormap
        if isinstance(self._colormap, str):
            kind = self._colormap
            cmap = _get_cmap(self._colormap)
        else:
            # Array LUT: scale into [0, 1] for ListedColormap.
            cmap = ListedColormap(self._colormap / 255.)
            kind = str(type(self._colormap))
        logger.debug(
            f'Color mapping {repr(self._name)} with {kind} '
            f'colormap and range {self._rng}')
        rng = self._rng
        assert rng is not None
        # Normalize into [0, 1] before color-mapping.
        scalars = _norm(self._scalars, rng)
        colors = cmap(scalars)
        if self._opacity is not None:
            colors[:, 3] *= self._opacity
        return colors
def _norm(x, rng):
if rng[0] == rng[1]:
factor = 1 if rng[0] == 0 else 1e-6 * rng[0]
else:
factor = rng[1] - rng[0]
return (x - rng[0]) / factor
class _LayeredMesh(object):
def __init__(self, renderer, vertices, triangles, normals):
self._renderer = renderer
self._vertices = vertices
self._triangles = triangles
self._normals = normals
self._polydata = None
self._actor = None
self._is_mapped = False
self._cache = None
self._overlays = OrderedDict()
self._default_scalars = np.ones(vertices.shape)
self._default_scalars_name = 'Data'
def map(self):
kwargs = {
"color": None,
"pickable": True,
"rgba": True,
}
mesh_data = self._renderer.mesh(
x=self._vertices[:, 0],
y=self._vertices[:, 1],
z=self._vertices[:, 2],
triangles=self._triangles,
normals=self._normals,
scalars=self._default_scalars,
**kwargs
)
self._actor, self._polydata = mesh_data
self._is_mapped = True
def _compute_over(self, B, A):
assert A.ndim == B.ndim == 2
assert A.shape[1] == B.shape[1] == 4
A_w = A[:, 3:]
B_w = B[:, 3:] * (1 - A_w)
C = A.copy()
C[:, :3] *= A_w
C[:, :3] += B[:, :3] * B_w
C[:, 3:] += B_w
C[:, :3] /= C[:, 3:]
return np.clip(C, 0, 1, out=C)
def _compose_overlays(self):
B = None
for overlay in self._overlays.values():
A = overlay.to_colors()
if B is None:
B = A
else:
B = self._compute_over(B, A)
return B
def add_overlay(self, scalars, colormap, rng, opacity, name):
overlay = _Overlay(
scalars=scalars,
colormap=colormap,
rng=rng,
opacity=opacity,
name=name,
)
self._overlays[name] = overlay
colors = overlay.to_colors()
if self._cache is None:
self._cache = colors
else:
self._cache = self._compute_over(self._cache, colors)
self._update()
def remove_overlay(self, names):
if not isinstance(names, list):
names = [names]
for name in names:
if name in self._overlays:
del self._overlays[name]
self.update()
def _update(self):
if self._cache is None or self._renderer is None:
return
self._renderer._set_mesh_scalars(
mesh=self._polydata,
scalars=self._cache,
name=self._default_scalars_name,
)
def update(self):
self._cache = self._compose_overlays()
self._update()
def _clean(self):
mapper = self._actor.GetMapper()
mapper.SetLookupTable(None)
self._actor.SetMapper(None)
self._actor = None
self._polydata = None
self._renderer = None
def update_overlay(self, name, scalars=None, colormap=None,
opacity=None, rng=None):
overlay = self._overlays.get(name, None)
if overlay is None:
return
if scalars is not None:
overlay._scalars = scalars
if colormap is not None:
overlay._colormap = colormap
if opacity is not None:
overlay._opacity = opacity
if rng is not None:
overlay._rng = rng
self.update()
@fill_doc
class Brain(object):
    def __init__(self, subject_id, hemi, surf, title=None,
                 cortex="classic", alpha=1.0, size=800, background="black",
                 foreground=None, figure=None, subjects_dir=None,
                 views='auto', offset='auto', show_toolbar=False,
                 offscreen=False, interaction='trackball', units='mm',
                 view_layout='vertical', silhouette=False, theme='auto',
                 show=True):
        """Construct the viewer: validate inputs, create the renderer,
        load each hemisphere's surface and display its curvature overlay.
        """
        from ..backends.renderer import backend, _get_renderer
        from .._3d import _get_cmap
        from matplotlib.colors import colorConverter
        # 'both'/'split' display two hemispheres, otherwise just one.
        if hemi in ('both', 'split'):
            self._hemis = ('lh', 'rh')
        elif hemi in ('lh', 'rh'):
            self._hemis = (hemi, )
        else:
            raise KeyError('hemi has to be either "lh", "rh", "split", '
                           'or "both"')
        self._view_layout = _check_option('view_layout', view_layout,
                                          ('vertical', 'horizontal'))
        if figure is not None and not isinstance(figure, int):
            backend._check_3d_figure(figure)
        if title is None:
            self._title = subject_id
        else:
            self._title = title
        # Placeholder; the user-requested interaction is applied further
        # below (after the renderer exists) via the interaction property.
        self._interaction = 'trackball'
        if isinstance(background, str):
            background = colorConverter.to_rgb(background)
        self._bg_color = background
        if foreground is None:
            # Pick a foreground that contrasts with the background.
            foreground = 'w' if sum(self._bg_color) < 2 else 'k'
        if isinstance(foreground, str):
            foreground = colorConverter.to_rgb(foreground)
        self._fg_color = foreground
        if isinstance(views, str):
            views = [views]
        views = _check_views(surf, views, hemi)
        # Subplot grid: one row per view; two columns only for 'split'.
        col_dict = dict(lh=1, rh=1, both=1, split=2)
        shape = (len(views), col_dict[hemi])
        if self._view_layout == 'horizontal':
            shape = shape[::-1]
        self._subplot_shape = shape
        # Normalize size to a (width, height) tuple of ints.
        size = tuple(np.atleast_1d(size).round(0).astype(int).flat)
        if len(size) not in (1, 2):
            raise ValueError('"size" parameter must be an int or length-2 '
                             'sequence of ints.')
        size = size if len(size) == 2 else size * 2
        subjects_dir = get_subjects_dir(subjects_dir)
        self.theme = theme
        self.time_viewer = False
        self._hemi = hemi
        self._units = units
        self._alpha = float(alpha)
        self._subject_id = subject_id
        self._subjects_dir = subjects_dir
        self._views = views
        self._times = None
        self._vertex_to_label_id = dict()
        self._annotation_labels = dict()
        self._labels = {'lh': list(), 'rh': list()}
        self._unnamed_label_id = 0
        self._annots = {'lh': list(), 'rh': list()}
        self._layered_meshes = {}
        self._elevation_rng = [15, 165]
        self._lut_locked = None
        # Defaults for the silhouette outline; may be overridden by a dict.
        self._silhouette = {
            'color': self._bg_color,
            'line_width': 2,
            'alpha': alpha,
            'decimate': 0.9,
        }
        _validate_type(silhouette, (dict, bool), 'silhouette')
        if isinstance(silhouette, dict):
            self._silhouette.update(silhouette)
            self.silhouette = True
        else:
            self.silhouette = silhouette
        self._scalar_bar = None
        self._time_label_added = False
        self._data = {}
        self.geo = {}
        self.set_time_interpolation('nearest')
        geo_kwargs = self._cortex_colormap(cortex)
        # Sample the cortex colormap where value 0 falls, for _brain_color.
        val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin'])
        self._brain_color = _get_cmap(geo_kwargs['colormap'])(val)
        _validate_type(offset, (str, bool), 'offset')
        if isinstance(offset, str):
            _check_option('offset', offset, ('auto',), extra='when str')
            offset = (surf in ('inflated', 'flat'))
        # Hemispheres are only offset apart when both are in one scene.
        offset = None if (not offset or hemi != 'both') else 0.0
        logger.debug(f'Hemi offset: {offset}')
        self._renderer = _get_renderer(name=self._title, size=size,
                                       bgcolor=background,
                                       shape=shape,
                                       fig=figure)
        self._renderer._window_close_connect(self._clean)
        self._renderer._window_set_theme(theme)
        self.plotter = self._renderer.plotter
        self._setup_canonical_rotation()
        for h in self._hemis:
            # Load geometry + curvature for this hemisphere.
            geo = _Surface(subject_id, h, surf, subjects_dir, offset,
                           units=self._units, x_dir=self._rigid[0, :3])
            geo.load_geometry()
            geo.load_curvature()
            self.geo[h] = geo
            for ri, ci, v in self._iter_views(h):
                self._renderer.subplot(ri, ci)
                if self._layered_meshes.get(h) is None:
                    # First subplot for this hemi: build the mesh and add
                    # the binarized-curvature base overlay.
                    mesh = _LayeredMesh(
                        renderer=self._renderer,
                        vertices=self.geo[h].coords,
                        triangles=self.geo[h].faces,
                        normals=self.geo[h].nn,
                    )
                    mesh.map()
                    mesh.add_overlay(
                        scalars=self.geo[h].bin_curv,
                        colormap=geo_kwargs["colormap"],
                        rng=[geo_kwargs["vmin"], geo_kwargs["vmax"]],
                        opacity=alpha,
                        name='curv',
                    )
                    self._layered_meshes[h] = mesh
                    mesh._polydata._hemi = h
                else:
                    # Additional views reuse the existing actor.
                    actor = self._layered_meshes[h]._actor
                    self._renderer.plotter.add_actor(actor)
                if self.silhouette:
                    mesh = self._layered_meshes[h]
                    self._renderer._silhouette(
                        mesh=mesh._polydata,
                        color=self._silhouette["color"],
                        line_width=self._silhouette["line_width"],
                        alpha=self._silhouette["alpha"],
                        decimate=self._silhouette["decimate"],
                    )
                self._renderer.set_camera(**views_dicts[h][v])
        self.interaction = interaction
        self._closed = False
        if show:
            self.show()
        for h in self._hemis:
            for ri, ci, v in self._iter_views(h):
                self.show_view(v, row=ri, col=ci, hemi=h)
        if surf == 'flat':
            self._renderer.set_interaction("rubber_band_2d")
    def _setup_canonical_rotation(self):
        """Compute a rigid transform aligning the subject to Talairach axes.

        Falls back to the identity when the subject's tal transform cannot
        be read.
        """
        from ...coreg import fit_matched_points, _trans_from_params
        self._rigid = np.eye(4)
        try:
            xfm = read_talxfm(self._subject_id, self._subjects_dir)
        except Exception:
            return
        # A few canonical points (axes + half-axes) mapped back into
        # subject space, then fit subject -> tal.
        pts_tal = np.concatenate([np.eye(4)[:, :3], np.eye(3) * 0.5])
        pts_subj = apply_trans(invert_transform(xfm), pts_tal)
        params = fit_matched_points(pts_subj, pts_tal, scale=3, out='params')
        # (True, True, False) presumably selects translation+rotation
        # without scaling -- confirm against coreg._trans_from_params.
        self._rigid[:] = _trans_from_params((True, True, False), params[:6])
    def setup_time_viewer(self, time_viewer=True, show_traces=True):
        """Initialize the full time-viewer GUI (widgets, picking, traces).

        No-op if already configured; requires data added via ``add_data``.
        ``show_traces`` may be bool, a fraction (canvas height), or one of
        'separate' / 'label' / 'vertex'.
        """
        if self.time_viewer:
            return
        if not self._data:
            raise ValueError("No data to visualize. See ``add_data``.")
        self.time_viewer = time_viewer
        self.orientation = list(_lh_views_dict.keys())
        self.default_smoothing_range = [0, 15]
        self.playback = False
        self.visibility = False
        # Aim for ~60 Hz refresh, with a 1 ms floor.
        self.refresh_rate_ms = max(int(round(1000. / 60.)), 1)
        self.default_scaling_range = [0.2, 2.0]
        self.default_playback_speed_range = [0.01, 1]
        self.default_playback_speed_value = 0.01
        self.default_status_bar_msg = "Press ? for help"
        self.default_label_extract_modes = {
            "stc": ["mean", "max"],
            "src": ["mean_flip", "pca_flip", "auto"],
        }
        self.default_trace_modes = ('vertex', 'label')
        self.annot = None
        self.label_extract_mode = None
        all_keys = ('lh', 'rh', 'vol')
        self.act_data_smooth = {key: (None, None) for key in all_keys}
        self.color_list = _get_color_list()
        # Remove gray, which is hard to distinguish from the default trace.
        self.color_list.remove("#7f7f7f")
        self.color_cycle = _ReuseCycle(self.color_list)
        self.mpl_canvas = None
        self.help_canvas = None
        self.rms = None
        self.picked_patches = {key: list() for key in all_keys}
        self.picked_points = {key: list() for key in all_keys}
        self.pick_table = dict()
        self._spheres = list()
        self._mouse_no_mvt = -1
        self.callbacks = dict()
        self.widgets = dict()
        self.keys = ('fmin', 'fmid', 'fmax')
        self.playback_speed = self.default_playback_speed_value
        _validate_type(show_traces, (bool, str, 'numeric'), 'show_traces')
        self.interactor_fraction = 0.25
        if isinstance(show_traces, str):
            self.show_traces = True
            self.separate_canvas = False
            self.traces_mode = 'vertex'
            if show_traces == 'separate':
                self.separate_canvas = True
            elif show_traces == 'label':
                self.traces_mode = 'label'
            else:
                assert show_traces == 'vertex'
        else:
            if isinstance(show_traces, bool):
                self.show_traces = show_traces
            else:
                # Numeric: fraction of the window used by the mpl canvas.
                show_traces = float(show_traces)
                if not 0 < show_traces < 1:
                    raise ValueError(
                        'show traces, if numeric, must be between 0 and 1, '
                        f'got {show_traces}')
                self.show_traces = True
                self.interactor_fraction = show_traces
            self.traces_mode = 'vertex'
            self.separate_canvas = False
        del show_traces
        # Wire up every GUI subsystem in dependency order.
        self._configure_time_label()
        self._configure_scalar_bar()
        self._configure_shortcuts()
        self._configure_picking()
        self._configure_tool_bar()
        self._configure_dock()
        self._configure_menu()
        self._configure_status_bar()
        self._configure_playback()
        self._configure_help()
        self.toggle_interface()
        self._renderer.show()
        for hemi in ('lh', 'rh'):
            for ri, ci, v in self._iter_views(hemi):
                self.show_view(view=v, row=ri, col=ci)
        self._renderer._process_events()
        self._renderer._update()
        if self.show_traces:
            self.mpl_canvas.show()
    @safe_event
    def _clean(self):
        """Tear down actors/widgets and break reference cycles on close."""
        self.clear_glyphs()
        self.remove_annotations()
        for hemi in self._hemis:
            self._layered_meshes[hemi]._clean()
        self._clear_callbacks()
        self._clear_widgets()
        if getattr(self, 'mpl_canvas', None) is not None:
            self.mpl_canvas.clear()
        if getattr(self, 'act_data_smooth', None) is not None:
            for key in list(self.act_data_smooth.keys()):
                self.act_data_smooth[key] = None
        for renderer in self._renderer._all_renderers:
            renderer.RemoveAllLights()
        for key in ('lighting', 'interactor', '_RenderWindow'):
            setattr(self.plotter, key, None)
        # Late VTK events may still arrive after the window is gone;
        # replace the interactor with a no-op stub.
        self.plotter._Iren = _FakeIren()
        if getattr(self.plotter, 'picker', None) is not None:
            self.plotter.picker = None
        # Drop references to everything big so it can be garbage collected.
        for key in ('plotter', 'window', 'dock', 'tool_bar', 'menu_bar',
                    'interactor', 'mpl_canvas', 'time_actor',
                    'picked_renderer', 'act_data_smooth', '_scalar_bar',
                    'actions', 'widgets', 'geo', '_data'):
            setattr(self, key, None)
    def toggle_interface(self, value=None):
        """Show/hide the dock controls (toggle when ``value`` is None)."""
        if value is None:
            self.visibility = not self.visibility
        else:
            self.visibility = value
        # Keep the window from collapsing while the dock is re-laid out.
        with self._renderer._window_ensure_minimum_sizes():
            if self.visibility:
                self._renderer._dock_show()
                self._renderer._tool_bar_update_button_icon(
                    name="visibility", icon_name="visibility_on")
            else:
                self._renderer._dock_hide()
                self._renderer._tool_bar_update_button_icon(
                    name="visibility", icon_name="visibility_off")
        self._renderer._update()
    def apply_auto_scaling(self):
        """Apply automatic color scaling (via ``_update_auto_scaling``)."""
        self._update_auto_scaling()
    def restore_user_scaling(self):
        """Restore the previous color scaling (``restore=True``)."""
        self._update_auto_scaling(restore=True)
    def toggle_playback(self, value=None):
        """Start/stop time playback (toggle when ``value`` is None)."""
        if value is None:
            self.playback = not self.playback
        else:
            self.playback = value
        # Reflect the state on the play/pause toolbar button.
        if self.playback:
            self._renderer._tool_bar_update_button_icon(
                name="play", icon_name="pause")
        else:
            self._renderer._tool_bar_update_button_icon(
                name="play", icon_name="play")
        if self.playback:
            time_data = self._data['time']
            max_time = np.max(time_data)
            if self._current_time == max_time:
                # Already at the end: restart from the beginning.
                self.set_time_point(0)
            self._last_tick = time.time()
    def reset(self):
        """Reset the view and jump back to the initial time index."""
        self.reset_view()
        max_time = len(self._data['time']) - 1
        if max_time > 0:
            self.callbacks["time"](
                self._data["initial_time_idx"],
                update_widget=True,
            )
        self._renderer._update()
    def set_playback_speed(self, speed):
        """Set the playback speed (data time advanced per wall-clock
        second; see ``_advance``)."""
        self.playback_speed = speed
    @safe_event
    def _play(self):
        """Timer callback: advance one playback step while playing."""
        if self.playback:
            try:
                self._advance()
            except Exception:
                # Stop playback before safe_event reports the error.
                self.toggle_playback(value=False)
                raise
    def _advance(self):
        """Advance current time by elapsed wall time * playback speed."""
        this_time = time.time()
        delta = this_time - self._last_tick
        self._last_tick = time.time()
        time_data = self._data['time']
        times = np.arange(self._n_times)
        time_shift = delta * self.playback_speed
        max_time = np.max(time_data)
        time_point = min(self._current_time + time_shift, max_time)
        # Map the (possibly non-uniform) time axis to a fractional index.
        idx = np.interp(time_point, time_data, times)
        self.callbacks["time"](idx, update_widget=True)
        if time_point == max_time:
            # Reached the end of the data: stop playback.
            self.toggle_playback(value=False)
    def _configure_time_label(self):
        """Style the time-label actor (bottom-centered, bold), if present."""
        self.time_actor = self._data.get('time_actor')
        if self.time_actor is not None:
            self.time_actor.SetPosition(0.5, 0.03)
            self.time_actor.GetTextProperty().SetJustificationToCentered()
            self.time_actor.GetTextProperty().BoldOn()
    def _configure_scalar_bar(self):
        """Place the scalar bar vertically near the left edge."""
        if self._scalar_bar is not None:
            self._scalar_bar.SetOrientationToVertical()
            self._scalar_bar.SetHeight(0.6)
            self._scalar_bar.SetWidth(0.05)
            self._scalar_bar.SetPosition(0.02, 0.2)
    def _configure_dock_time_widget(self, layout=None):
        """Add min/current/max time labels on one dock row."""
        len_time = len(self._data['time']) - 1
        if len_time < 1:
            # Single time point: nothing to navigate.
            return
        layout = self._renderer.dock_layout if layout is None else layout
        hlayout = self._renderer._dock_add_layout(vertical=False)
        self.widgets["min_time"] = self._renderer._dock_add_label(
            value="-", layout=hlayout)
        self._renderer._dock_add_stretch(hlayout)
        self.widgets["current_time"] = self._renderer._dock_add_label(
            value="x", layout=hlayout)
        self._renderer._dock_add_stretch(hlayout)
        self.widgets["max_time"] = self._renderer._dock_add_label(
            value="+", layout=hlayout)
        self._renderer._layout_add_widget(layout, hlayout)
        # Replace the placeholder texts with the actual times.
        min_time = float(self._data['time'][0])
        max_time = float(self._data['time'][-1])
        self.widgets["min_time"].set_value(f"{min_time: .3f}")
        self.widgets["max_time"].set_value(f"{max_time: .3f}")
        self.widgets["current_time"].set_value(f"{self._current_time: .3f}")
    def _configure_dock_playback_widget(self, name):
        """Build the playback group box (time slider, labels, speed).

        Widgets/callbacks are set to None when there is only a single time
        point (nothing to play back).
        """
        layout = self._renderer._dock_add_group_box(name)
        len_time = len(self._data['time']) - 1
        if len_time < 1:
            self.callbacks["time"] = None
            self.widgets["time"] = None
        else:
            self.callbacks["time"] = TimeCallBack(
                brain=self,
                callback=self.plot_time_line,
            )
            self.widgets["time"] = self._renderer._dock_add_slider(
                name="Time (s)",
                value=self._data['time_idx'],
                rng=[0, len_time],
                double=True,
                callback=self.callbacks["time"],
                compact=False,
                layout=layout,
            )
            self.callbacks["time"].widget = self.widgets["time"]
        if len_time < 1:
            self.widgets["min_time"] = None
            self.widgets["max_time"] = None
            self.widgets["current_time"] = None
        else:
            self._configure_dock_time_widget(layout)
            self.callbacks["time"].label = self.widgets["current_time"]
        if len_time < 1:
            self.callbacks["playback_speed"] = None
            self.widgets["playback_speed"] = None
        else:
            self.callbacks["playback_speed"] = SmartCallBack(
                callback=self.set_playback_speed,
            )
            self.widgets["playback_speed"] = self._renderer._dock_add_spin_box(
                name="Speed",
                value=self.default_playback_speed_value,
                rng=self.default_playback_speed_range,
                callback=self.callbacks["playback_speed"],
                layout=layout,
            )
            self.callbacks["playback_speed"].widget = \
                self.widgets["playback_speed"]
        # Initialize the on-screen time label text (time_label may be a
        # formatter callable or a fixed string).
        current_time = self._current_time
        assert current_time is not None
        time_label = self._data['time_label']
        if callable(time_label):
            current_time = time_label(current_time)
        else:
            current_time = time_label
        if self.time_actor is not None:
            self.time_actor.SetInput(current_time)
        del current_time
    def _configure_dock_orientation_widget(self, name):
        """Build the orientation group box (renderer + view selectors)."""
        layout = self._renderer._dock_add_group_box(name)
        rends = [str(i) for i in range(len(self._renderer._all_renderers))]
        if len(rends) > 1:
            # Multiple subplots: add a combo box to pick the active one.
            def select_renderer(idx):
                idx = int(idx)
                loc = self._renderer._index_to_loc(idx)
                self.plotter.subplot(*loc)
            self.callbacks["renderer"] = SmartCallBack(
                callback=select_renderer,
            )
            self.widgets["renderer"] = self._renderer._dock_add_combo_box(
                name="Renderer",
                value="0",
                rng=rends,
                callback=self.callbacks["renderer"],
                layout=layout,
            )
            self.callbacks["renderer"].widget = \
                self.widgets["renderer"]
        # For 'both', one scene shows both hemis; iterate views once.
        if self._hemi == 'both':
            hemis_ref = ['lh']
        else:
            hemis_ref = self._hemis
        # Per-renderer view metadata ('flat' views are not orientable).
        orientation_data = [None] * len(rends)
        for hemi in hemis_ref:
            for ri, ci, view in self._iter_views(hemi):
                idx = self._renderer._loc_to_index((ri, ci))
                if view == 'flat':
                    _data = None
                else:
                    _data = dict(default=view, hemi=hemi, row=ri, col=ci)
                orientation_data[idx] = _data
        self.callbacks["orientation"] = ShowView(
            brain=self,
            data=orientation_data,
        )
        self.widgets["orientation"] = self._renderer._dock_add_combo_box(
            name=None,
            value=self.orientation[0],
            rng=self.orientation,
            callback=self.callbacks["orientation"],
            layout=layout,
        )
    def _configure_dock_colormap_widget(self, name):
        """Build the color-limits group box (fmin/fmid/fmax + rescale)."""
        layout = self._renderer._dock_add_group_box(name)
        self._renderer._dock_add_label(
            value="min / mid / max",
            align=True,
            layout=layout,
        )
        up = UpdateLUT(brain=self)
        # One slider + spin box per limit key ('fmin', 'fmid', 'fmax').
        for key in self.keys:
            hlayout = self._renderer._dock_add_layout(vertical=False)
            rng = _get_range(self)
            # key=key binds the loop variable now (avoids the late-binding
            # closure pitfall).
            self.callbacks[key] = lambda value, key=key: up(**{key: value})
            self.widgets[key] = self._renderer._dock_add_slider(
                name=None,
                value=self._data[key],
                rng=rng,
                callback=self.callbacks[key],
                double=True,
                layout=hlayout,
            )
            self.widgets[f"entry_{key}"] = self._renderer._dock_add_spin_box(
                name=None,
                value=self._data[key],
                callback=self.callbacks[key],
                rng=rng,
                layout=hlayout,
            )
            up.widgets[key] = [self.widgets[key], self.widgets[f"entry_{key}"]]
            self._renderer._layout_add_widget(layout, hlayout)
        # Rescale row: reset plus shrink/grow by a constant factor.
        hlayout = self._renderer._dock_add_layout(vertical=False)
        self._renderer._dock_add_label(
            value="Rescale",
            align=True,
            layout=hlayout,
        )
        self.widgets["reset"] = self._renderer._dock_add_button(
            name="↺",
            callback=self.restore_user_scaling,
            layout=hlayout,
        )
        for key, char, val in (("fminus", "➖", 1.2 ** -0.25),
                               ("fplus", "➕", 1.2 ** 0.25)):
            self.callbacks[key] = UpdateColorbarScale(
                brain=self,
                factor=val,
            )
            self.widgets[key] = self._renderer._dock_add_button(
                name=char,
                callback=self.callbacks[key],
                layout=hlayout,
            )
        self._renderer._layout_add_widget(layout, hlayout)
        # Give each callback access to all limit widgets for syncing.
        widgets = {key: self.widgets[key] for key in self.keys}
        for name in ("fmin", "fmid", "fmax", "fminus", "fplus"):
            self.callbacks[name].widgets = widgets
    def _configure_dock_trace_widget(self, name):
        """Build the trace group box (annotation + extract-mode selectors)."""
        if not self.show_traces:
            return
        # Volume sources only support vertex traces; no selectors needed.
        if (self._data.get('src', None) is not None and
                self._data['src'].kind == 'volume'):
            self._configure_vertex_time_course()
            return
        layout = self._renderer._dock_add_group_box(name)
        def _set_annot(annot):
            # Switch between vertex traces ('None') and label traces.
            self.clear_glyphs()
            self.remove_labels()
            self.remove_annotations()
            self.annot = annot
            if annot == 'None':
                self.traces_mode = 'vertex'
                self._configure_vertex_time_course()
            else:
                self.traces_mode = 'label'
                self._configure_label_time_course()
            self._renderer._update()
        def _set_label_mode(mode):
            # Re-extract the traces for the picked labels with a new mode.
            if self.traces_mode != 'label':
                return
            glyphs = copy.deepcopy(self.picked_patches)
            self.label_extract_mode = mode
            self.clear_glyphs()
            for hemi in self._hemis:
                for label_id in glyphs[hemi]:
                    label = self._annotation_labels[hemi][label_id]
                    vertex_id = label.vertices[0]
                    self._add_label_glyph(hemi, None, vertex_id)
            self.mpl_canvas.axes.relim()
            self.mpl_canvas.axes.autoscale_view()
            self.mpl_canvas.update_plot()
            self._renderer._update()
        from ...source_estimate import _get_allowed_label_modes
        from ...label import _read_annot_cands
        # Candidate annotations come from the subject's label directory.
        dir_name = op.join(self._subjects_dir, self._subject_id, 'label')
        cands = _read_annot_cands(dir_name, raise_error=False)
        cands = cands + ['None']
        self.annot = cands[0]
        stc = self._data["stc"]
        modes = _get_allowed_label_modes(stc)
        if self._data["src"] is None:
            # Source-space-dependent modes need a src to be available.
            modes = [m for m in modes if m not in
                     self.default_label_extract_modes["src"]]
        self.label_extract_mode = modes[-1]
        if self.traces_mode == 'vertex':
            _set_annot('None')
        else:
            _set_annot(self.annot)
        self.widgets["annotation"] = self._renderer._dock_add_combo_box(
            name="Annotation",
            value=self.annot,
            rng=cands,
            callback=_set_annot,
            layout=layout,
        )
        self.widgets["extract_mode"] = self._renderer._dock_add_combo_box(
            name="Extract mode",
            value=self.label_extract_mode,
            rng=modes,
            callback=_set_label_mode,
            layout=layout,
        )
    def _configure_dock(self):
        """Assemble the whole dock: playback, orientation, colors, traces,
        plus the smoothing spin box."""
        self._renderer._dock_initialize()
        self._configure_dock_playback_widget(name="Playback")
        self._configure_dock_orientation_widget(name="Orientation")
        self._configure_dock_colormap_widget(name="Color Limits")
        self._configure_dock_trace_widget(name="Trace")
        self.callbacks["smoothing"] = SmartCallBack(
            callback=self.set_data_smoothing,
        )
        self.widgets["smoothing"] = self._renderer._dock_add_spin_box(
            name="Smoothing",
            value=self._data['smoothing_steps'],
            rng=self.default_smoothing_range,
            callback=self.callbacks["smoothing"],
            double=False
        )
        self.callbacks["smoothing"].widget = \
            self.widgets["smoothing"]
        self._renderer._dock_finalize()
    def _configure_playback(self):
        """Hook ``_play`` into the renderer's playback timer."""
        self._renderer._playback_initialize(
            func=self._play,
            timeout=self.refresh_rate_ms,
            value=self._data['time_idx'],
            rng=[0, len(self._data['time']) - 1],
            time_widget=self.widgets["time"],
            play_widget=self.widgets["play"],
        )
    def _configure_mplcanvas(self):
        """Create the matplotlib canvas used for the time-course traces."""
        self.mpl_canvas = self._renderer._window_get_mplcanvas(
            brain=self,
            interactor_fraction=self.interactor_fraction,
            show_traces=self.show_traces,
            separate_canvas=self.separate_canvas
        )
        xlim = [np.min(self._data['time']),
                np.max(self._data['time'])]
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning)
            self.mpl_canvas.axes.set(xlim=xlim)
        if not self.separate_canvas:
            # Embedded canvas: reserve space inside the main window.
            self._renderer._window_adjust_mplcanvas_layout()
        # Match the brain window's color scheme.
        self.mpl_canvas.set_color(
            bg_color=self._bg_color,
            fg_color=self._fg_color,
        )
    def _configure_vertex_time_course(self):
        """Plot the RMS trace and seed one glyph at the peak vertex."""
        if not self.show_traces:
            return
        if self.mpl_canvas is None:
            self._configure_mplcanvas()
        else:
            self.clear_glyphs()
        # RMS across all vertices of every hemi that has data.
        y = np.concatenate(list(v[0] for v in self.act_data_smooth.values()
                                if v[0] is not None))
        rms = np.linalg.norm(y, axis=0) / np.sqrt(len(y))
        del y
        self.rms, = self.mpl_canvas.axes.plot(
            self._data['time'], rms,
            lw=3, label='RMS', zorder=3, color=self._fg_color,
            alpha=0.5, ls=':')
        self.plot_time_line()
        # Add a marker glyph at the vertex with the largest |activation|.
        for idx, hemi in enumerate(['lh', 'rh', 'vol']):
            act_data = self.act_data_smooth.get(hemi, [None])[0]
            if act_data is None:
                continue
            hemi_data = self._data[hemi]
            vertices = hemi_data['vertices']
            if self._hemi in ('both', 'rh') or hemi == 'vol':
                idx = 0
            self.picked_renderer = self._renderer._all_renderers[idx]
            if self._data['initial_time'] is not None:
                # Only look at the initial time column for the peak.
                use_data = act_data[
                    :, [np.round(self._data['time_idx']).astype(int)]]
            else:
                use_data = act_data
            ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None),
                                   use_data.shape)
            if hemi == 'vol':
                mesh = hemi_data['grid']
            else:
                mesh = self._layered_meshes[hemi]._polydata
            vertex_id = vertices[ind[0]]
            self._add_vertex_glyph(hemi, mesh, vertex_id)
    def _configure_picking(self):
        """Prepare per-hemi (data, smoothing-matrix) pairs and hook up the
        mouse picking callbacks."""
        from scipy import sparse
        for idx, hemi in enumerate(['vol', 'lh', 'rh']):
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                act_data = hemi_data['array']
                if act_data.ndim == 3:
                    # Vector data: reduce to magnitude per vertex/time.
                    act_data = np.linalg.norm(act_data, axis=1)
                smooth_mat = hemi_data.get('smooth_mat')
                vertices = hemi_data['vertices']
                if hemi == 'vol':
                    assert smooth_mat is None
                    # Build a selection matrix mapping sources to grid
                    # vertices (volumes have no surface smoothing).
                    smooth_mat = sparse.csr_matrix(
                        (np.ones(len(vertices)),
                         (vertices, np.arange(len(vertices)))))
                self.act_data_smooth[hemi] = (act_data, smooth_mat)
        self._renderer._update_picking_callback(
            self._on_mouse_move,
            self._on_button_press,
            self._on_button_release,
            self._on_pick
        )
    def _configure_tool_bar(self):
        """Populate the toolbar: screenshot, movie, visibility, playback,
        reset, auto-scale, clear, and help actions."""
        self._renderer._tool_bar_load_icons()
        self._renderer._tool_bar_set_theme(self.theme)
        self._renderer._tool_bar_initialize(name="Toolbar")
        self._renderer._tool_bar_add_file_button(
            name="screenshot",
            desc="Take a screenshot",
            func=self.save_image,
        )
        self._renderer._tool_bar_add_file_button(
            name="movie",
            desc="Save movie...",
            func=self.save_movie,
            shortcut="ctrl+shift+s",
        )
        self._renderer._tool_bar_add_button(
            name="visibility",
            desc="Toggle Controls",
            func=self.toggle_interface,
            icon_name="visibility_on"
        )
        # Space bar toggles play/pause.
        self.widgets["play"] = self._renderer._tool_bar_add_play_button(
            name="play",
            desc="Play/Pause",
            func=self.toggle_playback,
            shortcut=" ",
        )
        self._renderer._tool_bar_add_button(
            name="reset",
            desc="Reset",
            func=self.reset,
        )
        self._renderer._tool_bar_add_button(
            name="scale",
            desc="Auto-Scale",
            func=self.apply_auto_scaling,
        )
        self._renderer._tool_bar_add_button(
            name="clear",
            desc="Clear traces",
            func=self.clear_glyphs,
        )
        self._renderer._tool_bar_add_spacer()
        self._renderer._tool_bar_add_button(
            name="help",
            desc="Help",
            func=self.help,
            shortcut="?",
        )
    def _shift_time(self, op):
        """Step the current time by one playback-speed unit using ``op``
        (e.g. add to go forward, subtract to go back)."""
        self.callbacks["time"](
            value=(op(self._current_time, self.playback_speed)),
            time_as_index=False,
            update_widget=True,
        )
    def _rotate_azimuth(self, value):
        """Rotate the camera azimuth by ``value`` (wraps at 360)."""
        azimuth = (self._renderer.figure._azimuth + value) % 360
        self._renderer.set_camera(azimuth=azimuth, reset_camera=False)
    def _rotate_elevation(self, value):
        """Tilt the camera elevation by ``value``, clamped to the safe
        range in ``self._elevation_rng``."""
        elevation = np.clip(
            self._renderer.figure._elevation + value,
            self._elevation_rng[0],
            self._elevation_rng[1],
        )
        self._renderer.set_camera(elevation=elevation, reset_camera=False)
def _configure_shortcuts(self):
self._clear_callbacks()
self.plotter.add_key_event("i", self.toggle_interface)
self.plotter.add_key_event("s", self.apply_auto_scaling)
self.plotter.add_key_event("r", self.restore_user_scaling)
self.plotter.add_key_event("c", self.clear_glyphs)
self.plotter.add_key_event("n", partial(self._shift_time,
op=lambda x, y: x + y))
self.plotter.add_key_event("b", partial(self._shift_time,
op=lambda x, y: x - y))
for key, func, sign in (("Left", self._rotate_azimuth, 1),
("Right", self._rotate_azimuth, -1),
("Up", self._rotate_elevation, 1),
("Down", self._rotate_elevation, -1)):
self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE))
def _configure_menu(self):
self._renderer._menu_initialize()
self._renderer._menu_add_submenu(
name="help",
desc="Help",
)
self._renderer._menu_add_button(
menu_name="help",
name="help",
desc="Show MNE key bindings\t?",
func=self.help,
)
def _configure_status_bar(self):
self._renderer._status_bar_initialize()
self.status_msg = self._renderer._status_bar_add_label(
self.default_status_bar_msg, stretch=1)
self.status_progress = self._renderer._status_bar_add_progress_bar()
if self.status_progress is not None:
self.status_progress.hide()
def _on_mouse_move(self, vtk_picker, event):
if self._mouse_no_mvt:
self._mouse_no_mvt -= 1
    def _on_button_press(self, vtk_picker, event):
        """Arm pick detection: allow up to two move events before release."""
        self._mouse_no_mvt = 2
def _on_button_release(self, vtk_picker, event):
if self._mouse_no_mvt > 0:
x, y = vtk_picker.GetEventPosition()
try:
self.picked_renderer = \
self.plotter.iren.FindPokedRenderer(x, y)
except AttributeError:
self.picked_renderer = \
self.plotter.iren.interactor.FindPokedRenderer(x, y)
self.plotter.picker.Pick(x, y, 0, self.picked_renderer)
self._mouse_no_mvt = 0
    def _on_pick(self, vtk_picker, event):
        """Handle a pick event: toggle a vertex sphere or label patch."""
        if not self.show_traces:
            return
        cell_id = vtk_picker.GetCellId()
        mesh = vtk_picker.GetDataSet()
        # ignore misses and clicks that were actually drags
        if mesh is None or cell_id == -1 or not self._mouse_no_mvt:
            return
        # 1) Check to see if there are any spheres along the ray
        if len(self._spheres):
            collection = vtk_picker.GetProp3Ds()
            found_sphere = None
            for ii in range(collection.GetNumberOfItems()):
                actor = collection.GetItemAsObject(ii)
                for sphere in self._spheres:
                    if any(a is actor for a in sphere._actors):
                        found_sphere = sphere
                        break
                if found_sphere is not None:
                    break
            if found_sphere is not None:
                assert found_sphere._is_glyph
                mesh = found_sphere
        # 2) Remove sphere if it's what we have
        if hasattr(mesh, "_is_glyph"):
            self._remove_vertex_glyph(mesh)
            return
        # surface meshes carry a _hemi attribute; the volume grid does not
        try:
            hemi = mesh._hemi
        except AttributeError:
            hemi = 'vol'
        else:
            assert hemi in ('lh', 'rh')
        if self.act_data_smooth[hemi][0] is None:  # no data to pick
            return
        pos = np.array(vtk_picker.GetPickPosition())
        if hemi == 'vol':
            # volume: among voxels within half a grid diagonal of the pick
            # ray, choose the one with maximal |value|
            grid = mesh = self._data[hemi]['grid']
            vertices = self._data[hemi]['vertices']
            coords = self._data[hemi]['grid_coords'][vertices]
            scalars = grid.cell_arrays['values'][vertices]
            spacing = np.array(grid.GetSpacing())
            max_dist = np.linalg.norm(spacing) / 2.
            origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition()
            ori = pos - origin
            ori /= np.linalg.norm(ori)
            # perpendicular distance of each candidate voxel to the ray
            dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1)
            assert dists.shape == (len(coords),)
            mask = dists <= max_dist
            idx = np.where(mask)[0]
            if len(idx) == 0:
                return
            idx = idx[np.argmax(np.abs(scalars[idx]))]
            vertex_id = vertices[idx]
        else:
            # surface: pick a vertex of the clicked cell
            vtk_cell = mesh.GetCell(cell_id)
            cell = [vtk_cell.GetPointId(point_id) for point_id
                    in range(vtk_cell.GetNumberOfPoints())]
            vertices = mesh.points[cell]
            # NOTE(review): argmin is taken per coordinate and only the
            # x-axis winner is used — confirm this is intended rather than
            # the vertex with minimal Euclidean distance to ``pos``
            idx = np.argmin(abs(vertices - pos), axis=0)
            vertex_id = cell[idx[0]]
        if self.traces_mode == 'label':
            self._add_label_glyph(hemi, mesh, vertex_id)
        else:
            self._add_vertex_glyph(hemi, mesh, vertex_id)
def _add_label_glyph(self, hemi, mesh, vertex_id):
if hemi == 'vol':
return
label_id = self._vertex_to_label_id[hemi][vertex_id]
label = self._annotation_labels[hemi][label_id]
if label_id in self.picked_patches[hemi]:
self._remove_label_glyph(hemi, label_id)
return
if hemi == label.hemi:
self.add_label(label, borders=True, reset_camera=False)
self.picked_patches[hemi].append(label_id)
def _remove_label_glyph(self, hemi, label_id):
label = self._annotation_labels[hemi][label_id]
label._line.remove()
self.color_cycle.restore(label._color)
self.mpl_canvas.update_plot()
self._layered_meshes[hemi].remove_overlay(label.name)
self.picked_patches[hemi].remove(label_id)
    def _add_vertex_glyph(self, hemi, mesh, vertex_id):
        """Mark a picked vertex/voxel with a sphere and plot its trace."""
        if vertex_id in self.picked_points[hemi]:
            return
        if self.act_data_smooth[hemi][0] is None:
            return
        color = next(self.color_cycle)
        line = self.plot_time_course(hemi, vertex_id, color)
        if hemi == 'vol':
            # volume: use the centroid of the picked voxel's corner points
            ijk = np.unravel_index(
                vertex_id, np.array(mesh.GetDimensions()) - 1, order='F')
            # center = np.empty(3)
            # voxel.GetCentroid(center)
            voxel = mesh.GetCell(*ijk)
            pts = voxel.GetPoints()
            n_pts = pts.GetNumberOfPoints()
            center = np.empty((n_pts, 3))
            for ii in range(pts.GetNumberOfPoints()):
                pts.GetPoint(ii, center[ii])
            center = np.mean(center, axis=0)
        else:
            center = mesh.GetPoints().GetPoint(vertex_id)
        del mesh
        # from the picked renderer to the subplot coords
        try:
            lst = self._renderer._all_renderers._renderers
        except AttributeError:
            lst = self._renderer._all_renderers
        rindex = lst.index(self.picked_renderer)
        # NOTE(review): row/col are computed but not used below — confirm
        # whether they are still needed
        row, col = self._renderer._index_to_loc(rindex)
        actors = list()
        spheres = list()
        # add one sphere per subplot showing this hemi
        for ri, ci, _ in self._iter_views(hemi):
            self.plotter.subplot(ri, ci)
            # Using _sphere() instead of renderer.sphere() for 2 reasons:
            # 1) renderer.sphere() fails on Windows in a scenario where a lot
            # of picking requests are done in a short span of time (could be
            # mitigated with synchronization/delay?)
            # 2) the glyph filter is used in renderer.sphere() but only one
            # sphere is required in this function.
            actor, sphere = self._renderer._sphere(
                center=np.array(center),
                color=color,
                radius=4.0,
            )
            actors.append(actor)
            spheres.append(sphere)
        # add metadata for picking
        for sphere in spheres:
            sphere._is_glyph = True
            sphere._hemi = hemi
            sphere._line = line
            sphere._actors = actors
            sphere._color = color
            sphere._vertex_id = vertex_id
        self.picked_points[hemi].append(vertex_id)
        self._spheres.extend(spheres)
        self.pick_table[vertex_id] = spheres
        return sphere
    def _remove_vertex_glyph(self, mesh, render=True):
        """Remove a picked-vertex sphere (all views) and its trace."""
        vertex_id = mesh._vertex_id
        if vertex_id not in self.pick_table:
            return
        hemi = mesh._hemi
        color = mesh._color
        spheres = self.pick_table[vertex_id]
        # one matplotlib line is shared by all spheres of this pick
        spheres[0]._line.remove()
        self.mpl_canvas.update_plot()
        self.picked_points[hemi].remove(vertex_id)
        with warnings.catch_warnings(record=True):
            # We intentionally ignore these in case we have traversed the
            # entire color cycle
            warnings.simplefilter('ignore')
            self.color_cycle.restore(color)
        for sphere in spheres:
            # remove all actors
            self.plotter.remove_actor(sphere._actors, render=render)
            sphere._actors = None
            self._spheres.pop(self._spheres.index(sphere))
        self.pick_table.pop(vertex_id)
    def clear_glyphs(self):
        """Clear all picked vertex spheres, label patches and the RMS trace."""
        if not self.time_viewer:
            return
        for sphere in list(self._spheres):  # will remove itself, so copy
            self._remove_vertex_glyph(sphere, render=False)
        # all pick bookkeeping must now be empty
        assert sum(len(v) for v in self.picked_points.values()) == 0
        assert len(self.pick_table) == 0
        assert len(self._spheres) == 0
        for hemi in self._hemis:
            for label_id in list(self.picked_patches[hemi]):
                self._remove_label_glyph(hemi, label_id)
        assert sum(len(v) for v in self.picked_patches.values()) == 0
        if self.rms is not None:
            self.rms.remove()
            self.rms = None
        self._renderer._update()
    def plot_time_course(self, hemi, vertex_id, color):
        """Plot the time course of a vertex/voxel on the mpl canvas.

        Parameters
        ----------
        hemi : str
            'lh', 'rh' or 'vol'.
        vertex_id : int
            Vertex (surface) or voxel (volume) index.
        color : matplotlib color
            Line color.

        Returns
        -------
        line : matplotlib line | None
            The plotted line, or None when no canvas exists.
        """
        if self.mpl_canvas is None:
            return
        time = self._data['time'].copy()  # avoid circular ref
        mni = None
        if hemi == 'vol':
            hemi_str = 'V'
            # voxel ijk -> MNI coordinates via the Talairach transform
            xfm = read_talxfm(
                self._subject_id, self._subjects_dir)
            if self._units == 'mm':
                xfm['trans'][:3, 3] *= 1000.
            ijk = np.unravel_index(
                vertex_id, self._data[hemi]['grid_shape'], order='F')
            src_mri_t = self._data[hemi]['grid_src_mri_t']
            mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk)
        else:
            hemi_str = 'L' if hemi == 'lh' else 'R'
            try:
                mni = vertex_to_mni(
                    vertices=vertex_id,
                    hemis=0 if hemi == 'lh' else 1,
                    subject=self._subject_id,
                    subjects_dir=self._subjects_dir
                )
            except Exception:
                # MNI lookup is best-effort; label just omits it
                mni = None
        if mni is not None:
            mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni)
        else:
            mni = ''
        label = "{}:{}{}".format(hemi_str, str(vertex_id).ljust(6), mni)
        act_data, smooth = self.act_data_smooth[hemi]
        if smooth is not None:
            # project the picked vertex through the smoothing matrix
            act_data = smooth[vertex_id].dot(act_data)[0]
        else:
            act_data = act_data[vertex_id].copy()
        line = self.mpl_canvas.plot(
            time,
            act_data,
            label=label,
            lw=1.,
            color=color,
            zorder=4,
        )
        return line
def plot_time_line(self):
if self.mpl_canvas is None:
return
if isinstance(self.show_traces, bool) and self.show_traces:
# add time information
current_time = self._current_time
if not hasattr(self, "time_line"):
self.time_line = self.mpl_canvas.plot_time_line(
x=current_time,
label='time',
color=self._fg_color,
lw=1,
)
self.time_line.set_xdata(current_time)
self.mpl_canvas.update_plot()
def _configure_help(self):
pairs = [
('?', 'Display help window'),
('i', 'Toggle interface'),
('s', 'Apply auto-scaling'),
('r', 'Restore original clim'),
('c', 'Clear all traces'),
('n', 'Shift the time forward by the playback speed'),
('b', 'Shift the time backward by the playback speed'),
('Space', 'Start/Pause playback'),
('Up', 'Decrease camera elevation angle'),
('Down', 'Increase camera elevation angle'),
('Left', 'Decrease camera azimuth angle'),
('Right', 'Increase camera azimuth angle'),
]
text1, text2 = zip(*pairs)
text1 = '\n'.join(text1)
text2 = '\n'.join(text2)
self.help_canvas = self._renderer._window_get_simple_canvas(
width=5, height=2, dpi=80)
_show_help_fig(
col1=text1,
col2=text2,
fig_help=self.help_canvas.fig,
ax=self.help_canvas.axes,
show=False,
)
    def help(self):
        """Display the help window listing the keyboard shortcuts."""
        self.help_canvas.show()
    def _clear_callbacks(self):
        """Break reference cycles between callbacks and UI objects."""
        if not hasattr(self, 'callbacks'):
            return
        for callback in self.callbacks.values():
            if callback is not None:
                # sever back-references so the GC can collect everything
                for key in ('plotter', 'brain', 'callback',
                            'widget', 'widgets'):
                    setattr(callback, key, None)
        self.callbacks.clear()
        # Remove the default key binding
        # NOTE(review): the guard checks ``self.iren`` but the attribute
        # used below lives on ``self.plotter`` — confirm whether this was
        # meant to be ``getattr(self.plotter, "iren", None)``
        if getattr(self, "iren", None) is not None:
            self.plotter.iren.clear_key_event_callbacks()
def _clear_widgets(self):
if not hasattr(self, 'widgets'):
return
for widget in self.widgets.values():
if widget is not None:
for key in ('triggered', 'valueChanged'):
setattr(widget, key, None)
self.widgets.clear()
    @property
    def interaction(self):
        """The camera interaction style (str, e.g. 'trackball' or 'terrain')."""
        return self._interaction
    @interaction.setter
    def interaction(self, interaction):
        # validate, then apply the style to every subplot of the figure
        _validate_type(interaction, str, 'interaction')
        _check_option('interaction', interaction, ('trackball', 'terrain'))
        for ri, ci, _ in self._iter_views('vol'):  # will traverse all
            self._renderer.subplot(ri, ci)
            self._renderer.set_interaction(interaction)
def _cortex_colormap(self, cortex):
colormap_map = dict(classic=dict(colormap="Greys",
vmin=-1, vmax=2),
high_contrast=dict(colormap="Greys",
vmin=-.1, vmax=1.3),
low_contrast=dict(colormap="Greys",
vmin=-5, vmax=5),
bone=dict(colormap="bone_r",
vmin=-.2, vmax=2),
)
return colormap_map[cortex]
    @verbose
    def add_data(self, array, fmin=None, fmid=None, fmax=None,
                 thresh=None, center=None, transparent=False, colormap="auto",
                 alpha=1, vertices=None, smoothing_steps=None, time=None,
                 time_label="auto", colorbar=True,
                 hemi=None, remove_existing=None, time_label_size=None,
                 initial_time=None, scale_factor=None, vector_alpha=None,
                 clim=None, src=None, volume_options=0.4, colorbar_kwargs=None,
                 verbose=None):
        """Display data from a numpy array on the surface or volume.

        Parameters
        ----------
        array : ndarray
            Data to display; ``shape[0]`` must match ``vertices``, an
            optional middle dimension of 3 marks vector data, and the last
            dimension (if >1D) indexes time.
        fmin, fmid, fmax : float | None
            Colormap control points; missing ones are derived from ``array``.
        thresh : None
            Not supported yet; must be None.
        center : float | None
            Center value for a diverging colormap (None for sequential).
        transparent : bool
            Whether to use transparency in the colormap.
        colormap : str | ndarray
            Colormap; ``"auto"`` selects 'mne' (diverging) or 'hot'.
        alpha : float
            Overlay opacity.
        vertices : ndarray | None
            Vertex numbers corresponding to the rows of ``array``.
        smoothing_steps : int | 'nearest' | None
            Surface smoothing steps (None -> 7, 'nearest' -> 0).
        time : ndarray | None
            Time axis, length ``array.shape[-1]``; defaults to sample index.
        time_label : str | callable | None
            Format for the on-screen time label.
        colorbar : bool | tuple
            Whether to add a colorbar (or the (row, col) subplot for it).
        hemi : str | None
            'lh', 'rh' or 'vol'.
        remove_existing : None
            Not supported yet; must be None.
        time_label_size : float | None
            Font size for the time label (must be non-negative).
        initial_time : float | None
            Time point to show initially.
        scale_factor, vector_alpha : float | None
            Glyph scale and opacity (vector data); stored for later use.
        clim : dict | None
            Colormap limit specification; stored for later use.
        src : instance of SourceSpaces | None
            Source space, required when ``hemi='vol'``.
        volume_options : float | dict | None
            Resolution or option dict for the volume rendering.
        colorbar_kwargs : dict | None
            Extra keyword arguments for the scalar bar.
        %(verbose)s
        """
        _validate_type(transparent, bool, 'transparent')
        _validate_type(vector_alpha, ('numeric', None), 'vector_alpha')
        _validate_type(scale_factor, ('numeric', None), 'scale_factor')
        # those parameters are not supported yet, only None is allowed
        _check_option('thresh', thresh, [None])
        _check_option('remove_existing', remove_existing, [None])
        _validate_type(time_label_size, (None, 'numeric'), 'time_label_size')
        if time_label_size is not None:
            time_label_size = float(time_label_size)
            if time_label_size < 0:
                raise ValueError('time_label_size must be positive, got '
                                 f'{time_label_size}')
        hemi = self._check_hemi(hemi, extras=['vol'])
        stc, array, vertices = self._check_stc(hemi, array, vertices)
        array = np.asarray(array)
        vector_alpha = alpha if vector_alpha is None else vector_alpha
        self._data['vector_alpha'] = vector_alpha
        self._data['scale_factor'] = scale_factor
        # Create time array and add label if > 1D
        if array.ndim <= 1:
            time_idx = 0
        else:
            # check time array
            if time is None:
                time = np.arange(array.shape[-1])
            else:
                time = np.asarray(time)
                if time.shape != (array.shape[-1],):
                    raise ValueError('time has shape %s, but need shape %s '
                                     '(array.shape[-1])' %
                                     (time.shape, (array.shape[-1],)))
            self._data["time"] = time
            if self._n_times is None:
                self._times = time
            elif len(time) != self._n_times:
                raise ValueError("New n_times is different from previous "
                                 "n_times")
            elif not np.array_equal(time, self._times):
                raise ValueError("Not all time values are consistent with "
                                 "previously set times.")
            # initial time
            if initial_time is None:
                time_idx = 0
            else:
                time_idx = self._to_time_index(initial_time)
        # time label
        time_label, _ = _handle_time(time_label, 's', time)
        y_txt = 0.05 + 0.1 * bool(colorbar)
        if array.ndim == 3:
            if array.shape[1] != 3:
                raise ValueError('If array has 3 dimensions, array.shape[1] '
                                 'must equal 3, got %s' % (array.shape[1],))
        fmin, fmid, fmax = _update_limits(
            fmin, fmid, fmax, center, array
        )
        if colormap == 'auto':
            colormap = 'mne' if center is not None else 'hot'
        if smoothing_steps is None:
            smoothing_steps = 7
        elif smoothing_steps == 'nearest':
            smoothing_steps = 0
        elif isinstance(smoothing_steps, int):
            if smoothing_steps < 0:
                raise ValueError('Expected value of `smoothing_steps` is'
                                 ' positive but {} was given.'.format(
                                     smoothing_steps))
        else:
            raise TypeError('Expected type of `smoothing_steps` is int or'
                            ' NoneType but {} was given.'.format(
                                type(smoothing_steps)))
        self._data['stc'] = stc
        self._data['src'] = src
        self._data['smoothing_steps'] = smoothing_steps
        self._data['clim'] = clim
        self._data['time'] = time
        self._data['initial_time'] = initial_time
        self._data['time_label'] = time_label
        self._data['initial_time_idx'] = time_idx
        self._data['time_idx'] = time_idx
        self._data['transparent'] = transparent
        # data specific for a hemi
        self._data[hemi] = dict()
        self._data[hemi]['glyph_dataset'] = None
        self._data[hemi]['glyph_mapper'] = None
        self._data[hemi]['glyph_actor'] = None
        self._data[hemi]['array'] = array
        self._data[hemi]['vertices'] = vertices
        self._data['alpha'] = alpha
        self._data['colormap'] = colormap
        self._data['center'] = center
        self._data['fmin'] = fmin
        self._data['fmid'] = fmid
        self._data['fmax'] = fmax
        self.update_lut()
        # 1) add the surfaces first
        actor = None
        for ri, ci, _ in self._iter_views(hemi):
            self._renderer.subplot(ri, ci)
            if hemi in ('lh', 'rh'):
                actor = self._layered_meshes[hemi]._actor
            else:
                src_vol = src[2:] if src.kind == 'mixed' else src
                actor, _ = self._add_volume_data(hemi, src_vol, volume_options)
        assert actor is not None  # should have added one
        # 2) update time and smoothing properties
        # set_data_smoothing calls "set_time_point" for us, which will set
        # _current_time
        self.set_time_interpolation(self.time_interpolation)
        self.set_data_smoothing(self._data['smoothing_steps'])
        # 3) add the other actors
        if colorbar is True:
            # bottom left by default
            colorbar = (self._subplot_shape[0] - 1, 0)
        for ri, ci, v in self._iter_views(hemi):
            self._renderer.subplot(ri, ci)
            # Add the time label to the bottommost view
            do = (ri, ci) == colorbar
            if not self._time_label_added and time_label is not None and do:
                time_actor = self._renderer.text2d(
                    x_window=0.95, y_window=y_txt,
                    color=self._fg_color,
                    size=time_label_size,
                    text=time_label(self._current_time),
                    justification='right'
                )
                self._data['time_actor'] = time_actor
                self._time_label_added = True
            if colorbar and self._scalar_bar is None and do:
                kwargs = dict(source=actor, n_labels=8, color=self._fg_color,
                              bgcolor=self._brain_color[:3])
                kwargs.update(colorbar_kwargs or {})
                self._scalar_bar = self._renderer.scalarbar(**kwargs)
            self._renderer.set_camera(**views_dicts[hemi][v])
        # 4) update the scalar bar and opacity
        self.update_lut(alpha=alpha)
def _iter_views(self, hemi):
# which rows and columns each type of visual needs to be added to
if self._hemi == 'split':
hemi_dict = dict(lh=[0], rh=[1], vol=[0, 1])
else:
hemi_dict = dict(lh=[0], rh=[0], vol=[0])
for vi, view in enumerate(self._views):
if self._hemi == 'split':
view_dict = dict(lh=[vi], rh=[vi], vol=[vi, vi])
else:
view_dict = dict(lh=[vi], rh=[vi], vol=[vi])
if self._view_layout == 'vertical':
rows = view_dict # views are rows
cols = hemi_dict # hemis are columns
else:
rows = hemi_dict # hemis are rows
cols = view_dict # views are columns
for ri, ci in zip(rows[hemi], cols[hemi]):
yield ri, ci, view
def remove_labels(self):
for hemi in self._hemis:
mesh = self._layered_meshes[hemi]
for label in self._labels[hemi]:
mesh.remove_overlay(label.name)
self._labels[hemi].clear()
self._renderer._update()
def remove_annotations(self):
for hemi in self._hemis:
mesh = self._layered_meshes[hemi]
mesh.remove_overlay(self._annots[hemi])
self._annots[hemi].clear()
self._renderer._update()
    def _add_volume_data(self, hemi, src, volume_options):
        """Create/attach the positive and negative volume-rendering actors.

        Returns the ``(actor_pos, actor_neg)`` pair; ``actor_neg`` is None
        when no negative volume exists.
        """
        _validate_type(src, SourceSpaces, 'src')
        _check_option('src.kind', src.kind, ('volume',))
        _validate_type(
            volume_options, (dict, 'numeric', None), 'volume_options')
        assert hemi == 'vol'
        # normalize a scalar/None volume_options into a full options dict
        if not isinstance(volume_options, dict):
            volume_options = dict(
                resolution=float(volume_options) if volume_options is not None
                else None)
        volume_options = _handle_default('volume_options', volume_options)
        allowed_types = (
            ['resolution', (None, 'numeric')],
            ['blending', (str,)],
            ['alpha', ('numeric', None)],
            ['surface_alpha', (None, 'numeric')],
            ['silhouette_alpha', (None, 'numeric')],
            ['silhouette_linewidth', ('numeric',)],
        )
        for key, types in allowed_types:
            _validate_type(volume_options[key], types,
                           f'volume_options[{repr(key)}]')
        extra_keys = set(volume_options) - set(a[0] for a in allowed_types)
        if len(extra_keys):
            raise ValueError(
                f'volume_options got unknown keys {sorted(extra_keys)}')
        blending = _check_option('volume_options["blending"]',
                                 volume_options['blending'],
                                 ('composite', 'mip'))
        alpha = volume_options['alpha']
        if alpha is None:
            # 3D (vector) arrays default to a more transparent rendering
            alpha = 0.4 if self._data[hemi]['array'].ndim == 3 else 1.
        alpha = np.clip(float(alpha), 0., 1.)
        resolution = volume_options['resolution']
        surface_alpha = volume_options['surface_alpha']
        if surface_alpha is None:
            surface_alpha = min(alpha / 2., 0.1)
        silhouette_alpha = volume_options['silhouette_alpha']
        if silhouette_alpha is None:
            silhouette_alpha = surface_alpha / 4.
        silhouette_linewidth = volume_options['silhouette_linewidth']
        del volume_options
        volume_pos = self._data[hemi].get('grid_volume_pos')
        volume_neg = self._data[hemi].get('grid_volume_neg')
        center = self._data['center']
        if volume_pos is None:
            # first call: build the source-space grid in MRI coordinates
            xyz = np.meshgrid(
                *[np.arange(s) for s in src[0]['shape']], indexing='ij')
            dimensions = np.array(src[0]['shape'], int)
            mult = 1000 if self._units == 'mm' else 1
            src_mri_t = src[0]['src_mri_t']['trans'].copy()
            src_mri_t[:3] *= mult
            if resolution is not None:
                resolution = resolution * mult / 1000.  # to mm
            del src, mult
            coords = np.array([c.ravel(order='F') for c in xyz]).T
            coords = apply_trans(src_mri_t, coords)
            self.geo[hemi] = Bunch(coords=coords)
            vertices = self._data[hemi]['vertices']
            assert self._data[hemi]['array'].shape[0] == len(vertices)
            # MNE constructs the source space on a uniform grid in MRI space,
            # but mne coreg can change it to be non-uniform, so we need to
            # use all three elements here
            assert np.allclose(
                src_mri_t[:3, :3], np.diag(np.diag(src_mri_t)[:3]))
            spacing = np.diag(src_mri_t)[:3]
            origin = src_mri_t[:3, 3] - spacing / 2.
            scalars = np.zeros(np.prod(dimensions))
            scalars[vertices] = 1.  # for the outer mesh
            grid, grid_mesh, volume_pos, volume_neg = \
                self._renderer._volume(dimensions, origin, spacing, scalars,
                                       surface_alpha, resolution, blending,
                                       center)
            self._data[hemi]['alpha'] = alpha  # incorrectly set earlier
            self._data[hemi]['grid'] = grid
            self._data[hemi]['grid_mesh'] = grid_mesh
            self._data[hemi]['grid_coords'] = coords
            self._data[hemi]['grid_src_mri_t'] = src_mri_t
            self._data[hemi]['grid_shape'] = dimensions
            self._data[hemi]['grid_volume_pos'] = volume_pos
            self._data[hemi]['grid_volume_neg'] = volume_neg
        actor_pos, _ = self._renderer.plotter.add_actor(
            volume_pos, reset_camera=False, name=None, culling=False)
        if volume_neg is not None:
            actor_neg, _ = self._renderer.plotter.add_actor(
                volume_neg, reset_camera=False, name=None, culling=False)
        else:
            actor_neg = None
        grid_mesh = self._data[hemi]['grid_mesh']
        if grid_mesh is not None:
            # semi-transparent outer surface of the source-space grid,
            # excluded from picking
            _, prop = self._renderer.plotter.add_actor(
                grid_mesh, reset_camera=False, name=None, culling=False,
                pickable=False)
            prop.SetColor(*self._brain_color[:3])
            prop.SetOpacity(surface_alpha)
            if silhouette_alpha > 0 and silhouette_linewidth > 0:
                for ri, ci, v in self._iter_views('vol'):
                    self._renderer.subplot(ri, ci)
                    self._renderer._silhouette(
                        mesh=grid_mesh.GetInput(),
                        color=self._brain_color[:3],
                        line_width=silhouette_linewidth,
                        alpha=silhouette_alpha,
                    )
        return actor_pos, actor_neg
    def add_label(self, label, color=None, alpha=1, scalar_thresh=None,
                  borders=False, hemi=None, subdir=None,
                  reset_camera=True):
        """Add an ROI label to the image.

        Parameters
        ----------
        label : str | instance of Label
            Label name, path to a ``.label`` file, or a Label-like object
            with ``hemi``, ``vertices``, ``name`` (and ``values``).
        color : matplotlib color | None
            Label color; defaults to the label's own color or "crimson".
        alpha : float
            Opacity of the overlay.
        scalar_thresh : float | None
            Keep only vertices with ``label.values >= scalar_thresh``.
        borders : bool | int
            Show only the label border (an int widens it by that many
            rings of faces).
        hemi : str | None
            Hemisphere; inferred from the label when possible.
        subdir : str | None
            Subdirectory of ``<subjects_dir>/<subject>/label`` to search.
        reset_camera : bool
            Whether to reset the camera after adding the overlay.
        """
        from matplotlib.colors import colorConverter
        from ...label import read_label
        if isinstance(label, str):
            if color is None:
                color = "crimson"
            if os.path.isfile(label):
                # a filepath was given directly
                filepath = label
                label = read_label(filepath)
                hemi = label.hemi
                label_name = os.path.basename(filepath).split('.')[1]
            else:
                # a label name: resolve it under the subject's label dir
                hemi = self._check_hemi(hemi)
                label_name = label
                label_fname = ".".join([hemi, label_name, 'label'])
                if subdir is None:
                    filepath = op.join(self._subjects_dir, self._subject_id,
                                       'label', label_fname)
                else:
                    filepath = op.join(self._subjects_dir, self._subject_id,
                                       'label', subdir, label_fname)
                if not os.path.exists(filepath):
                    raise ValueError('Label file %s does not exist'
                                     % filepath)
                label = read_label(filepath)
            ids = label.vertices
            scalars = label.values
        else:
            # try to extract parameters from label instance
            try:
                hemi = label.hemi
                ids = label.vertices
                if label.name is None:
                    label.name = 'unnamed' + str(self._unnamed_label_id)
                    self._unnamed_label_id += 1
                label_name = str(label.name)
                if color is None:
                    if hasattr(label, 'color') and label.color is not None:
                        color = label.color
                    else:
                        color = "crimson"
                if scalar_thresh is not None:
                    scalars = label.values
            except Exception:
                raise ValueError('Label was not a filename (str), and could '
                                 'not be understood as a class. The class '
                                 'must have attributes "hemi", "vertices", '
                                 '"name", and (if scalar_thresh is not None)'
                                 '"values"')
            hemi = self._check_hemi(hemi)
        if scalar_thresh is not None:
            ids = ids[scalars >= scalar_thresh]
        # binary mask over all vertices of the hemisphere
        scalars = np.zeros(self.geo[hemi].coords.shape[0])
        scalars[ids] = 1
        if self.time_viewer and self.show_traces \
                and self.traces_mode == 'label':
            # label-traces mode: also plot the label's time course
            stc = self._data["stc"]
            src = self._data["src"]
            tc = stc.extract_label_time_course(label, src=src,
                                               mode=self.label_extract_mode)
            tc = tc[0] if tc.ndim == 2 else tc[0, 0, :]
            color = next(self.color_cycle)
            line = self.mpl_canvas.plot(
                self._data['time'], tc, label=label_name,
                color=color)
        else:
            line = None
        orig_color = color
        # two-entry table: transparent background + the label color
        color = colorConverter.to_rgba(color, alpha)
        cmap = np.array([(0, 0, 0, 0,), color])
        ctable = np.round(cmap * 255).astype(np.uint8)
        for ri, ci, v in self._iter_views(hemi):
            self._renderer.subplot(ri, ci)
            if borders:
                # keep only vertices touching a label boundary edge
                n_vertices = scalars.size
                edges = mesh_edges(self.geo[hemi].faces)
                edges = edges.tocoo()
                border_edges = scalars[edges.row] != scalars[edges.col]
                show = np.zeros(n_vertices, dtype=np.int64)
                keep_idx = np.unique(edges.row[border_edges])
                if isinstance(borders, int):
                    # widen the border by `borders` rings of faces
                    for _ in range(borders):
                        keep_idx = np.in1d(
                            self.geo[hemi].faces.ravel(), keep_idx)
                        keep_idx.shape = self.geo[hemi].faces.shape
                        keep_idx = self.geo[hemi].faces[np.any(
                            keep_idx, axis=1)]
                        keep_idx = np.unique(keep_idx)
                show[keep_idx] = 1
                scalars *= show
            mesh = self._layered_meshes[hemi]
            mesh.add_overlay(
                scalars=scalars,
                colormap=ctable,
                rng=[np.min(scalars), np.max(scalars)],
                opacity=alpha,
                name=label_name,
            )
            if reset_camera:
                self._renderer.set_camera(**views_dicts[hemi][v])
        if self.time_viewer and self.show_traces \
                and self.traces_mode == 'label':
            # remember trace metadata so picking can undo this label
            label._color = orig_color
            label._line = line
        self._labels[hemi].append(label)
        self._renderer._update()
    def add_foci(self, coords, coords_as_verts=False, map_surface=None,
                 scale_factor=1, color="white", alpha=1, name=None,
                 hemi=None, resolution=50):
        """Add spherical foci at the given positions.

        Parameters
        ----------
        coords : array-like
            Focus positions, or vertex indices when ``coords_as_verts``.
        coords_as_verts : bool
            Interpret ``coords`` as vertex indices of ``hemi``.
        map_surface : None
            Not supported yet; must be None.
        scale_factor : float
            Sphere size scaling.
        color : matplotlib color
            Focus color.
        alpha : float
            Opacity.
        name : None
            Currently unused.
        hemi : str | None
            'lh', 'rh' or 'vol'.
        resolution : int
            Sphere tessellation resolution.
        """
        from matplotlib.colors import colorConverter
        hemi = self._check_hemi(hemi, extras=['vol'])
        # those parameters are not supported yet, only None is allowed
        _check_option('map_surface', map_surface, [None])
        # Figure out how to interpret the first parameter
        if coords_as_verts:
            coords = self.geo[hemi].coords[coords]
        # Convert the color code
        if not isinstance(color, tuple):
            color = colorConverter.to_rgb(color)
        if self._units == 'm':
            # rescale when the scene is in meters rather than mm
            scale_factor = scale_factor / 1000.
        for ri, ci, v in self._iter_views(hemi):
            self._renderer.subplot(ri, ci)
            self._renderer.sphere(center=coords, color=color,
                                  scale=(10. * scale_factor),
                                  opacity=alpha, resolution=resolution)
            self._renderer.set_camera(**views_dicts[hemi][v])
def add_text(self, x, y, text, name=None, color=None, opacity=1.0,
row=-1, col=-1, font_size=None, justification=None):
# XXX: support `name` should be added when update_text/remove_text
# are implemented
# _check_option('name', name, [None])
self._renderer.text2d(x_window=x, y_window=y, text=text, color=color,
size=font_size, justification=justification)
    def _configure_label_time_course(self):
        """Switch traces to per-label mode using the annotation parcellation."""
        from ...label import read_labels_from_annot
        if not self.show_traces:
            return
        if self.mpl_canvas is None:
            self._configure_mplcanvas()
        else:
            self.clear_glyphs()
        self.traces_mode = 'label'
        self.add_annotation(self.annot, color="w", alpha=0.75)
        # now plot the time line
        self.plot_time_line()
        self.mpl_canvas.update_plot()
        for hemi in self._hemis:
            labels = read_labels_from_annot(
                subject=self._subject_id,
                parc=self.annot,
                hemi=hemi,
                subjects_dir=self._subjects_dir
            )
            # reverse lookup: vertex index -> label index (-1 = no label)
            self._vertex_to_label_id[hemi] = np.full(
                self.geo[hemi].coords.shape[0], -1)
            self._annotation_labels[hemi] = labels
            for idx, label in enumerate(labels):
                self._vertex_to_label_id[hemi][label.vertices] = idx
def add_annotation(self, annot, borders=True, alpha=1, hemi=None,
remove_existing=True, color=None, **kwargs):
from ...label import _read_annot
hemis = self._check_hemis(hemi)
# Figure out where the data is coming from
if isinstance(annot, str):
if os.path.isfile(annot):
filepath = annot
path = os.path.split(filepath)[0]
file_hemi, annot = os.path.basename(filepath).split('.')[:2]
if len(hemis) > 1:
if annot[:2] == 'lh.':
filepaths = [filepath, op.join(path, 'rh' + annot[2:])]
elif annot[:2] == 'rh.':
filepaths = [op.join(path, 'lh' + annot[2:], filepath)]
else:
raise RuntimeError('To add both hemispheres '
'simultaneously, filename must '
'begin with "lh." or "rh."')
else:
filepaths = [filepath]
else:
filepaths = []
for hemi in hemis:
filepath = op.join(self._subjects_dir,
self._subject_id,
'label',
".".join([hemi, annot, 'annot']))
if not os.path.exists(filepath):
raise ValueError('Annotation file %s does not exist'
% filepath)
filepaths += [filepath]
annots = []
for hemi, filepath in zip(hemis, filepaths):
# Read in the data
labels, cmap, _ = _read_annot(filepath)
annots.append((labels, cmap))
else:
annots = [annot] if len(hemis) == 1 else annot
annot = 'annotation'
for hemi, (labels, cmap) in zip(hemis, annots):
# Maybe zero-out the non-border vertices
self._to_borders(labels, hemi, borders)
# Handle null labels properly
cmap[:, 3] = 255
bgcolor = np.round(np.array(self._brain_color) * 255).astype(int)
bgcolor[-1] = 0
cmap[cmap[:, 4] < 0, 4] += 2 ** 24 # wrap to positive
cmap[cmap[:, 4] <= 0, :4] = bgcolor
if np.any(labels == 0) and not np.any(cmap[:, -1] <= 0):
cmap = np.vstack((cmap, np.concatenate([bgcolor, [0]])))
# Set label ids sensibly
order = np.argsort(cmap[:, -1])
cmap = cmap[order]
ids = np.searchsorted(cmap[:, -1], labels)
cmap = cmap[:, :4]
# Set the alpha level
alpha_vec = cmap[:, 3]
alpha_vec[alpha_vec > 0] = alpha * 255
# Override the cmap when a single color is used
if color is not None:
from matplotlib.colors import colorConverter
rgb = np.round(np.multiply(colorConverter.to_rgb(color), 255))
cmap[:, :3] = rgb.astype(cmap.dtype)
ctable = cmap.astype(np.float64)
for ri, ci, _ in self._iter_views(hemi):
self._renderer.subplot(ri, ci)
mesh = self._layered_meshes[hemi]
mesh.add_overlay(
scalars=ids,
colormap=ctable,
rng=[np.min(ids), np.max(ids)],
opacity=alpha,
name=annot,
)
self._annots[hemi].append(annot)
if not self.time_viewer or self.traces_mode == 'vertex':
self._renderer._set_colormap_range(
mesh._actor, cmap.astype(np.uint8), None)
self._renderer._update()
    def close(self):
        """Close all figures and clean up internal state."""
        self._closed = True
        self._renderer.close()
    def show(self):
        """Display the window."""
        self._renderer.show()
    def show_view(self, view=None, roll=None, distance=None, row=0, col=0,
                  hemi=None, align=True):
        """Orient the camera to a given view.

        Parameters
        ----------
        view : str | dict | None
            Named view (looked up in ``views_dicts``) or a camera
            parameter dict.
        roll : float | None
            Camera roll override.
        distance : float | None
            Camera distance override.
        row, col : int
            Subplot to orient.
        hemi : str | None
            Hemisphere used to look up named views (default: current).
        align : bool
            Whether to pass the rigid transform to the camera.
        """
        hemi = self._hemi if hemi is None else hemi
        if hemi == 'split':
            # resolve which hemisphere occupies the requested subplot
            if (self._view_layout == 'vertical' and col == 1 or
                    self._view_layout == 'horizontal' and row == 1):
                hemi = 'rh'
            else:
                hemi = 'lh'
        if isinstance(view, str):
            view = views_dicts[hemi].get(view)
        view = view.copy()
        if roll is not None:
            view.update(roll=roll)
        if distance is not None:
            view.update(distance=distance)
        self._renderer.subplot(row, col)
        xfm = self._rigid if align else None
        self._renderer.set_camera(**view, reset_camera=False, rigid=xfm)
        self._renderer._update()
def reset_view(self):
for h in self._hemis:
for ri, ci, v in self._iter_views(h):
self._renderer.subplot(ri, ci)
self._renderer.set_camera(**views_dicts[h][v],
reset_camera=False)
    def save_image(self, filename=None, mode='rgb'):
        """Save a screenshot of the current view to *filename*.

        Parameters
        ----------
        filename : str | None
            Output path; a default ``.png`` name is generated when None.
        mode : str
            Pixel format forwarded to :meth:`screenshot`.
        """
        if filename is None:
            filename = _generate_default_filename(".png")
        _save_ndarray_img(
            filename, self.screenshot(mode=mode, time_viewer=True))
    @fill_doc
    def screenshot(self, mode='rgb', time_viewer=False):
        """Generate an image of the current view.

        Parameters
        ----------
        mode : str
            Pixel format forwarded to the renderer.
        time_viewer : bool
            If True (and traces share the canvas), append the time-course
            plot below the rendering.

        Returns
        -------
        img : ndarray
            Image pixel values.
        """
        img = self._renderer.screenshot(mode)
        logger.debug(f'Got screenshot of size {img.shape}')
        if time_viewer and self.time_viewer and \
                self.show_traces and \
                not self.separate_canvas:
            from matplotlib.image import imread
            canvas = self.mpl_canvas.fig.canvas
            canvas.draw_idle()
            fig = self.mpl_canvas.fig
            with BytesIO() as output:
                # Need to pass dpi here so it uses the physical (HiDPI) DPI
                # rather than logical DPI when saving in most cases.
                # But when matplotlib uses HiDPI and VTK doesn't
                # so let's just calculate the DPI we need to get
                size_in = fig.get_size_inches()
                dpi = fig.get_dpi()
                want_size = tuple(x * dpi for x in size_in)
                n_pix = want_size[0] * want_size[1]
                logger.debug(
                    f'Saving figure of size {size_in} @ {dpi} DPI '
                    f'({want_size} = {n_pix} pixels)')
                fig.savefig(output, dpi=dpi, format='png',
                            facecolor=self._bg_color, edgecolor='none')
                output.seek(0)
                # drop the alpha channel and convert to uint8 pixels
                trace_img = imread(output, format='png')[:, :, :3]
                trace_img = np.clip(
                    np.round(trace_img * 255), 0, 255).astype(np.uint8)
                bgcolor = np.array(self._brain_color[:3]) / 255
                img = concatenate_images([img, trace_img], bgcolor=bgcolor)
        return img
    @contextlib.contextmanager
    def _no_lut_update(self, why):
        """Temporarily lock out LUT updates (checked by ``update_lut``)."""
        orig = self._lut_locked
        self._lut_locked = why  # the lock value records why it is held
        try:
            yield
        finally:
            self._lut_locked = orig
    @fill_doc
    def update_lut(self, fmin=None, fmid=None, fmax=None, alpha=None):
        """Update the range of the color map.

        Recomputes the lookup table and propagates it to surface overlays,
        volume renderings and vector glyphs; no-op while ``_lut_locked``.
        """
        args = f'{fmin}, {fmid}, {fmax}, {alpha}'
        if self._lut_locked is not None:
            logger.debug(f'LUT update postponed with {args}')
            return
        logger.debug(f'Updating LUT with {args}')
        center = self._data['center']
        colormap = self._data['colormap']
        transparent = self._data['transparent']
        lims = {key: self._data[key] for key in ('fmin', 'fmid', 'fmax')}
        # enforce fmin <= fmid <= fmax while applying the new values
        _update_monotonic(lims, fmin=fmin, fmid=fmid, fmax=fmax)
        assert all(val is not None for val in lims.values())
        self._data.update(lims)
        self._data['ctable'] = np.round(
            calculate_lut(colormap, alpha=1., center=center,
                          transparent=transparent, **lims) *
            255).astype(np.uint8)
        rng = self._cmap_range
        ctable = self._data['ctable']
        for hemi in ['lh', 'rh', 'vol']:
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                if hemi in self._layered_meshes:
                    mesh = self._layered_meshes[hemi]
                    mesh.update_overlay(name='data',
                                        colormap=self._data['ctable'],
                                        opacity=alpha,
                                        rng=rng)
                    self._renderer._set_colormap_range(
                        mesh._actor, ctable, self._scalar_bar, rng,
                        self._brain_color)
                grid_volume_pos = hemi_data.get('grid_volume_pos')
                grid_volume_neg = hemi_data.get('grid_volume_neg')
                for grid_volume in (grid_volume_pos, grid_volume_neg):
                    if grid_volume is not None:
                        self._renderer._set_volume_range(
                            grid_volume, ctable, hemi_data['alpha'],
                            self._scalar_bar, rng)
                glyph_actor = hemi_data.get('glyph_actor')
                if glyph_actor is not None:
                    for glyph_actor_ in glyph_actor:
                        self._renderer._set_colormap_range(
                            glyph_actor_, ctable, self._scalar_bar, rng)
        if self.time_viewer:
            # push the (possibly clamped) limits back into the UI widgets
            # without re-entering this method
            with self._no_lut_update(f'update_lut {args}'):
                for key in ('fmin', 'fmid', 'fmax'):
                    self.callbacks[key](lims[key])
        self._renderer._update()
def set_data_smoothing(self, n_steps):
    """Set the number of smoothing steps used to spread vertex data.

    Parameters
    ----------
    n_steps : int
        Number of smoothing iterations; ``0`` selects nearest-vertex
        mapping instead of iterative smoothing.
    """
    from scipy import sparse
    from ...morph import _hemi_morph
    for hemi in ['lh', 'rh']:
        hemi_data = self._data.get(hemi)
        if hemi_data is not None:
            # Data already covers every mesh vertex: nothing to smooth.
            if len(hemi_data['array']) >= self.geo[hemi].x.shape[0]:
                continue
            vertices = hemi_data['vertices']
            if vertices is None:
                # Bug fix: report the length of the data array, not the
                # number of keys in the hemi_data dict.
                raise ValueError(
                    'len(data) < nvtx (%s < %s): the vertices '
                    'parameter must not be None'
                    % (len(hemi_data['array']), self.geo[hemi].x.shape[0]))
            morph_n_steps = 'nearest' if n_steps == 0 else n_steps
            maps = sparse.eye(len(self.geo[hemi].coords), format='csr')
            with use_log_level(False):
                smooth_mat = _hemi_morph(
                    self.geo[hemi].orig_faces,
                    np.arange(len(self.geo[hemi].coords)),
                    vertices, morph_n_steps, maps, warn=False)
            self._data[hemi]['smooth_mat'] = smooth_mat
    # Re-render the current time point with the new smoothing matrices.
    self.set_time_point(self._data['time_idx'])
    self._data['smoothing_steps'] = n_steps
@property
def _n_times(self):
return len(self._times) if self._times is not None else None
@property
def time_interpolation(self):
    """Kind of interpolation used across time points (read-only)."""
    return self._time_interpolation
@fill_doc
def set_time_interpolation(self, interpolation):
    """Set how activation data is interpolated between time samples.

    Rebuilds the per-hemisphere interpolation functions as well as the
    index -> time inverse mapping.
    """
    valid = ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic')
    self._time_interpolation = _check_option(
        'interpolation', interpolation, valid)
    self._time_interp_funcs = dict()
    self._time_interp_inv = None
    if self._times is None:
        return
    idx = np.arange(self._n_times)
    for hemi in ('lh', 'rh', 'vol'):
        hemi_data = self._data.get(hemi)
        if hemi_data is None:
            continue
        self._time_interp_funcs[hemi] = _safe_interp1d(
            idx, hemi_data['array'], self._time_interpolation, axis=-1,
            assume_sorted=True)
    self._time_interp_inv = _safe_interp1d(idx, self._times)
def set_time_point(self, time_idx):
    """Display the data at a (possibly fractional) time-sample index.

    Parameters
    ----------
    time_idx : int | float
        Index into the time axis; fractional values interpolate
        between samples with the current time interpolation.
    """
    self._current_act_data = dict()
    time_actor = self._data.get('time_actor', None)
    time_label = self._data.get('time_label', None)
    for hemi in ['lh', 'rh', 'vol']:
        hemi_data = self._data.get(hemi)
        if hemi_data is not None:
            array = hemi_data['array']
            vectors = None
            if array.ndim == 1:
                # Static (single time point) data.
                act_data = array
                self._current_time = 0
            else:
                act_data = self._time_interp_funcs[hemi](time_idx)
                self._current_time = self._time_interp_inv(time_idx)
                if array.ndim == 3:
                    # Vector data: display the magnitudes, keep the raw
                    # vectors for the glyph (quiver) actors below.
                    vectors = act_data
                    act_data = np.linalg.norm(act_data, axis=1)
                # NOTE(review): redundant — _current_time was already
                # set identically a few lines above.
                self._current_time = self._time_interp_inv(time_idx)
            self._current_act_data[hemi] = act_data
            if time_actor is not None and time_label is not None:
                time_actor.SetInput(time_label(self._current_time))

            # Update the volume-rendering grid, if one exists.
            grid = hemi_data.get('grid')
            if grid is not None:
                vertices = self._data['vol']['vertices']
                values = self._current_act_data['vol']
                rng = self._cmap_range
                # Diverging maps fill unset cells with 0 (the center);
                # sequential maps fill with the lower limit.
                fill = 0 if self._data['center'] is not None else rng[0]
                grid.cell_arrays['values'].fill(fill)
                grid.cell_arrays['values'][vertices] = values

            # Spatially smooth sparse vertex data over the surface.
            smooth_mat = hemi_data.get('smooth_mat')
            if smooth_mat is not None:
                act_data = smooth_mat.dot(act_data)

            if hemi in self._layered_meshes:
                mesh = self._layered_meshes[hemi]
                if 'data' in mesh._overlays:
                    mesh.update_overlay(name='data', scalars=act_data)
                else:
                    mesh.add_overlay(
                        scalars=act_data,
                        colormap=self._data['ctable'],
                        rng=self._cmap_range,
                        opacity=None,
                        name='data',
                    )
            if vectors is not None:
                self._update_glyphs(hemi, vectors)
    self._data['time_idx'] = time_idx
    self._renderer._update()
def set_time(self, time):
    """Jump to *time* (in seconds), interpolating between samples.

    Raises
    ------
    ValueError
        If no times are defined or *time* is out of range.
    """
    if self._times is None:
        raise ValueError(
            'Cannot set time when brain has no defined times.')
    t_min, t_max = min(self._times), max(self._times)
    if not t_min <= time <= t_max:
        raise ValueError(
            f'Requested time ({time} s) is outside the range of '
            f'available times ({t_min}-{t_max} s).')
    idx = np.interp(float(time), self._times, np.arange(self._n_times))
    self.set_time_point(idx)
def _update_glyphs(self, hemi, vectors):
    """Create or update the quiver (vector) glyphs for one hemisphere.

    Parameters
    ----------
    hemi : str
        Hemisphere key ('lh', 'rh' or 'vol').
    vectors : ndarray
        Per-vertex 3D vectors at the current time point.
    """
    hemi_data = self._data.get(hemi)
    assert hemi_data is not None
    vertices = hemi_data['vertices']
    vector_alpha = self._data['vector_alpha']
    scale_factor = self._data['scale_factor']
    # None means the data covers all vertices.
    vertices = slice(None) if vertices is None else vertices
    x, y, z = np.array(self.geo[hemi].coords)[vertices].T

    # Actors are created once and reused on subsequent calls.
    if hemi_data['glyph_actor'] is None:
        add = True
        hemi_data['glyph_actor'] = list()
    else:
        add = False
    count = 0
    for ri, ci, _ in self._iter_views(hemi):
        self._renderer.subplot(ri, ci)
        if hemi_data['glyph_dataset'] is None:
            # First call: build the quiver dataset and mapper.
            glyph_mapper, glyph_dataset = self._renderer.quiver3d(
                x, y, z,
                vectors[:, 0], vectors[:, 1], vectors[:, 2],
                color=None,
                mode='2darrow',
                scale_mode='vector',
                scale=scale_factor,
                opacity=vector_alpha,
                name=str(hemi) + "_glyph"
            )
            hemi_data['glyph_dataset'] = glyph_dataset
            hemi_data['glyph_mapper'] = glyph_mapper
        else:
            # Subsequent calls: only swap in the new vectors.
            glyph_dataset = hemi_data['glyph_dataset']
            glyph_dataset.point_arrays['vec'] = vectors
            glyph_mapper = hemi_data['glyph_mapper']
        if add:
            glyph_actor = self._renderer._actor(glyph_mapper)
            prop = glyph_actor.GetProperty()
            prop.SetLineWidth(2.)
            prop.SetOpacity(vector_alpha)
            self._renderer.plotter.add_actor(glyph_actor)
            hemi_data['glyph_actor'].append(glyph_actor)
        else:
            glyph_actor = hemi_data['glyph_actor'][count]
        count += 1
        self._renderer._set_colormap_range(
            actor=glyph_actor,
            ctable=self._data['ctable'],
            scalar_bar=None,
            rng=self._cmap_range,
        )
@property
def _cmap_range(self):
dt_max = self._data['fmax']
if self._data['center'] is None:
dt_min = self._data['fmin']
else:
dt_min = -1 * dt_max
rng = [dt_min, dt_max]
return rng
def _update_fscale(self, fscale):
fmin = self._data['fmin'] * fscale
fmid = self._data['fmid'] * fscale
fmax = self._data['fmax'] * fscale
self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
def _update_auto_scaling(self, restore=False):
    """Recompute colormap limits from the currently displayed data.

    Parameters
    ----------
    restore : bool
        If True and the user supplied an explicit ``clim``, restore
        that instead of auto-scaling from the data.
    """
    user_clim = self._data['clim']
    if user_clim is not None and 'lims' in user_clim:
        # Explicit sequential limits: never switch to a diverging map.
        allow_pos_lims = False
    else:
        allow_pos_lims = True
    if user_clim is not None and restore:
        clim = user_clim
    else:
        clim = 'auto'
    colormap = self._data['colormap']
    transparent = self._data['transparent']
    mapdata = _process_clim(
        clim, colormap, transparent,
        np.concatenate(list(self._current_act_data.values())),
        allow_pos_lims)
    # 'pos_lims' in the processed clim marks a diverging colormap.
    diverging = 'pos_lims' in mapdata['clim']
    colormap = mapdata['colormap']
    scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims']
    transparent = mapdata['transparent']
    del mapdata
    fmin, fmid, fmax = scale_pts
    center = 0. if diverging else None
    self._data['center'] = center
    self._data['colormap'] = colormap
    self._data['transparent'] = transparent
    self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
def _to_time_index(self, value):
time = self._data['time']
value = np.interp(value, time, np.arange(len(time)))
return value
@property
def data(self):
    """Data characteristics dict (limits, colormap, per-hemi arrays)."""
    return self._data

@property
def labels(self):
    """Labels that have been added to this figure."""
    return self._labels

@property
def views(self):
    """Views shown in the figure."""
    return self._views

@property
def hemis(self):
    """Hemispheres displayed ('lh', 'rh', or both)."""
    return self._hemis
def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None,
                framerate=24, interpolation=None, codec=None,
                bitrate=None, callback=None, time_viewer=False, **kwargs):
    """Render frames across time and encode them with imageio."""
    import imageio
    with self._renderer._disabled_interaction():
        frames = self._make_movie_frames(
            time_dilation, tmin, tmax, framerate, interpolation, callback,
            time_viewer)
    # Forward encoding options to imageio, defaulting fps to framerate.
    kwargs.setdefault('fps', framerate)
    if codec is not None:
        kwargs['codec'] = codec
    if bitrate is not None:
        kwargs['bitrate'] = bitrate
    imageio.mimwrite(filename, frames, **kwargs)
def _save_movie_tv(self, filename, time_dilation=4., tmin=None, tmax=None,
                   framerate=24, interpolation=None, codec=None,
                   bitrate=None, callback=None, time_viewer=False,
                   **kwargs):
    """Save a movie with time-viewer UI feedback (status bar, cursor).

    Same signature as ``_save_movie``; progress is reported through the
    status-bar widgets while frames are rendered.

    NOTE(review): only ``filename`` and ``**kwargs`` are forwarded to
    ``_save_movie`` — time_dilation is replaced by the playback speed
    and tmin/tmax/framerate/codec/bitrate are dropped; confirm this is
    intended for time-viewer mode.
    """
    def frame_callback(frame, n_frames):
        # Invoked once per rendered frame; the final call (frame ==
        # n_frames) switches the status bar to the "saving" message.
        if frame == n_frames:
            self.status_msg.set_value(
                "Saving with ImageIO: %s"
                % filename
            )
            self.status_msg.show()
            self.status_progress.hide()
            self._renderer._status_bar_update()
        else:
            self.status_msg.set_value(
                "Rendering images (frame %d / %d) ..."
                % (frame + 1, n_frames)
            )
            self.status_msg.show()
            self.status_progress.show()
            self.status_progress.set_range([0, n_frames - 1])
            self.status_progress.set_value(frame)
            self.status_progress.update()
            self.status_msg.update()
            self._renderer._status_bar_update()

    # Show a wait cursor for the duration of the export.
    default_cursor = self._renderer._window_get_cursor()
    self._renderer._window_set_cursor(
        self._renderer._window_new_cursor("WaitCursor"))
    try:
        self._save_movie(
            filename=filename,
            time_dilation=(1. / self.playback_speed),
            callback=frame_callback,
            **kwargs
        )
    except (Exception, KeyboardInterrupt):
        warn('Movie saving aborted:\n' + traceback.format_exc())
    finally:
        # Always restore the original cursor.
        self._renderer._window_set_cursor(default_cursor)
@fill_doc
def save_movie(self, filename=None, time_dilation=4., tmin=None, tmax=None,
               framerate=24, interpolation=None, codec=None,
               bitrate=None, callback=None, time_viewer=False, **kwargs):
    """Save a movie of the brain, using the time-viewer UI if active."""
    if filename is None:
        filename = _generate_default_filename(".mp4")
    if self.time_viewer:
        writer = self._save_movie_tv
    else:
        writer = self._save_movie
    writer(filename, time_dilation, tmin, tmax, framerate, interpolation,
           codec, bitrate, callback, time_viewer, **kwargs)
def _make_movie_frames(self, time_dilation, tmin, tmax, framerate,
                       interpolation, callback, time_viewer):
    """Render one screenshot per movie frame across [tmin, tmax].

    Returns
    -------
    images : list of ndarray
        The rendered frames, one per output movie frame.

    Raises
    ------
    ValueError
        If tmin/tmax fall outside the available times, or no frames
        would be produced.
    """
    from math import floor

    # Validate the requested time window against the available times.
    if tmin is None:
        tmin = self._times[0]
    elif tmin < self._times[0]:
        raise ValueError("tmin=%r is smaller than the first time point "
                         "(%r)" % (tmin, self._times[0]))

    if tmax is None:
        tmax = self._times[-1]
    elif tmax > self._times[-1]:
        raise ValueError("tmax=%r is greater than the latest time point "
                         "(%r)" % (tmax, self._times[-1]))
    # One movie frame per 1/framerate of (dilated) real time.
    n_frames = floor((tmax - tmin) * time_dilation * framerate)
    times = np.arange(n_frames, dtype=float)
    times /= framerate * time_dilation
    times += tmin
    # Map frame times onto (fractional) sample indices.
    time_idx = np.interp(times, self._times, np.arange(self._n_times))

    n_times = len(time_idx)
    if n_times == 0:
        raise ValueError("No time points selected")

    logger.debug("Save movie for time points/samples\n%s\n%s"
                 % (times, time_idx))
    # NOTE(review): result discarded — appears to be a warm-up render
    # before capturing the real frames; confirm.
    self.screenshot(time_viewer=time_viewer)
    old_mode = self.time_interpolation
    if interpolation is not None:
        self.set_time_interpolation(interpolation)
    try:
        images = [
            self.screenshot(time_viewer=time_viewer)
            for _ in self._iter_time(time_idx, callback)]
    finally:
        # Always restore the previous interpolation mode.
        self.set_time_interpolation(old_mode)
    if callback is not None:
        # Signal completion (frame == n_frames).
        callback(frame=len(time_idx), n_frames=len(time_idx))
    return images
def _iter_time(self, time_idx, callback):
if self.time_viewer:
func = partial(self.callbacks["time"],
update_widget=True)
else:
func = self.set_time_point
current_time_idx = self._data["time_idx"]
for ii, idx in enumerate(time_idx):
func(idx)
if callback is not None:
callback(frame=ii, n_frames=len(time_idx))
yield idx
func(current_time_idx)
def _check_stc(self, hemi, array, vertices):
    """Normalize (stc | array) input into ``(stc, array, vertices)``.

    If *array* is a SourceEstimate, extract the data and vertices that
    correspond to *hemi*; otherwise pass the inputs through unchanged
    (with ``stc=None``).
    """
    from ...source_estimate import (
        _BaseSourceEstimate, _BaseSurfaceSourceEstimate,
        _BaseMixedSourceEstimate, _BaseVolSourceEstimate
    )
    if isinstance(array, _BaseSourceEstimate):
        stc = array
        stc_surf = stc_vol = None
        if isinstance(stc, _BaseSurfaceSourceEstimate):
            stc_surf = stc
        elif isinstance(stc, _BaseMixedSourceEstimate):
            # A mixed estimate contributes either its surface or its
            # volume part, depending on the requested hemi.
            stc_surf = stc.surface() if hemi != 'vol' else None
            stc_vol = stc.volume() if hemi == 'vol' else None
        elif isinstance(stc, _BaseVolSourceEstimate):
            stc_vol = stc if hemi == 'vol' else None
        else:
            raise TypeError("stc not supported")

        if stc_surf is None and stc_vol is None:
            raise ValueError("No data to be added")
        if stc_surf is not None:
            # e.g. stc.lh_data / stc.rh_data.
            array = getattr(stc_surf, hemi + '_data')
            vertices = stc_surf.vertices[0 if hemi == 'lh' else 1]
        if stc_vol is not None:
            array = stc_vol.data
            vertices = np.concatenate(stc_vol.vertices)
    else:
        stc = None
    return stc, array, vertices
def _check_hemi(self, hemi, extras=()):
if hemi is None:
if self._hemi not in ['lh', 'rh']:
raise ValueError('hemi must not be None when both '
'hemispheres are displayed')
else:
hemi = self._hemi
elif hemi not in ['lh', 'rh'] + list(extras):
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' +
extra + ", got " + str(hemi))
return hemi
def _check_hemis(self, hemi):
if hemi is None:
if self._hemi not in ['lh', 'rh']:
hemi = ['lh', 'rh']
else:
hemi = [self._hemi]
elif hemi not in ['lh', 'rh']:
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' + extra)
else:
hemi = [hemi]
return hemi
def _to_borders(self, label, hemi, borders, restrict_idx=None):
    """Reduce a per-vertex label mask to its border vertices, in place.

    Parameters
    ----------
    label : ndarray
        Per-vertex values; zeroed outside the border when *borders* is
        truthy (modified in place).
    hemi : str
        Hemisphere whose mesh defines vertex adjacency.
    borders : bool | int
        False leaves the label untouched; True / an int keeps a border
        grown by that many face-adjacency rings (True behaves as 1,
        since bool is an int subclass).
    restrict_idx : ndarray | None
        If given, only these vertex indices may remain visible.
    """
    if not isinstance(borders, (bool, int)) or borders < 0:
        raise ValueError('borders must be a bool or positive integer')
    if borders:
        n_vertices = label.size
        edges = mesh_edges(self.geo[hemi].orig_faces)
        edges = edges.tocoo()
        # An edge whose endpoints disagree lies on the label border.
        border_edges = label[edges.row] != label[edges.col]
        show = np.zeros(n_vertices, dtype=np.int64)
        keep_idx = np.unique(edges.row[border_edges])
        if isinstance(borders, int):
            # Dilate the border through face adjacency `borders` times.
            for _ in range(borders):
                keep_idx = np.in1d(
                    self.geo[hemi].orig_faces.ravel(), keep_idx)
                keep_idx.shape = self.geo[hemi].orig_faces.shape
                keep_idx = self.geo[hemi].orig_faces[
                    np.any(keep_idx, axis=1)]
                keep_idx = np.unique(keep_idx)
            if restrict_idx is not None:
                keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)]
        show[keep_idx] = 1
        label *= show
def enable_depth_peeling(self):
    """Enable renderer depth peeling for correct transparency."""
    self._renderer.enable_depth_peeling()
def get_picked_points(self):
    """Return the picked points when the time viewer exists, else None."""
    if not hasattr(self, "time_viewer"):
        return None
    return self.picked_points
def __hash__(self):
raise NotImplementedError
def _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False):
from scipy.interpolate import interp1d
if y.shape[axis] == 1:
def func(x):
return np.take(y, np.zeros(np.asarray(x).shape, int), axis=axis)
return func
else:
return interp1d(x, y, kind, axis=axis, assume_sorted=assume_sorted)
def _update_limits(fmin, fmid, fmax, center, array):
if center is None:
if fmin is None:
fmin = array.min() if array.size > 0 else 0
if fmax is None:
fmax = array.max() if array.size > 0 else 1
else:
if fmin is None:
fmin = 0
if fmax is None:
fmax = np.abs(center - array).max() if array.size > 0 else 1
if fmid is None:
fmid = (fmin + fmax) / 2.
if fmin >= fmid:
raise RuntimeError('min must be < mid, got %0.4g >= %0.4g'
% (fmin, fmid))
if fmid >= fmax:
raise RuntimeError('mid must be < max, got %0.4g >= %0.4g'
% (fmid, fmax))
return fmin, fmid, fmax
def _update_monotonic(lims, fmin, fmid, fmax):
    """Write fmin/fmid/fmax into *lims*, bumping others to stay sorted.

    Each non-None value is stored in ``lims``; any existing limit that
    would violate ``fmin <= fmid <= fmax`` is "bumped" to the new value
    (with a debug log entry). ``lims`` is modified in place.
    """
    if fmin is not None:
        lims['fmin'] = fmin
        if lims['fmax'] < fmin:
            logger.debug(f'    Bumping fmax = {lims["fmax"]} to {fmin}')
            lims['fmax'] = fmin
        if lims['fmid'] < fmin:
            logger.debug(f'    Bumping fmid = {lims["fmid"]} to {fmin}')
            lims['fmid'] = fmin
    assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
    if fmid is not None:
        lims['fmid'] = fmid
        if lims['fmin'] > fmid:
            logger.debug(f'    Bumping fmin = {lims["fmin"]} to {fmid}')
            lims['fmin'] = fmid
        if lims['fmax'] < fmid:
            logger.debug(f'    Bumping fmax = {lims["fmax"]} to {fmid}')
            lims['fmax'] = fmid
    assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
    if fmax is not None:
        lims['fmax'] = fmax
        if lims['fmin'] > fmax:
            logger.debug(f'    Bumping fmin = {lims["fmin"]} to {fmax}')
            lims['fmin'] = fmax
        if lims['fmid'] > fmax:
            logger.debug(f'    Bumping fmid = {lims["fmid"]} to {fmax}')
            lims['fmid'] = fmax
    assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
def _get_range(brain):
val = np.abs(np.concatenate(list(brain._current_act_data.values())))
return [np.min(val), np.max(val)]
class _FakeIren():
def EnterEvent(self):
pass
def MouseMoveEvent(self):
pass
def LeaveEvent(self):
pass
def SetEventInformation(self, *args, **kwargs):
pass
def CharEvent(self):
pass
def KeyPressEvent(self, *args, **kwargs):
pass
def KeyReleaseEvent(self, *args, **kwargs):
pass
| true | true |
1c324ee8763179a0da8952250323994620a39714 | 1,184 | py | Python | dnaSequence/test_strandsAreNotEmpty.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | 1 | 2021-03-22T20:45:06.000Z | 2021-03-22T20:45:06.000Z | dnaSequence/test_strandsAreNotEmpty.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | null | null | null | dnaSequence/test_strandsAreNotEmpty.py | ankitsumitg/python-projects | 34a3df6fcd8544bf83aa9f3d47ec160e3838b1d1 | [
"MIT"
] | null | null | null | """
Do Not Edit this file. You may and are encouraged to look at it for reference.
"""
import unittest
import dnaSequencing
class TestStrandsAreNotEmpty(unittest.TestCase):
    """Checks for ``dnaSequencing.strandsAreNotEmpty``."""

    def test001_strandsAreNotEmptyExists(self):
        """The function must exist with the expected name."""
        self.assertTrue('strandsAreNotEmpty' in dir(dnaSequencing),
                        'Function "strandsAreNotEmpty" was not defined, check your spelling')

    def test002_neitherStrandEmpty(self):
        """Two non-empty strands are accepted."""
        from dnaSequencing import strandsAreNotEmpty
        self.assertTrue(strandsAreNotEmpty('aaa', 'bbb'), 'Neither the target or candidate strand were empty')

    def test003_targetStrandEmpty(self):
        """An empty target strand is rejected."""
        from dnaSequencing import strandsAreNotEmpty
        self.assertFalse(strandsAreNotEmpty('', 'aaa'), 'The target strand was empty')

    def test004_candidateStrandEmpty(self):
        """An empty candidate strand is rejected.

        Renamed from the misnumbered ``test002_candidateStrandEmpty``
        so the test ids are sequential and unique.
        """
        from dnaSequencing import strandsAreNotEmpty
        self.assertFalse(strandsAreNotEmpty('aaa', ''), 'The candidate strand was empty')

    def test005_bothStrandsEmtpy(self):
        """Two empty strands are rejected."""
        from dnaSequencing import strandsAreNotEmpty
        self.assertFalse(strandsAreNotEmpty('', ''), 'Both strands were empty')


if __name__ == '__main__':
    unittest.main()
| 38.193548 | 110 | 0.727196 |
import unittest
import dnaSequencing
class TestStrandsAreNotEmpty(unittest.TestCase):
    """Checks for ``dnaSequencing.strandsAreNotEmpty``."""

    def test001_strandsAreNotEmptyExists(self):
        """The function must exist with the expected name."""
        self.assertTrue('strandsAreNotEmpty' in dir(dnaSequencing),
                        'Function "strandsAreNotEmpty" was not defined, check your spelling')

    def test002_neitherStrandEmpty(self):
        """Two non-empty strands are accepted."""
        from dnaSequencing import strandsAreNotEmpty
        self.assertTrue(strandsAreNotEmpty('aaa', 'bbb'), 'Neither the target or candidate strand were empty')

    def test003_targetStrandEmpty(self):
        """An empty target strand is rejected."""
        from dnaSequencing import strandsAreNotEmpty
        self.assertFalse(strandsAreNotEmpty('', 'aaa'), 'The target strand was empty')

    def test004_candidateStrandEmpty(self):
        """An empty candidate strand is rejected.

        Renamed from the misnumbered ``test002_candidateStrandEmpty``
        so the test ids are sequential and unique.
        """
        from dnaSequencing import strandsAreNotEmpty
        self.assertFalse(strandsAreNotEmpty('aaa', ''), 'The candidate strand was empty')

    def test005_bothStrandsEmtpy(self):
        """Two empty strands are rejected."""
        from dnaSequencing import strandsAreNotEmpty
        self.assertFalse(strandsAreNotEmpty('', ''), 'Both strands were empty')


if __name__ == '__main__':
    unittest.main()
| true | true |
1c324fde2fbe67a02bf54c8152defdcf78ac4408 | 4,082 | py | Python | iconparse.py | avuko/iconparse | 6a5895a26f8a7a43bc365dcadadf53a697f9576d | [
"BSD-3-Clause"
] | null | null | null | iconparse.py | avuko/iconparse | 6a5895a26f8a7a43bc365dcadadf53a697f9576d | [
"BSD-3-Clause"
] | null | null | null | iconparse.py | avuko/iconparse | 6a5895a26f8a7a43bc365dcadadf53a697f9576d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
import binascii
import sys
import array
# Simple tool to analyse .ico files
# bit = 1, byte = 8, WORD = 16bits, DWORD = 32bits
# 2 hex chars = 1 byte, 8 bits
# All sizes below are measured in hex characters (2 hex chars = 1 byte).
BYTE = 2
WORD = 4
DWORD = 8
# ICONDIR header fields, each described as {size, offset, name}.
# WORD: Reserved (must be 0)
idReserved = {'size': WORD, 'offset': 0, 'name': 'idReserved'}
# WORD: Resource Type (1 for icons)
idType = {'size': WORD, 'offset': 4, 'name': 'idType'}
# WORD: How many images?
idCount = {'size': WORD, 'offset': 8, 'name': 'idCount'}
# ICONDIRENTRY: image entries (idCount #)
idEntrySize = (BYTE * 4) + (WORD * 2) + (DWORD * 2)
idEntries = {'size': idEntrySize, 'offset': 12, 'name': 'idEntries'}
# ICONDIRENTRY https://en.wikipedia.org/wiki/ICO_(file_format)
# https://msdn.microsoft.com/en-us/library/ms997538.aspx
# Width, in pixels, of the image
bWidth = {'size': BYTE, 'offset': 0, 'name': 'bWidth'}
# Height, in pixels, of the image
bHeight = {'size': BYTE, 'offset': 2, 'name': 'bHeight'}
# Number of colors in image (0 if >=8bpp)
bColorCount = {'size': BYTE, 'offset': 4, 'name': 'bColorCount'}
# Reserved ( must be 0)
bReserved = {'size': BYTE, 'offset': 6, 'name': 'bReserved'}
# Color Planes
wPlanes = {'size': WORD, 'offset': 8, 'name': 'wPlanes'}
# Bits per pixel
wBitCount = {'size': WORD, 'offset': 12, 'name': 'wBitCount'}
# How many bytes in this resource?
dwBytesInRes = {'size': DWORD, 'offset': 16, 'name': 'dwBytesInRes'}
# Where in the file is this image?
dwImageOffset = {'size': DWORD, 'offset': 24, 'name': 'dwImageOffset'}
# Parse command-line arguments: argv[1] is the .ico file to analyse and
# any second argument enables dumping of the embedded image data.
try:
    binary = sys.argv[1]
except IndexError:
    print("please give me a file to work with")
    sys.exit(1)

try:
    # bool() also covers an explicitly empty second argument, which
    # previously left DUMP undefined and crashed later.
    DUMP = bool(sys.argv[2])
except IndexError:
    DUMP = False
def dword(hexdword):
    """Swap the two 16-bit halves of a little-endian DWORD hex string."""
    high, low = hexdword[0:4], hexdword[4:8]
    return low + high
def parsevalues(valueinfo, hexbinary):
    """Extract and decode one header field described by *valueinfo*.

    Fields up to DWORD size are decoded as integers (DWORD fields are
    byte-swapped back to big-endian first); larger fields are returned
    as raw hex.
    """
    name = valueinfo['name']
    offset = valueinfo['offset']
    size = valueinfo['size']
    raw = hexbinary[offset:offset + size]
    if len(raw) > DWORD:
        result = raw
    else:
        if size == DWORD:
            raw = dword(raw)
        result = int(raw, 16)
    return {'name': name, 'offset': offset, 'size': size, 'result': result}
def parseidentry(hexidentry):
    """Parse every ICONDIRENTRY field out of one entry's hex string."""
    fields = (bWidth, bHeight, bColorCount, bReserved, wPlanes,
              wBitCount, dwBytesInRes, dwImageOffset)
    return [parsevalues(field, hexidentry) for field in fields]
# Read, byteswap (from little endian because windows) and hexlify.
with open(binary, 'rb') as binaryin:
    BI = array.array('h', binaryin.read())
    BI.byteswap()
    hexbinary = binascii.hexlify(BI)

# Print the ICONDIR header fields.
for values in idReserved, idType, idCount:
    parsedvalues = parsevalues(values, hexbinary)
    print(parsedvalues)

idCountinfo = parsevalues(idCount, hexbinary)
idCounts = idCountinfo['result']

for idCount in range(1, idCounts + 1):
    dumpdata = None
    idEntry = parsevalues(idEntries, hexbinary)
    parsedidentry = parseidentry(idEntry['result'])
    # lets dump contents of the separate ico files
    for idinfo in parsedidentry:
        # Bug fix: compare strings with '==', not 'is' — identity of
        # string literals is a CPython interning detail, not a contract.
        if idinfo['name'] == 'dwBytesInRes':
            IDdwBytesInRes = idinfo['result']
        elif idinfo['name'] == 'dwImageOffset':
            IDdwImageOffset = idinfo['result']
    if DUMP:
        with open(binary, 'rb') as binaryin:
            binaryin.seek(IDdwImageOffset)
            idbinary = binaryin.read(int(IDdwBytesInRes))
            dumpdata = binascii.hexlify(idbinary)
    # ugly hack to make output easier to parse
    if dumpdata:
        print({idCount: parsedidentry, 'dumpdata': dumpdata})
    else:
        print({idCount: parsedidentry})
    # add 32 chars to offset because of the icondirentry size
    idEntries['offset'] = (idEntries['offset'] + 32)
| 32.396825 | 75 | 0.632778 |
import binascii
import sys
import array
BYTE = 2
WORD = 4
DWORD = 8
# WORD: Reserved (must be 0)
idReserved = {'size': WORD, 'offset': 0, 'name': 'idReserved'}
# WORD: Resource Type (1 for icons)
idType = {'size': WORD, 'offset': 4, 'name': 'idType'}
# WORD: How many images?
idCount = {'size': WORD, 'offset': 8, 'name': 'idCount'}
# ICONDIRENTRY: image entries (idCount #)
idEntrySize = (BYTE * 4) + (WORD * 2) + (DWORD * 2)
idEntries = {'size': idEntrySize, 'offset': 12, 'name': 'idEntries'}
# ICONDIRENTRY https://en.wikipedia.org/wiki/ICO_(file_format)
# https://msdn.microsoft.com/en-us/library/ms997538.aspx
# Width, in pixels, of the image
bWidth = {'size': BYTE, 'offset': 0, 'name': 'bWidth'}
# Height, in pixels, of the image
bHeight = {'size': BYTE, 'offset': 2, 'name': 'bHeight'}
# Number of colors in image (0 if >=8bpp)
bColorCount = {'size': BYTE, 'offset': 4, 'name': 'bColorCount'}
# Reserved ( must be 0)
bReserved = {'size': BYTE, 'offset': 6, 'name': 'bReserved'}
# Color Planes
wPlanes = {'size': WORD, 'offset': 8, 'name': 'wPlanes'}
# Bits per pixel
wBitCount = {'size': WORD, 'offset': 12, 'name': 'wBitCount'}
# How many bytes in this resource?
dwBytesInRes = {'size': DWORD, 'offset': 16, 'name': 'dwBytesInRes'}
# Where in the file is this image?
dwImageOffset = {'size': DWORD, 'offset': 24, 'name': 'dwImageOffset'}
# Parse command-line arguments: argv[1] is the .ico file to analyse and
# any second argument enables dumping of the embedded image data.
try:
    binary = sys.argv[1]
except IndexError:
    print("please give me a file to work with")
    sys.exit(1)

try:
    # bool() also covers an explicitly empty second argument, which
    # previously left DUMP undefined and crashed later.
    DUMP = bool(sys.argv[2])
except IndexError:
    DUMP = False
def dword(hexdword):
    """Swap the two 16-bit halves of a little-endian DWORD hex string."""
    hexdword = hexdword[4:8] + hexdword[0:4]
    return(hexdword)
def parsevalues(valueinfo, hexbinary):
    """Extract and decode one header field described by *valueinfo*.

    Fields up to DWORD size are decoded as integers (DWORD fields are
    byte-swapped back to big-endian first); larger fields are returned
    as raw hex.
    """
    name = valueinfo['name']
    offset = valueinfo['offset']
    size = valueinfo['size']
    slicesize = offset + size
    result = hexbinary[offset:slicesize]
    # if it is not a DWORD (or smaller), it is likely not an int.
    if len(result) > DWORD:
        result = result
    else:
        if size == DWORD:
            result = dword(result)
        result = int(result, 16)
    return {'name': name, 'offset': offset, 'size': size, 'result': result}
def parseidentry(hexidentry):
    """Parse every ICONDIRENTRY field out of one entry's hex string."""
    parsedvalues = []
    for value in [bWidth, bHeight, bColorCount, bReserved, wPlanes,
                  wBitCount, dwBytesInRes, dwImageOffset]:
        parsedvalues.append(parsevalues(value, hexidentry))
    return parsedvalues
# Read, byteswap (from little endian because windows) and hexlify.
with open(binary, 'rb') as binaryin:
    BI = array.array('h', binaryin.read())
    BI.byteswap()
    hexbinary = binascii.hexlify(BI)

# Print the ICONDIR header fields.
for values in idReserved, idType, idCount:
    parsedvalues = parsevalues(values, hexbinary)
    print(parsedvalues)

idCountinfo = parsevalues(idCount, hexbinary)
idCounts = idCountinfo['result']

for idCount in range(1, idCounts + 1):
    dumpdata = None
    idEntry = parsevalues(idEntries, hexbinary)
    parsedidentry = parseidentry(idEntry['result'])
    # lets dump contents of the separate ico files
    for idinfo in parsedidentry:
        # Bug fix: compare strings with '==', not 'is' — identity of
        # string literals is a CPython interning detail, not a contract.
        if idinfo['name'] == 'dwBytesInRes':
            IDdwBytesInRes = idinfo['result']
        elif idinfo['name'] == 'dwImageOffset':
            IDdwImageOffset = idinfo['result']
    if DUMP:
        with open(binary, 'rb') as binaryin:
            binaryin.seek(IDdwImageOffset)
            idbinary = binaryin.read(int(IDdwBytesInRes))
            dumpdata = binascii.hexlify(idbinary)
    # ugly hack to make output easier to parse
    if dumpdata:
        print({idCount: parsedidentry, 'dumpdata': dumpdata})
    else:
        print({idCount: parsedidentry})
    # add 32 chars to offset because of the icondirentry size
    idEntries['offset'] = (idEntries['offset'] + 32)
| true | true |
1c3251d213c147a8fb1ad8acb142a4cfc355d157 | 3,889 | py | Python | homeassistant/components/unifi/unifi_entity_base.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 4 | 2021-07-11T09:11:00.000Z | 2022-02-27T14:43:50.000Z | homeassistant/components/unifi/unifi_entity_base.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 277 | 2021-10-04T06:39:33.000Z | 2021-12-28T22:04:17.000Z | homeassistant/components/unifi/unifi_entity_base.py | andersop91/core | 0e0ef0aa17073609eae7c974cf4c73306b7c414b | [
"Apache-2.0"
] | 1 | 2022-01-12T22:14:01.000Z | 2022-01-12T22:14:01.000Z | """Base class for UniFi Network entities."""
import logging
from typing import Any
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
class UniFiBase(Entity):
    """UniFi entity base class."""

    # Subclasses set these so entities register under the right
    # platform (DOMAIN) and entity type (TYPE) in controller.entities.
    DOMAIN = ""
    TYPE = ""

    def __init__(self, item, controller) -> None:
        """Set up UniFi Network entity base.

        Register mac to controller entities to cover disabled entities.
        """
        self._item = item
        self.controller = controller
        self.controller.entities[self.DOMAIN][self.TYPE].add(self.key)

    @property
    def key(self) -> Any:
        """Return item key (the tracked device's MAC address)."""
        return self._item.mac

    async def async_added_to_hass(self) -> None:
        """Entity created.

        Subscribe to controller signals and to item state updates.
        """
        _LOGGER.debug(
            "New %s entity %s (%s)",
            self.TYPE,
            self.entity_id,
            self.key,
        )
        # Re-dispatch controller-level signals to this entity; the
        # listeners are removed automatically when the entity is.
        for signal, method in (
            (self.controller.signal_reachable, self.async_signal_reachable_callback),
            (self.controller.signal_options_update, self.options_updated),
            (self.controller.signal_remove, self.remove_item),
        ):
            self.async_on_remove(async_dispatcher_connect(self.hass, signal, method))
        self._item.register_callback(self.async_update_callback)

    async def async_will_remove_from_hass(self) -> None:
        """Disconnect object when removed."""
        _LOGGER.debug(
            "Removing %s entity %s (%s)",
            self.TYPE,
            self.entity_id,
            self.key,
        )
        self._item.remove_callback(self.async_update_callback)
        self.controller.entities[self.DOMAIN][self.TYPE].remove(self.key)

    @callback
    def async_signal_reachable_callback(self) -> None:
        """Call when controller connection state change."""
        self.async_update_callback()

    @callback
    def async_update_callback(self) -> None:
        """Update the entity's state."""
        _LOGGER.debug(
            "Updating %s entity %s (%s)",
            self.TYPE,
            self.entity_id,
            self.key,
        )
        self.async_write_ha_state()

    async def options_updated(self) -> None:
        """Config entry options are updated, remove entity if option is disabled."""
        raise NotImplementedError

    async def remove_item(self, keys: set) -> None:
        """Remove entity if key is part of set.

        Remove entity if no entry in entity registry exist.
        Remove entity registry entry if no entry in device registry exist.
        Remove device registry entry if there is only one linked entity (this entity).
        Remove config entry reference from device registry entry if there is more than one config entry.
        Remove entity registry entry if there are more than one entity linked to the device registry entry.
        """
        if self.key not in keys:
            return
        entity_registry = er.async_get(self.hass)
        entity_entry = entity_registry.async_get(self.entity_id)
        if not entity_entry:
            # Not in the entity registry: just remove the entity object.
            await self.async_remove(force_remove=True)
            return
        device_registry = dr.async_get(self.hass)
        device_entry = device_registry.async_get(entity_entry.device_id)
        if not device_entry:
            entity_registry.async_remove(self.entity_id)
            return
        # Detach this config entry from the device, then drop the entity.
        device_registry.async_update_device(
            entity_entry.device_id,
            remove_config_entry_id=self.controller.config_entry.entry_id,
        )
        entity_registry.async_remove(self.entity_id)

    @property
    def should_poll(self) -> bool:
        """No polling needed."""
        return False
| 34.114035 | 107 | 0.645153 | import logging
from typing import Any
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
class UniFiBase(Entity):
    """UniFi entity base class."""

    # Subclasses set these so entities register under the right
    # platform (DOMAIN) and entity type (TYPE) in controller.entities.
    DOMAIN = ""
    TYPE = ""

    def __init__(self, item, controller) -> None:
        """Set up the entity and register its key with the controller."""
        self._item = item
        self.controller = controller
        self.controller.entities[self.DOMAIN][self.TYPE].add(self.key)

    @property
    def key(self) -> Any:
        """Return item key (the tracked device's MAC address)."""
        return self._item.mac

    async def async_added_to_hass(self) -> None:
        """Subscribe to controller signals and item state updates."""
        _LOGGER.debug(
            "New %s entity %s (%s)",
            self.TYPE,
            self.entity_id,
            self.key,
        )
        # Re-dispatch controller-level signals to this entity; the
        # listeners are removed automatically when the entity is.
        for signal, method in (
            (self.controller.signal_reachable, self.async_signal_reachable_callback),
            (self.controller.signal_options_update, self.options_updated),
            (self.controller.signal_remove, self.remove_item),
        ):
            self.async_on_remove(async_dispatcher_connect(self.hass, signal, method))
        self._item.register_callback(self.async_update_callback)

    async def async_will_remove_from_hass(self) -> None:
        """Disconnect callbacks and deregister the key on removal."""
        _LOGGER.debug(
            "Removing %s entity %s (%s)",
            self.TYPE,
            self.entity_id,
            self.key,
        )
        self._item.remove_callback(self.async_update_callback)
        self.controller.entities[self.DOMAIN][self.TYPE].remove(self.key)

    @callback
    def async_signal_reachable_callback(self) -> None:
        """Handle a change in the controller connection state."""
        self.async_update_callback()

    @callback
    def async_update_callback(self) -> None:
        """Write the entity's updated state to Home Assistant."""
        _LOGGER.debug(
            "Updating %s entity %s (%s)",
            self.TYPE,
            self.entity_id,
            self.key,
        )
        self.async_write_ha_state()

    async def options_updated(self) -> None:
        """Handle updated config entry options (subclasses implement)."""
        raise NotImplementedError

    async def remove_item(self, keys: set) -> None:
        """Remove this entity (and registry entries) if its key is in *keys*."""
        if self.key not in keys:
            return
        entity_registry = er.async_get(self.hass)
        entity_entry = entity_registry.async_get(self.entity_id)
        if not entity_entry:
            # Not in the entity registry: just remove the entity object.
            await self.async_remove(force_remove=True)
            return
        device_registry = dr.async_get(self.hass)
        device_entry = device_registry.async_get(entity_entry.device_id)
        if not device_entry:
            entity_registry.async_remove(self.entity_id)
            return
        # Detach this config entry from the device, then drop the entity.
        device_registry.async_update_device(
            entity_entry.device_id,
            remove_config_entry_id=self.controller.config_entry.entry_id,
        )
        entity_registry.async_remove(self.entity_id)

    @property
    def should_poll(self) -> bool:
        """No polling needed; updates arrive via callbacks."""
        return False
| true | true |
1c32520c22a2a5d88be5c4198d83d778a6ecaf6c | 9,924 | py | Python | train.py | tuq820/efficientdet_with_landmark | 438d92168e5bb91d9ea9bd4b5c743ef41f936fde | [
"MIT"
] | 1 | 2020-07-31T10:04:37.000Z | 2020-07-31T10:04:37.000Z | train.py | tuq820/efficientdet_with_landmark | 438d92168e5bb91d9ea9bd4b5c743ef41f936fde | [
"MIT"
] | null | null | null | train.py | tuq820/efficientdet_with_landmark | 438d92168e5bb91d9ea9bd4b5c743ef41f936fde | [
"MIT"
] | null | null | null | import os
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from src.dataset import CocoDataset, Resizer, Normalizer, Augmenter, collater, MaJiaDataset
from src.model import EfficientDet
from tensorboardX import SummaryWriter
import shutil
import numpy as np
from tqdm.autonotebook import tqdm
def get_args(argv=None):
    """Parse command-line arguments for EfficientDet training.

    :param argv: optional list of argument strings to parse instead of
        ``sys.argv[1:]``; passing an explicit list makes this function
        usable from tests and other code (``None`` keeps the original
        command-line behaviour).
    :return: ``argparse.Namespace`` with the training configuration.
    """
    parser = argparse.ArgumentParser(
        "EfficientDet: Scalable and Efficient Object Detection implementation by Signatrix GmbH")
    parser.add_argument("--image_size", type=int, default=512, help="The common width and height for all images")
    parser.add_argument("--batch_size", type=int, default=8, help="The number of images per batch")
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument('--alpha', type=float, default=0.25)
    parser.add_argument('--gamma', type=float, default=1.5)
    parser.add_argument("--num_epochs", type=int, default=50)
    parser.add_argument("--test_interval", type=int, default=1, help="Number of epoches between testing phases")
    parser.add_argument("--es_min_delta", type=float, default=0.0,
                        help="Early stopping's parameter: minimum change loss to qualify as an improvement")
    parser.add_argument("--es_patience", type=int, default=0,
                        help="Early stopping's parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.")
    # parser.add_argument("--data_path", type=str, default="/disk4t/data/coco/data/coco", help="the root folder of dataset")
    parser.add_argument("--data_path", type=str, default="/home/pc/work/data/majia",
                        help="the root folder of dataset")
    # Help text fixed: this option is the annotation/label txt file, not the dataset root.
    parser.add_argument("--label_txt", type=str, default="/home/pc/work/data/majia/data_01.txt",
                        help="path of the txt file listing the training labels")
    parser.add_argument("--log_path", type=str, default="tensorboard/signatrix_efficientdet_coco")
    parser.add_argument("--saved_path", type=str, default="trained_models1")
    # parser.add_argument("--resume", type=str, default="trained_models/signatrix_efficientdet_majia_30.pth")
    parser.add_argument("--resume", type=str, default=None,
                        help="checkpoint to resume training from (None trains from scratch)")
    args = parser.parse_args(argv)
    return args
def train(opt):
    """Run the full EfficientDet training loop.

    Builds train/test dataloaders over the MaJia dataset, trains for
    ``opt.num_epochs`` epochs, logs per-iteration losses to TensorBoard,
    evaluates every ``opt.test_interval`` epochs, saves the best model as
    .pth and .onnx, and applies early stopping.

    :param opt: ``argparse.Namespace`` produced by ``get_args()``.
    """
    num_gpus = 1
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        torch.cuda.manual_seed(123)
    else:
        torch.manual_seed(123)
    # Effective batch size scales with the number of GPUs (DataParallel splits it).
    training_params = {"batch_size": opt.batch_size * num_gpus,
                       "shuffle": True,
                       "drop_last": True,
                       "collate_fn": collater,
                       "num_workers": 12}
    test_params = {"batch_size": opt.batch_size,
                   "shuffle": False,
                   "drop_last": False,
                   "collate_fn": collater,
                   "num_workers": 12}
    # training_set = CocoDataset(root_dir=opt.data_path, set="train2017",
    #                            transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
    training_set = MaJiaDataset(root_dir=opt.data_path, label_txt=opt.label_txt,
                                transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
    training_generator = DataLoader(training_set, **training_params)
    # test_set = CocoDataset(root_dir=opt.data_path, set="val2017",
    #                        transform=transforms.Compose([Normalizer(), Resizer()]))
    # NOTE(review): the "test" set reuses the training label file, so the
    # evaluation below measures training fit, not generalization -- confirm.
    test_set = MaJiaDataset(root_dir=opt.data_path, label_txt=opt.label_txt,
                            transform=transforms.Compose([Normalizer(), Resizer()]))
    test_generator = DataLoader(test_set, **test_params)
    if opt.resume is not None:
        # NOTE(review): resume loads a hard-coded checkpoint path instead of
        # opt.resume -- verify this is intentional.
        model = torch.load('trained_models/signatrix_efficientdet_majia_30.pth')
        if isinstance(model, torch.nn.DataParallel):
            model = model.module
    else:
        model = EfficientDet(num_classes=training_set.num_classes())
    # Start each run with a fresh TensorBoard log directory.
    if os.path.isdir(opt.log_path):
        shutil.rmtree(opt.log_path)
    os.makedirs(opt.log_path)
    if not os.path.isdir(opt.saved_path):
        os.makedirs(opt.saved_path)
    writer = SummaryWriter(opt.log_path)
    if torch.cuda.is_available():
        model = model.cuda()
        model = nn.DataParallel(model)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    # Reduce the learning rate when the mean epoch loss stops improving.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    best_loss = 1e5
    best_epoch = 0
    model.train()
    num_iter_per_epoch = len(training_generator)
    for epoch in range(opt.num_epochs):
        model.train()
        # if torch.cuda.is_available():
        #     model.module.freeze_bn()
        # else:
        #     model.freeze_bn()
        epoch_loss = []
        progress_bar = tqdm(training_generator)
        for iter, data in enumerate(progress_bar):
            try:
                optimizer.zero_grad()
                if torch.cuda.is_available():
                    cls_loss, reg_loss, ldm_loss = model([data['img'].cuda().float(), data['annot'].cuda()])
                else:
                    cls_loss, reg_loss, ldm_loss = model([data['img'].float(), data['annot']])
                cls_loss = cls_loss.mean()
                reg_loss = reg_loss.mean()
                ldm_loss = ldm_loss.mean()
                loss = cls_loss + reg_loss + ldm_loss
                # A zero loss means there is nothing to backpropagate for this batch.
                if loss == 0:
                    continue
                loss.backward()
                # Clip gradients for training stability.
                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
                optimizer.step()
                epoch_loss.append(float(loss))
                total_loss = np.mean(epoch_loss)
                progress_bar.set_description(
                    'Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Ldm loss: {:.5f}, Batch loss: {:.5f} Total loss: {:.5f}'.format(
                        epoch + 1, opt.num_epochs, iter + 1, num_iter_per_epoch, cls_loss, reg_loss, ldm_loss, loss,
                        total_loss))
                writer.add_scalar('Train/Total_loss', total_loss, epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Regression_loss', reg_loss, epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Landmark_loss', ldm_loss, epoch * num_iter_per_epoch + iter)
                writer.add_scalar('Train/Classfication_loss (focal loss)', cls_loss, epoch * num_iter_per_epoch + iter)
            except Exception as e:
                # Keep training even if a single batch fails; the error is only printed.
                print(e)
                continue
        scheduler.step(np.mean(epoch_loss))
        if epoch % opt.test_interval == 0:
            model.eval()
            loss_regression_ls = []
            loss_classification_ls = []
            loss_landmark_ls = []
            for iter, data in enumerate(test_generator):
                with torch.no_grad():
                    if torch.cuda.is_available():
                        cls_loss, reg_loss, ldm_loss = model([data['img'].cuda().float(), data['annot'].cuda()])
                    else:
                        cls_loss, reg_loss, ldm_loss = model([data['img'].float(), data['annot']])
                    cls_loss = cls_loss.mean()
                    reg_loss = reg_loss.mean()
                    ldm_loss = ldm_loss.mean()
                    loss_classification_ls.append(float(cls_loss))
                    loss_regression_ls.append(float(reg_loss))
                    loss_landmark_ls.append(float(ldm_loss))
            cls_loss = np.mean(loss_classification_ls)
            reg_loss = np.mean(loss_regression_ls)
            ldm_loss = np.mean(loss_landmark_ls)
            loss = cls_loss + reg_loss + ldm_loss
            print(
                'Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. Landmark loss: {:.5f}, Total loss: {:1.5f}'.format(
                    epoch + 1, opt.num_epochs, cls_loss, reg_loss, ldm_loss,
                    np.mean(loss)))
            writer.add_scalar('Test/Total_loss', loss, epoch)
            writer.add_scalar('Test/Regression_loss', reg_loss, epoch)
            writer.add_scalar('Test/Landmark_loss', ldm_loss, epoch)
            writer.add_scalar('Test/Classfication_loss (focal loss)', cls_loss, epoch)
            # Checkpoint only when the evaluation loss improved by at least es_min_delta.
            if loss + opt.es_min_delta < best_loss:
                best_loss = loss
                best_epoch = epoch
                torch.save(model, os.path.join(opt.saved_path, "signatrix_efficientdet_majia.pth"))
                dummy_input = torch.rand(opt.batch_size, 3, 512, 512)
                if torch.cuda.is_available():
                    dummy_input = dummy_input.cuda()
                # set_swish(memory_efficient=False) before ONNX export, restored afterwards.
                if isinstance(model, nn.DataParallel):
                    model.module.backbone_net.model.set_swish(memory_efficient=False)
                    torch.onnx.export(model.module, dummy_input,
                                      os.path.join(opt.saved_path, "signatrix_efficientdet_majia.onnx"),
                                      verbose=False)
                    model.module.backbone_net.model.set_swish(memory_efficient=True)
                else:
                    model.backbone_net.model.set_swish(memory_efficient=False)
                    torch.onnx.export(model, dummy_input,
                                      os.path.join(opt.saved_path, "signatrix_efficientdet_majia.onnx"),
                                      verbose=False)
                    model.backbone_net.model.set_swish(memory_efficient=True)
            # Early stopping
            if epoch - best_epoch > opt.es_patience > 0:
                print("Stop training at epoch {}. The lowest loss achieved is {}".format(epoch, loss))
                break
    writer.close()
if __name__ == "__main__":
    opt = get_args()
    train(opt)

# Run `python3 train.py` directly to train the vest-detection model.
# Run `python3 test_video.py` to test it on video.
| 46.373832 | 170 | 0.600665 | import os
import argparse
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from src.dataset import CocoDataset, Resizer, Normalizer, Augmenter, collater, MaJiaDataset
from src.model import EfficientDet
from tensorboardX import SummaryWriter
import shutil
import numpy as np
from tqdm.autonotebook import tqdm
def get_args():
parser = argparse.ArgumentParser(
"EfficientDet: Scalable and Efficient Object Detection implementation by Signatrix GmbH")
parser.add_argument("--image_size", type=int, default=512, help="The common width and height for all images")
parser.add_argument("--batch_size", type=int, default=8, help="The number of images per batch")
parser.add_argument("--lr", type=float, default=1e-4)
parser.add_argument('--alpha', type=float, default=0.25)
parser.add_argument('--gamma', type=float, default=1.5)
parser.add_argument("--num_epochs", type=int, default=50)
parser.add_argument("--test_interval", type=int, default=1, help="Number of epoches between testing phases")
parser.add_argument("--es_min_delta", type=float, default=0.0,
help="Early stopping's parameter: minimum change loss to qualify as an improvement")
parser.add_argument("--es_patience", type=int, default=0,
help="Early stopping's parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique.")
parser.add_argument("--data_path", type=str, default="/home/pc/work/data/majia",
help="the root folder of dataset")
parser.add_argument("--label_txt", type=str, default="/home/pc/work/data/majia/data_01.txt",
help="the root folder of dataset")
parser.add_argument("--log_path", type=str, default="tensorboard/signatrix_efficientdet_coco")
parser.add_argument("--saved_path", type=str, default="trained_models1")
parser.add_argument("--resume", type=str, default=None)
args = parser.parse_args()
return args
def train(opt):
num_gpus = 1
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
torch.cuda.manual_seed(123)
else:
torch.manual_seed(123)
training_params = {"batch_size": opt.batch_size * num_gpus,
"shuffle": True,
"drop_last": True,
"collate_fn": collater,
"num_workers": 12}
test_params = {"batch_size": opt.batch_size,
"shuffle": False,
"drop_last": False,
"collate_fn": collater,
"num_workers": 12}
training_set = MaJiaDataset(root_dir=opt.data_path, label_txt=opt.label_txt,
transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
training_generator = DataLoader(training_set, **training_params)
test_set = MaJiaDataset(root_dir=opt.data_path, label_txt=opt.label_txt,
transform=transforms.Compose([Normalizer(), Resizer()]))
test_generator = DataLoader(test_set, **test_params)
if opt.resume is not None:
model = torch.load('trained_models/signatrix_efficientdet_majia_30.pth')
if isinstance(model, torch.nn.DataParallel):
model = model.module
else:
model = EfficientDet(num_classes=training_set.num_classes())
if os.path.isdir(opt.log_path):
shutil.rmtree(opt.log_path)
os.makedirs(opt.log_path)
if not os.path.isdir(opt.saved_path):
os.makedirs(opt.saved_path)
writer = SummaryWriter(opt.log_path)
if torch.cuda.is_available():
model = model.cuda()
model = nn.DataParallel(model)
optimizer = torch.optim.Adam(model.parameters(), opt.lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
best_loss = 1e5
best_epoch = 0
model.train()
num_iter_per_epoch = len(training_generator)
for epoch in range(opt.num_epochs):
model.train()
epoch_loss = []
progress_bar = tqdm(training_generator)
for iter, data in enumerate(progress_bar):
try:
optimizer.zero_grad()
if torch.cuda.is_available():
cls_loss, reg_loss, ldm_loss = model([data['img'].cuda().float(), data['annot'].cuda()])
else:
cls_loss, reg_loss, ldm_loss = model([data['img'].float(), data['annot']])
cls_loss = cls_loss.mean()
reg_loss = reg_loss.mean()
ldm_loss = ldm_loss.mean()
loss = cls_loss + reg_loss + ldm_loss
if loss == 0:
continue
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
optimizer.step()
epoch_loss.append(float(loss))
total_loss = np.mean(epoch_loss)
progress_bar.set_description(
'Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Ldm loss: {:.5f}, Batch loss: {:.5f} Total loss: {:.5f}'.format(
epoch + 1, opt.num_epochs, iter + 1, num_iter_per_epoch, cls_loss, reg_loss, ldm_loss, loss,
total_loss))
writer.add_scalar('Train/Total_loss', total_loss, epoch * num_iter_per_epoch + iter)
writer.add_scalar('Train/Regression_loss', reg_loss, epoch * num_iter_per_epoch + iter)
writer.add_scalar('Train/Landmark_loss', ldm_loss, epoch * num_iter_per_epoch + iter)
writer.add_scalar('Train/Classfication_loss (focal loss)', cls_loss, epoch * num_iter_per_epoch + iter)
except Exception as e:
print(e)
continue
scheduler.step(np.mean(epoch_loss))
if epoch % opt.test_interval == 0:
model.eval()
loss_regression_ls = []
loss_classification_ls = []
loss_landmark_ls = []
for iter, data in enumerate(test_generator):
with torch.no_grad():
if torch.cuda.is_available():
cls_loss, reg_loss, ldm_loss = model([data['img'].cuda().float(), data['annot'].cuda()])
else:
cls_loss, reg_loss, ldm_loss = model([data['img'].float(), data['annot']])
cls_loss = cls_loss.mean()
reg_loss = reg_loss.mean()
ldm_loss = ldm_loss.mean()
loss_classification_ls.append(float(cls_loss))
loss_regression_ls.append(float(reg_loss))
loss_landmark_ls.append(float(ldm_loss))
cls_loss = np.mean(loss_classification_ls)
reg_loss = np.mean(loss_regression_ls)
ldm_loss = np.mean(loss_landmark_ls)
loss = cls_loss + reg_loss + ldm_loss
print(
'Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. Landmark loss: {:.5f}, Total loss: {:1.5f}'.format(
epoch + 1, opt.num_epochs, cls_loss, reg_loss, ldm_loss,
np.mean(loss)))
writer.add_scalar('Test/Total_loss', loss, epoch)
writer.add_scalar('Test/Regression_loss', reg_loss, epoch)
writer.add_scalar('Test/Landmark_loss', ldm_loss, epoch)
writer.add_scalar('Test/Classfication_loss (focal loss)', cls_loss, epoch)
if loss + opt.es_min_delta < best_loss:
best_loss = loss
best_epoch = epoch
torch.save(model, os.path.join(opt.saved_path, "signatrix_efficientdet_majia.pth"))
dummy_input = torch.rand(opt.batch_size, 3, 512, 512)
if torch.cuda.is_available():
dummy_input = dummy_input.cuda()
if isinstance(model, nn.DataParallel):
model.module.backbone_net.model.set_swish(memory_efficient=False)
torch.onnx.export(model.module, dummy_input,
os.path.join(opt.saved_path, "signatrix_efficientdet_majia.onnx"),
verbose=False)
model.module.backbone_net.model.set_swish(memory_efficient=True)
else:
model.backbone_net.model.set_swish(memory_efficient=False)
torch.onnx.export(model, dummy_input,
os.path.join(opt.saved_path, "signatrix_efficientdet_majia.onnx"),
verbose=False)
model.backbone_net.model.set_swish(memory_efficient=True)
if epoch - best_epoch > opt.es_patience > 0:
print("Stop training at epoch {}. The lowest loss achieved is {}".format(epoch, loss))
break
writer.close()
if __name__ == "__main__":
opt = get_args()
train(opt)
| true | true |
1c3253acb209ec239f44675e6e1fdabe60dfb69e | 8,615 | py | Python | qiskit/circuit/library/__init__.py | saktar-unr/qiskit-terra | e3c4e76957f40ad447e471e64dc101fed418752b | [
"Apache-2.0"
] | null | null | null | qiskit/circuit/library/__init__.py | saktar-unr/qiskit-terra | e3c4e76957f40ad447e471e64dc101fed418752b | [
"Apache-2.0"
] | null | null | null | qiskit/circuit/library/__init__.py | saktar-unr/qiskit-terra | e3c4e76957f40ad447e471e64dc101fed418752b | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
===============================================
Circuit Library (:mod:`qiskit.circuit.library`)
===============================================
.. currentmodule:: qiskit.circuit.library
Standard Gates
==============
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
C3XGate
C3SXGate
C4XGate
CCXGate
DCXGate
CHGate
CPhaseGate
CRXGate
CRYGate
CRZGate
CSwapGate
CSXGate
CUGate
CU1Gate
CU3Gate
CXGate
CYGate
CZGate
HGate
IGate
MCPhaseGate
MCXGate
MCXGrayCode
MCXRecursive
MCXVChain
PhaseGate
RCCXGate
RC3XGate
RGate
RXGate
RXXGate
RYGate
RYYGate
RZGate
RZZGate
RZXGate
XXPlusYYGate
XXMinusYYGate
ECRGate
SGate
SdgGate
SwapGate
iSwapGate
SXGate
SXdgGate
TGate
TdgGate
UGate
U1Gate
U2Gate
U3Gate
XGate
YGate
ZGate
Standard Directives
===================
..
This summary table deliberately does not generate toctree entries; these directives are "owned"
by ``qiskit.circuit``.
.. autosummary::
~qiskit.circuit.Barrier
~qiskit.circuit.Measure
~qiskit.circuit.Reset
Generalized Gates
=================
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
Diagonal
MCMT
MCMTVChain
Permutation
GMS
MSGate
GR
GRX
GRY
GRZ
RVGate
PauliGate
LinearFunction
Boolean Logic Circuits
======================
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
AND
OR
XOR
InnerProduct
Basis Change Circuits
=====================
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
QFT
Arithmetic Circuits
===================
Amplitude Functions
-------------------
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
LinearAmplitudeFunction
Functional Pauli Rotations
--------------------------
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
FunctionalPauliRotations
LinearPauliRotations
PolynomialPauliRotations
PiecewiseLinearPauliRotations
PiecewisePolynomialPauliRotations
PiecewiseChebyshev
Adders
------
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
DraperQFTAdder
CDKMRippleCarryAdder
VBERippleCarryAdder
WeightedAdder
Multipliers
-----------
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
HRSCumulativeMultiplier
RGQFTMultiplier
Comparators
-----------
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
IntegerComparator
Functions on binary variables
-----------------------------
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
QuadraticForm
Other arithmetic functions
--------------------------
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
ExactReciprocal
Amplitude Functions
===================
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
LinearAmplitudeFunction
Particular Quantum Circuits
===========================
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
FourierChecking
GraphState
HiddenLinearFunction
IQP
QuantumVolume
PhaseEstimation
GroverOperator
PhaseOracle
EvolvedOperatorAnsatz
PauliEvolutionGate
N-local circuits
================
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
NLocal
TwoLocal
PauliTwoDesign
RealAmplitudes
EfficientSU2
ExcitationPreserving
QAOAAnsatz
Data encoding circuits
======================
.. autosummary::
:toctree: ../stubs/
:template: autosummary/class_no_inherited_members.rst
PauliFeatureMap
ZFeatureMap
ZZFeatureMap
StatePreparation
NCT (Not-CNOT-Toffoli) template circuits
========================================
.. autosummary::
:toctree: ../stubs/
templates.nct.template_nct_2a_1
templates.nct.template_nct_2a_2
templates.nct.template_nct_2a_3
templates.nct.template_nct_4a_1
templates.nct.template_nct_4a_2
templates.nct.template_nct_4a_3
templates.nct.template_nct_4b_1
templates.nct.template_nct_4b_2
templates.nct.template_nct_5a_1
templates.nct.template_nct_5a_2
templates.nct.template_nct_5a_3
templates.nct.template_nct_5a_4
templates.nct.template_nct_6a_1
templates.nct.template_nct_6a_2
templates.nct.template_nct_6a_3
templates.nct.template_nct_6a_4
templates.nct.template_nct_6b_1
templates.nct.template_nct_6b_2
templates.nct.template_nct_6c_1
templates.nct.template_nct_7a_1
templates.nct.template_nct_7b_1
templates.nct.template_nct_7c_1
templates.nct.template_nct_7d_1
templates.nct.template_nct_7e_1
templates.nct.template_nct_2a_1
templates.nct.template_nct_9a_1
templates.nct.template_nct_9c_1
templates.nct.template_nct_9c_2
templates.nct.template_nct_9c_3
templates.nct.template_nct_9c_4
templates.nct.template_nct_9c_5
templates.nct.template_nct_9c_6
templates.nct.template_nct_9c_7
templates.nct.template_nct_9c_8
templates.nct.template_nct_9c_9
templates.nct.template_nct_9c_10
templates.nct.template_nct_9c_11
templates.nct.template_nct_9c_12
templates.nct.template_nct_9d_1
templates.nct.template_nct_9d_2
templates.nct.template_nct_9d_3
templates.nct.template_nct_9d_4
templates.nct.template_nct_9d_5
templates.nct.template_nct_9d_6
templates.nct.template_nct_9d_7
templates.nct.template_nct_9d_8
templates.nct.template_nct_9d_9
templates.nct.template_nct_9d_10
Clifford template circuits
==========================
.. autosummary::
:toctree: ../stubs/
clifford_2_1
clifford_2_2
clifford_2_3
clifford_2_4
clifford_3_1
clifford_4_1
clifford_4_2
clifford_4_3
clifford_4_4
clifford_5_1
clifford_6_1
clifford_6_2
clifford_6_3
clifford_6_4
clifford_6_5
clifford_8_1
clifford_8_2
clifford_8_3
RZXGate template circuits
=========================
.. autosummary::
:toctree: ../stubs/
rzx_yz
rzx_xz
rzx_cy
rzx_zz1
rzx_zz2
rzx_zz3
"""
from .standard_gates import *
from .templates import *
from ..barrier import Barrier
from ..measure import Measure
from ..reset import Reset
from .blueprintcircuit import BlueprintCircuit
from .generalized_gates import (
Diagonal,
MCMT,
MCMTVChain,
Permutation,
GMS,
MSGate,
GR,
GRX,
GRY,
GRZ,
RVGate,
PauliGate,
LinearFunction,
)
from .pauli_evolution import PauliEvolutionGate
from .boolean_logic import (
AND,
OR,
XOR,
InnerProduct,
)
from .basis_change import QFT
from .arithmetic import (
FunctionalPauliRotations,
LinearPauliRotations,
PiecewiseLinearPauliRotations,
PiecewisePolynomialPauliRotations,
PolynomialPauliRotations,
IntegerComparator,
WeightedAdder,
QuadraticForm,
LinearAmplitudeFunction,
VBERippleCarryAdder,
CDKMRippleCarryAdder,
DraperQFTAdder,
PiecewiseChebyshev,
HRSCumulativeMultiplier,
RGQFTMultiplier,
ExactReciprocal,
)
from .n_local import (
NLocal,
TwoLocal,
PauliTwoDesign,
RealAmplitudes,
EfficientSU2,
ExcitationPreserving,
QAOAAnsatz,
)
from .data_preparation import PauliFeatureMap, ZFeatureMap, ZZFeatureMap, StatePreparation
from .quantum_volume import QuantumVolume
from .fourier_checking import FourierChecking
from .graph_state import GraphState
from .hidden_linear_function import HiddenLinearFunction
from .iqp import IQP
from .phase_estimation import PhaseEstimation
from .grover_operator import GroverOperator
from .phase_oracle import PhaseOracle
from .evolved_operator_ansatz import EvolvedOperatorAnsatz
| 19.988399 | 99 | 0.710157 |
from .standard_gates import *
from .templates import *
from ..barrier import Barrier
from ..measure import Measure
from ..reset import Reset
from .blueprintcircuit import BlueprintCircuit
from .generalized_gates import (
Diagonal,
MCMT,
MCMTVChain,
Permutation,
GMS,
MSGate,
GR,
GRX,
GRY,
GRZ,
RVGate,
PauliGate,
LinearFunction,
)
from .pauli_evolution import PauliEvolutionGate
from .boolean_logic import (
AND,
OR,
XOR,
InnerProduct,
)
from .basis_change import QFT
from .arithmetic import (
FunctionalPauliRotations,
LinearPauliRotations,
PiecewiseLinearPauliRotations,
PiecewisePolynomialPauliRotations,
PolynomialPauliRotations,
IntegerComparator,
WeightedAdder,
QuadraticForm,
LinearAmplitudeFunction,
VBERippleCarryAdder,
CDKMRippleCarryAdder,
DraperQFTAdder,
PiecewiseChebyshev,
HRSCumulativeMultiplier,
RGQFTMultiplier,
ExactReciprocal,
)
from .n_local import (
NLocal,
TwoLocal,
PauliTwoDesign,
RealAmplitudes,
EfficientSU2,
ExcitationPreserving,
QAOAAnsatz,
)
from .data_preparation import PauliFeatureMap, ZFeatureMap, ZZFeatureMap, StatePreparation
from .quantum_volume import QuantumVolume
from .fourier_checking import FourierChecking
from .graph_state import GraphState
from .hidden_linear_function import HiddenLinearFunction
from .iqp import IQP
from .phase_estimation import PhaseEstimation
from .grover_operator import GroverOperator
from .phase_oracle import PhaseOracle
from .evolved_operator_ansatz import EvolvedOperatorAnsatz
| true | true |
1c3254b922526c41d4c73273574c98047e8491d8 | 770 | py | Python | Django_Project/apps/oauth/utils.py | yasugsh/Django_Project | de5f5cc9244cc2763dafe7597e5e2efef8342c8a | [
"MIT"
] | null | null | null | Django_Project/apps/oauth/utils.py | yasugsh/Django_Project | de5f5cc9244cc2763dafe7597e5e2efef8342c8a | [
"MIT"
] | null | null | null | Django_Project/apps/oauth/utils.py | yasugsh/Django_Project | de5f5cc9244cc2763dafe7597e5e2efef8342c8a | [
"MIT"
] | null | null | null | from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData
from django.conf import settings
from . import constants
def generate_openid_signature(openid):
    """Sign a QQ ``openid`` and return it as an expiring token string.

    :param openid: openid obtained from the QQ OAuth scan-code login.
    :return: serialized (signed) openid as ``str``.
    """
    signer = Serializer(settings.SECRET_KEY, expires_in=constants.ACCESS_TOKEN_EXPIRES)
    # dumps() performs the symmetric signing and returns bytes,
    # so decode to str before handing the token back.
    return signer.dumps({'openid': openid}).decode()
def check_openid_signature(token_openid):
    """Verify a signed openid token and recover the raw openid.

    :param token_openid: token produced by ``generate_openid_signature``.
    :return: the original openid, or ``None`` if the token is invalid or expired.
    """
    verifier = Serializer(settings.SECRET_KEY, expires_in=constants.ACCESS_TOKEN_EXPIRES)
    try:
        payload = verifier.loads(token_openid)
    except BadData:
        # Tampered or expired token: treat as invalid instead of raising.
        return None
    return payload.get('openid')
| 26.551724 | 91 | 0.736364 | from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData
from django.conf import settings
from . import constants
def generate_openid_signature(openid):
serializer = Serializer(settings.SECRET_KEY, expires_in=constants.ACCESS_TOKEN_EXPIRES)
data = {'openid': openid}
token_openid = serializer.dumps(data)
return token_openid.decode()
def check_openid_signature(token_openid):
serializer = Serializer(settings.SECRET_KEY, expires_in=constants.ACCESS_TOKEN_EXPIRES)
try:
data = serializer.loads(token_openid)
except BadData:
return None
return data.get('openid')
| true | true |
1c325586cdeb67523fc28455208f5f446e7a0214 | 37,261 | py | Python | theano/tensor/extra_ops.py | mayunpeng/Theano | c74da33de3768e231ffa0d92d9d11667a2a5aedb | [
"BSD-3-Clause"
] | 1 | 2021-07-01T02:51:08.000Z | 2021-07-01T02:51:08.000Z | theano/tensor/extra_ops.py | mayunpeng/Theano | c74da33de3768e231ffa0d92d9d11667a2a5aedb | [
"BSD-3-Clause"
] | null | null | null | theano/tensor/extra_ops.py | mayunpeng/Theano | c74da33de3768e231ffa0d92d9d11667a2a5aedb | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import numpy
import warnings
from six.moves import xrange
import theano
from theano.tensor import basic
from theano.tensor import nlinalg # noqa
from theano import gof, scalar
from theano.gradient import DisconnectedType
tensor = basic
class CpuContiguous(theano.Op):
    """
    Check to see if the input is c-contiguous,
    if it is, do nothing, else return a contiguous array
    """
    __props__ = ()
    # Output 0 may be the unmodified input, so it is declared as a view of input 0.
    view_map = {0: [0]}
    def make_node(self, x):
        # Accept anything convertible to a tensor; output keeps the same type.
        x_ = theano.tensor.as_tensor_variable(x)
        return theano.Apply(self, [x_], [x_.type()])
    def perform(self, node, inputs, output_storage):
        x, = inputs
        y = output_storage[0]
        # If the input is already contiguous do nothing, else copy it
        # (ndarray.copy() defaults to C order).
        if not x.flags['C_CONTIGUOUS']:
            x = x.copy()
        assert x.flags['C_CONTIGUOUS']
        y[0] = x
    def c_code(self, node, name, inames, onames, sub):
        # C implementation mirrors perform(): reuse the input buffer when it is
        # already C-contiguous, otherwise produce a contiguous copy via
        # PyArray_CopyInto (reusing an existing contiguous output) or
        # PyArray_GETCONTIGUOUS.
        x, = inames
        y, = onames
        code = """
            if (!PyArray_CHKFLAGS(%(x)s, NPY_ARRAY_C_CONTIGUOUS)){
                // check to see if output is contiguous first
                if (%(y)s != NULL &&
                    PyArray_CHKFLAGS(%(y)s, NPY_ARRAY_C_CONTIGUOUS)){
                    PyArray_CopyInto(%(y)s, %(x)s);
                }
                else{
                    Py_XDECREF(%(y)s);
                    %(y)s = PyArray_GETCONTIGUOUS(%(x)s);
                }
            }
            else{
                Py_XINCREF(%(x)s);
                Py_XDECREF(%(y)s);
                %(y)s = %(x)s;
            }
            """ % locals()
        return code
    def c_code_cache_version(self):
        return (0,)
# Module-level singleton instance used throughout Theano.
cpu_contiguous = CpuContiguous()
class CumsumOp(theano.Op):
    # See function cumsum for docstring
    # ``axis`` participates in __eq__/__hash__ so distinct-axis ops are
    # distinct graph nodes.
    __props__ = ("axis",)
    def __init__(self, axis=None):
        self.axis = axis
    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        out_type = x.type()
        if self.axis is None:
            # axis=None flattens, so the output is always a 1-d vector.
            out_type = theano.tensor.vector(dtype=x.dtype)  # Flatten
        elif self.axis >= x.ndim or self.axis < -x.ndim:
            raise ValueError('axis(={0}) out of bounds'.format(self.axis))
        return theano.Apply(self, [x], [out_type])
    def perform(self, node, inputs, output_storage):
        # Python fallback: delegate directly to numpy.
        x = inputs[0]
        z = output_storage[0]
        z[0] = np.cumsum(x, axis=self.axis)
    def grad(self, inputs, output_gradients):
        # Gradient of cumsum is a reversed cumsum of the output gradient.
        [gi] = output_gradients
        if self.axis is None:
            return [cumsum(gi[::-1])[::-1].reshape(inputs[0].shape)]
        # We need to reverse the gradients along ``self.axis``,
        # compute cumsum, then reverse again
        reverse_slicing = [slice(None, None, None)] * gi.ndim
        reverse_slicing[self.axis] = slice(None, None, -1)
        reverse_slicing = tuple(reverse_slicing)
        return [cumsum(gi[reverse_slicing], self.axis)[reverse_slicing]]
    def infer_shape(self, node, shapes):
        if self.axis is None:
            return [(tensor.prod(shapes[0]),)]  # Flatten
        return shapes
    def c_code(self, node, name, inames, onames, sub):
        # C implementation wraps NumPy's PyArray_CumSum; the first branch
        # handles the flatten case (or the equivalent axis=0 on a vector),
        # the second a fixed-axis cumsum.
        x, = inames
        z, = onames
        axis = self.axis
        fail = sub['fail']
        if self.axis is None or (self.axis == 0 and node.inputs[0].ndim == 1):
            code = """
                npy_intp shape[1] = { PyArray_SIZE(%(x)s) };
                if(!(%(z)s && PyArray_DIMS(%(z)s)[0] == shape[0]))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, PyArray_TYPE((PyArrayObject*) py_%(x)s));
                }
                if (!%(z)s)
                    %(fail)s;
                {
                    PyObject * t = PyArray_CumSum(
                        %(x)s, NPY_MAXDIMS,
                        PyArray_TYPE((PyArrayObject*) py_%(x)s), %(z)s);
                    if (!t){
                        %(fail)s;
                    }
                    // Because PyArray_CumSum returns a newly created reference on t.
                    Py_XDECREF(t);
                }
            """ % locals()
        else:
            code = """
                if(!(%(z)s && PyArray_CompareLists(PyArray_DIMS(%(z)s), PyArray_DIMS(%(x)s), PyArray_NDIM(%(x)s))))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM(%(x)s), PyArray_DIMS(%(x)s), PyArray_TYPE((PyArrayObject*) py_%(x)s));
                }
                if (!%(z)s)
                    %(fail)s;
                {
                    PyObject * t = PyArray_CumSum(
                        %(x)s, %(axis)s,
                        PyArray_TYPE((PyArrayObject*) py_%(x)s), %(z)s);
                    if (!t){
                        %(fail)s;
                    }
                    // Because PyArray_CumSum returns a newly created reference on t.
                    Py_XDECREF(t);
                }
            """ % locals()
        return code
    def c_code_cache_version(self):
        return (6,)
    def __str__(self):
        return "%s{%s}" % (self.__class__.__name__, self.axis)
def cumsum(x, axis=None):
    """Compute the cumulative sum of the elements along a given axis.

    Thin wrapper building a ``CumsumOp`` node, mirroring ``numpy.cumsum``.

    :param x: Input tensor variable.
    :param axis: Axis along which the cumulative sum is taken; when
        ``None`` (the default) the sum runs over the flattened array.
    .. versionadded:: 0.7
    """
    op = CumsumOp(axis=axis)
    return op(x)
class CumprodOp(theano.Op):
    """Cumulative product along an axis; see the ``cumprod`` function
    for the user-facing docstring."""
    __props__ = ("axis",)
    def __init__(self, axis=None):
        # axis=None means: operate over the flattened input.
        self.axis = axis
    def make_node(self, x):
        """Build the Apply node; the output is a vector when axis is None."""
        x = basic.as_tensor_variable(x)
        out_type = x.type()
        if self.axis is None:
            out_type = theano.tensor.vector(dtype=x.dtype)  # Flatten
        elif self.axis >= x.ndim or self.axis < -x.ndim:
            raise ValueError('axis(={0}) out of bounds'.format(self.axis))
        return theano.Apply(self, [x], [out_type])
    def perform(self, node, inputs, output_storage):
        """Python fallback: delegate to numpy.cumprod."""
        x = inputs[0]
        z = output_storage[0]
        z[0] = np.cumprod(x, axis=self.axis)
    def grad(self, inputs, output_gradients):
        x, = inputs
        gi, = output_gradients
        fx = cumprod(x, axis=self.axis)
        # NOTE(review): the gradient divides by x, so it presumably
        # assumes x has no zero entries -- confirm before relying on it.
        if self.axis is None:
            return [cumsum((fx * gi)[::-1])[::-1].reshape(inputs[0].shape) / x]
        # We need to reverse the gradients along ``self.axis``,
        # compute cumsum, then reverse again
        reverse_slicing = [slice(None, None, None)] * gi.ndim
        reverse_slicing[self.axis] = slice(None, None, -1)
        reverse_slicing = tuple(reverse_slicing)
        return [cumsum((fx * gi)[reverse_slicing],
                       self.axis)[reverse_slicing] / x]
    def infer_shape(self, node, shapes):
        # axis=None flattens; otherwise the shape is unchanged.
        if self.axis is None:
            return [(tensor.prod(shapes[0]),)]  # Flatten
        return shapes
    def c_code(self, node, name, inames, onames, sub):
        """C implementation built on the NumPy C-API (PyArray_CumProd)."""
        x, = inames
        z, = onames
        axis = self.axis
        fail = sub['fail']
        if self.axis is None or (self.axis == 0 and node.inputs[0].ndim == 1):
            # NPY_MAXDIMS asks PyArray_CumProd to operate on the
            # flattened input.
            code = """
                npy_intp shape[1] = { PyArray_SIZE(%(x)s) };
                if(!(%(z)s && PyArray_DIMS(%(z)s)[0] == shape[0]))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, PyArray_TYPE((PyArrayObject*) py_%(x)s));
                }
                if (!%(z)s)
                    %(fail)s;
                {
                    PyObject * t = PyArray_CumProd(
                        %(x)s, NPY_MAXDIMS,
                        PyArray_TYPE((PyArrayObject*) py_%(x)s), %(z)s);
                    if (!t){
                       %(fail)s;
                    }
                    // Because PyArray_CumSum returns a newly created reference on t.
                    Py_XDECREF(t);
                }
            """ % locals()
        else:
            code = """
                if(!(%(z)s && PyArray_CompareLists(PyArray_DIMS(%(z)s), PyArray_DIMS(%(x)s), PyArray_NDIM(%(x)s)) ))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM(%(x)s), PyArray_DIMS(%(x)s), PyArray_TYPE((PyArrayObject*) py_%(x)s));
                }
                if (!%(z)s)
                    %(fail)s;
                {
                    PyObject * t = PyArray_CumProd(
                        %(x)s, %(axis)s,
                        PyArray_TYPE((PyArrayObject*) py_%(x)s), %(z)s);
                    if (!t){
                        %(fail)s;
                    }
                    // Because PyArray_CumSum returns a newly created reference on t.
                    Py_XDECREF(t);
                }
            """ % locals()
        return code
    def c_code_cache_version(self):
        # Bump when the generated C code changes.
        return (4,)
    def __str__(self):
        return "%s{%s}" % (self.__class__.__name__, self.axis)
def cumprod(x, axis=None):
    """Compute the cumulative product of the elements along a given axis.

    Thin wrapper building a ``CumprodOp`` node, mirroring ``numpy.cumprod``.

    :param x: Input tensor variable.
    :param axis: Axis along which the cumulative product is taken; when
        ``None`` (the default) the product runs over the flattened array.
    .. versionadded:: 0.7
    """
    op = CumprodOp(axis=axis)
    return op(x)
class DiffOp(theano.Op):
    """N-th order discrete difference op; see the ``diff`` function for
    the user-facing docstring."""
    __props__ = ("n", "axis")
    def __init__(self, n=1, axis=-1):
        self.n = n
        self.axis = axis
        # numpy return a view in that case.
        # TODO, make an optimization that remove this op in this case.
        if n == 0:
            self.view_map = {0: [0]}
    def make_node(self, x):
        x_var = basic.as_tensor_variable(x)
        return theano.Apply(self, [x_var], [x_var.type()])
    def perform(self, node, inputs, output_storage):
        # Python fallback: delegate to numpy.diff.
        (x,) = inputs
        output_storage[0][0] = np.diff(x, n=self.n, axis=self.axis)
    def grad(self, inputs, outputs_gradients):
        inputs = inputs[0]
        if inputs.ndim != 1:
            raise NotImplementedError("Grad is not implemented for inputs with"
                                      "number of dimension other than 1.")
        def _backward_once(g):
            # Adjoint of one first-order difference step.
            padded_front = basic.concatenate([[0.], g])
            padded_back = basic.concatenate([g, [0.]])
            return padded_front - padded_back
        g_out = outputs_gradients[0]
        # Apply the adjoint n times, matching the n nested differences.
        for _ in range(self.n):
            g_out = _backward_once(g_out)
        return [g_out]
    def infer_shape(self, node, ins_shapes):
        # Each difference order shortens the chosen axis by one.
        out_shape = list(ins_shapes[0])
        out_shape[self.axis] = out_shape[self.axis] - self.n
        return [out_shape]
    def __str__(self):
        return self.__class__.__name__
def diff(x, n=1, axis=-1):
    """Calculate the n-th order discrete difference along a given axis.

    The first order difference is ``out[i] = a[i + 1] - a[i]`` along the
    chosen axis; higher orders apply diff recursively. This mirrors
    ``numpy.diff``.

    :param x: Input tensor variable.
    :param n: The number of times values are differenced, default is 1.
    :param axis: The axis along which the difference is taken,
        default is the last axis.
    .. versionadded:: 0.6
    """
    op = DiffOp(n=n, axis=axis)
    return op(x)
class BinCountOp(theano.Op):
    """
    DEPRECATED: use bincount() instead.

    Counts occurrences of each value in an integer vector, optionally
    weighted; see the ``bincount`` function for the full contract.
    """
    # Tuple of all dtypes accepted for the ``x`` parameter of this op.
    compatible_type = ('int8', 'int16', 'int32', 'int64',
                       'uint8', 'uint16', 'uint32', 'uint64')
    __props__ = ("minlength",)

    def __init__(self, minlength=None):
        """
        :param minlength: minimum number of bins, or None for plain
            numpy.bincount behaviour (minlength needs NumPy >= 1.6).
        """
        self.minlength = minlength
        if minlength is not None:
            numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
            if not bool(numpy_ver >= [1, 6]):
                raise NotImplementedError(
                    "BinCountOp with minlength attribute"
                    " requires NumPy 1.6 or higher.")

    def make_node(self, x, weights):
        # Bug fix: the warning used to read "Tile op is deprecated, use
        # tile function instead." -- a copy/paste error from another
        # op's deprecation message.
        warnings.warn((
            "BinCountOp is deprecated, use the bincount function instead."),
            stacklevel=3)
        x = basic.as_tensor_variable(x)
        if x.dtype not in BinCountOp.compatible_type:
            raise TypeError("Inputs dtype must be an integer.")
        # Some dtypes are not supported by numpy's implementation of bincount.
        # Until another one is available, we should fail at graph construction
        # time, not wait for execution.
        int_bitwidth = theano.gof.python_int_bitwidth()
        if int_bitwidth == 64:
            numpy_unsupported_dtypes = ('uint64',)
        if int_bitwidth == 32:
            numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
        intp_bitwidth = theano.gof.local_bitwidth()
        # Output dtype follows the platform's intp width (numpy's choice).
        if intp_bitwidth == 32:
            out_type = basic.ivector()
        elif intp_bitwidth == 64:
            out_type = basic.lvector()
        if x.dtype in numpy_unsupported_dtypes:
            raise TypeError(
                ("Input dtypes %s are not supported by numpy.bincount, "
                 % numpy_unsupported_dtypes), x.dtype)
        if x.ndim != 1:
            raise TypeError("Inputs must be of dimension 1.")
        if weights is None:
            # Generic constant stands in for "no weights".
            weights = theano.gof.Constant(theano.gof.Generic(), None)
        else:
            weights = basic.as_tensor_variable(weights)
            # Weighted counts are doubles, matching numpy.bincount.
            out_type = basic.dvector()
            if weights.ndim != 1:
                raise TypeError("Weights cannot have a number of"
                                "dimension different of 1.")
        return theano.Apply(self, [x, weights], [out_type])

    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        weights = inputs[1]
        z = output_storage[0]
        if weights is not None and weights.shape != x.shape:
            raise TypeError("All inputs must have the same shape.")
        # Needed for numpy 1.4.1 compatibility
        if self.minlength:
            out = np.bincount(x, weights=weights, minlength=self.minlength)
        else:
            out = np.bincount(x, weights=weights)
        z[0] = theano._asarray(out, dtype=node.outputs[0].dtype)

    def grad(self, inputs, outputs_gradients):
        output = self(*inputs)
        if output.dtype.find('int') != -1:
            # Integer-valued output: zero gradient w.r.t. every input.
            return [inp.zeros_like().astype(theano.config.floatX)
                    for inp in inputs]
        raise NotImplementedError()

    def infer_shape(self, node, ins_shapes):
        # One bin per value in [0, max(x)], extended to minlength if set.
        x = node.inputs[0]
        m = basic.max(x) + 1
        if self.minlength is not None:
            m = basic.maximum(m, self.minlength)
        return [[m]]

    def __str__(self):
        return self.__class__.__name__
def bincount(x, weights=None, minlength=None, assert_nonneg=False):
    """Count the number of occurrences of each value in a vector of ints.

    The number of (size-1) bins is one more than the largest value in
    ``x``; with ``minlength`` the output has at least that many bins.
    Bin ``n`` holds the count of value ``n`` in ``x`` — or, when
    ``weights`` is given, the sum of ``weights[i]`` over the positions
    ``i`` where ``x[i] == n``.

    :param x: 1 dimension, nonnegative ints
    :param weights: array of the same shape as x with corresponding weights.
        Optional.
    :param minlength: A minimum number of bins for the output array.
        Optional.
    :param assert_nonneg: A flag that inserts an assert_op to check if
        every input x is nonnegative.
        Optional.
    .. versionadded:: 0.6
    """
    supported_dtypes = ('int8', 'int16', 'int32', 'int64',
                        'uint8', 'uint16', 'uint32')
    rejected_dtypes = ('uint64',)
    if x.dtype in rejected_dtypes:
        raise TypeError(
            ("Input dtype %s is not supported, "
             % rejected_dtypes), x.dtype)
    if x.dtype not in supported_dtypes:
        raise TypeError("Inputs dtype must be an integer.")
    if x.ndim != 1:
        raise TypeError("Inputs must be of dimension 1.")
    if assert_nonneg:
        from theano.tensor.opt import Assert
        guard = Assert('Input to bincount has negative values!')
        x = guard(x, theano.tensor.all(x >= 0))
    # Number of bins: max(x) + 1, possibly stretched to minlength.
    nbins = theano.tensor.cast(x.max() + 1, 'int64')
    if minlength is not None:
        nbins = theano.tensor.maximum(nbins, minlength)
    # Scatter-add ones (or the weights) into the per-value bins.
    if weights is None:
        counts = theano.tensor.zeros([nbins], dtype=x.dtype)
        counts = theano.tensor.inc_subtensor(counts[x], 1)
    else:
        counts = theano.tensor.zeros([nbins], dtype=weights.dtype)
        counts = theano.tensor.inc_subtensor(counts[x], weights)
    return counts
def squeeze(x):
    """Strip the broadcastable dimensions from the shape of an array.

    Only the broadcastable axes are dropped (via ``dimshuffle``), so the
    result is always `x` itself or a view into `x`.

    :param x: Input data, tensor variable.
    :return: `x` without its broadcastable dimensions.
    .. versionadded:: 0.6
    """
    kept_axes = [axis for axis in range(x.ndim)
                 if not x.broadcastable[axis]]
    return x.dimshuffle(kept_axes)
def compress(condition, x, axis=None):
    """Return selected slices of an array along a given axis.

    Slices of ``x`` where ``condition`` is non-zero are retained; with
    no ``axis`` the tensor is flattened first. Corresponds to
    ``numpy.compress``.

    :param x: Input data, tensor variable
    :param condition: 1 dimensional array of non-zero and zero values
        corresponding to indices of slices along a selected axis
    :return: `x` with selected slices
    .. versionadded:: 0.7
    """
    keep_indices = theano.tensor.basic.flatnonzero(condition)
    return x.take(keep_indices, axis=axis)
class RepeatOp(theano.Op):
    """Repeat elements of a tensor; see the ``repeat`` function for the
    user-facing docstring."""
    __props__ = ("axis",)
    def __init__(self, axis=None):
        # axis=None: repeat over the flattened input.
        self.axis = axis
    def make_node(self, x, repeats):
        x = basic.as_tensor_variable(x)
        repeats = basic.as_tensor_variable(repeats)
        if repeats.dtype not in tensor.discrete_dtypes:
            raise TypeError("repeats.dtype must be an integer.")
        # Some dtypes are not supported by numpy's implementation of repeat.
        # Until another one is available, we should fail at graph construction
        # time, not wait for execution.
        ptr_bitwidth = theano.gof.local_bitwidth()
        if ptr_bitwidth == 64:
            numpy_unsupported_dtypes = ('uint64',)
        if ptr_bitwidth == 32:
            numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
        if repeats.dtype in numpy_unsupported_dtypes:
            raise TypeError(
                ("dtypes %s are not supported by numpy.repeat "
                 "for the 'repeats' parameter, "
                 % str(numpy_unsupported_dtypes)), repeats.dtype)
        if self.axis is None:
            # Flattened output: a plain (non-broadcastable) vector.
            broadcastable = [False]
        else:
            try:
                const_reps = basic.get_scalar_constant_value(repeats)
            except basic.NotScalarConstantError:
                const_reps = None
            if const_reps == 1:
                # Repeating exactly once keeps the broadcast pattern.
                broadcastable = x.broadcastable
            else:
                broadcastable = list(x.broadcastable)
                broadcastable[self.axis] = False
        out_type = theano.tensor.TensorType(x.dtype, broadcastable)
        return theano.Apply(self, [x, repeats], [out_type()])
    def perform(self, node, inputs, output_storage):
        # Python fallback: delegate to numpy.repeat.
        x = inputs[0]
        repeats = inputs[1]
        z = output_storage[0]
        z[0] = np.repeat(x, repeats=repeats, axis=self.axis)
    def connection_pattern(self, node):
        # Output depends on x only; repeats carries no gradient.
        return [[True], [False]]
    def grad(self, inputs, gout):
        (x, repeats) = inputs
        (gz,) = gout
        if repeats.ndim == 0:
            # Scalar repeats: reshape gz so the copies of one input
            # element lie along their own axis, then sum that axis out.
            if self.axis is None:
                axis = x.ndim
            else:
                if self.axis >= 0:
                    axis = self.axis + 1
                else:
                    axis = self.axis + x.ndim + 1
            shape = [x.shape[k] for k in range(x.ndim)]
            shape.insert(axis, repeats)
            return [gz.reshape(shape, x.ndim + 1).sum(axis=axis),
                    DisconnectedType()()]
        elif repeats.ndim == 1:
            # For this implementation, we would need to specify the length
            # of repeats in order to split gz in the right way to sum
            # the good part.
            raise NotImplementedError()
        else:
            raise ValueError()
    def infer_shape(self, node, ins_shapes):
        i0_shapes = ins_shapes[0]
        repeats = node.inputs[1]
        out_shape = list(i0_shapes)
        # uint64 shape are not supported.
        dtype = None
        if repeats.dtype in ['uint8', 'uint16', 'uint32']:
            dtype = 'int64'
        if self.axis is None:
            if repeats.ndim == 0:
                if len(i0_shapes) == 0:
                    # Scalar input repeated r times: length-r vector.
                    out_shape = [repeats]
                else:
                    res = 1
                    for d in i0_shapes:
                        res = res * d
                    out_shape = (res * repeats, )
            else:
                out_shape = [theano.tensor.sum(repeats, dtype=dtype)]
        else:
            if repeats.ndim == 0:
                out_shape[self.axis] = out_shape[self.axis] * repeats
            else:
                out_shape[self.axis] = theano.tensor.sum(repeats, dtype=dtype)
        return [out_shape]
    def __str__(self):
        return self.__class__.__name__
def repeat(x, repeats, axis=None):
    """Repeat elements of an array.

    It returns an array which has the same shape as `x`, except
    along the given axis. The axis is used to specify along which
    axis to repeat values. By default, use the flattened input
    array, and return a flat output array.

    The number of repetitions for each element is `repeat`.
    `repeats` is broadcasted to fit the length of the given `axis`.

    :param x: Input data, tensor variable.
    :param repeats: int, scalar or tensor variable.
    :param axis: int, optional.

    :see: :func:`tensor.tile <tensor.tile>`
    .. versionadded:: 0.6
    """
    repeats = tensor.as_tensor_variable(repeats)
    if repeats.ndim > 1:
        raise ValueError('The dimension of repeats should not exceed 1.')
    if repeats.ndim == 1:
        # Per-element repetition counts: only RepeatOp supports this.
        return RepeatOp(axis=axis)(x, repeats)
    else:
        # Scalar repeats: implemented with alloc + reshape instead of
        # RepeatOp so the graph stays differentiable/optimizable.
        if axis is None:
            axis = 0
            x = x.flatten()
        else:
            if axis >= x.ndim:
                raise ValueError('Axis should not exceed x.ndim-1.')
            if axis < 0:
                axis = x.ndim + axis
        # Consistency fix: use range, as the rest of this file does,
        # instead of six.moves.xrange (identical behaviour).
        shape = [x.shape[i] for i in range(x.ndim)]
        # shape_ is the shape of the intermediate tensor which has
        # an additional dimension comparing to x. We use alloc to
        # allocate space for this intermediate tensor to replicate x
        # along that additional dimension.
        shape_ = shape[:]
        shape_.insert(axis + 1, repeats)
        # shape is now the shape of output, where shape[axis] becomes
        # shape[axis]*repeats.
        shape[axis] = shape[axis] * repeats
        # dims_ is the dimension of that intermediate tensor.
        dims_ = list(numpy.arange(x.ndim))
        dims_.insert(axis + 1, 'x')
        # After the original tensor is duplicated along the additional
        # dimension, we reshape it to the expected output shape, and
        # return the output z.
        z = tensor.alloc(x.dimshuffle(*dims_), *shape_).reshape(shape)
        return z
class Bartlett(gof.Op):
    """Op computing the Bartlett window; see the ``bartlett`` function
    for the user-facing docstring."""
    __props__ = ()
    def __str__(self):
        return self.__class__.__name__
    def make_node(self, M):
        M = tensor.as_tensor_variable(M)
        if M.ndim != 0:
            raise TypeError('%s only works on scalar input'
                            % self.__class__.__name__)
        # dtype is a theano attribute here
        is_integer = (M.dtype.startswith('int') or
                      M.dtype.startswith('uint'))
        if not is_integer:
            raise TypeError('%s only works on integer input'
                            % self.__class__.__name__)
        return gof.Apply(self, [M], [tensor.dvector()])
    def perform(self, node, inputs, out_):
        # Python implementation: delegate to numpy.bartlett.
        (M,) = inputs
        (out,) = out_
        out[0] = numpy.bartlett(M)
    def infer_shape(self, node, in_shapes):
        m_in = node.inputs[0]
        # Clamp negative M to zero, matching the empty-vector output.
        clamped = tensor.switch(tensor.lt(m_in, 0),
                                tensor.cast(0, m_in.dtype),
                                m_in)
        return [[clamped]]
    def grad(self, inputs, output_grads):
        # Integer input: no gradient defined.
        return [None] * len(inputs)
bartlett_ = Bartlett()
# Thin wrapper function so the documentation renders nicely.
def bartlett(M):
    """Return the Bartlett spectral window in the time domain.

    The Bartlett window is very similar to a triangular window, except
    that the end points are at zero. It is often used in signal
    processing for tapering a signal, without generating too much
    ripple in the frequency domain.

    :param M: (integer scalar) Number of points in the output
        window. If zero or less, an empty vector is returned.
    :return: (vector of doubles) The triangular window, with the
        maximum value normalized to one (the value one appears only if
        the number of samples is odd), with the first and last samples
        equal to zero.
    .. versionadded:: 0.6
    """
    return bartlett_(M)
class FillDiagonal(gof.Op):
    """Copy an array and fill its main diagonal with a scalar; see the
    ``fill_diagonal`` function for the user-facing docstring."""
    __props__ = ()
    def __str__(self):
        return self.__class__.__name__
    def infer_shape(self, node, in_shapes):
        # Output has exactly the shape of the first input.
        return [in_shapes[0]]
    def make_node(self, a, val):
        a = tensor.as_tensor_variable(a)
        val = tensor.as_tensor_variable(val)
        if a.ndim < 2:
            raise TypeError('%s: first parameter must have at least'
                            ' two dimensions' % self.__class__.__name__)
        elif val.ndim != 0:
            raise TypeError('%s: second parameter must be a scalar'
                            % self.__class__.__name__)
        val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            # val's dtype upcast past a's: cannot store it in a's copy.
            raise TypeError('%s: type of second parameter must be the same as'
                            ' the first\'s' % self.__class__.__name__)
        return gof.Apply(self, [a, val], [a.type()])
    def perform(self, node, inputs, output_storage):
        a = inputs[0].copy()
        val = inputs[1]
        if a.ndim == 2:
            # numpy.fill_diagonal up to date(including 1.6.2) have a
            # bug for tall matrix.
            # For 2-d arrays, we accept rectangular ones.
            step = a.shape[1] + 1
            end = a.shape[1] * a.shape[1]
            # Write the value out into the diagonal.
            a.flat[:end:step] = val
        else:
            numpy.fill_diagonal(a, val)
        output_storage[0][0] = a
    def grad(self, inp, cost_grad):
        """
        Note: The gradient is currently implemented for matrices
        only.
        """
        a, val = inp
        grad = cost_grad[0]
        if (a.dtype.startswith('complex')):
            # No gradient defined for complex inputs.
            return [None, None]
        elif a.ndim > 2:
            raise NotImplementedError('%s: gradient is currently implemented'
                                      ' for matrices only' %
                                      self.__class__.__name__)
        wr_a = fill_diagonal(grad, 0)  # valid for any number of dimensions
        # diag is only valid for matrices
        wr_val = theano.tensor.nlinalg.diag(grad).sum()
        return [wr_a, wr_val]
fill_diagonal_ = FillDiagonal()
# Thin wrapper function so the documentation renders nicely.
def fill_diagonal(a, val):
    """Return a copy of ``a`` whose main diagonal is set to ``val``.

    :param a: Rectangular array of at least two dimensions.
    :param val: Scalar value to fill the diagonal whose type must be
        compatible with that of array 'a' (i.e. 'val' cannot be viewed
        as an upcast of 'a').
    :return: An array identical to 'a' except that its main diagonal
        is filled with scalar 'val'. (For an array 'a' with a.ndim >=
        2, the main diagonal is the list of locations a[i, i, ..., i]
        (i.e. with indices all identical).)

    Rectangular matrices are supported, as are tensors with more than
    two dimensions when all their dimensions are equal.
    .. versionadded:: 0.6
    """
    return fill_diagonal_(a, val)
class FillDiagonalOffset(gof.Op):
    """Copy a matrix and fill one of its (possibly offset) diagonals
    with a scalar; see the ``fill_diagonal_offset`` function for the
    user-facing docstring."""
    __props__ = ()

    def __str__(self):
        return self.__class__.__name__

    def infer_shape(self, node, in_shapes):
        # Output has exactly the shape of the first input.
        return [in_shapes[0]]

    def make_node(self, a, val, offset):
        """Validate inputs: ``a`` must be a matrix, ``val`` a scalar of
        ``a``'s dtype (after upcast), ``offset`` an integer scalar."""
        a = tensor.as_tensor_variable(a)
        val = tensor.as_tensor_variable(val)
        offset = tensor.as_tensor_variable(offset)
        if a.ndim != 2:
            raise TypeError('%s: first parameter must have exactly'
                            ' two dimensions' % self.__class__.__name__)
        elif val.ndim != 0:
            raise TypeError('%s: second parameter must be a scalar'
                            % self.__class__.__name__)
        elif offset.ndim != 0:
            raise TypeError('%s: third parameter must be a scalar'
                            % self.__class__.__name__)
        val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            raise TypeError('%s: type of second parameter must be the same'
                            ' as the first\'s' % self.__class__.__name__)
        elif offset.dtype[:3] != 'int':
            raise TypeError('%s: type of third parameter must be as integer'
                            ' use theano.tensor.cast( input, \'int32/int64\')'
                            % self.__class__.__name__)
        return gof.Apply(self, [a, val, offset], [a.type()])

    def perform(self, node, inputs, output_storage):
        a = inputs[0].copy()
        val = inputs[1]
        offset = inputs[2]
        height, width = a.shape
        # numpy's fill_diagonal "wraps" tall matrices (an option in
        # numpy 1.9.0 but regarded as a bug in 1.6.2). This op produces
        # unwrapped output so it supports tall matrices, hence the small
        # difference with fill_diagonal on tall matrices.
        if offset >= 0:
            start = offset
            num_of_step = min(min(width, height), width - offset)
        else:
            start = - offset * a.shape[1]
            num_of_step = min(min(width, height), height + offset)
        step = a.shape[1] + 1
        end = start + step * num_of_step
        # Write the value out into the diagonal.
        a.flat[start:end:step] = val
        output_storage[0][0] = a

    def grad(self, inp, cost_grad):
        """
        Note: The gradient is currently implemented for matrices
        only.
        """
        a, val, offset = inp
        grad = cost_grad[0]
        height, width = grad.shape
        if (a.dtype.startswith('complex')):
            # Bug fix: this op has three inputs (a, val, offset), so
            # three gradient entries must be returned; the original
            # returned only two, which breaks theano's grad machinery.
            return [None, None, None]
        # only valid for matrices
        wr_a = fill_diagonal_offset(grad, 0, offset)
        offset_abs = basic.abs_(offset)
        pos_offset_flag = basic.ge(offset, 0)
        neg_offset_flag = basic.lt(offset, 0)
        min_wh = basic.minimum(width, height)
        # Symbolic equivalent of the start/step/end arithmetic done in
        # perform(), used to extract the filled diagonal from grad.
        start = offset * pos_offset_flag + offset_abs * width * neg_offset_flag
        num_of_step = basic.minimum(min_wh, width * pos_offset_flag +
                                    height * neg_offset_flag - offset_abs)
        step = a.shape[1] + 1
        end = start + step * num_of_step
        # input of slice should be integer
        start = basic.cast(start, 'int32')
        step = basic.cast(step, 'int32')
        end = basic.cast(end, 'int32')
        wr_val = grad.flatten()[start:end:step].sum()
        wr_offset = theano.gradient.grad_undefined(
            self, 2, offset,
            "offset is not defined for non-integer offset so"
            " fill_diagonal_offset(a,val,offset+eps) is undefined")
        return [wr_a, wr_val, wr_offset]
fill_diagonal_offset_ = FillDiagonalOffset()
def fill_diagonal_offset(a, val, offset):
    """Return a copy of matrix ``a`` whose ``offset`` diagonal is set
    to the scalar ``val``.

    :param a: Rectangular array of two dimensions.
    :param val: Scalar value to fill the diagonal whose type must be
        compatible with that of array 'a' (i.e. 'val' cannot be viewed
        as an upcast of 'a').
    :param offset: Scalar value Offset of the diagonal from the main
        diagonal. Can be positive or negative integer.
    :return: An array identical to 'a' except that its offset diagonal
        is filled with scalar 'val'. The output is unwrapped.
    """
    return fill_diagonal_offset_(a, val, offset)
def to_one_hot(y, nb_class, dtype=None):
    """Build the matrix whose rows are the one-hot encodings of ``y``.

    :param y: A vector of integer value between 0 and nb_class - 1.
    :param nb_class: The number of class in y.
    :param dtype: The dtype of the returned matrix. Default floatX.
    :return: A matrix of shape (y.shape[0], nb_class), where each
        row ``i`` is the one hot encoding of the corresponding ``y[i]``
        value.
    """
    n_rows = y.shape[0]
    blank = theano.tensor.zeros((n_rows, nb_class), dtype=dtype)
    row_index = theano.tensor.arange(n_rows)
    # Set a single 1 per row, at column y[i].
    return theano.tensor.set_subtensor(blank[row_index, y], 1)
class Unique(theano.Op):
    """
    Wraps numpy.unique.
    This op is not implemented on the GPU.
    Examples
    ========
    >>> import numpy as np
    >>> x = theano.tensor.vector()
    >>> f = theano.function([x], Unique(True, True, False)(x))
    >>> f([1, 2., 3, 4, 3, 2, 1.])
    [array([ 1., 2., 3., 4.]), array([0, 1, 2, 3]), array([0, 1, 2, 3, 2, 1, 0])]
    >>> y = theano.tensor.matrix()
    >>> g = theano.function([y], Unique(True, True, False)(y))
    >>> g([[1, 1, 1.0], (2, 3, 3.0)])
    [array([ 1., 2., 3.]), array([0, 3, 4]), array([0, 0, 0, 1, 2, 2])]
    """
    __props__ = ("return_index", "return_inverse", "return_counts")
    def __init__(self, return_index=False, return_inverse=False,
                 return_counts=False):
        self.return_index = return_index
        self.return_inverse = return_inverse
        self.return_counts = return_counts
        # return_counts was only added to numpy.unique in 1.9.0.
        numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
        if self.return_counts and bool(numpy_ver < [1, 9]):
            raise RuntimeError(
                "Numpy version = " + np.__version__ +
                ". Option 'return_counts=True' works starting"
                " from version 1.9.0.")
    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        # First output: the unique values, a 1-d tensor of x's dtype.
        outputs = [basic.TensorType(broadcastable=[False], dtype=x.dtype)()]
        typ = basic.TensorType(broadcastable=[False], dtype='int64')
        # Optional index outputs appended in numpy.unique's order.
        if self.return_index:
            outputs.append(typ())
        if self.return_inverse:
            outputs.append(typ())
        if self.return_counts:
            outputs.append(typ())
        return theano.Apply(self, [x], outputs)
    def perform(self, node, inputs, output_storage):
        """Python implementation: forward the flags to numpy.unique."""
        x = inputs[0]
        z = output_storage
        param = {}
        if self.return_index:
            param['return_index'] = True
        if self.return_inverse:
            param['return_inverse'] = True
        if self.return_counts:
            param['return_counts'] = True
        outs = np.unique(x, **param)
        # With no flags numpy returns a single array, otherwise a tuple.
        if ((not self.return_inverse) and
            (not self.return_index) and
            (not self.return_counts)):
            z[0][0] = outs
        else:
            for i in range(len(outs)):
                z[i][0] = outs[i]
    def infer_shape(self, node, i0_shapes):
        ret = node.fgraph.shape_feature.default_infer_shape(node, i0_shapes)
        if self.return_inverse:
            # The inverse-index output has one entry per element of the
            # (flattened) input; its position in ret depends on whether
            # the return_index output precedes it.
            shape = (basic.prod(i0_shapes[0]), )
            if self.return_index:
                ret[2] = shape
                return ret
            ret[1] = shape
            return ret
        return ret
# dataset-row metadata (avg_line_length | max_line_length | alphanum_fraction): 33.997263 | 146 | 0.565524
import numpy as np
import numpy
import warnings
from six.moves import xrange
import theano
from theano.tensor import basic
from theano.tensor import nlinalg
from theano import gof, scalar
from theano.gradient import DisconnectedType
tensor = basic
class CpuContiguous(theano.Op):
    """Return a C-contiguous version of its input, copying only when
    the input is not already C-contiguous."""
    __props__ = ()
    # When no copy is needed the output is a view of input 0.
    view_map = {0: [0]}
    def make_node(self, x):
        x_ = theano.tensor.as_tensor_variable(x)
        return theano.Apply(self, [x_], [x_.type()])
    def perform(self, node, inputs, output_storage):
        """Python implementation: copy only when not C-contiguous."""
        x, = inputs
        y = output_storage[0]
        # x.copy() defaults to C order, making the result contiguous.
        if not x.flags['C_CONTIGUOUS']:
            x = x.copy()
        assert x.flags['C_CONTIGUOUS']
        y[0] = x
    def c_code(self, node, name, inames, onames, sub):
        """C implementation; reuses a contiguous pre-allocated output
        when possible, else falls back to PyArray_GETCONTIGUOUS."""
        x, = inames
        y, = onames
        code = """
            if (!PyArray_CHKFLAGS(%(x)s, NPY_ARRAY_C_CONTIGUOUS)){
                // check to see if output is contiguous first
                if (%(y)s != NULL &&
                    PyArray_CHKFLAGS(%(y)s, NPY_ARRAY_C_CONTIGUOUS)){
                    PyArray_CopyInto(%(y)s, %(x)s);
                }
                else{
                    Py_XDECREF(%(y)s);
                    %(y)s = PyArray_GETCONTIGUOUS(%(x)s);
                }
            }
            else{
                Py_XINCREF(%(x)s);
                Py_XDECREF(%(y)s);
                %(y)s = %(x)s;
            }
        """ % locals()
        return code
    def c_code_cache_version(self):
        return (0,)
cpu_contiguous = CpuContiguous()
class CumsumOp(theano.Op):
    """Cumulative sum along an axis; see the ``cumsum`` function for
    the user-facing docstring."""
    __props__ = ("axis",)
    def __init__(self, axis=None):
        # axis=None means: operate over the flattened input.
        self.axis = axis
    def make_node(self, x):
        """Build the Apply node; the output is a vector when axis is None."""
        x = basic.as_tensor_variable(x)
        out_type = x.type()
        if self.axis is None:
            out_type = theano.tensor.vector(dtype=x.dtype)
        elif self.axis >= x.ndim or self.axis < -x.ndim:
            raise ValueError('axis(={0}) out of bounds'.format(self.axis))
        return theano.Apply(self, [x], [out_type])
    def perform(self, node, inputs, output_storage):
        """Python fallback: delegate to numpy.cumsum."""
        x = inputs[0]
        z = output_storage[0]
        z[0] = np.cumsum(x, axis=self.axis)
    def grad(self, inputs, output_gradients):
        # The adjoint of a cumulative sum is a reversed cumulative sum
        # of the incoming gradient.
        [gi] = output_gradients
        if self.axis is None:
            return [cumsum(gi[::-1])[::-1].reshape(inputs[0].shape)]
        # Reverse along self.axis, cumsum, then reverse again.
        reverse_slicing = [slice(None, None, None)] * gi.ndim
        reverse_slicing[self.axis] = slice(None, None, -1)
        reverse_slicing = tuple(reverse_slicing)
        return [cumsum(gi[reverse_slicing], self.axis)[reverse_slicing]]
    def infer_shape(self, node, shapes):
        # axis=None flattens; otherwise the shape is unchanged.
        if self.axis is None:
            return [(tensor.prod(shapes[0]),)]
        return shapes
    def c_code(self, node, name, inames, onames, sub):
        """C implementation built on the NumPy C-API (PyArray_CumSum)."""
        x, = inames
        z, = onames
        axis = self.axis
        fail = sub['fail']
        if self.axis is None or (self.axis == 0 and node.inputs[0].ndim == 1):
            # NPY_MAXDIMS asks PyArray_CumSum for the flattened input.
            code = """
                npy_intp shape[1] = { PyArray_SIZE(%(x)s) };
                if(!(%(z)s && PyArray_DIMS(%(z)s)[0] == shape[0]))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, PyArray_TYPE((PyArrayObject*) py_%(x)s));
                }
                if (!%(z)s)
                    %(fail)s;
                {
                    PyObject * t = PyArray_CumSum(
                        %(x)s, NPY_MAXDIMS,
                        PyArray_TYPE((PyArrayObject*) py_%(x)s), %(z)s);
                    if (!t){
                       %(fail)s;
                    }
                    // Because PyArray_CumSum returns a newly created reference on t.
                    Py_XDECREF(t);
                }
            """ % locals()
        else:
            code = """
                if(!(%(z)s && PyArray_CompareLists(PyArray_DIMS(%(z)s), PyArray_DIMS(%(x)s), PyArray_NDIM(%(x)s))))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM(%(x)s), PyArray_DIMS(%(x)s), PyArray_TYPE((PyArrayObject*) py_%(x)s));
                }
                if (!%(z)s)
                    %(fail)s;
                {
                    PyObject * t = PyArray_CumSum(
                        %(x)s, %(axis)s,
                        PyArray_TYPE((PyArrayObject*) py_%(x)s), %(z)s);
                    if (!t){
                        %(fail)s;
                    }
                    // Because PyArray_CumSum returns a newly created reference on t.
                    Py_XDECREF(t);
                }
            """ % locals()
        return code
    def c_code_cache_version(self):
        # Bump when the generated C code changes.
        return (6,)
    def __str__(self):
        return "%s{%s}" % (self.__class__.__name__, self.axis)
def cumsum(x, axis=None):
    """Return the cumulative sum of the elements along a given axis.

    Thin wrapper around ``CumsumOp`` (mirrors ``numpy.cumsum``).

    :param x: Input tensor variable.
    :param axis: Axis of the cumulative sum; ``None`` flattens first.
    """
    return CumsumOp(axis=axis)(x)
class CumprodOp(theano.Op):
    """Cumulative product along an axis; see the ``cumprod`` function
    for the user-facing docstring."""
    __props__ = ("axis",)
    def __init__(self, axis=None):
        # axis=None means: operate over the flattened input.
        self.axis = axis
    def make_node(self, x):
        """Build the Apply node; the output is a vector when axis is None."""
        x = basic.as_tensor_variable(x)
        out_type = x.type()
        if self.axis is None:
            out_type = theano.tensor.vector(dtype=x.dtype)
        elif self.axis >= x.ndim or self.axis < -x.ndim:
            raise ValueError('axis(={0}) out of bounds'.format(self.axis))
        return theano.Apply(self, [x], [out_type])
    def perform(self, node, inputs, output_storage):
        """Python fallback: delegate to numpy.cumprod."""
        x = inputs[0]
        z = output_storage[0]
        z[0] = np.cumprod(x, axis=self.axis)
    def grad(self, inputs, output_gradients):
        x, = inputs
        gi, = output_gradients
        fx = cumprod(x, axis=self.axis)
        # NOTE(review): the gradient divides by x, so it presumably
        # assumes x has no zero entries -- confirm before relying on it.
        if self.axis is None:
            return [cumsum((fx * gi)[::-1])[::-1].reshape(inputs[0].shape) / x]
        # Reverse along self.axis, cumsum, then reverse again.
        reverse_slicing = [slice(None, None, None)] * gi.ndim
        reverse_slicing[self.axis] = slice(None, None, -1)
        reverse_slicing = tuple(reverse_slicing)
        return [cumsum((fx * gi)[reverse_slicing],
                       self.axis)[reverse_slicing] / x]
    def infer_shape(self, node, shapes):
        # axis=None flattens; otherwise the shape is unchanged.
        if self.axis is None:
            return [(tensor.prod(shapes[0]),)]
        return shapes
    def c_code(self, node, name, inames, onames, sub):
        """C implementation built on the NumPy C-API (PyArray_CumProd)."""
        x, = inames
        z, = onames
        axis = self.axis
        fail = sub['fail']
        if self.axis is None or (self.axis == 0 and node.inputs[0].ndim == 1):
            # NPY_MAXDIMS asks PyArray_CumProd for the flattened input.
            code = """
                npy_intp shape[1] = { PyArray_SIZE(%(x)s) };
                if(!(%(z)s && PyArray_DIMS(%(z)s)[0] == shape[0]))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(1, shape, PyArray_TYPE((PyArrayObject*) py_%(x)s));
                }
                if (!%(z)s)
                    %(fail)s;
                {
                    PyObject * t = PyArray_CumProd(
                        %(x)s, NPY_MAXDIMS,
                        PyArray_TYPE((PyArrayObject*) py_%(x)s), %(z)s);
                    if (!t){
                       %(fail)s;
                    }
                    // Because PyArray_CumSum returns a newly created reference on t.
                    Py_XDECREF(t);
                }
            """ % locals()
        else:
            code = """
                if(!(%(z)s && PyArray_CompareLists(PyArray_DIMS(%(z)s), PyArray_DIMS(%(x)s), PyArray_NDIM(%(x)s)) ))
                {
                    Py_XDECREF(%(z)s);
                    %(z)s = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM(%(x)s), PyArray_DIMS(%(x)s), PyArray_TYPE((PyArrayObject*) py_%(x)s));
                }
                if (!%(z)s)
                    %(fail)s;
                {
                    PyObject * t = PyArray_CumProd(
                        %(x)s, %(axis)s,
                        PyArray_TYPE((PyArrayObject*) py_%(x)s), %(z)s);
                    if (!t){
                        %(fail)s;
                    }
                    // Because PyArray_CumSum returns a newly created reference on t.
                    Py_XDECREF(t);
                }
            """ % locals()
        return code
    def c_code_cache_version(self):
        # Bump when the generated C code changes.
        return (4,)
    def __str__(self):
        return "%s{%s}" % (self.__class__.__name__, self.axis)
def cumprod(x, axis=None):
    """Return the cumulative product of the elements along a given axis.

    Thin wrapper around ``CumprodOp`` (mirrors ``numpy.cumprod``).

    :param x: Input tensor variable.
    :param axis: Axis of the cumulative product; ``None`` flattens first.
    """
    return CumprodOp(axis=axis)(x)
class DiffOp(theano.Op):
    """N-th order discrete difference op; see the ``diff`` function for
    the user-facing docstring."""
    __props__ = ("n", "axis")
    def __init__(self, n=1, axis=-1):
        self.n = n
        self.axis = axis
        # With n == 0 numpy.diff returns a view of its input, so the
        # output is declared as a view of input 0.
        if n == 0:
            self.view_map = {0: [0]}
    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        return theano.Apply(self, [x], [x.type()])
    def perform(self, node, inputs, output_storage):
        """Python fallback: delegate to numpy.diff."""
        x = inputs[0]
        z = output_storage[0]
        z[0] = np.diff(x, n=self.n, axis=self.axis)
    def grad(self, inputs, outputs_gradients):
        # Gradient only implemented for vectors.
        inputs = inputs[0]
        if inputs.ndim != 1:
            raise NotImplementedError("Grad is not implemented for inputs with"
                                      "number of dimension other than 1.")
        z = outputs_gradients[0]
        def _grad_helper(z):
            # Adjoint of one first-order difference step.
            pre = basic.concatenate([[0.], z])
            app = basic.concatenate([z, [0.]])
            return pre - app
        # Apply the adjoint n times, matching the n nested differences.
        for k in range(self.n):
            z = _grad_helper(z)
        return [z]
    def infer_shape(self, node, ins_shapes):
        # Each difference order shortens the chosen axis by one.
        i0_shapes = ins_shapes[0]
        out_shape = list(i0_shapes)
        out_shape[self.axis] = out_shape[self.axis] - self.n
        return [out_shape]
    def __str__(self):
        return self.__class__.__name__
def diff(x, n=1, axis=-1):
    """Symbolic analogue of ``numpy.diff``: the n-th order discrete
    difference of `x` along `axis`."""
    return DiffOp(n=n, axis=axis)(x)
class BinCountOp(theano.Op):
    """Count occurrences of each value in an integer vector.

    Deprecated op wrapping ``numpy.bincount``; the graph-based
    ``bincount`` function below is the preferred implementation.
    """

    # Integer dtypes accepted for the values to count.
    compatible_type = ('int8', 'int16', 'int32', 'int64',
                       'uint8', 'uint16', 'uint32', 'uint64')
    __props__ = ("minlength",)

    def __init__(self, minlength=None):
        self.minlength = minlength
        if minlength is not None:
            # numpy.bincount grew the minlength argument in NumPy 1.6.
            numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
            if not bool(numpy_ver >= [1, 6]):
                raise NotImplementedError(
                    "BinCountOp with minlength attribute"
                    " requires NumPy 1.6 or higher.")

    def make_node(self, x, weights):
        # Bug fix: the deprecation warning used to say "Tile op is
        # deprecated", a copy/paste mistake from another op.
        warnings.warn((
            "BinCountOp is deprecated, use the bincount function instead."),
            stacklevel=3)
        x = basic.as_tensor_variable(x)
        if x.dtype not in BinCountOp.compatible_type:
            raise TypeError("Inputs dtype must be an integer.")
        # Until another one is available, we should fail at graph construction
        # time, not wait for execution.
        int_bitwidth = theano.gof.python_int_bitwidth()
        if int_bitwidth == 64:
            numpy_unsupported_dtypes = ('uint64',)
        if int_bitwidth == 32:
            numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
        intp_bitwidth = theano.gof.local_bitwidth()
        if intp_bitwidth == 32:
            out_type = basic.ivector()
        elif intp_bitwidth == 64:
            out_type = basic.lvector()
        if x.dtype in numpy_unsupported_dtypes:
            raise TypeError(
                ("Input dtypes %s are not supported by numpy.bincount, "
                 % numpy_unsupported_dtypes), x.dtype)
        if x.ndim != 1:
            raise TypeError("Inputs must be of dimension 1.")
        if weights is None:
            # Use a generic constant so the Apply node always has two inputs.
            weights = theano.gof.Constant(theano.gof.Generic(), None)
        else:
            weights = basic.as_tensor_variable(weights)
            out_type = basic.dvector()
            if weights.ndim != 1:
                # Bug fix: the string fragments used to concatenate
                # without a separating space ("...number ofdimension...").
                raise TypeError("Weights cannot have a number of"
                                " dimension different of 1.")
        return theano.Apply(self, [x, weights], [out_type])

    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        weights = inputs[1]
        z = output_storage[0]
        if weights is not None and weights.shape != x.shape:
            raise TypeError("All inputs must have the same shape.")
        # Needed for numpy 1.4.1 compatibility
        if self.minlength:
            out = np.bincount(x, weights=weights, minlength=self.minlength)
        else:
            out = np.bincount(x, weights=weights)
        z[0] = theano._asarray(out, dtype=node.outputs[0].dtype)

    def grad(self, inputs, outputs_gradients):
        output = self(*inputs)
        if output.dtype.find('int') != -1:
            # Integer output: the op is not differentiable, so the
            # gradient is zero wherever it is defined.
            return [inp.zeros_like().astype(theano.config.floatX)
                    for inp in inputs]
        raise NotImplementedError()

    def infer_shape(self, node, ins_shapes):
        # One bin per value in [0, max(x)], padded up to minlength.
        x = node.inputs[0]
        m = basic.max(x) + 1
        if self.minlength is not None:
            m = basic.maximum(m, self.minlength)
        return [[m]]

    def __str__(self):
        return self.__class__.__name__
def bincount(x, weights=None, minlength=None, assert_nonneg=False):
    """Count occurrences of each value in `x`, symbolically.

    Graph-based equivalent of ``numpy.bincount`` built from
    ``inc_subtensor`` so it composes with the rest of the graph.

    :param x: 1-d tensor of non-negative integers.
    :param weights: optional 1-d tensor of weights, same shape as `x`.
    :param minlength: minimum number of bins in the output.
    :param assert_nonneg: when True, add a graph assertion that every
        value of `x` is non-negative.
    """
    compatible_type = ('int8', 'int16', 'int32', 'int64',
                       'uint8', 'uint16', 'uint32')
    # uint64 cannot be used safely for indexing here.
    unsupported_dtypes = ('uint64',)
    if x.dtype in unsupported_dtypes:
        raise TypeError(
            ("Input dtype %s is not supported, "
             % unsupported_dtypes), x.dtype)
    if x.dtype not in compatible_type:
        raise TypeError("Inputs dtype must be an integer.")
    if x.ndim != 1:
        raise TypeError("Inputs must be of dimension 1.")
    if assert_nonneg:
        from theano.tensor.opt import Assert
        assert_op = Assert('Input to bincount has negative values!')
        x = assert_op(x, theano.tensor.all(x >= 0))
    max_value = theano.tensor.cast(x.max() + 1, 'int64')
    if minlength is not None:
        max_value = theano.tensor.maximum(max_value, minlength)
    if weights is None:
        # Unweighted: add 1 into each value's bin.
        out = theano.tensor.zeros([max_value], dtype=x.dtype)
        out = theano.tensor.inc_subtensor(out[x], 1)
    else:
        # Weighted: accumulate the weights instead of counts.
        out = theano.tensor.zeros([max_value], dtype=weights.dtype)
        out = theano.tensor.inc_subtensor(out[x], weights)
    return out
def squeeze(x):
    """Remove the dimensions of `x` that are known (at compile time) to be
    broadcastable -- Theano's analogue of ``numpy.squeeze``."""
    view = x.dimshuffle([i for i in range(x.ndim)
                         if not x.broadcastable[i]])
    return view
def compress(condition, x, axis=None):
    """Return selected slices of `x` along `axis` where `condition` is
    nonzero (symbolic counterpart of ``numpy.compress``)."""
    indices = theano.tensor.basic.flatnonzero(condition)
    return x.take(indices, axis=axis)
class RepeatOp(theano.Op):
    # See the repeat function for docstring
    __props__ = ("axis",)

    def __init__(self, axis=None):
        self.axis = axis

    def make_node(self, x, repeats):
        x = basic.as_tensor_variable(x)
        repeats = basic.as_tensor_variable(repeats)
        if repeats.dtype not in tensor.discrete_dtypes:
            raise TypeError("repeats.dtype must be an integer.")
        # Some dtypes are not supported by numpy's implementation of repeat.
        # Fail at graph-construction time rather than at execution.
        ptr_bitwidth = theano.gof.local_bitwidth()
        if ptr_bitwidth == 64:
            numpy_unsupported_dtypes = ('uint64',)
        if ptr_bitwidth == 32:
            numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')
        if repeats.dtype in numpy_unsupported_dtypes:
            raise TypeError(
                ("dtypes %s are not supported by numpy.repeat "
                 "for the 'repeats' parameter, "
                 % str(numpy_unsupported_dtypes)), repeats.dtype)
        if self.axis is None:
            # Output is flattened, so only its (unknown) length remains.
            broadcastable = [False]
        else:
            try:
                const_reps = basic.get_scalar_constant_value(repeats)
            except basic.NotScalarConstantError:
                const_reps = None
            if const_reps == 1:
                # Repeating once keeps the broadcastable pattern unchanged.
                broadcastable = x.broadcastable
            else:
                broadcastable = list(x.broadcastable)
                broadcastable[self.axis] = False
        out_type = theano.tensor.TensorType(x.dtype, broadcastable)
        return theano.Apply(self, [x, repeats], [out_type()])

    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        repeats = inputs[1]
        z = output_storage[0]
        z[0] = np.repeat(x, repeats=repeats, axis=self.axis)

    def connection_pattern(self, node):
        # The output depends on x only; repeats is not differentiable.
        return [[True], [False]]

    def grad(self, inputs, gout):
        (x, repeats) = inputs
        (gz,) = gout
        if repeats.ndim == 0:
            # Scalar repeats: reshape the output gradient so the repeated
            # copies of each element lie along a new axis, then sum them.
            if self.axis is None:
                axis = x.ndim
            else:
                if self.axis >= 0:
                    axis = self.axis + 1
                else:
                    axis = self.axis + x.ndim + 1
            shape = [x.shape[k] for k in range(x.ndim)]
            shape.insert(axis, repeats)
            return [gz.reshape(shape, x.ndim + 1).sum(axis=axis),
                    DisconnectedType()()]
        elif repeats.ndim == 1:
            raise NotImplementedError()
        else:
            raise ValueError()

    def infer_shape(self, node, ins_shapes):
        i0_shapes = ins_shapes[0]
        repeats = node.inputs[1]
        out_shape = list(i0_shapes)
        dtype = None
        # Sum in int64 so small unsigned dtypes cannot overflow.
        if repeats.dtype in ['uint8', 'uint16', 'uint32']:
            dtype = 'int64'
        if self.axis is None:
            if repeats.ndim == 0:
                if len(i0_shapes) == 0:
                    out_shape = [repeats]
                else:
                    res = 1
                    for d in i0_shapes:
                        res = res * d
                    out_shape = (res * repeats, )
            else:
                out_shape = [theano.tensor.sum(repeats, dtype=dtype)]
        else:
            if repeats.ndim == 0:
                out_shape[self.axis] = out_shape[self.axis] * repeats
            else:
                out_shape[self.axis] = theano.tensor.sum(repeats, dtype=dtype)
        return [out_shape]

    def __str__(self):
        return self.__class__.__name__
def repeat(x, repeats, axis=None):
    """Repeat elements of `x` -- symbolic analogue of ``numpy.repeat``.

    With a vector `repeats`, RepeatOp handles the general case; with a
    scalar, the repetition is expressed through `alloc`/`dimshuffle` so
    existing graph optimizations can apply.
    """
    repeats = tensor.as_tensor_variable(repeats)
    if repeats.ndim > 1:
        raise ValueError('The dimension of repeats should not exceed 1.')
    if repeats.ndim == 1:
        return RepeatOp(axis=axis)(x, repeats)
    else:
        if axis is None:
            axis = 0
            x = x.flatten()
        else:
            if axis >= x.ndim:
                raise ValueError('Axis should not exceed x.ndim-1.')
            if axis < 0:
                axis = x.ndim + axis
        # NOTE(review): `xrange` implies this module targets Python 2 (or
        # defines xrange elsewhere) -- confirm before modernizing.
        shape = [x.shape[i] for i in xrange(x.ndim)]
        # Insert a broadcastable axis after `axis`, broadcast it to
        # `repeats` copies, then collapse it back into `axis`.
        shape_ = shape[:]
        shape_.insert(axis + 1, repeats)
        shape[axis] = shape[axis] * repeats
        dims_ = list(numpy.arange(x.ndim))
        dims_.insert(axis + 1, 'x')
        z = tensor.alloc(x.dimshuffle(*dims_), *shape_).reshape(shape)
        return z
class Bartlett(gof.Op):
    """Op producing a Bartlett (triangular) window of length M,
    wrapping ``numpy.bartlett``."""
    __props__ = ()

    def __str__(self):
        return self.__class__.__name__

    def make_node(self, M):
        M = tensor.as_tensor_variable(M)
        if M.ndim != 0:
            raise TypeError('%s only works on scalar input'
                            % self.__class__.__name__)
        elif (not M.dtype.startswith('int') and
              not M.dtype.startswith('uint')):
            raise TypeError('%s only works on integer input'
                            % self.__class__.__name__)
        return gof.Apply(self, [M], [tensor.dvector()])

    def perform(self, node, inputs, out_):
        M = inputs[0]
        out, = out_
        out[0] = numpy.bartlett(M)

    def infer_shape(self, node, in_shapes):
        temp = node.inputs[0]
        # numpy.bartlett returns an empty array for M < 0, so clamp at 0.
        M = tensor.switch(tensor.lt(temp, 0),
                          tensor.cast(0, temp.dtype),
                          temp)
        return [[M]]

    def grad(self, inputs, output_grads):
        # Integer input: the op is not differentiable.
        return [None for i in inputs]
bartlett_ = Bartlett()
def bartlett(M):
    """Return a Bartlett (triangular) window of length `M`
    (symbolic version of ``numpy.bartlett``)."""
    return bartlett_(M)
class FillDiagonal(gof.Op):
    """Return a copy of an array with its main diagonal set to a scalar."""
    __props__ = ()

    def __str__(self):
        return self.__class__.__name__

    def infer_shape(self, node, in_shapes):
        # Output has exactly the shape of the input array.
        return [in_shapes[0]]

    def make_node(self, a, val):
        a = tensor.as_tensor_variable(a)
        val = tensor.as_tensor_variable(val)
        if a.ndim < 2:
            raise TypeError('%s: first parameter must have at least'
                            ' two dimensions' % self.__class__.__name__)
        elif val.ndim != 0:
            raise TypeError('%s: second parameter must be a scalar'
                            % self.__class__.__name__)
        val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            # Upcasting would silently change a's dtype; refuse instead.
            raise TypeError('%s: type of second parameter must be the same as'
                            ' the first\'s' % self.__class__.__name__)
        return gof.Apply(self, [a, val], [a.type()])

    def perform(self, node, inputs, output_storage):
        a = inputs[0].copy()
        val = inputs[1]
        if a.ndim == 2:
            # numpy.fill_diagonal up to date(including 1.6.2) have a
            # bug for tall matrix.
            # For 2-d arrays, we accept rectangular ones.
            step = a.shape[1] + 1
            end = a.shape[1] * a.shape[1]
            # Write the value out into the diagonal.
            a.flat[:end:step] = val
        else:
            numpy.fill_diagonal(a, val)
        output_storage[0][0] = a

    def grad(self, inp, cost_grad):
        a, val = inp
        grad = cost_grad[0]
        if (a.dtype.startswith('complex')):
            # Gradients are not defined for complex dtypes.
            return [None, None]
        elif a.ndim > 2:
            raise NotImplementedError('%s: gradient is currently implemented'
                                      ' for matrices only' %
                                      self.__class__.__name__)
        wr_a = fill_diagonal(grad, 0)  # valid for any number of dimensions
        # diag is only valid for matrices
        wr_val = theano.tensor.nlinalg.diag(grad).sum()
        return [wr_a, wr_val]
fill_diagonal_ = FillDiagonal()
# I create a function only to have the doc show well.
def fill_diagonal(a, val):
    """Return a copy of `a` with its main diagonal filled with `val`
    (symbolic version of ``numpy.fill_diagonal``)."""
    return fill_diagonal_(a, val)
class FillDiagonalOffset(gof.Op):
    """Fill the k-th diagonal of a matrix with a scalar value.

    See the fill_diagonal_offset function for the user-facing docstring.
    """
    __props__ = ()

    def __str__(self):
        return self.__class__.__name__

    def infer_shape(self, node, in_shapes):
        # Output has exactly the shape of the input matrix.
        return [in_shapes[0]]

    def make_node(self, a, val, offset):
        a = tensor.as_tensor_variable(a)
        val = tensor.as_tensor_variable(val)
        offset = tensor.as_tensor_variable(offset)
        if a.ndim != 2:
            raise TypeError('%s: first parameter must have exactly'
                            ' two dimensions' % self.__class__.__name__)
        elif val.ndim != 0:
            raise TypeError('%s: second parameter must be a scalar'
                            % self.__class__.__name__)
        elif offset.ndim != 0:
            raise TypeError('%s: third parameter must be a scalar'
                            % self.__class__.__name__)
        val = tensor.cast(val, dtype=scalar.upcast(a.dtype, val.dtype))
        if val.dtype != a.dtype:
            # Upcasting would silently change a's dtype; refuse instead.
            raise TypeError('%s: type of second parameter must be the same'
                            ' as the first\'s' % self.__class__.__name__)
        elif offset.dtype[:3] != 'int':
            raise TypeError('%s: type of third parameter must be as integer'
                            ' use theano.tensor.cast( input, \'int32/int64\')'
                            % self.__class__.__name__)
        return gof.Apply(self, [a, val, offset], [a.type()])

    def perform(self, node, inputs, output_storage):
        a = inputs[0].copy()
        val = inputs[1]
        offset = inputs[2]
        height, width = a.shape
        # Flat start index and number of diagonal elements for a positive
        # (above main) or negative (below main) offset.
        if offset >= 0:
            start = offset
            num_of_step = min(min(width, height), width - offset)
        else:
            start = - offset * a.shape[1]
            num_of_step = min(min(width, height), height + offset)
        step = a.shape[1] + 1
        end = start + step * num_of_step
        a.flat[start:end:step] = val
        output_storage[0][0] = a

    def grad(self, inp, cost_grad):
        a, val, offset = inp
        grad = cost_grad[0]
        height, width = grad.shape
        if (a.dtype.startswith('complex')):
            # Bug fix: this op has three inputs, so three gradient terms
            # must be returned (the previous code returned only two).
            return [None, None, None]
        wr_a = fill_diagonal_offset(grad, 0, offset)
        # Recompute the flat slice of the target diagonal symbolically so
        # wr_val sums the output gradient over the overwritten positions.
        offset_abs = basic.abs_(offset)
        pos_offset_flag = basic.ge(offset, 0)
        neg_offset_flag = basic.lt(offset, 0)
        min_wh = basic.minimum(width, height)
        start = offset * pos_offset_flag + offset_abs * width * neg_offset_flag
        num_of_step = basic.minimum(min_wh, width * pos_offset_flag +
                                    height * neg_offset_flag - offset_abs)
        step = a.shape[1] + 1
        end = start + step * num_of_step
        start = basic.cast(start, 'int32')
        step = basic.cast(step, 'int32')
        end = basic.cast(end, 'int32')
        wr_val = grad.flatten()[start:end:step].sum()
        wr_offset = theano.gradient.grad_undefined(
            self, 2, offset,
            "offset is not defined for non-integer offset so"
            " fill_diagonal_offset(a,val,offset+eps) is undefined")
        return [wr_a, wr_val, wr_offset]
fill_diagonal_offset_ = FillDiagonalOffset()
def fill_diagonal_offset(a, val, offset):
    """Return a copy of matrix `a` with the diagonal at `offset` (above the
    main diagonal when positive, below when negative) filled with `val`."""
    return fill_diagonal_offset_(a, val, offset)
def to_one_hot(y, nb_class, dtype=None):
    """Return a one-hot matrix: row i is all zeros except a 1 at column y[i].

    :param y: 1-d tensor of integer class indices in [0, nb_class).
    :param nb_class: number of columns of the result.
    :param dtype: optional result dtype (passed through to ``zeros``).
    """
    ret = theano.tensor.zeros((y.shape[0], nb_class),
                              dtype=dtype)
    ret = theano.tensor.set_subtensor(ret[theano.tensor.arange(y.shape[0]), y],
                                      1)
    return ret
class Unique(theano.Op):
    """Find the unique elements of a tensor (wraps ``numpy.unique``).

    Optional extra outputs (first-occurrence indexes, inverse indexes,
    counts) are appended in that order when the corresponding flags are
    set.
    """
    __props__ = ("return_index", "return_inverse", "return_counts")

    def __init__(self, return_index=False, return_inverse=False,
                 return_counts=False):
        self.return_index = return_index
        self.return_inverse = return_inverse
        self.return_counts = return_counts
        # numpy.unique grew return_counts in NumPy 1.9.
        numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
        if self.return_counts and bool(numpy_ver < [1, 9]):
            raise RuntimeError(
                "Numpy version = " + np.__version__ +
                ". Option 'return_counts=True' works starting"
                " from version 1.9.0.")

    def make_node(self, x):
        x = basic.as_tensor_variable(x)
        # Unique values come back as a flat vector of x's dtype; every
        # optional output is an int64 vector.
        outputs = [basic.TensorType(broadcastable=[False], dtype=x.dtype)()]
        typ = basic.TensorType(broadcastable=[False], dtype='int64')
        if self.return_index:
            outputs.append(typ())
        if self.return_inverse:
            outputs.append(typ())
        if self.return_counts:
            outputs.append(typ())
        return theano.Apply(self, [x], outputs)

    def perform(self, node, inputs, output_storage):
        x = inputs[0]
        z = output_storage
        param = {}
        if self.return_index:
            param['return_index'] = True
        if self.return_inverse:
            param['return_inverse'] = True
        if self.return_counts:
            param['return_counts'] = True
        outs = np.unique(x, **param)
        # With no extra flags numpy returns a bare array, otherwise a tuple.
        if ((not self.return_inverse) and
                (not self.return_index) and
                (not self.return_counts)):
            z[0][0] = outs
        else:
            for i in range(len(outs)):
                z[i][0] = outs[i]

    def infer_shape(self, node, i0_shapes):
        ret = node.fgraph.shape_feature.default_infer_shape(node, i0_shapes)
        if self.return_inverse:
            # The inverse-index output has one entry per element of the
            # flattened input; its slot depends on whether return_index
            # also occupies an output.
            shape = (basic.prod(i0_shapes[0]), )
            if self.return_index:
                ret[2] = shape
                return ret
            ret[1] = shape
            return ret
        return ret
| true | true |
1c32567adbefc7506d550909f7d41a6db6224a31 | 2,226 | py | Python | skills_ml/algorithms/sampling/methods.py | maneeshdisodia/skills-ml | 194b262aa5bad1af381d1f63f8b327cf96523950 | [
"MIT"
] | null | null | null | skills_ml/algorithms/sampling/methods.py | maneeshdisodia/skills-ml | 194b262aa5bad1af381d1f63f8b327cf96523950 | [
"MIT"
] | null | null | null | skills_ml/algorithms/sampling/methods.py | maneeshdisodia/skills-ml | 194b262aa5bad1af381d1f63f8b327cf96523950 | [
"MIT"
] | null | null | null | """Generic sampling methods"""
import numpy as np
import heapq as hq
import random
def reservoir(it, k):
    """Reservoir sampling (Algorithm R) from a streaming iterator.

    Randomly chooses a sample of at most `k` items from `it`; every item
    in the stream has equal probability of ending up in the sample.

    Args:
        it (iterator): Iterator to sample from
        k (int): Sample size

    Returns:
        generator: The result sample of k items (in arbitrary order).
    """
    it = iter(it)
    result = []
    for i, datum in enumerate(it):
        if i < k:
            # Fill the reservoir with the first k items.
            result.append(datum)
        else:
            # Bug fix: Algorithm R draws the replacement index from
            # [0, i] inclusive (random.randint is inclusive on both
            # ends); drawing from [0, i-1] biases the sample.
            j = random.randint(0, i)
            if j < k:
                result[j] = datum
    while len(result) > 0:
        yield result.pop()
def reservoir_weighted(it, k, weights):
    """Weighted reservoir sampling (A-Res) from a streaming iterator.

    Randomly chooses a sample of at most `k` items, keeping each item with
    probability proportional to the weight of its label.

    Args:
        it (iterator): Iterator of (item, label) pairs to sample from.
        k (int): Sample size
        weights (dict): Maps every label appearing in the iterator to its
            sampling weight, e.g. ``{'11': 2, '13': 1}``.

    Returns:
        generator: The result sample of k items.
    """
    heap = []
    # A-Res key: u ** (1/w) with u ~ Uniform(0, 1); keep the k largest keys.
    hkey = lambda w: np.power(np.random.uniform(0.0, 1.0), 1.0 / w)
    for i, datum in enumerate(it):
        weight = weights[datum[1]]
        score = hkey(weight)
        if len(heap) < k:
            # Bug fix: push the score already drawn for this item instead
            # of drawing a second, different random key.
            hq.heappush(heap, (score, datum))
        elif score > heap[0][0]:
            hq.heapreplace(heap, (score, datum))
    while len(heap) > 0:
        yield hq.heappop(heap)[1]
| 35.903226 | 119 | 0.623989 | import numpy as np
import heapq as hq
import random
def reservoir(it, k):
    """Reservoir sampling (Algorithm R): uniform sample of `k` items from a
    stream. Yields the sampled items in arbitrary order."""
    it = iter(it)
    result = []
    for i, datum in enumerate(it):
        if i < k:
            result.append(datum)
        else:
            # Bug fix: the replacement index must be drawn from [0, i]
            # inclusive; [0, i-1] biases the sample.
            j = random.randint(0, i)
            if j < k:
                result[j] = datum
    while len(result) > 0:
        yield result.pop()
def reservoir_weighted(it, k, weights):
    """Weighted reservoir sampling (A-Res): sample `k` (item, label) pairs
    with probability proportional to ``weights[label]``."""
    heap = []
    # A-Res key: u ** (1/w) with u ~ Uniform(0, 1); keep the k largest keys.
    hkey = lambda w: np.power(np.random.uniform(0.0, 1.0), 1.0 / w)
    for i, datum in enumerate(it):
        weight = weights[datum[1]]
        score = hkey(weight)
        if len(heap) < k:
            # Bug fix: reuse the score already drawn for this item instead
            # of drawing a second, different random key.
            hq.heappush(heap, (score, datum))
        elif score > heap[0][0]:
            hq.heapreplace(heap, (score, datum))
    while len(heap) > 0:
        yield hq.heappop(heap)[1]
| true | true |
1c32588266235dc67fa6e996a30f2750797e9832 | 876 | py | Python | setup.py | RockefellerArchiveCenter/rac_aspace | 02546e5d618a6b9c2e2edba35383a457cba9321b | [
"MIT"
] | null | null | null | setup.py | RockefellerArchiveCenter/rac_aspace | 02546e5d618a6b9c2e2edba35383a457cba9321b | [
"MIT"
] | 74 | 2020-01-14T14:55:51.000Z | 2021-02-18T21:13:29.000Z | setup.py | RockefellerArchiveCenter/rac_aspace | 02546e5d618a6b9c2e2edba35383a457cba9321b | [
"MIT"
] | 2 | 2020-03-28T21:19:21.000Z | 2022-02-11T20:05:33.000Z | from setuptools import setup, find_packages
# Read the long description from the README so PyPI can render it.
# Fix: read with an explicit encoding -- without it, installation fails on
# systems whose locale encoding cannot decode the (UTF-8) README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(name='rac-aspace',
      version='0.0.1',
      description='Helpers for using the ArchivesSpace API using ArchivesSnake',
      long_description=long_description,
      long_description_content_type="text/markdown",
      url='https://github.com/RockefellerArchiveCenter/rac_aspace',
      author='Rockefeller Archive Center',
      author_email='archive@rockarch.org',
      license='MIT',
      packages=find_packages(),
      install_requires=["ArchivesSnake>=0.8.1",
                        "rapidfuzz>=0.7.3"],
      tests_require=["pytest",
                     "pre-commit>=1.18.3",
                     "sphinx>=1.8.5",
                     "tox>=3.14.0",
                     "vcrpy>=4.0.2"],
      python_requires='>=3.5',
      zip_safe=False)
| 35.04 | 80 | 0.590183 | from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='rac-aspace',
version='0.0.1',
description='Helpers for using the ArchivesSpace API using ArchivesSnake',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/RockefellerArchiveCenter/rac_aspace',
author='Rockefeller Archive Center',
author_email='archive@rockarch.org',
license='MIT',
packages=find_packages(),
install_requires=["ArchivesSnake>=0.8.1",
"rapidfuzz>=0.7.3"],
tests_require=["pytest",
"pre-commit>=1.18.3",
"sphinx>=1.8.5",
"tox>=3.14.0",
"vcrpy>=4.0.2"],
python_requires='>=3.5',
zip_safe=False)
| true | true |
1c32595e7fc90ce0ff461cb9decddd85e1c9ac69 | 3,437 | py | Python | localstack/services/cloudformation/models/logs.py | Madajevas/localstack | 85c712e50d45183b9703c682de02d5114c50c47c | [
"Apache-2.0"
] | 1 | 2022-01-21T17:14:52.000Z | 2022-01-21T17:14:52.000Z | localstack/services/cloudformation/models/logs.py | Madajevas/localstack | 85c712e50d45183b9703c682de02d5114c50c47c | [
"Apache-2.0"
] | null | null | null | localstack/services/cloudformation/models/logs.py | Madajevas/localstack | 85c712e50d45183b9703c682de02d5114c50c47c | [
"Apache-2.0"
] | null | null | null | from localstack.services.cloudformation.deployment_utils import generate_default_name
from localstack.services.cloudformation.service_models import GenericBaseModel
from localstack.utils.aws import aws_stack
class LogsLogGroup(GenericBaseModel):
    """CloudFormation model for ``AWS::Logs::LogGroup`` resources."""

    @staticmethod
    def cloudformation_type():
        return "AWS::Logs::LogGroup"

    def get_cfn_attribute(self, attribute_name):
        # "Arn" is served from the deployed state; everything else is
        # delegated to the generic base class.
        props = self.props
        if attribute_name == "Arn":
            return props.get("arn")
        return super(LogsLogGroup, self).get_cfn_attribute(attribute_name)

    def get_physical_resource_id(self, attribute=None, **kwargs):
        if attribute == "Arn":
            return self.get_cfn_attribute("Arn")
        return self.props.get("LogGroupName")

    def fetch_state(self, stack_name, resources):
        # Look up the deployed log group by its (ref-resolved) name.
        group_name = self.props.get("LogGroupName")
        group_name = self.resolve_refs_recursively(stack_name, group_name, resources)
        logs = aws_stack.connect_to_service("logs")
        groups = logs.describe_log_groups(logGroupNamePrefix=group_name)["logGroups"]
        # describe_log_groups matches by prefix, so keep only exact matches.
        return ([g for g in groups if g["logGroupName"] == group_name] or [None])[0]

    @staticmethod
    def add_defaults(resource, stack_name: str):
        # Generate a log group name when the template does not provide one.
        role_name = resource.get("Properties", {}).get("LogGroupName")
        if not role_name:
            resource["Properties"]["LogGroupName"] = generate_default_name(
                stack_name, resource["LogicalResourceId"]
            )

    @staticmethod
    def get_deploy_templates():
        # Maps create/delete deployment steps to AWS Logs client calls.
        return {
            "create": {
                "function": "create_log_group",
                "parameters": {"logGroupName": "LogGroupName"},
            },
            "delete": {
                "function": "delete_log_group",
                "parameters": {"logGroupName": "LogGroupName"},
            },
        }
class LogsSubscriptionFilter(GenericBaseModel):
    """CloudFormation model for ``AWS::Logs::SubscriptionFilter``
    resources."""

    @staticmethod
    def cloudformation_type():
        return "AWS::Logs::SubscriptionFilter"

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get("LogGroupName")

    def fetch_state(self, stack_name, resources):
        # Find the deployed filter by log group name and filter pattern.
        props = self.props
        group_name = self.resolve_refs_recursively(stack_name, props.get("LogGroupName"), resources)
        filter_pattern = self.resolve_refs_recursively(
            stack_name, props.get("FilterPattern"), resources
        )
        logs = aws_stack.connect_to_service("logs")
        groups = logs.describe_subscription_filters(logGroupName=group_name)["subscriptionFilters"]
        groups = [g for g in groups if g.get("filterPattern") == filter_pattern]
        return (groups or [None])[0]

    @staticmethod
    def get_deploy_templates():
        # Maps create/delete deployment steps to AWS Logs client calls.
        return {
            "create": {
                "function": "put_subscription_filter",
                "parameters": {
                    "logGroupName": "LogGroupName",
                    "filterName": "LogGroupName",  # there can only be one filter associated with a log group
                    "filterPattern": "FilterPattern",
                    "destinationArn": "DestinationArn",
                },
            },
            "delete": {
                "function": "delete_subscription_filter",
                "parameters": {
                    "logGroupName": "LogGroupName",
                    "filterName": "LogGroupName",
                },
            },
        }
| 38.188889 | 109 | 0.614489 | from localstack.services.cloudformation.deployment_utils import generate_default_name
from localstack.services.cloudformation.service_models import GenericBaseModel
from localstack.utils.aws import aws_stack
class LogsLogGroup(GenericBaseModel):
    """CloudFormation model for ``AWS::Logs::LogGroup`` resources."""

    @staticmethod
    def cloudformation_type():
        return "AWS::Logs::LogGroup"

    def get_cfn_attribute(self, attribute_name):
        # "Arn" is served from the deployed state; everything else is
        # delegated to the generic base class.
        props = self.props
        if attribute_name == "Arn":
            return props.get("arn")
        return super(LogsLogGroup, self).get_cfn_attribute(attribute_name)

    def get_physical_resource_id(self, attribute=None, **kwargs):
        if attribute == "Arn":
            return self.get_cfn_attribute("Arn")
        return self.props.get("LogGroupName")

    def fetch_state(self, stack_name, resources):
        # Look up the deployed log group by its (ref-resolved) name.
        group_name = self.props.get("LogGroupName")
        group_name = self.resolve_refs_recursively(stack_name, group_name, resources)
        logs = aws_stack.connect_to_service("logs")
        groups = logs.describe_log_groups(logGroupNamePrefix=group_name)["logGroups"]
        # describe_log_groups matches by prefix, so keep only exact matches.
        return ([g for g in groups if g["logGroupName"] == group_name] or [None])[0]

    @staticmethod
    def add_defaults(resource, stack_name: str):
        # Generate a log group name when the template does not provide one.
        role_name = resource.get("Properties", {}).get("LogGroupName")
        if not role_name:
            resource["Properties"]["LogGroupName"] = generate_default_name(
                stack_name, resource["LogicalResourceId"]
            )

    @staticmethod
    def get_deploy_templates():
        # Maps create/delete deployment steps to AWS Logs client calls.
        return {
            "create": {
                "function": "create_log_group",
                "parameters": {"logGroupName": "LogGroupName"},
            },
            "delete": {
                "function": "delete_log_group",
                "parameters": {"logGroupName": "LogGroupName"},
            },
        }
class LogsSubscriptionFilter(GenericBaseModel):
    """CloudFormation model for ``AWS::Logs::SubscriptionFilter``
    resources."""

    @staticmethod
    def cloudformation_type():
        return "AWS::Logs::SubscriptionFilter"

    def get_physical_resource_id(self, attribute=None, **kwargs):
        return self.props.get("LogGroupName")

    def fetch_state(self, stack_name, resources):
        # Find the deployed filter by log group name and filter pattern.
        props = self.props
        group_name = self.resolve_refs_recursively(stack_name, props.get("LogGroupName"), resources)
        filter_pattern = self.resolve_refs_recursively(
            stack_name, props.get("FilterPattern"), resources
        )
        logs = aws_stack.connect_to_service("logs")
        groups = logs.describe_subscription_filters(logGroupName=group_name)["subscriptionFilters"]
        groups = [g for g in groups if g.get("filterPattern") == filter_pattern]
        return (groups or [None])[0]

    @staticmethod
    def get_deploy_templates():
        # Maps create/delete deployment steps to AWS Logs client calls.
        # The filter name reuses the log group name: a log group can only
        # carry one subscription filter here.
        return {
            "create": {
                "function": "put_subscription_filter",
                "parameters": {
                    "logGroupName": "LogGroupName",
                    "filterName": "LogGroupName",
                    "filterPattern": "FilterPattern",
                    "destinationArn": "DestinationArn",
                },
            },
            "delete": {
                "function": "delete_subscription_filter",
                "parameters": {
                    "logGroupName": "LogGroupName",
                    "filterName": "LogGroupName",
                },
            },
        }
| true | true |
1c325ab77c0bd73ccd79c55ec7eed5d45e95eb30 | 4,024 | py | Python | unit_tests/database/ArkDBMySQLTest.py | fangzhouwang/ArkLibPy | e14051eda859ba31887eeb501c27c9d7bf5865c8 | [
"MIT"
] | null | null | null | unit_tests/database/ArkDBMySQLTest.py | fangzhouwang/ArkLibPy | e14051eda859ba31887eeb501c27c9d7bf5865c8 | [
"MIT"
] | null | null | null | unit_tests/database/ArkDBMySQLTest.py | fangzhouwang/ArkLibPy | e14051eda859ba31887eeb501c27c9d7bf5865c8 | [
"MIT"
] | null | null | null | import unittest
from arklibpy.database.ArkDBMySQL import *
from os import path
class ArkDBMySQLTestCase(unittest.TestCase):
    """Integration tests for ArkDBMySQL.

    These require a reachable local MySQL server; connection settings
    come from ``db_config_local_tester.txt`` next to this test module
    (or the hard-coded tester credentials in test_config_file).
    """

    def setUp(self):
        # Fresh connection per test, configured from the local config file.
        here = path.abspath(path.dirname(__file__))
        self.db_ = ArkDBMySQL(db_config_file=path.join(here, 'db_config_local_tester.txt'))
        self.table_ = "test_table"

    def test_config_file(self):
        """Connecting with explicit parameters (no config file) also works."""
        DB_HOST = "localhost"
        DB_USER = "tester"
        DB_PASSWORD = "tester"
        DB_SCHEMA = "gtest"
        DB_PORT = 3306
        self.db_ = ArkDBMySQL(host=DB_HOST, user=DB_USER, password=DB_PASSWORD, schema=DB_SCHEMA, port=DB_PORT)
        self.table_ = "test_table"
        self.db_.run_sql(f'DROP TABLE IF EXISTS {self.table_}')
        self.db_.run_sql(f'CREATE TABLE {self.table_} (str_col VARCHAR(20), int_col INT PRIMARY KEY)')

    def test_connection(self):
        """Basic CRUD round-trip: insert, update, delete, then query."""
        self.db_.run_sql(f'DROP TABLE IF EXISTS {self.table_}')
        self.db_.run_sql(f'CREATE TABLE {self.table_} (str_col VARCHAR(20), int_col INT PRIMARY KEY)')
        self.db_.set_table(self.table_)
        temp = self.db_.get_query_value('int_col', 'SELECT int_col FROM test_table')
        self.assertIsNone(temp)
        rowid = self.db_.insert({"int_col": 1, "str_col": "test"})
        self.assertEqual(rowid, 0)  # rowid starts with 0
        self.db_.insert({"int_col": 2, "str_col": "test"})
        self.db_.update(2, 'int_col', {"int_col": 5, "str_col": "test"})
        self.db_.delete(1, 'int_col')
        temp = self.db_.get_query_value('int_col', 'SELECT int_col FROM test_table')
        self.assertEqual(temp, 2)

    def test_create_table(self):
        """create_table builds a table from a description dict and refuses
        to overwrite an existing table unless forced."""
        table_desc = dict()
        table_desc['table_name'] = 'WORK_LIB'
        table_columns = list()
        table_columns.append({'name': 'idCELL', 'type': 'INT', 'property': 'NOT NULL AUTO_INCREMENT'})
        table_columns.append({'name': 'CELL_PMOS_CNT', 'type': 'INT', 'property': 'NOT NULL'})
        table_columns.append({'name': 'CELL_NMOS_CNT', 'type': 'INT', 'property': 'NOT NULL'})
        table_columns.append({'name': 'CELL_NETLIST', 'type': 'VARCHAR(1000)', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_BSF', 'type': 'VARCHAR(256)',
                              'property': "CHARACTER SET 'utf8' COLLATE 'utf8_bin' NULL"})
        table_columns.append({'name': 'CELL_NOTE', 'type': 'VARCHAR(1000)', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_FAMILY', 'type': 'VARCHAR(1000)', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_Schematic', 'type': 'VARCHAR(1000)', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_BSF_UNIFIED', 'type': 'VARCHAR(256)',
                              'property': "CHARACTER SET 'utf8' COLLATE 'utf8_bin' NULL"})
        table_columns.append({'name': 'CELL_LEVEL', 'type': 'INT', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_SIM_RESULT', 'type': 'INT', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_BSF_weak', 'type': 'VARCHAR(256)',
                              'property': "CHARACTER SET 'utf8' COLLATE 'utf8_bin' NULL"})
        table_columns.append({'name': 'CELL_BSF_weak_UNIFIED', 'type': 'VARCHAR(256)',
                              'property': "CHARACTER SET 'utf8' COLLATE 'utf8_bin' NULL"})
        table_desc['table_columns'] = table_columns
        table_primary_keys = ['idCELL', 'CELL_PMOS_CNT']
        table_desc['table_pks'] = table_primary_keys
        self.db_.run_sql(f"DROP TABLE IF EXISTS {table_desc['table_name']}")
        self.db_.create_table(table_desc)
        self.assertTrue(self.db_.is_table_exist(table_desc['table_name']))
        # Creating the same table twice should fail
        self.assertFalse(self.db_.create_table(table_desc))
        # Force creating the same table twice should success
        self.assertTrue(self.db_.create_table(table_desc, True))
        # Clean up the test
        self.db_.run_sql(f"DROP TABLE {table_desc['table_name']}")
if __name__ == '__main__':
unittest.main()
| 49.073171 | 111 | 0.631213 | import unittest
from arklibpy.database.ArkDBMySQL import *
from os import path
class ArkDBMySQLTestCase(unittest.TestCase):
    """Integration tests for ArkDBMySQL.

    These require a reachable local MySQL server; connection settings
    come from ``db_config_local_tester.txt`` next to this test module
    (or the hard-coded tester credentials in test_config_file).
    """

    def setUp(self):
        # Fresh connection per test, configured from the local config file.
        here = path.abspath(path.dirname(__file__))
        self.db_ = ArkDBMySQL(db_config_file=path.join(here, 'db_config_local_tester.txt'))
        self.table_ = "test_table"

    def test_config_file(self):
        """Connecting with explicit parameters (no config file) also works."""
        DB_HOST = "localhost"
        DB_USER = "tester"
        DB_PASSWORD = "tester"
        DB_SCHEMA = "gtest"
        DB_PORT = 3306
        self.db_ = ArkDBMySQL(host=DB_HOST, user=DB_USER, password=DB_PASSWORD, schema=DB_SCHEMA, port=DB_PORT)
        self.table_ = "test_table"
        self.db_.run_sql(f'DROP TABLE IF EXISTS {self.table_}')
        self.db_.run_sql(f'CREATE TABLE {self.table_} (str_col VARCHAR(20), int_col INT PRIMARY KEY)')

    def test_connection(self):
        """Basic CRUD round-trip: insert, update, delete, then query."""
        self.db_.run_sql(f'DROP TABLE IF EXISTS {self.table_}')
        self.db_.run_sql(f'CREATE TABLE {self.table_} (str_col VARCHAR(20), int_col INT PRIMARY KEY)')
        self.db_.set_table(self.table_)
        temp = self.db_.get_query_value('int_col', 'SELECT int_col FROM test_table')
        self.assertIsNone(temp)
        rowid = self.db_.insert({"int_col": 1, "str_col": "test"})
        self.assertEqual(rowid, 0)  # rowids start at 0
        self.db_.insert({"int_col": 2, "str_col": "test"})
        self.db_.update(2, 'int_col', {"int_col": 5, "str_col": "test"})
        self.db_.delete(1, 'int_col')
        temp = self.db_.get_query_value('int_col', 'SELECT int_col FROM test_table')
        self.assertEqual(temp, 2)

    def test_create_table(self):
        """create_table builds a table from a description dict and refuses
        to overwrite an existing table unless forced."""
        table_desc = dict()
        table_desc['table_name'] = 'WORK_LIB'
        table_columns = list()
        table_columns.append({'name': 'idCELL', 'type': 'INT', 'property': 'NOT NULL AUTO_INCREMENT'})
        table_columns.append({'name': 'CELL_PMOS_CNT', 'type': 'INT', 'property': 'NOT NULL'})
        table_columns.append({'name': 'CELL_NMOS_CNT', 'type': 'INT', 'property': 'NOT NULL'})
        table_columns.append({'name': 'CELL_NETLIST', 'type': 'VARCHAR(1000)', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_BSF', 'type': 'VARCHAR(256)',
                              'property': "CHARACTER SET 'utf8' COLLATE 'utf8_bin' NULL"})
        table_columns.append({'name': 'CELL_NOTE', 'type': 'VARCHAR(1000)', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_FAMILY', 'type': 'VARCHAR(1000)', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_Schematic', 'type': 'VARCHAR(1000)', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_BSF_UNIFIED', 'type': 'VARCHAR(256)',
                              'property': "CHARACTER SET 'utf8' COLLATE 'utf8_bin' NULL"})
        table_columns.append({'name': 'CELL_LEVEL', 'type': 'INT', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_SIM_RESULT', 'type': 'INT', 'property': 'NULL'})
        table_columns.append({'name': 'CELL_BSF_weak', 'type': 'VARCHAR(256)',
                              'property': "CHARACTER SET 'utf8' COLLATE 'utf8_bin' NULL"})
        table_columns.append({'name': 'CELL_BSF_weak_UNIFIED', 'type': 'VARCHAR(256)',
                              'property': "CHARACTER SET 'utf8' COLLATE 'utf8_bin' NULL"})
        table_desc['table_columns'] = table_columns
        table_primary_keys = ['idCELL', 'CELL_PMOS_CNT']
        table_desc['table_pks'] = table_primary_keys
        self.db_.run_sql(f"DROP TABLE IF EXISTS {table_desc['table_name']}")
        self.db_.create_table(table_desc)
        self.assertTrue(self.db_.is_table_exist(table_desc['table_name']))
        # Creating the same table twice should fail.
        self.assertFalse(self.db_.create_table(table_desc))
        # Force-creating the same table twice should succeed.
        self.assertTrue(self.db_.create_table(table_desc, True))
        # Clean up after the test.
        self.db_.run_sql(f"DROP TABLE {table_desc['table_name']}")
if __name__ == '__main__':
unittest.main()
| true | true |
1c325b46e0e840265cc762067f4583887f37c485 | 993 | py | Python | DataStructures/RingBufferQueue.py | sshh12/SchoolCode | 2b16f0e15d325e01194ce0388bf87eb813ed43e8 | [
"MIT"
] | 5 | 2017-09-26T19:36:51.000Z | 2020-10-01T16:59:07.000Z | DataStructures/RingBufferQueue.py | sshh12/SchoolCode | 2b16f0e15d325e01194ce0388bf87eb813ed43e8 | [
"MIT"
] | 18 | 2017-07-16T20:26:50.000Z | 2018-10-05T20:15:24.000Z | DataStructures/RingBufferQueue.py | sshh12/SchoolCode | 2b16f0e15d325e01194ce0388bf87eb813ed43e8 | [
"MIT"
] | 1 | 2019-04-22T17:38:46.000Z | 2019-04-22T17:38:46.000Z |
class RingBufferQueue:
    """Fixed-capacity FIFO queue backed by a circular buffer.

    ``add`` rejects items once the buffer is full; ``peek``/``remove``
    return ``None`` when the queue is empty.
    """

    def __init__(self, size=10):
        self.items = [None] * size  # backing storage, reused circularly
        self.size = size            # fixed capacity
        self.write_index = 0        # next slot to write into
        self.used = 0               # number of items currently queued

    def add(self, item):
        """Append ``item``; return True on success, False when full."""
        if self.used != self.size:
            self.items[self.write_index] = item
            self.write_index = (self.write_index + 1) % self.size
            self.used += 1
            return True
        return False

    def peek(self):
        """Return the oldest item without removing it, or None when empty.

        BUG FIX: the original indexed the buffer even when ``used == 0`` and
        could return stale contents after a wrap-around; an empty queue now
        always yields None.
        """
        if self.used == 0:
            return None
        return self.items[(self.write_index + (self.size - self.used)) % self.size]

    def remove(self):
        """Pop and return the oldest item, or None when the queue is empty."""
        if self.used > 0:
            item = self.peek()
            self.used -= 1
            return item
        return None
def main():
    """Smoke-test the ring-buffer queue implementation(s) on the console."""
    for queue_cls in (RingBufferQueue,):
        queue = queue_cls()
        queue.add("1")
        queue.add("2")
        print(queue.peek())
        print(queue.remove())
        queue.add("3")
        print(queue.remove())
        print("------")


if __name__ == '__main__':
    main()
| 16.278689 | 83 | 0.507553 |
class RingBufferQueue:
    """FIFO queue of fixed capacity implemented on a circular buffer."""

    def __init__(self, size=10):
        self.items = [None for _ in range(size)]
        self.size = size
        self.write_index = 0
        self.used = 0

    def add(self, item):
        """Store ``item`` if there is room; report success as a bool."""
        if self.used == self.size:
            return False
        self.items[self.write_index] = item
        self.write_index = (self.write_index + 1) % self.size
        self.used += 1
        return True

    def peek(self):
        """Return (without removing) the item that ``remove`` would yield."""
        read_index = (self.write_index + self.size - self.used) % self.size
        return self.items[read_index]

    def remove(self):
        """Pop the oldest item, or return None when the queue is empty."""
        if self.used <= 0:
            return None
        oldest = self.peek()
        self.used -= 1
        return oldest
def main():
for rbq_class in [RingBufferQueue]:
queue = rbq_class()
queue.add("1")
queue.add("2")
print(queue.peek())
print(queue.remove())
queue.add("3")
print(queue.remove())
print("------")
if __name__ == '__main__':
main()
| true | true |
1c325c3597200d10e7f99e53b52661105c29c73c | 382 | py | Python | setup.py | riamf/depsPlot | 0d08e407928301580fbed780b3b22d72222678ad | [
"MIT"
] | null | null | null | setup.py | riamf/depsPlot | 0d08e407928301580fbed780b3b22d72222678ad | [
"MIT"
] | null | null | null | setup.py | riamf/depsPlot | 0d08e407928301580fbed780b3b22d72222678ad | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='depsPlot',
version='0.1',
description='wip',
url='#',
author='riamf',
author_email='riamf2@gmail.com',
license='MIT',
py_modules=['depsPlot'],
install_requires=[
'Click'
],
entry_points='''
[console_scripts]
depsPlot=cli
''',
zip_safe=False)
| 20.105263 | 38 | 0.536649 | from setuptools import setup
setup(name='depsPlot',
version='0.1',
description='wip',
url='#',
author='riamf',
author_email='riamf2@gmail.com',
license='MIT',
py_modules=['depsPlot'],
install_requires=[
'Click'
],
entry_points='''
[console_scripts]
depsPlot=cli
''',
zip_safe=False)
| true | true |
1c325d919da9763ec1636137e18e475fa5bb6c66 | 2,562 | py | Python | blackswan/pythonanattribution.py | bird-house/blackswan | 5f1f20423874315f5e8eea2cf7302f9d0c05adae | [
"BSD-3-Clause"
] | null | null | null | blackswan/pythonanattribution.py | bird-house/blackswan | 5f1f20423874315f5e8eea2cf7302f9d0c05adae | [
"BSD-3-Clause"
] | 4 | 2018-05-07T16:47:47.000Z | 2019-01-21T13:07:17.000Z | blackswan/pythonanattribution.py | bird-house/blackswan | 5f1f20423874315f5e8eea2cf7302f9d0c05adae | [
"BSD-3-Clause"
] | 2 | 2018-07-10T12:58:55.000Z | 2021-02-13T01:10:52.000Z | # import logging
# LOGGER = logging.getLogger("PYWPS")
import pandas
import random
import numpy as np
def analogs_generator(anafile, yfile, nsim=20):
    """
    Simulates nsim values of the variable y using analogues for all the dates present in the file anafile
    :param anafile: path to a file with the results of the analogues
    :param yfile: path to the file containing the data. The file should have two columns:
    - the first with the date with the following for format yyyymmdd
    - the second with the variable of interest y, columns are separated by spaces and are supposed to have headers
    :param nsim: number of simulations of the variable y to generate with the analogues
    :return: pandas.DataFrame of simulated values (nsim rows, one column per date)
    """
    def weight_analogues(date):
        # Pair each analogue date of `date` with its distance (one row per analogue).
        dist = disttable.loc[[date], :].transpose()
        date = anatable.loc[[date], :].transpose()
        weights = pandas.concat([date.reset_index(drop=True), dist.reset_index(drop=True)], axis=1)
        weights.columns = ['date', 'dist']
        weights = weights.set_index('date')
        return weights
    def select_y_analogues(date):
        # y values observed on the analogue dates of `date`.
        bidx = ytable.index.isin(anatable.loc[date, :])
        return ytable.iloc[bidx, 0]
    def generate_cond_ymean(date, nsim=20):
        # Draw nsim y values among the analogues via a multinomial resample.
        # NOTE(review): sampling weights are proportional to the raw
        # distances, so MORE distant analogues are drawn more often --
        # confirm this is intended (inverse distances would be the usual choice).
        weights = weight_analogues(date)
        ys = select_y_analogues(date)
        dat = pandas.concat([ys, weights], axis=1, join="inner")
        weights = np.random.multinomial(nsim, dat.dist / sum(dat.dist))
        return random.sample(list(np.repeat(dat.iloc[:, 0], weights)), nsim)
    ytable = pandas.read_table(yfile, sep=" ", skipinitialspace=True)
    anatable = pandas.read_table(anafile, sep=" ", skipinitialspace=True)
    # Columns whose name contains "dis" hold distances; the analogue file is
    # laid out as: date | analogue dates | distances | correlations.
    nanalogs = len([s for s in anatable.columns if "dis" in s])
    disttable = anatable.iloc[:, [0] + list(range(nanalogs + 1, 2 * nanalogs + 1))].copy()
    cortable = anatable.iloc[:, [0] + list(range(2 * nanalogs + 1, 3 * nanalogs + 1))].copy()
    anatable = anatable.iloc[:, 0:(nanalogs + 1)].copy()
    ytable = ytable.set_index('date')
    disttable = disttable.set_index('date')
    cortable = cortable.set_index('date')
    anatable = anatable.set_index('date')
    # One simulation vector per date, assembled column-wise.
    condys = list(map(generate_cond_ymean, anatable.index, np.repeat(nsim, len(anatable.index))))
    condys = pandas.DataFrame(condys)
    condys = condys.transpose()
    # condys = [x.reset_index(drop=True) for x in condys]
    # condys = pandas.concat(condys, axis = 1)
    condys.columns = anatable.index
    return condys
    # condyms = condys.mean(axis=1)
    # return condyms
| 42.7 | 115 | 0.673302 |
import pandas
import random
import numpy as np
def analogs_generator(anafile, yfile, nsim=20):
def weight_analogues(date):
dist = disttable.loc[[date], :].transpose()
date = anatable.loc[[date], :].transpose()
weights = pandas.concat([date.reset_index(drop=True), dist.reset_index(drop=True)], axis=1)
weights.columns = ['date', 'dist']
weights = weights.set_index('date')
return weights
def select_y_analogues(date):
bidx = ytable.index.isin(anatable.loc[date, :])
return ytable.iloc[bidx, 0]
def generate_cond_ymean(date, nsim=20):
weights = weight_analogues(date)
ys = select_y_analogues(date)
dat = pandas.concat([ys, weights], axis=1, join="inner")
weights = np.random.multinomial(nsim, dat.dist / sum(dat.dist))
return random.sample(list(np.repeat(dat.iloc[:, 0], weights)), nsim)
ytable = pandas.read_table(yfile, sep=" ", skipinitialspace=True)
anatable = pandas.read_table(anafile, sep=" ", skipinitialspace=True)
nanalogs = len([s for s in anatable.columns if "dis" in s])
disttable = anatable.iloc[:, [0] + list(range(nanalogs + 1, 2 * nanalogs + 1))].copy()
cortable = anatable.iloc[:, [0] + list(range(2 * nanalogs + 1, 3 * nanalogs + 1))].copy()
anatable = anatable.iloc[:, 0:(nanalogs + 1)].copy()
ytable = ytable.set_index('date')
disttable = disttable.set_index('date')
cortable = cortable.set_index('date')
anatable = anatable.set_index('date')
condys = list(map(generate_cond_ymean, anatable.index, np.repeat(nsim, len(anatable.index))))
condys = pandas.DataFrame(condys)
condys = condys.transpose()
condys.columns = anatable.index
return condys
| true | true |
1c325dbf3e312be52259971858d330435aa51739 | 314 | py | Python | DailyProgrammer/20120221B.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | 2 | 2020-12-23T18:59:22.000Z | 2021-04-14T13:16:09.000Z | DailyProgrammer/20120221B.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | DailyProgrammer/20120221B.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | """
Create a program that will take any string and write it out to a text file, reversed.
input: "hello!"
output: "!olleh"
"""
# Write the user's input string, reversed, into a text file.
with open('20120221B.txt', 'w') as f:
    f.write(input('Enter string: ')[::-1])
print('DONE')
# found better version
# open('20120221B.txt', 'w').write(input('Enter string: ')[::-1])
| 22.428571 | 85 | 0.640127 |
with open('20120221B.txt', 'w') as f:
f.write(input('Enter string: ')[::-1])
print('DONE')
| true | true |
1c325dcde86abd941f521a970dca3c0e0fb1358e | 407 | py | Python | python/gauss-quadrature.py | simnalamburt/snippets | 8ba4cfcb1305d2b82ea892e3305613eeb7ba382b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 31 | 2016-01-27T07:03:25.000Z | 2022-02-25T07:59:11.000Z | python/gauss-quadrature.py | simnalamburt/snippets | 8ba4cfcb1305d2b82ea892e3305613eeb7ba382b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2015-01-26T01:27:21.000Z | 2015-01-30T16:16:30.000Z | python/gauss-quadrature.py | simnalamburt/snippets | 8ba4cfcb1305d2b82ea892e3305613eeb7ba382b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 3 | 2017-02-07T04:17:56.000Z | 2020-06-12T05:01:31.000Z | from decimal import Decimal
from collections.abc import Callable
from math import exp
def gauss_quadrature_3(f: Callable[[Decimal], Decimal]) -> Decimal:
    """Approximate the integral of ``f`` over [-1, 1] with the 3-point
    Gauss-Legendre rule.

    Nodes are 0 (weight 8/9) and +/-sqrt(3/5) ~= +/-0.7745966692 (weight 5/9).
    The node is handed to ``f`` as a Decimal; the result has whatever numeric
    type ``f``'s arithmetic produces (the annotation says Decimal, but
    float-returning integrands work equally well).
    """
    node = Decimal('0.77459_66692')  # sqrt(3/5) rounded to 10 decimal places
    return f(-node) * 5 / 9 + f(Decimal(0)) * 8 / 9 + f(node) * 5 / 9
# Demo integrands evaluated with the 3-point rule on [-1, 1]
# (presumably substituted versions of integrals on other intervals -- confirm).
print(gauss_quadrature_3(lambda t:2/(1 + (2*t + 3)**2)))
print()
print(gauss_quadrature_3(lambda t:2*exp(6*t+6)))
| 27.133333 | 67 | 0.646192 | from decimal import Decimal
from collections.abc import Callable
from math import exp
def gauss_quadrature_3(f: Callable[[Decimal], Decimal]) -> Decimal:
return (
f(Decimal('-0.77459_66692')) * 5/9 +
f(Decimal(0)) * 8/9 +
f(Decimal('+0.77459_66692')) * 5/9
)
print(gauss_quadrature_3(lambda t:2/(1 + (2*t + 3)**2)))
print()
print(gauss_quadrature_3(lambda t:2*exp(6*t+6)))
| true | true |
1c325e83840db2256a5d7fb7eccfe98a622b5b16 | 1,264 | py | Python | var/spack/repos/builtin/packages/tppred/package.py | robertsawko/spack | 135cf4835f5b646c4aaa0e2eb5552c80fc3a5ce8 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-11-28T10:14:14.000Z | 2019-11-28T10:14:14.000Z | var/spack/repos/builtin/packages/tppred/package.py | robertsawko/spack | 135cf4835f5b646c4aaa0e2eb5552c80fc3a5ce8 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/tppred/package.py | robertsawko/spack | 135cf4835f5b646c4aaa0e2eb5552c80fc3a5ce8 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2017-01-21T17:19:32.000Z | 2017-01-21T17:19:32.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Tppred(Package):
    """TPPRED is a software package for the prediction of mitochondrial
    targeting peptides from protein primary sequence."""
    homepage = "https://tppred2.biocomp.unibo.it/tppred2/default/software"
    url = "http://biocomp.unibo.it/savojard/tppred2.tar.gz"
    version('2.0', sha256='0e180d5ce1f0bccfdbc3dbf9981b3fbe2101c85491c58c58c88856861688a4f5')
    # Needed at run time only (declared type='run'); emboss is also a build/run dep.
    depends_on('python@2.7:2.999', type='run')
    depends_on('py-scikit-learn@0.13.1', type='run')
    depends_on('emboss')
    def url_for_version(self, version):
        # Tarballs are named tppred<major>.tar.gz, so only the major version
        # number goes into the download URL.
        url = 'http://biocomp.unibo.it/savojard/tppred{0}.tar.gz'
        return url.format(version.up_to(1))
    def install(self, spec, prefix):
        # Nothing to compile: copy the driver script and its support trees.
        mkdirp(prefix.bin)
        with working_dir('bin'):
            install('tppred2.py', prefix.bin)
        install_tree('data', prefix.data)
        install_tree('example', prefix.example)
        install_tree('tppred2modules', prefix.modules)
    def setup_run_environment(self, env):
        # Consumers locate the installed data/modules via TPPRED_ROOT.
        env.set('TPPRED_ROOT', self.prefix)
| 35.111111 | 93 | 0.692247 |
from spack import *
class Tppred(Package):
homepage = "https://tppred2.biocomp.unibo.it/tppred2/default/software"
url = "http://biocomp.unibo.it/savojard/tppred2.tar.gz"
version('2.0', sha256='0e180d5ce1f0bccfdbc3dbf9981b3fbe2101c85491c58c58c88856861688a4f5')
depends_on('python@2.7:2.999', type='run')
depends_on('py-scikit-learn@0.13.1', type='run')
depends_on('emboss')
def url_for_version(self, version):
url = 'http://biocomp.unibo.it/savojard/tppred{0}.tar.gz'
return url.format(version.up_to(1))
def install(self, spec, prefix):
mkdirp(prefix.bin)
with working_dir('bin'):
install('tppred2.py', prefix.bin)
install_tree('data', prefix.data)
install_tree('example', prefix.example)
install_tree('tppred2modules', prefix.modules)
def setup_run_environment(self, env):
env.set('TPPRED_ROOT', self.prefix)
| true | true |
1c325fed87fbaa63dee2d08b90932038e5391f92 | 473 | py | Python | 9.11/tor/model_lstm_crf/config.py | JIAQING-XIE/Google_NLP_DL | 45f45e8cbca695ad079af58790edd0619783b0c2 | [
"MIT"
] | null | null | null | 9.11/tor/model_lstm_crf/config.py | JIAQING-XIE/Google_NLP_DL | 45f45e8cbca695ad079af58790edd0619783b0c2 | [
"MIT"
] | null | null | null | 9.11/tor/model_lstm_crf/config.py | JIAQING-XIE/Google_NLP_DL | 45f45e8cbca695ad079af58790edd0619783b0c2 | [
"MIT"
] | null | null | null | batch_size = 8
learning_rate = 0.001
decay_rate = 0.05
seed = 1314159
np_seed = 7894
embedding_size = 300
classfy_number = 13
dropout = 0.5
lstm_hidding_dim = 50
max_length = 83
labels_dict = {
'O':0,'B-POS':1,'I-POS':2,'E-POS':3,'S-POS':4,'B-NEG':5,'I-NEG':6,'E-NEG':7,'S-NEG':8,'B-NEU':9,
'I-NEU':10,'E-NEU':11,'S-NEU':12,'BEGIN':13,'END':14}
sentiments_dict = {1:'POS',2:'POS',3:'POS',4:'POS',5:'NEG',6:'NEG',7:'NEG',8:'NEG',9:'NEU',10:'NEU',11:'NEU',12:'NEU'}
| 29.5625 | 118 | 0.610994 | batch_size = 8
learning_rate = 0.001
decay_rate = 0.05
seed = 1314159
np_seed = 7894
embedding_size = 300
classfy_number = 13
dropout = 0.5
lstm_hidding_dim = 50
max_length = 83
labels_dict = {
'O':0,'B-POS':1,'I-POS':2,'E-POS':3,'S-POS':4,'B-NEG':5,'I-NEG':6,'E-NEG':7,'S-NEG':8,'B-NEU':9,
'I-NEU':10,'E-NEU':11,'S-NEU':12,'BEGIN':13,'END':14}
sentiments_dict = {1:'POS',2:'POS',3:'POS',4:'POS',5:'NEG',6:'NEG',7:'NEG',8:'NEG',9:'NEU',10:'NEU',11:'NEU',12:'NEU'}
| true | true |
1c32608743780b5486438bd3700e3575a16a1d5a | 8,789 | py | Python | kohtaaminen/kohtaaminen.py | sthagen/kohtaaminen | ce0784ccd8be109164d63f2b5dcea128bd6f4534 | [
"MIT"
] | 1 | 2021-11-13T10:57:55.000Z | 2021-11-13T10:57:55.000Z | kohtaaminen/kohtaaminen.py | sthagen/kohtaaminen | ce0784ccd8be109164d63f2b5dcea128bd6f4534 | [
"MIT"
] | 4 | 2021-11-14T15:12:06.000Z | 2021-11-30T13:54:47.000Z | kohtaaminen/kohtaaminen.py | sthagen/kohtaaminen | ce0784ccd8be109164d63f2b5dcea128bd6f4534 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=expression-not-assigned,line-too-long
"""Meeting, rendezvous, confluence (Finnish kohtaaminen) mark up, down, and up again. API."""
import itertools
import os
import pathlib
import re
import shutil
import sys
import tempfile
import zipfile
from typing import List, Optional, Tuple, Union
import mdformat
import pypandoc # type: ignore
DEBUG_VAR = 'KOHTAAMINEN_DEBUG'
DEBUG = os.getenv(DEBUG_VAR)
ENCODING = 'utf-8'
ENCODING_ERRORS_POLICY = 'ignore'
STDIN, STDOUT = 'STDIN', 'STDOUT'
DISPATCH = {
STDIN: sys.stdin,
STDOUT: sys.stdout,
}
MD_ROOT = pathlib.Path('kohtaaminen-md')
def verify_request(argv: Optional[List[str]]) -> Tuple[int, str, List[str]]:
    """Validate CLI arguments; return (exit_code, error_message, argv).

    ``argv`` must be ``[command, zip_path]``; a zero exit code means the
    request is well-formed.
    """
    if not argv or len(argv) != 2:
        return 2, 'received wrong number of arguments', ['']
    command, inp = argv
    # BUG FIX: ('translate') is just a parenthesized string, so `in`
    # performed a SUBSTRING test and accepted e.g. 'trans'; use a real
    # tuple of valid commands.
    if command not in ('translate',):
        return 2, 'received unknown command', ['']
    if inp:
        in_path = pathlib.Path(str(inp))
        if not in_path.is_file():
            return 1, f'source ({in_path}) is no file', ['']
        if not ''.join(in_path.suffixes).lower().endswith('.zip'):
            return 1, 'source has not .zip extension', ['']
    return 0, '', argv
def filter_index(data: List[str]) -> str:
    """Strip Confluence-export noise from an index page.

    Keeps only the lines following the '## Available Pages:' marker, drops
    the generator footer, squeezes blank runs, and returns text headed by
    '# Index' with a single trailing newline.
    """
    kept = ['# Index']
    marker_seen = False
    for line in data:
        if marker_seen:
            kept.append(line)
        else:
            marker_seen = line.startswith('## Available Pages:')
    # Everything from the generator footer onwards is noise.
    kept = list(itertools.takewhile(lambda ln: 'Document generated by ' not in ln, kept))
    text = re.sub(r'\n\s*\n', '\n\n', '\n'.join(kept))
    return text.strip('\n') + '\n'
def filter_leaf(data: List[str], assets: List[str]) -> str:
    """Clean a single exported Confluence page for markdown conversion.

    Rewrites the title line, normalizes code fences, converts <img> tags to
    an attribute shorthand, drops leftover span/author noise and the
    attachments/footer sections. Image sources encountered are appended to
    ``assets`` (mutated in place) so the caller can copy them later.
    """
    lines = [line for line in data if 'Created by <span class="author"> ' not in line]
    for ndx, line in enumerate(lines):
        if '# <span id="title-text">' in line:
            # Collapse the Confluence title span into a plain '# Title'.
            prefix, title_plus = line.split('# <span id="title-text">', 1)
            title = title_plus.strip().rstrip('</span>').strip()
            lines[ndx] = f'{prefix}# {title}'
        elif line.startswith('``` syntaxhighlighter-pre'):
            lines[ndx] = '```'
        elif '"><img src="' in line:
            # Replace the <img ...> tag with a {width:.., height:.., center:..} stub.
            later_head, image_tail = line.split('"><img src="', 1)
            later = f'{later_head}>\n'
            src, rest = image_tail.split('" ', 1)
            if src not in assets:
                assets.append(src)
            try:
                _, height_plus = rest.split('data-height="', 1)
                height, width_plus = height_plus.split('" data-width="', 1)
                width, _ = width_plus.split('" ', 1)
            except ValueError as err:
                print(' ... note: survived image parsing with crash, using defaults. details:', err)
                height, width, center = '42', '42', False
            center = 'image-center' in line
            span_tail = line.endswith('</span>')
            attributes = f'width:{width}, height:{height}, center:{"true" if center else "false"}'
            image = f'{{{attributes}}}'
            lines[ndx] = later + image
            if span_tail:
                lines[ndx] += '\n</span>'
    # The rewrites above may have embedded newlines; re-split into real lines.
    resplit = []
    for line in lines:
        if '\n' not in line:
            resplit.append(line)
        else:
            for lin in line.split('\n'):
                resplit.append(lin)
    lines = [
        line
        for line in resplit
        if not line.startswith('<span') and not line.startswith('class="') and line.strip() != '</span>'
    ]
    lines = [line for line in itertools.takewhile(lambda x: 'Document generated by ' not in x, lines)]
    text = '\n'.join(line for line in itertools.takewhile(lambda x: not x.startswith('## Attachments:'), lines))
    text = re.sub(r'\n\s*\n', '\n\n', text)  # squeeze space-only and blank lines
    text = text.lstrip('\n')  # no leading new lines
    text = text.rstrip('\n') + '\n'  # single new line at end of file
    return text
def main(argv: Union[List[str], None] = None) -> int:
    """Drive the translation.

    Expects ``argv == [command, zip_path]`` (validated by ``verify_request``):
    unpacks the HTML export zip, converts every page to GitHub-flavoured
    markdown below ``MD_ROOT`` via pandoc, copies referenced image assets,
    and finally runs mdformat over the generated files. Returns a process
    exit code (0 on success).
    """
    error, message, strings = verify_request(argv)
    if error:
        print(message, file=sys.stderr)
        return error
    command, inp = strings
    if not zipfile.is_zipfile(inp):
        print('wrong magic number in zipfile')
        return 1
    tasks = []
    with zipfile.ZipFile(inp, 'r') as zipper:
        # Flag (but do not reject) entries that look like path-traversal attempts.
        alerts = []
        print(f'analyzing zip file listing of ({inp})')
        for name in zipper.namelist():
            if not name[0].isidentifier() or '..' in name:
                alerts.append(f'suspicious entry ({name}) will be skipped')
        if alerts:
            print(f'found {len(alerts)} suspicious entries in zip file ({inp}):')
            for alert in alerts:
                print(f'- {alert}')
            # return 1
        asset_source_root = ''
        assets: List[str] = []
        with tempfile.TemporaryDirectory() as unpack:
            print(f'unpacking zip file below ({unpack})')
            zipper.extractall(path=unpack)
            print(f'traversing folder ({unpack})')
            # Collect every .html page; remember where the attachments live.
            for place in sorted(pathlib.Path(unpack).glob('**')):
                print(f'* {place}')
                for thing in sorted(place.iterdir()):
                    if thing.is_dir():
                        if not asset_source_root and thing.name == 'attachments':
                            asset_source_root = str(thing.parent)
                        continue
                    if thing.suffixes[-1] == '.html':
                        tasks.append(thing)
                        print(f'  - {thing}')
            out_root = MD_ROOT
            print(f'translating html tree from ({inp if inp else STDIN}) into markdown tree below {out_root}')
            # index.html is the mandatory entry page of the export.
            start = None
            for task in tasks:
                if task.name == 'index.html':
                    start = task
                    break
            for task in tasks:
                marker = ' *' if task == start else ''
                print(f'- {task}{marker}')
            if not start:
                print('did not find start target')
                return 1
            index_path = out_root / 'index.md'
            index_path.parent.mkdir(parents=True, exist_ok=True)
            output = pypandoc.convert_file(str(start), 'gfm', outputfile=str(index_path))
            assert output == ''
            with open(index_path, 'rt', encoding=ENCODING) as handle:
                text = filter_index(
                    [line.rstrip() for line in handle.readlines() if '</div>' not in line and '<div ' not in line]
                )
            with open(index_path, 'wt', encoding=ENCODING) as handle:
                handle.write(text)
            # Convert every remaining page, collecting referenced assets.
            for task in tasks:
                if task == start:
                    continue
                task_path = out_root / task.name.replace('html', 'md')
                output = pypandoc.convert_file(str(task), 'gfm', outputfile=str(task_path))
                assert output == ''
                with open(task_path, 'rt', encoding=ENCODING) as handle:
                    text = filter_leaf(
                        [line.rstrip() for line in handle.readlines() if '</div>' not in line and '<div ' not in line],
                        assets,
                    )
                with open(task_path, 'wt', encoding=ENCODING) as handle:
                    handle.write(text + '\n')
            # Push the media assets (so the md format does not remove the links)
            if assets:
                nr_assets = len(assets)
                print(f'imported {nr_assets} distinct asset{"" if nr_assets == 1 else "s"}:')
                for asset in assets:
                    print(f'- {asset}')
                    asset_source = pathlib.Path(asset_source_root) / asset
                    asset_path = out_root / asset
                    asset_path.parent.mkdir(parents=True, exist_ok=True)
                    try:
                        shutil.copyfile(asset_source, asset_path)
                    except FileNotFoundError as err:
                        print(' ... note: survived wrongly parsed file source path on shutil copy. details:', err)
            # Format the markdown
            for task in tasks:
                task_path = out_root / task.name.replace('html', 'md')
                mdformat.file(task_path, options={'number': True, 'wrap': 142})
    print(f'markdown tree is below ({out_root})')
    return 0
| 38.718062 | 119 | 0.544203 |
import itertools
import os
import pathlib
import re
import shutil
import sys
import tempfile
import zipfile
from typing import List, Optional, Tuple, Union
import mdformat
import pypandoc
DEBUG_VAR = 'KOHTAAMINEN_DEBUG'
DEBUG = os.getenv(DEBUG_VAR)
ENCODING = 'utf-8'
ENCODING_ERRORS_POLICY = 'ignore'
STDIN, STDOUT = 'STDIN', 'STDOUT'
DISPATCH = {
STDIN: sys.stdin,
STDOUT: sys.stdout,
}
MD_ROOT = pathlib.Path('kohtaaminen-md')
def verify_request(argv: Optional[List[str]]) -> Tuple[int, str, List[str]]:
    """Validate CLI arguments; return (exit_code, error_message, argv)."""
    if not argv or len(argv) != 2:
        return 2, 'received wrong number of arguments', ['']
    command, inp = argv
    # BUG FIX: ('translate') is a plain string, so `in` performed a substring
    # test and accepted e.g. 'trans'; use a real tuple of valid commands.
    if command not in ('translate',):
        return 2, 'received unknown command', ['']
    if inp:
        in_path = pathlib.Path(str(inp))
        if not in_path.is_file():
            return 1, f'source ({in_path}) is no file', ['']
        if not ''.join(in_path.suffixes).lower().endswith('.zip'):
            return 1, 'source has not .zip extension', ['']
    return 0, '', argv
def filter_index(data: List[str]) -> str:
less_noisy_lines = ['# Index']
noise_end_token, noise_end_seen = '## Available Pages:', False
for line in data:
if noise_end_seen:
less_noisy_lines.append(line)
else:
noise_end_seen = line.startswith(noise_end_token)
text = '\n'.join(
line for line in itertools.takewhile(lambda x: 'Document generated by ' not in x, less_noisy_lines)
)
text = re.sub(r'\n\s*\n', '\n\n', text)
text = text.lstrip('\n')
text = text.rstrip('\n') + '\n'
return text
def filter_leaf(data: List[str], assets: List[str]) -> str:
lines = [line for line in data if 'Created by <span class="author"> ' not in line]
for ndx, line in enumerate(lines):
if '# <span id="title-text">' in line:
prefix, title_plus = line.split('# <span id="title-text">', 1)
title = title_plus.strip().rstrip('</span>').strip()
lines[ndx] = f'{prefix}# {title}'
elif line.startswith('``` syntaxhighlighter-pre'):
lines[ndx] = '```'
elif '"><img src="' in line:
later_head, image_tail = line.split('"><img src="', 1)
later = f'{later_head}>\n'
src, rest = image_tail.split('" ', 1)
if src not in assets:
assets.append(src)
try:
_, height_plus = rest.split('data-height="', 1)
height, width_plus = height_plus.split('" data-width="', 1)
width, _ = width_plus.split('" ', 1)
except ValueError as err:
print(' ... note: survived image parsing with crash, using defaults. details:', err)
height, width, center = '42', '42', False
center = 'image-center' in line
span_tail = line.endswith('</span>')
attributes = f'width:{width}, height:{height}, center:{"true" if center else "false"}'
image = f'{{{attributes}}}'
lines[ndx] = later + image
if span_tail:
lines[ndx] += '\n</span>'
resplit = []
for line in lines:
if '\n' not in line:
resplit.append(line)
else:
for lin in line.split('\n'):
resplit.append(lin)
lines = [
line
for line in resplit
if not line.startswith('<span') and not line.startswith('class="') and line.strip() != '</span>'
]
lines = [line for line in itertools.takewhile(lambda x: 'Document generated by ' not in x, lines)]
text = '\n'.join(line for line in itertools.takewhile(lambda x: not x.startswith('## Attachments:'), lines))
text = re.sub(r'\n\s*\n', '\n\n', text)
text = text.lstrip('\n')
text = text.rstrip('\n') + '\n'
return text
def main(argv: Union[List[str], None] = None) -> int:
error, message, strings = verify_request(argv)
if error:
print(message, file=sys.stderr)
return error
command, inp = strings
if not zipfile.is_zipfile(inp):
print('wrong magic number in zipfile')
return 1
tasks = []
with zipfile.ZipFile(inp, 'r') as zipper:
alerts = []
print(f'analyzing zip file listing of ({inp})')
for name in zipper.namelist():
if not name[0].isidentifier() or '..' in name:
alerts.append(f'suspicious entry ({name}) will be skipped')
if alerts:
print(f'found {len(alerts)} suspicious entries in zip file ({inp}):')
for alert in alerts:
print(f'- {alert}')
asset_source_root = ''
assets: List[str] = []
with tempfile.TemporaryDirectory() as unpack:
print(f'unpacking zip file below ({unpack})')
zipper.extractall(path=unpack)
print(f'traversing folder ({unpack})')
for place in sorted(pathlib.Path(unpack).glob('**')):
print(f'* {place}')
for thing in sorted(place.iterdir()):
if thing.is_dir():
if not asset_source_root and thing.name == 'attachments':
asset_source_root = str(thing.parent)
continue
if thing.suffixes[-1] == '.html':
tasks.append(thing)
print(f' - {thing}')
out_root = MD_ROOT
print(f'translating html tree from ({inp if inp else STDIN}) into markdown tree below {out_root}')
start = None
for task in tasks:
if task.name == 'index.html':
start = task
break
for task in tasks:
marker = ' *' if task == start else ''
print(f'- {task}{marker}')
if not start:
print('did not find start target')
return 1
index_path = out_root / 'index.md'
index_path.parent.mkdir(parents=True, exist_ok=True)
output = pypandoc.convert_file(str(start), 'gfm', outputfile=str(index_path))
assert output == ''
with open(index_path, 'rt', encoding=ENCODING) as handle:
text = filter_index(
[line.rstrip() for line in handle.readlines() if '</div>' not in line and '<div ' not in line]
)
with open(index_path, 'wt', encoding=ENCODING) as handle:
handle.write(text)
for task in tasks:
if task == start:
continue
task_path = out_root / task.name.replace('html', 'md')
output = pypandoc.convert_file(str(task), 'gfm', outputfile=str(task_path))
assert output == ''
with open(task_path, 'rt', encoding=ENCODING) as handle:
text = filter_leaf(
[line.rstrip() for line in handle.readlines() if '</div>' not in line and '<div ' not in line],
assets,
)
with open(task_path, 'wt', encoding=ENCODING) as handle:
handle.write(text + '\n')
if assets:
nr_assets = len(assets)
print(f'imported {nr_assets} distinct asset{"" if nr_assets == 1 else "s"}:')
for asset in assets:
print(f'- {asset}')
asset_source = pathlib.Path(asset_source_root) / asset
asset_path = out_root / asset
asset_path.parent.mkdir(parents=True, exist_ok=True)
try:
shutil.copyfile(asset_source, asset_path)
except FileNotFoundError as err:
print(' ... note: survived wrongly parsed file source path on shutil copy. details:', err)
for task in tasks:
task_path = out_root / task.name.replace('html', 'md')
mdformat.file(task_path, options={'number': True, 'wrap': 142})
print(f'markdown tree is below ({out_root})')
return 0
| true | true |
1c3260c0e2eecca41e5e9561c9a6cc46d9880888 | 397 | py | Python | app/Images_DRF/wsgi.py | MateuszZalewski/images_drf | 8e4b3012ef6bce290d013db9c2318d1badb99b17 | [
"MIT"
] | null | null | null | app/Images_DRF/wsgi.py | MateuszZalewski/images_drf | 8e4b3012ef6bce290d013db9c2318d1badb99b17 | [
"MIT"
] | null | null | null | app/Images_DRF/wsgi.py | MateuszZalewski/images_drf | 8e4b3012ef6bce290d013db9c2318d1badb99b17 | [
"MIT"
] | null | null | null | """
WSGI config for Images_DRF project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the WSGI handler is created.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Images_DRF.settings')
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Images_DRF.settings')
application = get_wsgi_application()
| true | true |
1c3260f34fa9e0aad04e8d0bc77a99f301bb1242 | 933 | py | Python | sentsplit/regexes.py | zaemyung/sentsplit | cce34e1ed372b6a79c739f42334c775581fc0de8 | [
"MIT"
] | 10 | 2021-03-26T19:34:45.000Z | 2021-11-23T17:36:45.000Z | sentsplit/regexes.py | zaemyung/sentsplit | cce34e1ed372b6a79c739f42334c775581fc0de8 | [
"MIT"
] | 10 | 2021-03-26T17:27:49.000Z | 2021-11-16T17:09:19.000Z | sentsplit/regexes.py | zaemyung/sentsplit | cce34e1ed372b6a79c739f42334c775581fc0de8 | [
"MIT"
] | null | null | null | """
`segment_regexes`: make sure a string is segmented at either the start or end of the matching group(s)
"""
# Split at the end of an explicit newline character.
newline = {
    'name': 'newline',
    'regex': r'\n',
    'at': 'end'
}
# An ellipsis not followed by further terminal punctuation ends a segment.
ellipsis = {
    'name': 'ellipsis',
    'regex': r'…(?![\!\?\..?!])',
    'at': 'end'
}
# Split right after a semicolon (optionally preceded by spaces).
after_semicolon = {
    'name': 'after_semicolon',
    'regex': r' *;',
    'at': 'end'
}
"""
`prevent_regexes`: make sure a string is not segmented at characters that fall within the matching group(s)
"""
liberal_url = {
    # ref. https://gist.github.com/gruber/249502#gistcomment-1328838
    # Matches most URL shapes so sentences are never split inside a URL.
    'name': 'liberal_url',
    'regex': r'\b((?:[a-z][\w\-]+:(?:\/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}\/)(?:[^\s()<>]|\((?:[^\s()<>]|(?:\([^\s()<>]+\)))*\))+(?:\((?:[^\s()<>]|(?:\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))'
}
# A period followed by a lowercase letter is unlikely to end a sentence.
period_followed_by_lowercase = {
    'name': 'period_followed_by_lowercase',
    'regex': r'\.(?= *[a-z])'
}
| 25.916667 | 225 | 0.487674 | newline = {
'name': 'newline',
'regex': r'\n',
'at': 'end'
}
ellipsis = {
'name': 'ellipsis',
'regex': r'…(?![\!\?\..?!])',
'at': 'end'
}
after_semicolon = {
'name': 'after_semicolon',
'regex': r' *;',
'at': 'end'
}
liberal_url = {
    # Prevents sentence splits inside URLs; pattern adapted from
    # https://gist.github.com/gruber/249502#gistcomment-1328838
    # BUG FIX: the 'name' key line had been corrupted to "l_url'," -- restored.
    'name': 'liberal_url',
    'regex': r'\b((?:[a-z][\w\-]+:(?:\/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}\/)(?:[^\s()<>]|\((?:[^\s()<>]|(?:\([^\s()<>]+\)))*\))+(?:\((?:[^\s()<>]|(?:\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))'
}
period_followed_by_lowercase = {
'name': 'period_followed_by_lowercase',
'regex': r'\.(?= *[a-z])'
}
| true | true |
1c3261fa9f8dfffa8f5f181f97cefb2281e6fd79 | 13,511 | py | Python | al_utils/vae_sampling.py | PrateekMunjal/TorchAL | ec60b093333c66e4c8862d128d81680060fddf36 | [
"MIT"
] | null | null | null | al_utils/vae_sampling.py | PrateekMunjal/TorchAL | ec60b093333c66e4c8862d128d81680060fddf36 | [
"MIT"
] | null | null | null | al_utils/vae_sampling.py | PrateekMunjal/TorchAL | ec60b093333c66e4c8862d128d81680060fddf36 | [
"MIT"
] | null | null | null | import torch
import os
import math
import numpy as np
from copy import deepcopy
from pycls.core.config import cfg
import pycls.utils.distributed as du
from tqdm import tqdm
class AdversarySampler:
    def __init__(self, budget):
        """Store the acquisition budget and remember the current CUDA device.

        NOTE(review): torch.cuda.current_device() requires CUDA to be
        available -- this class cannot be constructed on a CPU-only host.
        """
        self.budget = budget
        self.cuda_id = torch.cuda.current_device()
def compute_dists(self, X, X_train):
dists = (
-2 * np.dot(X, X_train.T)
+ np.sum(X_train**2, axis=1)
+ np.sum(X**2, axis=1)[:, np.newaxis]
)
return dists
def greedy_k_center(self, labeled, unlabeled):
greedy_indices = []
# get the minimum distances between the labeled and unlabeled examples (iteratively, to avoid memory issues):
min_dist = np.min(
self.compute_dists(labeled[0, :].reshape((1, labeled.shape[1])), unlabeled),
axis=0,
)
min_dist = min_dist.reshape((1, min_dist.shape[0]))
temp_range = 1000
for j in range(1, labeled.shape[0], temp_range):
if j + temp_range < labeled.shape[0]:
dist = self.compute_dists(labeled[j : j + temp_range, :], unlabeled)
else:
# for last iteration only :)
dist = self.compute_dists(labeled[j:, :], unlabeled)
# dist = pairwise_distances(labeled[j:, :], unlabeled,metric='euclidean')
min_dist = np.vstack(
(min_dist, np.min(dist, axis=0).reshape((1, min_dist.shape[1])))
)
min_dist = np.min(min_dist, axis=0)
min_dist = min_dist.reshape((1, min_dist.shape[0]))
# iteratively insert the farthest index and recalculate the minimum distances:
farthest = np.argmax(min_dist)
greedy_indices.append(farthest)
amount = cfg.ACTIVE_LEARNING.BUDGET_SIZE - 1
for i in range(amount):
if i is not 0 and i % 500 == 0:
print("{} Sampled out of {}".format(i, amount + 1))
# dist = pairwise_distances(unlabeled[greedy_indices[-1], :].reshape((1,unlabeled.shape[1])), unlabeled, metric='euclidean')
dist = self.compute_dists(
unlabeled[greedy_indices[-1], :].reshape((1, unlabeled.shape[1])),
unlabeled,
)
min_dist = np.vstack((min_dist, dist.reshape((1, min_dist.shape[1]))))
min_dist = np.min(min_dist, axis=0)
min_dist = min_dist.reshape((1, min_dist.shape[0]))
farthest = np.argmax(min_dist)
greedy_indices.append(farthest)
remainSet = set(np.arange(unlabeled.shape[0])) - set(greedy_indices)
remainSet = np.array(list(remainSet))
return greedy_indices, remainSet
def get_vae_activations(self, vae, dataLoader):
acts = []
vae.eval()
temp_max_iter = len(dataLoader)
print("len(dataloader): {}".format(temp_max_iter))
temp_iter = 0
for x, y in dataLoader:
x = x.type(torch.cuda.FloatTensor)
x = x.cuda(self.cuda_id)
_, _, mu, _ = vae(x)
acts.append(mu.cpu().numpy())
if temp_iter % 100 == 0:
print(f"Iteration [{temp_iter}/{temp_max_iter}] Done!!")
temp_iter += 1
acts = np.concatenate(acts, axis=0)
return acts
def get_predictions(self, vae, discriminator, data, cuda):
all_preds = []
all_indices = []
assert vae.training == False, "Expected vae model to be in eval mode"
assert (
discriminator.training == False
), "Expected discriminator model to be in eval mode"
temp_idx = 0
for images, _ in data:
if cuda:
images = images.cuda()
with torch.no_grad():
_, _, mu, _ = vae(images)
preds = discriminator(mu)
preds = preds.cpu().data
all_preds.extend(preds)
temp_idx += images.shape[0]
all_indices = np.arange(temp_idx)
all_preds = torch.stack(all_preds)
all_preds = all_preds.view(-1)
all_preds = all_preds.cpu().numpy()
return all_preds
def gpu_compute_dists(self, M1, M2):
"""
Computes L2 norm square on gpu
Assume
M1: M x D matrix
M2: N x D matrix
output: M x N matrix
"""
# print(f"Function call to gpu_compute dists; M1: {M1.shape} and M2: {M2.shape}")
M1_norm = (M1**2).sum(1).reshape(-1, 1)
M2_t = torch.transpose(M2, 0, 1)
M2_norm = (M2**2).sum(1).reshape(1, -1)
dists = M1_norm + M2_norm - 2.0 * torch.mm(M1, M2_t)
return dists
def efficient_compute_dists(self, labeled, unlabeled):
""" """
N_L = labeled.shape[0]
N_U = unlabeled.shape[0]
dist_matrix = None
temp_range = 1000
unlabeled = torch.from_numpy(unlabeled).cuda(self.cuda_id)
temp_dist_matrix = np.empty((N_U, temp_range))
# for i in range(0, N_L, temp_range):
for i in tqdm(range(0, N_L, temp_range), desc="Computing Distance Matrix"):
end_index = i + temp_range if i + temp_range < N_L else N_L
temp_labeled = labeled[i:end_index, :]
temp_labeled = torch.from_numpy(temp_labeled).cuda(self.cuda_id)
temp_dist_matrix = self.gpu_compute_dists(unlabeled, temp_labeled)
temp_dist_matrix = torch.min(temp_dist_matrix, dim=1)[0]
temp_dist_matrix = torch.reshape(
temp_dist_matrix, (temp_dist_matrix.shape[0], 1)
)
if dist_matrix is None:
dist_matrix = temp_dist_matrix
else:
dist_matrix = torch.cat((dist_matrix, temp_dist_matrix), dim=1)
dist_matrix = torch.min(dist_matrix, dim=1)[0]
dist_matrix = torch.reshape(dist_matrix, (dist_matrix.shape[0], 1))
return dist_matrix.cpu().numpy()
@torch.no_grad()
def vae_sample_for_labeling(
self, vae, uSet, lSet, unlabeled_dataloader, lSetLoader
):
vae.eval()
print("Computing activattions for uset....")
u_scores = self.get_vae_activations(vae, unlabeled_dataloader)
print("Computing activattions for lset....")
l_scores = self.get_vae_activations(vae, lSetLoader)
print("l_scores.shape: ", l_scores.shape)
print("u_scores.shape: ", u_scores.shape)
# dist_matrix = self.compute_dists(u_scores, l_scores)
dist_matrix = self.efficient_compute_dists(l_scores, u_scores)
print("Dist_matrix.shape: ", dist_matrix.shape)
min_scores = np.min(dist_matrix, axis=1)
sorted_idx = np.argsort(min_scores)[::-1]
activeSet = uSet[sorted_idx[0 : self.budget]]
remainSet = uSet[sorted_idx[self.budget :]]
return activeSet, remainSet
def sample_vaal_plus(self, vae, disc_task, data, cuda):
all_preds = []
all_indices = []
assert vae.training == False, "Expected vae model to be in eval mode"
assert (
disc_task.training == False
), "Expected disc_task model to be in eval mode"
temp_idx = 0
for images, _ in data:
if cuda:
images = images.cuda()
with torch.no_grad():
_, _, mu, _ = vae(images)
preds, _ = disc_task(mu)
preds = preds.cpu().data
all_preds.extend(preds)
temp_idx += images.shape[0]
all_indices = np.arange(temp_idx)
all_preds = torch.stack(all_preds)
all_preds = all_preds.view(-1)
# need to multiply by -1 to be able to use torch.topk
all_preds *= -1
# select the points which the discriminator things are the most likely to be unlabeled
_, querry_indices = torch.topk(all_preds, int(self.budget))
querry_indices = querry_indices.numpy()
remain_indices = np.asarray(list(set(all_indices) - set(querry_indices)))
assert len(remain_indices) + len(querry_indices) == len(
all_indices
), " Indices are overlapped between activeSet and uSet"
activeSet = all_indices[querry_indices]
uSet = all_indices[remain_indices]
return activeSet, uSet
def sample(self, vae, discriminator, data, uSet, cfg):
all_preds = []
all_indices = []
assert vae.training == False, "Expected vae model to be in eval mode"
assert (
discriminator.training == False
), "Expected discriminator model to be in eval mode"
temp_idx = 0
for images, _ in tqdm(data, desc="Constructing VAE ActiveSet"):
images = images.type(torch.cuda.FloatTensor)
images = images.cuda()
with torch.no_grad():
_, _, mu, _ = vae(images)
preds = discriminator(mu)
preds = preds.cpu().data
all_preds.extend(preds)
temp_idx += images.shape[0]
all_indices = np.arange(temp_idx)
all_preds = torch.stack(all_preds)
all_preds = all_preds.view(-1)
scores_save_path = cfg.OUT_DIR
os.makedirs(scores_save_path, exist_ok=True) # just to be safe
with open(os.path.join(scores_save_path, "actualScores.txt"), "w") as fpw:
for temp_idx, temp_rank in zip(uSet, all_preds):
fpw.write(f"{temp_idx}\t{temp_rank:.6f}\n")
fpw.close()
# need to multiply by -1 to be able to use torch.topk
all_preds *= -1
# select the points which the discriminator things are the most likely to be unlabeled
_, querry_indices = torch.topk(all_preds, int(self.budget))
querry_indices = querry_indices.numpy()
remain_indices = np.asarray(list(set(all_indices) - set(querry_indices)))
assert len(remain_indices) + len(querry_indices) == len(
all_indices
), " Indices are overlapped between activeSet and uSet"
activeSet = all_indices[querry_indices]
uSet = all_indices[remain_indices]
return activeSet, uSet
# def sample_for_labeling(self, cfg, uSetPath, lSetPath, dataObj, noAugDataset):
# """
# Picks samples from uSet to form activeSet.
# INPUT
# ------
# vae: object of model VAE
# discriminator: object of model discriminator
# unlabeled_dataloader: Sequential dataloader iterating over uSet
# uSet: Collection of unlabelled datapoints
# NOTE: Please pass the unlabelled dataloader as sequential dataloader else the
# results won't be appropriate.
# OUTPUT
# -------
# Returns activeSet, [remaining]uSet
# """
# current_device = torch.cuda.current_device()
# #Load vae -- out_dir/vae.pyth
# vae_dir = os.path.join(cfg.OUT_DIR, "vae/vae.pyth")
# #Load disc -- out_dir/disc.pyth
# disc_dir = os.path.join(cfg.OUT_DIR, "disc/disc.pyth")
# #Get uSet form uSetPath
# uSet = np.load(uSetPath, allow_pickle=True)
# #Get uSetLoader from uSet
# uSetLoader = dataObj.getSequentialDataLoader(indexes=uSet,batch_size=int(cfg.TRAIN.BATCH_SIZE/cfg.NUM_GPUS),\
# data=noAugDataset)
# #load vae from vae_dir
# vae_checkpoint = None#load from vae_dir
# vae = torch.load(vae_checkpoint['model'], map_location='cpu')
# vae.cuda(current_device)
# #load disc from disc_dir
# disc_checkpoint = None
# disc = torch.load(disc_checkpoint['model'], map_location='cpu')
# disc.cuda(current_device)
# sampler = AdversarySampler(cfg.ACTIVE_LEARNING.BUDGET_SIZE)
# activeSet, remainSet = sampler.sample(vae, disc, uSetLoader)
# activeSet = uSet[activeSet]
# remainSet = uSet[remainSet]
# return activeSet, remainSet
@torch.no_grad()
def sample_for_labeling(self, vae, discriminator, unlabeled_dataloader, uSet, cfg):
"""
Picks samples from uSet to form activeSet.
INPUT
------
vae: object of model VAE
discriminator: object of model discriminator
unlabeled_dataloader: Sequential dataloader iterating over uSet
uSet: Collection of unlabelled datapoints
NOTE: Please pass the unlabelled dataloader as sequential dataloader else the
results won't be appropriate.
OUTPUT
-------
Returns activeSet, [remaining]uSet
"""
print("Sampling....")
activeSet, remainSet = self.sample(
vae,
discriminator,
unlabeled_dataloader,
uSet,
cfg,
)
activeSet = uSet[activeSet]
remainSet = uSet[remainSet]
return activeSet, remainSet
# def vaal_sampling(self, cfg, uSetPath, lSetPath, dataObj, noAugDataset):
# lSet = np.load(lSetPath, allow_pickle=True)
# uSet = np.load(uSetPath, allow_pickle=True)
# activeSet, remainSet = self.sample_for_labeling(cfg, uSetPath, lSetPath, dataObj, noAugDataset)
# lSet = np.append(lSet, activeSet)
# uSet = remainSet
# #save all sets
# np.save(os.path.join(cfg.OUT_DIR, "lSet.npy"), lSet)
# np.save(os.path.join(cfg.OUT_DIR, "uSet.npy"), uSet)
# np.save(os.path.join(cfg.OUT_DIR, "activeSet.npy"), activeSet)
| 35.36911 | 136 | 0.595589 | import torch
import os
import math
import numpy as np
from copy import deepcopy
from pycls.core.config import cfg
import pycls.utils.distributed as du
from tqdm import tqdm
class AdversarySampler:
def __init__(self, budget):
self.budget = budget
self.cuda_id = torch.cuda.current_device()
def compute_dists(self, X, X_train):
dists = (
-2 * np.dot(X, X_train.T)
+ np.sum(X_train**2, axis=1)
+ np.sum(X**2, axis=1)[:, np.newaxis]
)
return dists
def greedy_k_center(self, labeled, unlabeled):
greedy_indices = []
min_dist = np.min(
self.compute_dists(labeled[0, :].reshape((1, labeled.shape[1])), unlabeled),
axis=0,
)
min_dist = min_dist.reshape((1, min_dist.shape[0]))
temp_range = 1000
for j in range(1, labeled.shape[0], temp_range):
if j + temp_range < labeled.shape[0]:
dist = self.compute_dists(labeled[j : j + temp_range, :], unlabeled)
else:
dist = self.compute_dists(labeled[j:, :], unlabeled)
min_dist = np.vstack(
(min_dist, np.min(dist, axis=0).reshape((1, min_dist.shape[1])))
)
min_dist = np.min(min_dist, axis=0)
min_dist = min_dist.reshape((1, min_dist.shape[0]))
farthest = np.argmax(min_dist)
greedy_indices.append(farthest)
amount = cfg.ACTIVE_LEARNING.BUDGET_SIZE - 1
for i in range(amount):
if i is not 0 and i % 500 == 0:
print("{} Sampled out of {}".format(i, amount + 1))
dist = self.compute_dists(
unlabeled[greedy_indices[-1], :].reshape((1, unlabeled.shape[1])),
unlabeled,
)
min_dist = np.vstack((min_dist, dist.reshape((1, min_dist.shape[1]))))
min_dist = np.min(min_dist, axis=0)
min_dist = min_dist.reshape((1, min_dist.shape[0]))
farthest = np.argmax(min_dist)
greedy_indices.append(farthest)
remainSet = set(np.arange(unlabeled.shape[0])) - set(greedy_indices)
remainSet = np.array(list(remainSet))
return greedy_indices, remainSet
def get_vae_activations(self, vae, dataLoader):
acts = []
vae.eval()
temp_max_iter = len(dataLoader)
print("len(dataloader): {}".format(temp_max_iter))
temp_iter = 0
for x, y in dataLoader:
x = x.type(torch.cuda.FloatTensor)
x = x.cuda(self.cuda_id)
_, _, mu, _ = vae(x)
acts.append(mu.cpu().numpy())
if temp_iter % 100 == 0:
print(f"Iteration [{temp_iter}/{temp_max_iter}] Done!!")
temp_iter += 1
acts = np.concatenate(acts, axis=0)
return acts
def get_predictions(self, vae, discriminator, data, cuda):
all_preds = []
all_indices = []
assert vae.training == False, "Expected vae model to be in eval mode"
assert (
discriminator.training == False
), "Expected discriminator model to be in eval mode"
temp_idx = 0
for images, _ in data:
if cuda:
images = images.cuda()
with torch.no_grad():
_, _, mu, _ = vae(images)
preds = discriminator(mu)
preds = preds.cpu().data
all_preds.extend(preds)
temp_idx += images.shape[0]
all_indices = np.arange(temp_idx)
all_preds = torch.stack(all_preds)
all_preds = all_preds.view(-1)
all_preds = all_preds.cpu().numpy()
return all_preds
def gpu_compute_dists(self, M1, M2):
M1_norm = (M1**2).sum(1).reshape(-1, 1)
M2_t = torch.transpose(M2, 0, 1)
M2_norm = (M2**2).sum(1).reshape(1, -1)
dists = M1_norm + M2_norm - 2.0 * torch.mm(M1, M2_t)
return dists
def efficient_compute_dists(self, labeled, unlabeled):
N_L = labeled.shape[0]
N_U = unlabeled.shape[0]
dist_matrix = None
temp_range = 1000
unlabeled = torch.from_numpy(unlabeled).cuda(self.cuda_id)
temp_dist_matrix = np.empty((N_U, temp_range))
for i in tqdm(range(0, N_L, temp_range), desc="Computing Distance Matrix"):
end_index = i + temp_range if i + temp_range < N_L else N_L
temp_labeled = labeled[i:end_index, :]
temp_labeled = torch.from_numpy(temp_labeled).cuda(self.cuda_id)
temp_dist_matrix = self.gpu_compute_dists(unlabeled, temp_labeled)
temp_dist_matrix = torch.min(temp_dist_matrix, dim=1)[0]
temp_dist_matrix = torch.reshape(
temp_dist_matrix, (temp_dist_matrix.shape[0], 1)
)
if dist_matrix is None:
dist_matrix = temp_dist_matrix
else:
dist_matrix = torch.cat((dist_matrix, temp_dist_matrix), dim=1)
dist_matrix = torch.min(dist_matrix, dim=1)[0]
dist_matrix = torch.reshape(dist_matrix, (dist_matrix.shape[0], 1))
return dist_matrix.cpu().numpy()
@torch.no_grad()
def vae_sample_for_labeling(
self, vae, uSet, lSet, unlabeled_dataloader, lSetLoader
):
vae.eval()
print("Computing activattions for uset....")
u_scores = self.get_vae_activations(vae, unlabeled_dataloader)
print("Computing activattions for lset....")
l_scores = self.get_vae_activations(vae, lSetLoader)
print("l_scores.shape: ", l_scores.shape)
print("u_scores.shape: ", u_scores.shape)
dist_matrix = self.efficient_compute_dists(l_scores, u_scores)
print("Dist_matrix.shape: ", dist_matrix.shape)
min_scores = np.min(dist_matrix, axis=1)
sorted_idx = np.argsort(min_scores)[::-1]
activeSet = uSet[sorted_idx[0 : self.budget]]
remainSet = uSet[sorted_idx[self.budget :]]
return activeSet, remainSet
def sample_vaal_plus(self, vae, disc_task, data, cuda):
all_preds = []
all_indices = []
assert vae.training == False, "Expected vae model to be in eval mode"
assert (
disc_task.training == False
), "Expected disc_task model to be in eval mode"
temp_idx = 0
for images, _ in data:
if cuda:
images = images.cuda()
with torch.no_grad():
_, _, mu, _ = vae(images)
preds, _ = disc_task(mu)
preds = preds.cpu().data
all_preds.extend(preds)
temp_idx += images.shape[0]
all_indices = np.arange(temp_idx)
all_preds = torch.stack(all_preds)
all_preds = all_preds.view(-1)
all_preds *= -1
_, querry_indices = torch.topk(all_preds, int(self.budget))
querry_indices = querry_indices.numpy()
remain_indices = np.asarray(list(set(all_indices) - set(querry_indices)))
assert len(remain_indices) + len(querry_indices) == len(
all_indices
), " Indices are overlapped between activeSet and uSet"
activeSet = all_indices[querry_indices]
uSet = all_indices[remain_indices]
return activeSet, uSet
def sample(self, vae, discriminator, data, uSet, cfg):
all_preds = []
all_indices = []
assert vae.training == False, "Expected vae model to be in eval mode"
assert (
discriminator.training == False
), "Expected discriminator model to be in eval mode"
temp_idx = 0
for images, _ in tqdm(data, desc="Constructing VAE ActiveSet"):
images = images.type(torch.cuda.FloatTensor)
images = images.cuda()
with torch.no_grad():
_, _, mu, _ = vae(images)
preds = discriminator(mu)
preds = preds.cpu().data
all_preds.extend(preds)
temp_idx += images.shape[0]
all_indices = np.arange(temp_idx)
all_preds = torch.stack(all_preds)
all_preds = all_preds.view(-1)
scores_save_path = cfg.OUT_DIR
os.makedirs(scores_save_path, exist_ok=True)
with open(os.path.join(scores_save_path, "actualScores.txt"), "w") as fpw:
for temp_idx, temp_rank in zip(uSet, all_preds):
fpw.write(f"{temp_idx}\t{temp_rank:.6f}\n")
fpw.close()
all_preds *= -1
_, querry_indices = torch.topk(all_preds, int(self.budget))
querry_indices = querry_indices.numpy()
remain_indices = np.asarray(list(set(all_indices) - set(querry_indices)))
assert len(remain_indices) + len(querry_indices) == len(
all_indices
), " Indices are overlapped between activeSet and uSet"
activeSet = all_indices[querry_indices]
uSet = all_indices[remain_indices]
return activeSet, uSet
# Picks samples from uSet to form activeSet.
# INPUT
# ------
# vae: object of model VAE
# discriminator: object of model discriminator
# unlabeled_dataloader: Sequential dataloader iterating over uSet
# uSet: Collection of unlabelled datapoints
# NOTE: Please pass the unlabelled dataloader as sequential dataloader else the
# results won't be appropriate.
# OUTPUT
# -------
# Returns activeSet, [remaining]uSet
# """
# current_device = torch.cuda.current_device()
# #Load vae -- out_dir/vae.pyth
# vae_dir = os.path.join(cfg.OUT_DIR, "vae/vae.pyth")
# #Load disc -- out_dir/disc.pyth
# disc_dir = os.path.join(cfg.OUT_DIR, "disc/disc.pyth")
# #Get uSet form uSetPath
# uSet = np.load(uSetPath, allow_pickle=True)
# #Get uSetLoader from uSet
# uSetLoader = dataObj.getSequentialDataLoader(indexes=uSet,batch_size=int(cfg.TRAIN.BATCH_SIZE/cfg.NUM_GPUS),\
# data=noAugDataset)
# #load vae from vae_dir
# vae_checkpoint = None#load from vae_dir
# vae = torch.load(vae_checkpoint['model'], map_location='cpu')
# vae.cuda(current_device)
# #load disc from disc_dir
# disc_checkpoint = None
# disc = torch.load(disc_checkpoint['model'], map_location='cpu')
# disc.cuda(current_device)
# sampler = AdversarySampler(cfg.ACTIVE_LEARNING.BUDGET_SIZE)
# activeSet, remainSet = sampler.sample(vae, disc, uSetLoader)
# activeSet = uSet[activeSet]
# remainSet = uSet[remainSet]
# return activeSet, remainSet
@torch.no_grad()
def sample_for_labeling(self, vae, discriminator, unlabeled_dataloader, uSet, cfg):
print("Sampling....")
activeSet, remainSet = self.sample(
vae,
discriminator,
unlabeled_dataloader,
uSet,
cfg,
)
activeSet = uSet[activeSet]
remainSet = uSet[remainSet]
return activeSet, remainSet
# def vaal_sampling(self, cfg, uSetPath, lSetPath, dataObj, noAugDataset):
# lSet = np.load(lSetPath, allow_pickle=True)
# uSet = np.load(uSetPath, allow_pickle=True)
# activeSet, remainSet = self.sample_for_labeling(cfg, uSetPath, lSetPath, dataObj, noAugDataset)
# lSet = np.append(lSet, activeSet)
# uSet = remainSet
# #save all sets
# np.save(os.path.join(cfg.OUT_DIR, "lSet.npy"), lSet)
# np.save(os.path.join(cfg.OUT_DIR, "uSet.npy"), uSet)
# np.save(os.path.join(cfg.OUT_DIR, "activeSet.npy"), activeSet)
| true | true |
1c326417f077d789ca1b7eb5a188712ad78f5c31 | 1,986 | py | Python | src/metabase_manager/registry.py | chasleslr/metabase-manager | 091d2146d08850f2020a3c8b1d23f8deb5dce888 | [
"MIT"
] | null | null | null | src/metabase_manager/registry.py | chasleslr/metabase-manager | 091d2146d08850f2020a3c8b1d23f8deb5dce888 | [
"MIT"
] | 19 | 2022-03-05T02:03:45.000Z | 2022-03-21T03:02:06.000Z | src/metabase_manager/registry.py | chasleslr/metabase-manager | 091d2146d08850f2020a3c8b1d23f8deb5dce888 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import List, Optional, Type
import metabase
from metabase import (
Database,
Field,
Metabase,
Metric,
PermissionGroup,
Segment,
Table,
User,
)
from metabase.resource import Resource
from metabase_manager.exceptions import DuplicateKeyError
@dataclass
class MetabaseRegistry:
    """In-memory cache of Metabase resources fetched through ``client``."""

    client: Metabase
    databases: List[Database] = field(default_factory=list)
    tables: List[Table] = field(default_factory=list)
    users: List[User] = field(default_factory=list)
    groups: List[PermissionGroup] = field(default_factory=list)
    fields: List[Field] = field(default_factory=list)
    metrics: List[Metric] = field(default_factory=list)
    segments: List[Segment] = field(default_factory=list)

    # Attribute name -> resource class; drives what ``cache`` can refresh.
    _REGISTRY = {
        "groups": PermissionGroup,
        "users": User,
    }

    @classmethod
    def get_registry_keys(cls) -> List[str]:
        """Return the attribute names that ``cache`` knows how to refresh."""
        return list(cls._REGISTRY.keys())

    def cache(self, select: List[str] = None, exclude: List[str] = None):
        """Fetch resources from Metabase and store them on this registry.

        Args:
            select: registry keys to refresh; defaults to every known key.
            exclude: registry keys to skip; defaults to none.
        """
        if not select:
            select = self.get_registry_keys()
        if exclude is None:
            # BUGFIX: was ``exclude = {}`` — an empty *dict* standing in for a
            # collection of keys; it only worked because set({}) is empty.
            exclude = []

        for key in set(select).difference(exclude):
            # Call .list() on the resource class mapped to each selected key
            # and store the result on the attribute of the same name.
            setattr(self, key, self._REGISTRY[key].list(using=self.client))

    def get_instances_for_object(self, obj: Type[Resource]) -> List[Resource]:
        """Return the cached instances for a resource class.

        Implicitly returns ``None`` for resource types this registry does
        not track (existing callers rely on that).
        """
        if obj == User:
            return self.users
        if obj == PermissionGroup:
            return self.groups

    def get_group_by_name(self, name: str) -> Optional[metabase.PermissionGroup]:
        """Return the cached group named ``name``, or ``None`` if absent.

        Raises:
            DuplicateKeyError: if more than one cached group shares the name.
        """
        groups = [group for group in self.groups if group.name == name]
        if len(groups) > 1:
            raise DuplicateKeyError(
                f"Found more than one group with the same name: {name}"
            )
        return next(iter(groups), None)
| 29.205882 | 81 | 0.646526 | from dataclasses import dataclass, field
from typing import List, Optional, Type
import metabase
from metabase import (
Database,
Field,
Metabase,
Metric,
PermissionGroup,
Segment,
Table,
User,
)
from metabase.resource import Resource
from metabase_manager.exceptions import DuplicateKeyError
@dataclass
class MetabaseRegistry:
client: Metabase
databases: List[Database] = field(default_factory=list)
tables: List[Table] = field(default_factory=list)
users: List[User] = field(default_factory=list)
groups: List[PermissionGroup] = field(default_factory=list)
fields: List[Field] = field(default_factory=list)
metrics: List[Metric] = field(default_factory=list)
segments: List[Segment] = field(default_factory=list)
_REGISTRY = {
"groups": PermissionGroup,
"users": User,
}
@classmethod
def get_registry_keys(cls) -> List[str]:
return list(cls._REGISTRY.keys())
def cache(self, select: List[str] = None, exclude: List[str] = None):
if not select:
select = self.get_registry_keys()
if exclude is None:
exclude = {}
for key in set(select).difference(set(exclude)):
setattr(self, key, self._REGISTRY[key].list(using=self.client))
def get_instances_for_object(self, obj: Type[Resource]) -> List[Resource]:
if obj == User:
return self.users
if obj == PermissionGroup:
return self.groups
def get_group_by_name(self, name: str) -> Optional[metabase.PermissionGroup]:
groups = list(filter(lambda g: g.name == name, self.groups))
if len(groups) > 1:
raise DuplicateKeyError(
f"Found more than one group with the same name: {name}"
)
return next(iter(groups), None)
| true | true |
1c3264d05fa3133a8547727b7c7c279a53e36aba | 219 | py | Python | backend/views/meta.py | TeamAutoMod/AutoMod | 5934881c7bb71ba17c8e40e11c6fd2bcfad7ae50 | [
"MIT"
] | 13 | 2021-09-29T13:08:23.000Z | 2022-03-21T20:59:27.000Z | backend/views/meta.py | TeamAutoMod/AutoMod | 5934881c7bb71ba17c8e40e11c6fd2bcfad7ae50 | [
"MIT"
] | null | null | null | backend/views/meta.py | TeamAutoMod/AutoMod | 5934881c7bb71ba17c8e40e11c6fd2bcfad7ae50 | [
"MIT"
] | 7 | 2021-09-26T19:10:47.000Z | 2022-03-26T11:27:11.000Z | import discord
from discord.ui import View
from .buttons import DeleteBtn
class DeleteView(View):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_item(DeleteBtn()) | 19.909091 | 41 | 0.684932 | import discord
from discord.ui import View
from .buttons import DeleteBtn
class DeleteView(View):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_item(DeleteBtn()) | true | true |
1c32653de6c0dc6659fc5d9ed3f6ce46cdd2ed42 | 2,067 | py | Python | data/test/python/1c32653de6c0dc6659fc5d9ed3f6ce46cdd2ed42market_v4.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/test/python/1c32653de6c0dc6659fc5d9ed3f6ce46cdd2ed42market_v4.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/test/python/1c32653de6c0dc6659fc5d9ed3f6ce46cdd2ed42market_v4.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | from __future__ import print_function
from multiprocessing import Process
import zmq
import itertools
from lib import *
import Queue
context = zmq.Context()
from traders import Trader
from brokers import JobQueueBroker
from workers import AuthWorker, DBWorker
from time import sleep
AddressManager.register_endpoint('db_frontend', 'tcp', 'localhost', 5560)
AddressManager.register_endpoint('db_backend', 'tcp', 'localhost', 5561)
AddressManager.register_endpoint('market_frontend', 'tcp', 'localhost', 5562)
AddressManager.register_endpoint('market_backend', 'tcp', 'localhost', 5563)
def run_auth_cluster(n_workers, verbose = False):
    """Spawn ``n_workers`` worker processes attached to the market backend.

    NOTE(review): despite the name, this currently starts DBWorker
    instances — the AuthWorker/market-broker variants below are all
    commented out. Confirm which worker type is intended.
    """
    # market_broker = JobQueueBroker('market_gateway', {'frontend':'market_frontend', 'backend':'market_backend'}, verbose = verbose)
    # market_broker.start()
    for i in xrange(n_workers):
        # AuthWorker('auth_worker', {'frontend':'db_frontend', 'backend':'db_backend'}, verbose = verbose).start()
        DBWorker('auth_worker', {'frontend':'market_backend'}, verbose = verbose).start()
        # AuthWorker('auth_worker', {'frontend':'db_frontend'}, verbose = verbose).start()
    # for i in xrange(n_workers): Auth('authenticator', {'frontend':'market_backend', 'backend':'db_frontend'}, verbose = verbose).start()
def run_db_cluster(n_workers, verbose = False):
    """Start the DB job-queue broker process.

    NOTE(review): ``n_workers`` is currently unused — the DBWorker startup
    loop is commented out below.
    """
    db_broker = JobQueueBroker('db_broker', {'frontend':'db_frontend', 'backend':'db_backend'}, verbose = verbose)
    db_broker.start()
    # for i in xrange(n_workers): DBWorker('db_worker', {'frontend': 'db_backend'}, verbose = verbose).start()

# def run_auction_cluster(n_workers, verbose = False):
#     auction_broker = JobQueueBroker('db_broker', 'db_frontend', 'db_backend', verbose = verbose)
#     db_broker.start()
#     for i in xrange(n_workers): Auction('auction', '', None, verbose = verbose).start()
if __name__ == '__main__':
    # Trader(name = 'trader', endpoints = {'backend': 'market_frontend'}, verbose = True).start()
    # TeztAgent('trader', {'backend' : 'market_frontend'}, verbose = True).start()
    # Start a single (verbose) auth-cluster worker.
    run_auth_cluster(1, True)
    # run_db_cluster(1, False)
| 31.8 | 134 | 0.734398 | from __future__ import print_function
from multiprocessing import Process
import zmq
import itertools
from lib import *
import Queue
context = zmq.Context()
from traders import Trader
from brokers import JobQueueBroker
from workers import AuthWorker, DBWorker
from time import sleep
AddressManager.register_endpoint('db_frontend', 'tcp', 'localhost', 5560)
AddressManager.register_endpoint('db_backend', 'tcp', 'localhost', 5561)
AddressManager.register_endpoint('market_frontend', 'tcp', 'localhost', 5562)
AddressManager.register_endpoint('market_backend', 'tcp', 'localhost', 5563)
def run_auth_cluster(n_workers, verbose = False):
for i in xrange(n_workers):
DBWorker('auth_worker', {'frontend':'market_backend'}, verbose = verbose).start()
def run_db_cluster(n_workers, verbose = False):
db_broker = JobQueueBroker('db_broker', {'frontend':'db_frontend', 'backend':'db_backend'}, verbose = verbose)
db_broker.start()
if __name__ == '__main__':
run_auth_cluster(1, True)
| true | true |
1c32658d8f2697e42b6d3d48d7586f3159a5887a | 117 | py | Python | Part 1/Chapter 10/exercise_10.11a.py | kg55555/pypractice | 1867f001b3d2a7174ea00d7b9e2fa22e9f1877ef | [
"MIT"
] | null | null | null | Part 1/Chapter 10/exercise_10.11a.py | kg55555/pypractice | 1867f001b3d2a7174ea00d7b9e2fa22e9f1877ef | [
"MIT"
] | null | null | null | Part 1/Chapter 10/exercise_10.11a.py | kg55555/pypractice | 1867f001b3d2a7174ea00d7b9e2fa22e9f1877ef | [
"MIT"
] | null | null | null | import json
fav = input("What is your favourite number?\n")
with open('r_fav', 'w') as fn:
json.dump(fav, fn)
| 14.625 | 47 | 0.641026 | import json
fav = input("What is your favourite number?\n")
with open('r_fav', 'w') as fn:
json.dump(fav, fn)
| true | true |
1c326597414a341be5f886a62626944c3c0b78dc | 1,879 | py | Python | apolloMusicRipper/run.py | ebber/apolloRipper | 863789229639036c5d4bc0d021b9cef0c8c41167 | [
"MIT"
] | null | null | null | apolloMusicRipper/run.py | ebber/apolloRipper | 863789229639036c5d4bc0d021b9cef0c8c41167 | [
"MIT"
] | null | null | null | apolloMusicRipper/run.py | ebber/apolloRipper | 863789229639036c5d4bc0d021b9cef0c8c41167 | [
"MIT"
] | null | null | null | import os
import config
from ripper.ytRipper import Ripper
from model.song import Song
from tagging.tagger import Tagger
import logging
tagger = Tagger()
ripper = Ripper()
notDone = True
logger = logging.getLogger("mainLoopLogger")
#goes through all songs in the directory, calls tagFile
def tagDir(dir=config.DIR_UNTAGGED):
    """Tag every .mp3 file found directly inside ``dir``.

    Songs whose tagging fails (``tag_song`` returns -1) are saved to
    ``config.DIR_FAILED_TAG``; successfully tagged songs are saved to
    ``config.DIR_TAGGED``.
    """
    logger.info("tagging songs in "+dir)
    for filename in os.listdir(dir):
        if filename.endswith(".mp3"):
            song = Song(dir, filename)
            if tagger.tag_song(song) == -1:
                song.save(config.DIR_FAILED_TAG)
            else:  # song was successfully tagged; move it to completed songs
                song.save(config.DIR_TAGGED)
#really this should spawn a child process
def tag_continous(dir = config.DIR_UNTAGGED):
    """Continuously watch ``dir`` for files that were not present at startup.

    NOTE(review): this looks unfinished — ``files`` is never refreshed, the
    Song built for a new file is never tagged or saved, and the loop only
    ends when the module-level ``notDone`` flag is cleared elsewhere.
    Confirm intended behavior before relying on it.
    """
    logger.info("Tagging songs contiously in "+dir)
    files = os.listdir(dir)
    while(notDone):
        new_files = os.listdir(dir)
        for fname in new_files:
            if fname not in files:
                # A Song is constructed but nothing is done with it yet.
                song = Song(dir, filename=fname)
        pass
def rip_playlist(playlist_url):
    """Download a song or playlist from ``playlist_url`` via the module ripper."""
    logger.info("ripping playlist " + playlist_url)
    ripper.rip(playlist_url)
def print_prompt():
    """Print the interactive help menu for the command loop."""
    menu = [
        "help",
        " tag <directory> - tag a directory ",
        " rip <playlist url> - rip a song or playlist from youtube ",
        " q - quit",
    ]
    print("\n".join(menu))
# Interactive command loop: '?' shows help, 'tag' tags the untagged
# directory, 'rip <url>' downloads a playlist, 'q' quits.
while True:
    # NOTE(review): raw_input is Python 2 — under Python 3 this must be input().
    input_cmd = raw_input("What do you want to do?")
    print(input_cmd)
    cmd= input_cmd.rstrip().split(" ")
    if '?' == cmd[0]:
        print_prompt()
    elif "tag" == cmd[0]:
        tagDir(config.DIR_UNTAGGED)
    elif "rip" == cmd[0]:
        if len(cmd) == 1:
            print("seperate the command and playlist with a space")
        else:
            rip_playlist(cmd[1])
    elif 'q' == cmd[0]:
        break
    else:
        # Unknown command: show the help menu again.
        print_prompt()
| 27.632353 | 79 | 0.625865 | import os
import config
from ripper.ytRipper import Ripper
from model.song import Song
from tagging.tagger import Tagger
import logging
tagger = Tagger()
ripper = Ripper()
notDone = True
logger = logging.getLogger("mainLoopLogger")
def tagDir(dir=config.DIR_UNTAGGED):
logger.info("tagging songs in "+dir)
for filename in os.listdir(dir):
if filename.endswith(".mp3"):
song = Song(dir, filename)
if tagger.tag_song(song) == -1:
song.save(config.DIR_FAILED_TAG)
else:
song.save(config.DIR_TAGGED)
def tag_continous(dir = config.DIR_UNTAGGED):
logger.info("Tagging songs contiously in "+dir)
files = os.listdir(dir)
while(notDone):
new_files = os.listdir(dir)
for fname in new_files:
if fname not in files:
song = Song(dir, filename=fname)
pass
def rip_playlist(playlist_url):
logger.info("ripping playlist " + playlist_url)
ripper.rip(playlist_url)
def print_prompt():
help_text = "help\n"
help_text += " tag <directory> - tag a directory \n"
help_text += " rip <playlist url> - rip a song or playlist from youtube \n"
help_text += " q - quit"
print(help_text)
while True:
input_cmd = raw_input("What do you want to do?")
print(input_cmd)
cmd= input_cmd.rstrip().split(" ")
if '?' == cmd[0]:
print_prompt()
elif "tag" == cmd[0]:
tagDir(config.DIR_UNTAGGED)
elif "rip" == cmd[0]:
if len(cmd) == 1:
print("seperate the command and playlist with a space")
else:
rip_playlist(cmd[1])
elif 'q' == cmd[0]:
break
else:
print_prompt()
| true | true |
1c3265c5e63e7d32d1a24668a6009ee611fd3470 | 1,086 | py | Python | similarity/discursos_separados_em_json.py | Oguiraw/Me-diz-quem-tu-es | 66d2d79f789f112f70f81d38fed57e3a9c87be86 | [
"MIT"
] | null | null | null | similarity/discursos_separados_em_json.py | Oguiraw/Me-diz-quem-tu-es | 66d2d79f789f112f70f81d38fed57e3a9c87be86 | [
"MIT"
] | null | null | null | similarity/discursos_separados_em_json.py | Oguiraw/Me-diz-quem-tu-es | 66d2d79f789f112f70f81d38fed57e3a9c87be86 | [
"MIT"
] | 1 | 2018-12-19T16:21:55.000Z | 2018-12-19T16:21:55.000Z | import gensim, nltk, re, json, os
from gensim import corpora,similarities, models
from nltk.tokenize import word_tokenize
from datetime import date,datetime
import csv
import base64
from sklearn.cluster import k_means
import numpy as np
csv.field_size_limit(100000000)
def open_file(name_file, data):
    """Append the rows of a ';'-delimited CSV file to ``data`` in place.

    The first row (the header) is discarded.

    Args:
        name_file: path of the CSV file to read.
        data: list extended with the remaining rows (each a list of strings).
    """
    # newline='' is the documented way to hand a file to the csv module; it
    # prevents universal-newline translation from mangling embedded newlines
    # inside quoted fields.
    with open(name_file, 'r', newline='') as f:
        reader = csv.reader(f, delimiter=';')
        lista_por_arquivo = list(reader)
    lista_por_arquivo.pop(0)  # drop the header row
    data += lista_por_arquivo
# Group the speech rows by speaker (column 1) and write one JSON file per
# speaker containing [column 4, column 5] pairs.
data = []
open_file('discursos_sep.csv', data)

BASEDIR = os.getcwd()

dados_dict = {}
for row in data:
    try:
        # Column 0 must parse as a positive integer id.
        if int(row[0]) > 0:
            if row[1] in dados_dict:
                dados_dict[row[1]].append([row[4], row[5]])
            else:
                dados_dict[row[1]] = []
                dados_dict[row[1]].append([row[4], row[5]])
    except (ValueError, IndexError):
        # Was a bare ``except``: keep the best-effort skip of malformed or
        # short rows, but stop swallowing unrelated errors (e.g. KeyboardInterrupt).
        pass

for dic in dados_dict:
    # Assumes the ``discursos/`` output directory already exists — TODO confirm.
    with open("discursos/" + dic + ".json", 'w', encoding='utf8') as outfile:
        json.dump({dic: dados_dict[dic]}, outfile, ensure_ascii=False)
| 27.15 | 72 | 0.631676 | import gensim, nltk, re, json, os
from gensim import corpora,similarities, models
from nltk.tokenize import word_tokenize
from datetime import date,datetime
import csv
import base64
from sklearn.cluster import k_means
import numpy as np
csv.field_size_limit(100000000)
def open_file(name_file,data):
    """Load a ';'-delimited CSV file and extend *data* with every row after the header."""
    with open(name_file, 'r') as handle:
        all_rows = list(csv.reader(handle, delimiter=';'))
    del all_rows[0]  # drop the header line
    data += all_rows
# Load the pre-split speeches and group them by column 1
# (presumably a speaker/author id -- confirm against the CSV layout).
data = []
open_file('discursos_sep.csv',data)
BASEDIR = os.getcwd()
dados_dict = {}
for i in range(len(data)):
    try:
        # Only rows with a positive numeric id in column 0 are kept.
        if(int(data[i][0]) > 0):
            if data[i][1] in dados_dict:
                dados_dict[data[i][1]].append([data[i][4],data[i][5]])
            else:
                dados_dict[data[i][1]] = []
                dados_dict[data[i][1]].append([data[i][4],data[i][5]])
    # NOTE(review): bare except silently drops malformed rows but also hides
    # unrelated errors -- consider narrowing to (ValueError, IndexError).
    except:
        pass
# Write one JSON file per key into the discursos/ directory.
for dic in dados_dict:
    with open("discursos/"+dic+".json", 'w',encoding='utf8') as outfile:
        json.dump({dic: dados_dict[dic]},outfile, ensure_ascii=False)
| true | true |
1c3266a4c989d03da9f5adcb2e5b18de48ce8374 | 5,878 | py | Python | ManageOrdersS3.py | jorgeMorfinezM/AwsS3FileManager | 443800c58371391b8c127ed538c21766b2315eac | [
"Apache-2.0"
] | 1 | 2020-08-13T21:28:19.000Z | 2020-08-13T21:28:19.000Z | ManageOrdersS3.py | jorgeMorfinezM/AwsS3FileManager | 443800c58371391b8c127ed538c21766b2315eac | [
"Apache-2.0"
] | null | null | null | ManageOrdersS3.py | jorgeMorfinezM/AwsS3FileManager | 443800c58371391b8c127ed538c21766b2315eac | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Requires Python 3.6 or later
"""
__author__ = "Jorge Morfinez Mojica (jorge.morfinez.m@gmail.com)"
__copyright__ = "Copyright 2020, Jorge Morfinez Mojica"
__license__ = ""
__history__ = """ Se conecta, valida y copia
documentos/archivos a un
Bucket de AWS S3 a partir
de un directorio local."""
__version__ = "1.20.H07.1.1.0 ($Rev: 3 $)"
from constants.constants import Constants as Const
import fnmatch
import boto3
from botocore.exceptions import ClientError
from ftplib import FTP_TLS
import argparse
from logger_controller.logger_control import *
import time
import os
logger = configure_logger()
# Connects to AWS S3 and returns the bucket that holds the order files.
def connect_aws_s3():
    """Build a boto3 S3 resource from the configured credentials and
    return the orders bucket object."""
    cfg = get_config_constant_file()
    s3_cfg = cfg['BUCKET_AWS_S3']
    s3 = boto3.resource('s3',
                        aws_access_key_id=s3_cfg['ACCESS_KEY'],
                        aws_secret_access_key=s3_cfg['SECRET_KEY'])
    return s3.Bucket(s3_cfg['S3_NAME'])
# Uploads a local file to the configured AWS S3 bucket.
def copy_file_to_aws_s3(pedido, file_path):
    """Upload *file_path* to the configured S3 bucket under the key *pedido*.

    Returns True on success, False when the upload fails with a ClientError.
    """
    cfg = get_config_constant_file()
    bucketname = cfg['BUCKET_AWS_S3']['S3_NAME']
    s3_access_key = cfg['BUCKET_AWS_S3']['ACCESS_KEY']
    s3_secret_key = cfg['BUCKET_AWS_S3']['SECRET_KEY']
    logger.info('Bucket S3 to Upload file: %s', bucketname)
    logger.info('File to upload: %s', pedido)
    s3 = boto3.resource('s3', aws_access_key_id=s3_access_key, aws_secret_access_key=s3_secret_key)
    try:
        # upload_file() returns None, so there is nothing to capture here.
        # The previous code logged an unbound `response` variable inside the
        # handler, raising NameError and masking the real upload error.
        s3.Object(bucketname, pedido).upload_file(file_path)
    except ClientError as e:
        # Use the module-level logger for consistency with the rest of the file.
        logger.error(e)
        return False
    return True
# Checks whether a file with the given name already exists in the bucket.
def validate_file_exists_s3(pedido_order):
    """Return True if any object key in the orders bucket contains *pedido_order*."""
    bucket_pedidos = connect_aws_s3()
    logger.info('File to validate in S3: %s', str(pedido_order))
    for pedido in bucket_pedidos.objects.all():
        order_name = pedido.key
        logger.info('File into S3 Bucket: %s', str(order_name))
        # Bug fix: the flag used to be overwritten on every iteration, so the
        # result only reflected the *last* key listed. Return on first match.
        if str(pedido_order) in str(order_name):
            return True
    return False
# Deletes a matching order file from the root of the S3 bucket.
def delete_order_from_s3_root(order_to_delete):
    """Delete *order_to_delete* from the bucket root when its name matches
    the configured order-file pattern."""
    cfg = get_config_constant_file()
    bucket_pedidos = connect_aws_s3()
    # Only files with the expected extension/pattern may be removed.
    pattern = cfg['EXT_ORDERS_TV']
    if fnmatch.fnmatch(order_to_delete, pattern):
        source_s3_order_path = '/' + order_to_delete
        logger.info('Elimina el: %s', 'Archivo: {} de: {}'.format(str(order_to_delete),
                                                                  str(source_s3_order_path)))
        # Bug fix: Bucket.Object() takes only the key; passing the bucket
        # name as a first argument (as before) raises TypeError at runtime.
        bucket_pedidos.Object(order_to_delete).delete()
def parse_xml_files_in_bucket():
    """Upload every matching file from the local temp folder to S3 and
    remove the local copy once it has been transferred."""
    cfg = get_config_constant_file()
    remote_path = cfg['PATH_REMOTE_BUCKET']
    local_temp_path = cfg['PATH_LOCAL']
    pattern = cfg['EXT_ORDERS_TV']
    for pedido in os.listdir(local_temp_path):
        # Skip anything that does not look like an order file.
        if not fnmatch.fnmatch(pedido, pattern):
            continue
        file_remote = remote_path + '/' + pedido
        file_local = local_temp_path + '/' + pedido
        pedido_s3_exists = validate_file_exists_s3(pedido)
        logger.info('File Exists in S3: %s',
                    'File: {0} ¿exists?: {1}'.format(pedido, pedido_s3_exists))
        if pedido_s3_exists is False:
            logger.info('Server File >>> ' + file_remote + ' : ' + file_local + ' <<< Local File')
            copy_file_to_aws_s3(pedido, file_local)
            if os.path.isfile(file_local):
                os.remove(file_local)
                logger.info('Local File Pedido was deleted: %s', str(file_local))
            else:
                logger.error("Error: %s file not found" % file_local)
        else:
            logger.info('File: %s', '{0} already exists in Bucket S3!'.format(pedido))
# Loads the constants configuration used across this module.
def get_config_constant_file():
    """Load the YAML constants file and return the configuration object.

    :rtype: object
    """
    _constants_file = "constants/constants.yml"
    return Const.get_constants_file(_constants_file)
def main():
    """Parse the CLI arguments and run one S3 upload pass."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--file_type', required=False, type=str,
                        help="Parametro Tipo de archivos")
    args = parser.parse_args()
    # Bug fix: the option is registered as --file_type, but the code read
    # args.order_type, which does not exist and raised AttributeError.
    order_type = args.file_type
    # The parsed value is only logged; parse_xml_files_in_bucket() does not
    # currently take it as a parameter.
    logger.info('ORDER_TYPE ARG: %s', str(order_type))
    parse_xml_files_in_bucket()


if __name__ == "__main__":
    main()
| 27.85782 | 102 | 0.652943 |
__author__ = "Jorge Morfinez Mojica (jorge.morfinez.m@gmail.com)"
__copyright__ = "Copyright 2020, Jorge Morfinez Mojica"
__license__ = ""
__history__ = """ Se conecta, valida y copia
documentos/archivos a un
Bucket de AWS S3 a partir
de un directorio local."""
__version__ = "1.20.H07.1.1.0 ($Rev: 3 $)"
from constants.constants import Constants as Const
import fnmatch
import boto3
from botocore.exceptions import ClientError
from ftplib import FTP_TLS
import argparse
from logger_controller.logger_control import *
import time
import os
logger = configure_logger()
def connect_aws_s3():
    """Return the boto3 Bucket for the orders bucket configured in constants.yml."""
    cfg = get_config_constant_file()
    name = cfg['BUCKET_AWS_S3']['S3_NAME']
    access_key = cfg['BUCKET_AWS_S3']['ACCESS_KEY']
    secret_key = cfg['BUCKET_AWS_S3']['SECRET_KEY']
    resource = boto3.resource('s3', aws_access_key_id=access_key,
                              aws_secret_access_key=secret_key)
    return resource.Bucket(name)
def copy_file_to_aws_s3(pedido, file_path):
    """Upload *file_path* to the configured S3 bucket under the key *pedido*.

    Returns True on success, False when the upload fails with a ClientError.
    """
    cfg = get_config_constant_file()
    bucketname = cfg['BUCKET_AWS_S3']['S3_NAME']
    s3_access_key = cfg['BUCKET_AWS_S3']['ACCESS_KEY']
    s3_secret_key = cfg['BUCKET_AWS_S3']['SECRET_KEY']
    logger.info('Bucket S3 to Upload file: %s', bucketname)
    logger.info('File to upload: %s', pedido)
    s3 = boto3.resource('s3', aws_access_key_id=s3_access_key, aws_secret_access_key=s3_secret_key)
    try:
        # upload_file() returns None, so there is nothing to capture here.
        # The previous code logged an unbound `response` variable inside the
        # handler, raising NameError and masking the real upload error.
        s3.Object(bucketname, pedido).upload_file(file_path)
    except ClientError as e:
        # Use the module-level logger for consistency with the rest of the file.
        logger.error(e)
        return False
    return True
def validate_file_exists_s3(pedido_order):
    """Return True if any object key in the orders bucket contains *pedido_order*."""
    bucket_pedidos = connect_aws_s3()
    logger.info('File to validate in S3: %s', str(pedido_order))
    for pedido in bucket_pedidos.objects.all():
        order_name = pedido.key
        logger.info('File into S3 Bucket: %s', str(order_name))
        # Bug fix: the flag used to be overwritten on every iteration, so the
        # result only reflected the *last* key listed. Return on first match.
        if str(pedido_order) in str(order_name):
            return True
    return False
def delete_order_from_s3_root(order_to_delete):
    """Delete *order_to_delete* from the bucket root when its name matches
    the configured order-file pattern."""
    cfg = get_config_constant_file()
    bucket_pedidos = connect_aws_s3()
    # Only files with the expected extension/pattern may be removed.
    pattern = cfg['EXT_ORDERS_TV']
    if fnmatch.fnmatch(order_to_delete, pattern):
        source_s3_order_path = '/' + order_to_delete
        logger.info('Elimina el: %s', 'Archivo: {} de: {}'.format(str(order_to_delete),
                                                                  str(source_s3_order_path)))
        # Bug fix: Bucket.Object() takes only the key; passing the bucket
        # name as a first argument (as before) raises TypeError at runtime.
        bucket_pedidos.Object(order_to_delete).delete()
def parse_xml_files_in_bucket():
    """Upload every matching file from the local temp folder to S3 and
    remove the local copy once it has been transferred."""
    cfg = get_config_constant_file()
    remote_path = cfg['PATH_REMOTE_BUCKET']
    local_temp_path = cfg['PATH_LOCAL']
    pattern = cfg['EXT_ORDERS_TV']
    for pedido in os.listdir(local_temp_path):
        # Skip anything that does not look like an order file.
        if not fnmatch.fnmatch(pedido, pattern):
            continue
        file_remote = remote_path + '/' + pedido
        file_local = local_temp_path + '/' + pedido
        pedido_s3_exists = validate_file_exists_s3(pedido)
        logger.info('File Exists in S3: %s',
                    'File: {0} ¿exists?: {1}'.format(pedido, pedido_s3_exists))
        if pedido_s3_exists is False:
            logger.info('Server File >>> ' + file_remote + ' : ' + file_local + ' <<< Local File')
            copy_file_to_aws_s3(pedido, file_local)
            if os.path.isfile(file_local):
                os.remove(file_local)
                logger.info('Local File Pedido was deleted: %s', str(file_local))
            else:
                logger.error("Error: %s file not found" % file_local)
        else:
            logger.info('File: %s', '{0} already exists in Bucket S3!'.format(pedido))
def get_config_constant_file():
    """Load the YAML constants file and return the configuration object."""
    return Const.get_constants_file("constants/constants.yml")
def main():
    """Parse the CLI arguments and run one S3 upload pass."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--file_type', required=False, type=str,
                        help="Parametro Tipo de archivos")
    args = parser.parse_args()
    # Bug fix: the option is registered as --file_type, but the code read
    # args.order_type, which does not exist and raised AttributeError.
    order_type = args.file_type
    logger.info('ORDER_TYPE ARG: %s', str(order_type))
    parse_xml_files_in_bucket()


if __name__ == "__main__":
    main()
| true | true |
1c3266af38e628e6e69888ae24ffb3ecb5af0766 | 1,717 | py | Python | CodeGenX64/isel_tester.py | robertmuth/Cwerg | fdf30b06c93b4620c0a45b448b6d92acb81c35f0 | [
"Apache-2.0"
] | 171 | 2020-01-30T16:58:07.000Z | 2022-03-27T22:12:17.000Z | CodeGenX64/isel_tester.py | robertmuth/Cwerg | fdf30b06c93b4620c0a45b448b6d92acb81c35f0 | [
"Apache-2.0"
] | 14 | 2021-05-15T02:12:09.000Z | 2022-03-16T04:16:18.000Z | CodeGenX64/isel_tester.py | robertmuth/Cwerg | fdf30b06c93b4620c0a45b448b6d92acb81c35f0 | [
"Apache-2.0"
] | 5 | 2021-03-01T20:52:13.000Z | 2022-03-07T06:35:03.000Z | #!/usr/bin/python3
"""Testing helper for table driven code selection"""
from CpuX64 import symbolic
from Base import serialize
from Base import ir
from CodeGenX64 import isel_tab
from CodeGenX64 import regs
from typing import Any
import sys
def OpToStr(op: Any) -> str:
    """Render an operand using its default string form."""
    text = str(op)
    return text
def OpTypeStr(op: Any) -> str:
    """Return the operand's kind name (registers and constants) or "_" otherwise."""
    for klass in (ir.Reg, ir.Const):
        if isinstance(op, klass):
            return op.kind.name
    return "_"
def HandleIns(ins: ir.Ins, ctx: regs.EmitContext):
    """Print one IR instruction, its matched pattern and the emitted x64 code."""
    # Instruction line, annotated with the kind of each operand.
    print("INS: " + serialize.InsRenderToAsm(
        ins).strip() + f" [{' '.join(OpTypeStr(o) for o in ins.operands)}]")
    if ins.opcode in isel_tab.OPCODES_REQUIRING_SPECIAL_HANDLING:
        # These opcodes are not expanded via the pattern tables.
        print(f"  SPECIAL")
        return
    pattern = isel_tab.FindMatchingPattern(ins)
    print(
        f"PAT: reg:[{' '.join(a.name for a in pattern.type_constraints)}] "
        f"op:[{' '.join(a.name for a in pattern.op_curbs)}]")
    # Expand each template of the pattern into a concrete x64 instruction.
    for tmpl in pattern.emit:
        x64ins = tmpl.MakeInsFromTmpl(ins, ctx)
        name, ops = symbolic.InsSymbolize(x64ins)
        print(f"  {name} {' '.join(ops)}")
def Translate(fin):
    """Parse IR assembly from *fin* and print the selected x64 code for
    every instruction of every function."""
    unit = serialize.UnitParseFromAsm(fin, cpu_regs=regs.CPU_REGS_MAP)
    for fun in unit.funs:
        ctx = regs.EmitContext(0, 0, 0)
        # The original assigned this twice in a row; once is enough.
        ctx.scratch_cpu_reg = ir.CPU_REG_INVALID
        fun.FinalizeStackSlots()
        for bbl in fun.bbls:
            for ins in bbl.inss:
                # Functions named "*gpr_scratch*" get a fixed scratch register.
                if "gpr_scratch" in fun.name:
                    ctx.scratch_cpu_reg = regs.CPU_REGS_MAP["rax"]
                print()
                HandleIns(ins, ctx)
# Entry point: translate IR assembly read from stdin.
if __name__ == "__main__":
    Translate(sys.stdin)
| 27.693548 | 77 | 0.627257 |
from CpuX64 import symbolic
from Base import serialize
from Base import ir
from CodeGenX64 import isel_tab
from CodeGenX64 import regs
from typing import Any
import sys
def OpToStr(op: Any) -> str:
    """Default textual rendering of an operand."""
    rendered = str(op)
    return rendered
def OpTypeStr(op: Any) -> str:
    """Kind name for IR register/constant operands, "_" for anything else."""
    if isinstance(op, (ir.Reg, ir.Const)):
        return op.kind.name
    return "_"
def HandleIns(ins: ir.Ins, ctx: regs.EmitContext):
    """Print one IR instruction, its matched pattern and the emitted x64 code."""
    # Instruction line, annotated with the kind of each operand.
    print("INS: " + serialize.InsRenderToAsm(
        ins).strip() + f" [{' '.join(OpTypeStr(o) for o in ins.operands)}]")
    if ins.opcode in isel_tab.OPCODES_REQUIRING_SPECIAL_HANDLING:
        # These opcodes are not expanded via the pattern tables.
        print(f"  SPECIAL")
        return
    pattern = isel_tab.FindMatchingPattern(ins)
    print(
        f"PAT: reg:[{' '.join(a.name for a in pattern.type_constraints)}] "
        f"op:[{' '.join(a.name for a in pattern.op_curbs)}]")
    # Expand each template of the pattern into a concrete x64 instruction.
    for tmpl in pattern.emit:
        x64ins = tmpl.MakeInsFromTmpl(ins, ctx)
        name, ops = symbolic.InsSymbolize(x64ins)
        print(f"  {name} {' '.join(ops)}")
def Translate(fin):
    """Parse IR assembly from *fin* and print the selected x64 code for
    every instruction of every function."""
    unit = serialize.UnitParseFromAsm(fin, cpu_regs=regs.CPU_REGS_MAP)
    for fun in unit.funs:
        ctx = regs.EmitContext(0, 0, 0)
        # The original assigned this twice in a row; once is enough.
        ctx.scratch_cpu_reg = ir.CPU_REG_INVALID
        fun.FinalizeStackSlots()
        for bbl in fun.bbls:
            for ins in bbl.inss:
                # Functions named "*gpr_scratch*" get a fixed scratch register.
                if "gpr_scratch" in fun.name:
                    ctx.scratch_cpu_reg = regs.CPU_REGS_MAP["rax"]
                print()
                HandleIns(ins, ctx)
# Entry point: translate IR assembly read from stdin.
if __name__ == "__main__":
    Translate(sys.stdin)
| true | true |
1c3266af66ce2cbcce02c1fab886491c7e384cea | 2,607 | py | Python | aedes_server/core/models.py | henriquenogueira/aedes | 43b8376a091c57dd45d342dea78301c0a79b5df0 | [
"MIT"
] | null | null | null | aedes_server/core/models.py | henriquenogueira/aedes | 43b8376a091c57dd45d342dea78301c0a79b5df0 | [
"MIT"
] | 8 | 2016-03-27T03:15:42.000Z | 2016-03-28T23:59:55.000Z | aedes_server/core/models.py | henriquenogueira/aedes | 43b8376a091c57dd45d342dea78301c0a79b5df0 | [
"MIT"
] | null | null | null | from django.db import models
class Report(models.Model):
    '''
    Model that represents a given report by a user.
    It contains information about the location of
    the report as well as the category and timestamp.
    '''
    REPORT_CATEGORIES = (
        ('F', 'Foco'),  # Represents a potential place where aedes can appear
        ('C', 'Criadouro'),  # Represents aedes' larva on a spot
        ('SD', 'Suspeita de dengue'),  # Someone near feeling symptoms of dengue
        ('SZ', 'Suspeita de zika'),  # Someone near feeling symptoms of zika
        ('SC', 'Suspeita de chikungunya'),  # Someone near feeling symptoms of chikungunya
    )
    # Geographic coordinates of the report.
    latitude = models.FloatField('latitude')
    longitude = models.FloatField('longitude')
    # Optional photo, stored under upload/<year>/<month>/<day>/.
    photo = models.ImageField('foto', upload_to='upload/%Y/%m/%d/', blank=True)
    # Identifier of the reporting device.
    device_id = models.CharField('ID do aparelho', max_length=255)
    comment = models.TextField('comentário', blank=True, default='')
    category = models.CharField('categoria', max_length=2, choices=REPORT_CATEGORIES)
    resolved = models.BooleanField('resolvido', default=False)
    reported_at = models.DateTimeField('reportado em', auto_now_add=True)
    class Meta:
        # Newest reports first.
        ordering = '-id',
        verbose_name = 'ocorrência'
        verbose_name_plural = 'ocorrências'
    def __str__(self):
        return '({}, {}) - {}'.format(self.latitude, self.longitude, self.category)
class Cluster(models.Model):
    '''
    A geographic cluster of reports with per-category counters
    and a derived urgency score.
    '''
    label = models.IntegerField('etiqueta', unique=True)
    latitude = models.FloatField('latitude')
    longitude = models.FloatField('longitude')
    address = models.CharField('endereço', max_length=512, blank=True)
    breeding_count = models.PositiveIntegerField('criadouros', default=0)
    focus_count = models.PositiveIntegerField('foco', default=0)
    suspicion_count = models.PositiveIntegerField('suspeita', default=0)
    created_at = models.DateTimeField('criado em', auto_now_add=True)
    class Meta:
        ordering = 'label',
        verbose_name = 'aglomerado'
        verbose_name_plural = 'aglomerados'
    @property
    def score(self):
        '''Weighted urgency score of the cluster; 0 when it has no reports.'''
        total = self.breeding_count + self.focus_count + self.suspicion_count
        if not total:
            return 0
        # Suspicions weigh the most, then breeding sites, then potential foci.
        weighted = 0.2 * self.focus_count + 0.3 * self.breeding_count + 0.5 * self.suspicion_count
        return weighted / total
    def __str__(self):
        return '{} - ({}, {}) - {}'.format(self.label, self.latitude, self.longitude, self.address)
| 38.910448 | 99 | 0.666667 | from django.db import models
class Report(models.Model):
    '''A single user report: location, category, photo and timestamp.'''
    REPORT_CATEGORIES = (
        ('F', 'Foco'),  # potential place where aedes can appear
        ('C', 'Criadouro'),  # aedes' larva found on a spot
        ('SD', 'Suspeita de dengue'), # Someone near feeling symptoms of dengue
        ('SZ', 'Suspeita de zika'), # Someone near feeling symptoms of zika
        ('SC', 'Suspeita de chikungunya'), # Someone near feeling symptoms of chikungunya
    )
    # Geographic coordinates of the report.
    latitude = models.FloatField('latitude')
    longitude = models.FloatField('longitude')
    # Optional photo, stored under upload/<year>/<month>/<day>/.
    photo = models.ImageField('foto', upload_to='upload/%Y/%m/%d/', blank=True)
    device_id = models.CharField('ID do aparelho', max_length=255)
    comment = models.TextField('comentário', blank=True, default='')
    category = models.CharField('categoria', max_length=2, choices=REPORT_CATEGORIES)
    resolved = models.BooleanField('resolvido', default=False)
    reported_at = models.DateTimeField('reportado em', auto_now_add=True)
    class Meta:
        # Newest reports first.
        ordering = '-id',
        verbose_name = 'ocorrência'
        verbose_name_plural = 'ocorrências'
    def __str__(self):
        return '({}, {}) - {}'.format(self.latitude, self.longitude, self.category)
class Cluster(models.Model):
    '''A geographic cluster of reports with per-category counters.'''
    label = models.IntegerField('etiqueta', unique=True)
    latitude = models.FloatField('latitude')
    longitude = models.FloatField('longitude')
    address = models.CharField('endereço', max_length=512, blank=True)
    breeding_count = models.PositiveIntegerField('criadouros', default=0)
    focus_count = models.PositiveIntegerField('foco', default=0)
    suspicion_count = models.PositiveIntegerField('suspeita', default=0)
    created_at = models.DateTimeField('criado em', auto_now_add=True)
    class Meta:
        ordering = 'label',
        verbose_name = 'aglomerado'
        verbose_name_plural = 'aglomerados'
    @property
    def score(self):
        '''Weighted urgency score of the cluster; 0 when it has no reports.'''
        total = self.breeding_count + self.focus_count + self.suspicion_count
        if not total:
            return 0
        weighted = 0.2 * self.focus_count + 0.3 * self.breeding_count + 0.5 * self.suspicion_count
        return weighted / total
    def __str__(self):
        return '{} - ({}, {}) - {}'.format(self.label, self.latitude, self.longitude, self.address)
| true | true |
1c32683d609fb544f45af4fd713ee418abb2c7ad | 3,311 | py | Python | example/dj/apps/test_chamber/tests/models/dispatchers.py | rubickcz/django-chamber | 0b4c51c66e0f496c6ebdbf4130bab8dd2fc53984 | [
"BSD-3-Clause"
] | null | null | null | example/dj/apps/test_chamber/tests/models/dispatchers.py | rubickcz/django-chamber | 0b4c51c66e0f496c6ebdbf4130bab8dd2fc53984 | [
"BSD-3-Clause"
] | null | null | null | example/dj/apps/test_chamber/tests/models/dispatchers.py | rubickcz/django-chamber | 0b4c51c66e0f496c6ebdbf4130bab8dd2fc53984 | [
"BSD-3-Clause"
] | null | null | null | from nose.tools import raises # pylint: disable=E0401
from django.core.exceptions import ImproperlyConfigured
from django.test import TransactionTestCase
from chamber.models.dispatchers import BaseDispatcher, StateDispatcher
from chamber.shortcuts import change_and_save
from chamber.utils.transaction import transaction_signals
from germanium.tools import assert_equal # pylint: disable=E0401
from test_chamber.models import (
CSVRecord, TestDispatchersModel, TestFieldsModel, TestSmartModel, TestOnDispatchModel
) # pylint: disable=E0401
class DispatchersTestCase(TransactionTestCase):
    """Integration tests for the chamber model dispatchers
    (state, property, created and on-dispatch handlers)."""
    def test_state_dispatcher(self):
        """A state dispatcher fires only on transitions into its target state."""
        m = TestDispatchersModel.objects.create()
        # Moving TestDispatcher model to SECOND state should create new TestSmartModel instance
        assert_equal(TestSmartModel.objects.count(), 0)
        change_and_save(m, state=TestDispatchersModel.STATE.SECOND)
        assert_equal(TestSmartModel.objects.count(), 1)
        # But subsequent saves should not create more instances
        change_and_save(m, state=TestDispatchersModel.STATE.SECOND)
        assert_equal(TestSmartModel.objects.count(), 1)
        # Moving back and forth between the states creates another instance
        change_and_save(m, state=TestDispatchersModel.STATE.FIRST)
        change_and_save(m, state=TestDispatchersModel.STATE.SECOND)
        assert_equal(TestSmartModel.objects.count(), 2)
    def test_property_dispatcher(self):
        """The property dispatcher fires once per save."""
        # Saving the model should always fire up the one property handler, not the second
        assert_equal(TestFieldsModel.objects.count(), 0)
        TestDispatchersModel.objects.create()
        assert_equal(TestFieldsModel.objects.count(), 1)
        assert_equal(TestDispatchersModel.objects.count(), 1)
    def test_created_dispatcher(self):
        """The created dispatcher fires exactly once, on creation only."""
        assert_equal(CSVRecord.objects.count(), 0)
        m = TestDispatchersModel.objects.create()
        assert_equal(CSVRecord.objects.count(), 1)
        change_and_save(m, state=TestDispatchersModel.STATE.SECOND)
        assert_equal(CSVRecord.objects.count(), 1)
    def _create_model_and_invalid_field(self):
        # Helper: a model moved to SECOND plus its 'state' field descriptor.
        model = TestDispatchersModel.objects.create()
        model.state = TestDispatchersModel.STATE.SECOND
        return model, TestDispatchersModel._meta.get_field('state')  # pylint: disable=W0212
    def test_more_test_on_dispatch_instances_should_be_created_if_transaction_signals_is_not_activated(self):
        """Without transaction signals, the handler fires on every save."""
        model = TestDispatchersModel.objects.create()
        assert_equal(TestOnDispatchModel.objects.count(), 1)
        model.change_and_save(state=2)
        assert_equal(TestOnDispatchModel.objects.count(), 2)
        model.change_and_save(state=1)
        assert_equal(TestOnDispatchModel.objects.count(), 3)
    def test_only_one_test_on_dispatch_instances_should_be_created_if_transaction_signals_is_activated(self):
        """With transaction signals active, dispatch is deferred and deduplicated
        until the surrounding block exits."""
        with transaction_signals():
            model = TestDispatchersModel.objects.create()
            assert_equal(TestOnDispatchModel.objects.count(), 0)
            model.change_and_save(state=2)
            assert_equal(TestOnDispatchModel.objects.count(), 0)
            model.change_and_save(state=1)
            assert_equal(TestOnDispatchModel.objects.count(), 0)
        assert_equal(TestOnDispatchModel.objects.count(), 1)
| 45.986111 | 109 | 0.749018 | from nose.tools import raises
from django.core.exceptions import ImproperlyConfigured
from django.test import TransactionTestCase
from chamber.models.dispatchers import BaseDispatcher, StateDispatcher
from chamber.shortcuts import change_and_save
from chamber.utils.transaction import transaction_signals
from germanium.tools import assert_equal
from test_chamber.models import (
CSVRecord, TestDispatchersModel, TestFieldsModel, TestSmartModel, TestOnDispatchModel
)
class DispatchersTestCase(TransactionTestCase):
    """Integration tests for the chamber model dispatchers
    (state, property, created and on-dispatch handlers)."""
    def test_state_dispatcher(self):
        """A state dispatcher fires only on transitions into its target state."""
        m = TestDispatchersModel.objects.create()
        assert_equal(TestSmartModel.objects.count(), 0)
        # Entering SECOND creates one TestSmartModel instance ...
        change_and_save(m, state=TestDispatchersModel.STATE.SECOND)
        assert_equal(TestSmartModel.objects.count(), 1)
        # ... but re-saving in the same state must not create another one.
        change_and_save(m, state=TestDispatchersModel.STATE.SECOND)
        assert_equal(TestSmartModel.objects.count(), 1)
        # Leaving and re-entering the state triggers the dispatcher again.
        change_and_save(m, state=TestDispatchersModel.STATE.FIRST)
        change_and_save(m, state=TestDispatchersModel.STATE.SECOND)
        assert_equal(TestSmartModel.objects.count(), 2)
    def test_property_dispatcher(self):
        """The property dispatcher fires once per save."""
        assert_equal(TestFieldsModel.objects.count(), 0)
        TestDispatchersModel.objects.create()
        assert_equal(TestFieldsModel.objects.count(), 1)
        assert_equal(TestDispatchersModel.objects.count(), 1)
    def test_created_dispatcher(self):
        """The created dispatcher fires exactly once, on creation only."""
        assert_equal(CSVRecord.objects.count(), 0)
        m = TestDispatchersModel.objects.create()
        assert_equal(CSVRecord.objects.count(), 1)
        change_and_save(m, state=TestDispatchersModel.STATE.SECOND)
        assert_equal(CSVRecord.objects.count(), 1)
    def _create_model_and_invalid_field(self):
        # Helper: a model moved to SECOND plus its 'state' field descriptor.
        model = TestDispatchersModel.objects.create()
        model.state = TestDispatchersModel.STATE.SECOND
        return model, TestDispatchersModel._meta.get_field('state')
    def test_more_test_on_dispatch_instances_should_be_created_if_transaction_signals_is_not_activated(self):
        """Without transaction signals, the handler fires on every save."""
        model = TestDispatchersModel.objects.create()
        assert_equal(TestOnDispatchModel.objects.count(), 1)
        model.change_and_save(state=2)
        assert_equal(TestOnDispatchModel.objects.count(), 2)
        model.change_and_save(state=1)
        assert_equal(TestOnDispatchModel.objects.count(), 3)
    def test_only_one_test_on_dispatch_instances_should_be_created_if_transaction_signals_is_activated(self):
        """With transaction signals active, dispatch is deferred and deduplicated
        until the surrounding block exits."""
        with transaction_signals():
            model = TestDispatchersModel.objects.create()
            assert_equal(TestOnDispatchModel.objects.count(), 0)
            model.change_and_save(state=2)
            assert_equal(TestOnDispatchModel.objects.count(), 0)
            model.change_and_save(state=1)
            assert_equal(TestOnDispatchModel.objects.count(), 0)
        assert_equal(TestOnDispatchModel.objects.count(), 1)
| true | true |
1c3269d2b4f3191e5d18f00db8deca25f16330f6 | 2,748 | py | Python | fairml/metrics/utils.py | SchernHe/FairML | ebd32df6dec1d5232e05e18c88e89179a420659e | [
"MIT"
] | null | null | null | fairml/metrics/utils.py | SchernHe/FairML | ebd32df6dec1d5232e05e18c88e89179a420659e | [
"MIT"
] | null | null | null | fairml/metrics/utils.py | SchernHe/FairML | ebd32df6dec1d5232e05e18c88e89179a420659e | [
"MIT"
] | null | null | null | """Provides useful functions to calculate fairness metrics"""
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score
import numpy as np
import pandas as pd
def calculate_precision(df, target_variable, prediction_variable):
    """Precision / positive predictive value (PPV), in percent; NaN when
    there are no positive predictions."""
    _, fp, _, tp = confusion_matrix(
        df[target_variable], df[prediction_variable]
    ).ravel()
    predicted_positive = tp + fp
    if predicted_positive != 0:
        return (tp / predicted_positive) * 100
    return np.nan
def calculate_recall(df, target_variable, prediction_variable):
    """Recall / true positive rate (TPR, sensitivity), in percent; NaN when
    there are no actual positives."""
    _, _, fn, tp = confusion_matrix(
        df[target_variable], df[prediction_variable]
    ).ravel()
    actual_positive = tp + fn
    if actual_positive != 0:
        return (tp / actual_positive) * 100
    return np.nan
def calculate_fpr(df, target_variable, prediction_variable):
    """False positive rate (FPR, false alarm ratio), in percent; NaN when
    there are no actual negatives."""
    tn, fp, _, _ = confusion_matrix(
        df[target_variable], df[prediction_variable]
    ).ravel()
    actual_negative = fp + tn
    if actual_negative != 0:
        return (fp / actual_negative) * 100
    return np.nan
def _get_nn_idx(row, neigh, radius, columns):
    """Retrieve the neighbours of one sample within a specified radius.

    Parameters
    ----------
    row : pd.Series
    neigh : sklearn.NearestNeighbors
    radius : float
    columns : list

    Returns
    -------
    tuple
        (array of neighbour indices, number of neighbours)
    """
    _, neigh_idx = neigh.radius_neighbors([row[columns]], radius)
    indices = neigh_idx[0]
    return indices, len(indices)
def get_nn_idx(df, neigh, informative_variables, radius):
    """Assign each sample the indices of its nearest neighbours.

    Adds the columns "KNN_IDX" (neighbour indices) and "Num_NN"
    (neighbour count) to *df* in place.

    Parameters
    ----------
    df : pd.DataFrame
    neigh : sklearn.NearestNeighbors
    informative_variables : list
    radius : float

    Returns
    -------
    pd.DataFrame
        The input frame with the two new columns.
    """
    series = df.apply(
        lambda row: _get_nn_idx(row, neigh, radius, informative_variables), axis=1
    )
    df[["KNN_IDX", "Num_NN"]] = pd.DataFrame(series.tolist(), index=series.index)
    return df
def calculate_performance_scores(df, target_variable, min_tau, max_tau, step_size):
    """Accuracy and precision for a sweep of decision thresholds.

    For every threshold tau in [min_tau, max_tau] (step *step_size*) a binary
    prediction column "Y_<tau*100>" is added to *df* (side effect kept from
    the original) and scored against *target_variable*.

    Returns
    -------
    tuple of np.ndarray
        (accuracy per threshold, precision per threshold)
    """
    accuracy_scores = []
    precision_scores = []
    for tau in np.arange(min_tau, max_tau + step_size, step_size):
        model_col = "Y_" + str(int(tau * 100))
        df[model_col] = df["Y_SCORE"].apply(lambda row: 1 if row >= tau else 0)
        # append() replaces the original extend([...]) single-element wrapper.
        accuracy_scores.append(accuracy_score(df[target_variable], df[model_col]))
        precision_scores.append(precision_score(df[target_variable], df[model_col]))
    return np.array(accuracy_scores), np.array(precision_scores)
| 28.329897 | 86 | 0.654294 | from sklearn.metrics import confusion_matrix, accuracy_score, precision_score
import numpy as np
import pandas as pd
def calculate_precision(df, target_variable, prediction_variable):
    """Precision / positive predictive value (PPV), in percent; NaN when
    there are no positive predictions."""
    # NOTE(review): ravel() assumes a 2x2 confusion matrix, i.e. both classes
    # appear in the data -- confirm behaviour for degenerate inputs.
    tn, fp, fn, tp = confusion_matrix(
        df[target_variable], df[prediction_variable]
    ).ravel()
    if (tp + fp) != 0:
        return (tp / (tp + fp)) * 100
    else:
        return np.nan
def calculate_recall(df, target_variable, prediction_variable):
    """Recall / true positive rate (TPR, sensitivity), in percent; NaN when
    there are no actual positives."""
    tn, fp, fn, tp = confusion_matrix(
        df[target_variable], df[prediction_variable]
    ).ravel()
    if (tp + fn) != 0:
        return (tp / (tp + fn)) * 100
    else:
        return np.nan
def calculate_fpr(df, target_variable, prediction_variable):
    """False positive rate (FPR, false alarm ratio), in percent; NaN when
    there are no actual negatives."""
    tn, fp, fn, tp = confusion_matrix(
        df[target_variable], df[prediction_variable]
    ).ravel()
    if (fp + tn) != 0:
        return (fp / (fp + tn)) * 100
    else:
        return np.nan
def _get_nn_idx(row, neigh, radius, columns):
    """Return (neighbour indices, neighbour count) for one sample within *radius*."""
    _, neigh_idx = neigh.radius_neighbors([row[columns]], radius)
    found = neigh_idx[0]
    return found, len(found)
def get_nn_idx(df, neigh, informative_variables, radius):
    """Assign each sample the indices of its nearest neighbours.

    Adds the columns "KNN_IDX" (neighbour indices) and "Num_NN"
    (neighbour count) to *df* in place and returns the frame.
    """
    series = df.apply(
        lambda row: _get_nn_idx(row, neigh, radius, informative_variables), axis=1
    )
    df[["KNN_IDX", "Num_NN"]] = pd.DataFrame(series.tolist(), index=series.index)
    return df
def calculate_performance_scores(df, target_variable, min_tau, max_tau, step_size):
    """Accuracy and precision for a sweep of decision thresholds.

    For every threshold tau a binary prediction column "Y_<tau*100>" is added
    to *df* (side effect kept from the original) and scored against
    *target_variable*. Returns (accuracy array, precision array).
    """
    accuracy_scores = []
    precision_scores = []
    for tau in np.arange(min_tau, max_tau + step_size, step_size):
        model_col = "Y_" + str(int(tau * 100))
        df[model_col] = df["Y_SCORE"].apply(lambda row: 1 if row >= tau else 0)
        # append() replaces the original extend([...]) single-element wrapper.
        accuracy_scores.append(accuracy_score(df[target_variable], df[model_col]))
        precision_scores.append(precision_score(df[target_variable], df[model_col]))
    return np.array(accuracy_scores), np.array(precision_scores)
| true | true |
1c326a10a08831667792f3baae827a68ee85eae2 | 32,426 | py | Python | metalcode_v1_0.py | mpiecka/metalcode | b0306dc9d8de53d797c946254fa63fa8b3fbf093 | [
"MIT"
] | 1 | 2021-12-13T17:20:44.000Z | 2021-12-13T17:20:44.000Z | metalcode_v1_0.py | mpiecka/metalcode | b0306dc9d8de53d797c946254fa63fa8b3fbf093 | [
"MIT"
] | null | null | null | metalcode_v1_0.py | mpiecka/metalcode | b0306dc9d8de53d797c946254fa63fa8b3fbf093 | [
"MIT"
] | null | null | null | from numpy import *
import matplotlib.pyplot as plt
import time
import os
import warnings
warnings.filterwarnings("ignore", category=VisibleDeprecationWarning)
# from metalcode_calib_metal import metal_transf
from metalcode_calib_tempe import Teff
from metalcode_calib_tempe import BolCorBV
from metalcode_calib_absmg import absmag
from metalcode_calib_clrex import clrexc_multiplier
from metalcode_calc_lstsqr import LstSqr
#-------------------------------------------------------
#---------------------INITIALISATION--------------------
#-------------------------------------------------------
def _ask_until_valid(prompt, cast, accept):
    """Keep prompting until cast(input(prompt)) parses and accept(value) holds.

    A ValueError raised by *cast* (non-numeric input) simply re-prompts,
    matching the behaviour of the original per-parameter loops.
    """
    while True:
        try:
            value = cast(input(prompt))
        except ValueError:
            continue
        if accept(value):
            return value


def initialise():
    """Interactively collect the run parameters from the user.

    Returns
    -------
    tuple
        (photosystem, age_step, z_step, Nredd, Niter, redAdj)
    """
    print('Initialisation ...')
    # photometric system code (single character; presumably Gaia / 2MASS /
    # Johnson -- confirm against the calibration modules)
    photosystem = _ask_until_valid(' -- Pick photometric system (G,2,J): ',
                                   str, lambda v: v in ['G', 'J', '2'])
    # grid spacing in age
    age_step = _ask_until_valid(' -- Pick grid spacing, age (0.1,0.2): ',
                                float, lambda v: v in [0.1, 0.2])
    # grid spacing in metallicity Z
    z_step = _ask_until_valid(' -- Pick grid spacing, Z (0.005): ',
                              float, lambda v: v in [0.005])
    # number of reddening grid points: must be zero or odd
    Nredd = _ask_until_valid(' -- Nredd (3): ',
                             int, lambda v: v == 0 or v % 2 == 1)
    # reddening range, strictly inside (0, 1)
    redAdj = _ask_until_valid(' -- Reddening range (0.0 .. 1.0): ',
                              float, lambda v: 0.0 < v < 1.0)
    # number of iterations, at least 3
    Niter = _ask_until_valid(' -- Niter (6): ', int, lambda v: v >= 3)
    return (photosystem, age_step, z_step, Nredd, Niter, redAdj)
#-------------------------------------------------------
#-------------------END INITIALISATION------------------
#-------------------------------------------------------
#-------------------------------------------------------
#-----------------------CALCULATIONS--------------------
#-------------------------------------------------------
# systematic corrections to the temperature calibration of observed data
#
# what you need is only q3 and q4 collections for all [Age,Z] in the grid,
# then you can correct for the systematics, works very well for less bright stars,
# but also slightly improves the situation for giants (especially for higher ages)
def sys_temp(sys_age,sys_z,sys_photosystem):
    """Build the systematic temperature-correction curve for one (age, Z) cell.

    Converts the cell's CMD isochrone to the logL-TN plane through the
    photometric calibrations and compares the result with the stored LTN
    isochrone: for each point the offset (tabulated TN minus recomputed TN)
    is collected, then averaged in luminosity bins of width 0.2 covering
    logL = -1.0 .. 6.0.

    Returns [bin_centres, mean_offsets]; empty bins are skipped.

    Fix vs. original: four accumulator lists (sys_xx, sys_yy, sys_aa,
    sys_bb) were filled but never read -- removed.
    """
    global isochronesLTN
    global isochronesCMD
    global age_values
    global z_values
    sys_q1=[]   # luminosities (logL) of accepted points
    sys_q2=[]   # TN offsets: tabulated minus recomputed
    sys_i=age_values.index(round(sys_age,1))
    sys_j=z_values.index(round(sys_z,3))
    # LTN cell is [logL array, TN array]; CMD cell is [magnitude, colour]
    sys_b,sys_a=isochronesLTN[sys_i][sys_j]
    sys_y,sys_x=isochronesCMD[sys_i][sys_j]
    for sys_k in range(len(sys_x)):
        sys_y0=logL( sys_y[sys_k] , BolCorBV(sys_x[sys_k],sys_y[sys_k],z_values[sys_j],sys_photosystem) )
        sys_x0=logTN( Teff(sys_x[sys_k],sys_y[sys_k],z_values[sys_j],sys_photosystem) , sys_y0 )
        # keep only physically plausible TN / logL values
        if (sys_x0>-3 and sys_x0<3 and sys_y0>-5 and sys_y0<5):
            sys_q2.append(sys_a[sys_k]-sys_x0)
            sys_q1.append(sys_y0)
    # pair offsets with luminosities sorted in increasing logL
    # NOTE(review): list.index picks the first occurrence, so two identical
    # luminosities would reuse the same offset -- unchanged from the original.
    sys_qx=sorted(sys_q1)
    sys_qy=[]
    for sys_i in range(len(sys_qx)):
        sys_qy.append(sys_q2[sys_q1.index(sys_qx[sys_i])])
    # average the offsets in 35 bins of width 0.2 starting at logL = -1.0
    sys_q3=[]
    sys_q4=[]
    for sys_j in range(35):
        sys_qq=[]
        for sys_i in range(len(sys_qx)):
            if (sys_qx[sys_i]>(-1.0+sys_j*0.2) and sys_qx[sys_i]<=(-1.0+(sys_j+1)*0.2)):
                sys_qq.append(sys_qy[sys_i])
        if (len(sys_qq)>0):
            sys_q3.append((-1.0+(sys_j+0.5)*0.2))
            sys_q4.append(mean(sys_qq))
    return [sys_q3,sys_q4]
def funfind(funv,funq3,funq4):
    """Evaluate the piecewise-linear curve (funq3, funq4) at abscissa funv.

    funq3 is assumed sorted in increasing order.  Returns the linear
    interpolation inside the first bracketing segment, or 0.0 when funv
    lies outside the tabulated range.

    Fix vs. original: the original indexed funq3[funi+1] unconditionally and
    raised IndexError for tables with fewer than two knots; those now fall
    through to the out-of-range result 0.0.
    """
    # range(len-1) visits exactly the segments the original while-loop checked
    for funi in range(len(funq3)-1):
        if (funq3[funi]<=funv and funq3[funi+1]>=funv):
            # same expressions as the original, so results are bit-identical
            funqa=(funq4[funi]-funq4[funi+1])/(funq3[funi]-funq3[funi+1])
            funqb=funq4[funi]-funqa*funq3[funi]
            return funqa*funv+funqb
    return 0.0
def logL(logL_V,logL_BC):
    """Log bolometric luminosity from an absolute magnitude plus its
    bolometric correction (log L = 1.896 - 0.4 * M_bol)."""
    m_bol=logL_V+logL_BC
    return 1.896-0.4*m_bol
def logTN(TN_Teff,TN_logL):
    """Normalised temperature: log10(TN_Teff) minus the ZAMS log-temperature
    interpolated at luminosity TN_logL.

    TN_Teff must be the absolute effective temperature, not its log.

    Fix vs. original: the ZAMS interpolation here duplicated logTZ line for
    line (only the final return differed); it is now delegated to logTZ, so
    the two routines cannot drift apart.
    """
    return log10(TN_Teff)-logTZ(TN_Teff,TN_logL)
def logTZ(TN_Teff,TN_logL):
    """Interpolate the ZAMS log-temperature at luminosity TN_logL.

    Uses the module-level ZAMS table (column 0: log Teff, column 1: logL,
    sorted by luminosity).  Inside the tabulated range the bracketing
    segment is interpolated linearly; below/above it the first/last segment
    is extended.  Returns -9999.9 when no segment brackets TN_logL.
    TN_Teff is accepted for signature parity with logTN but is not used.
    """
    global ZAMS
    ZAMS_T=list(ZAMS[:,0])
    ZAMS_L=list(ZAMS[:,1])
    logTZAMS=-9999.9
    if (TN_logL<ZAMS_L[0]):
        # below the table: extend the first segment
        logTZAMS=TN_logL*(ZAMS_T[1]-ZAMS_T[0])/(ZAMS_L[1]-ZAMS_L[0])+ZAMS_T[0]-(ZAMS_T[1]-ZAMS_T[0])/(ZAMS_L[1]-ZAMS_L[0])*ZAMS_L[0]
    elif (TN_logL>ZAMS_L[-1]):
        # above the table: extend the last segment
        logTZAMS=TN_logL*(ZAMS_T[-1]-ZAMS_T[-2])/(ZAMS_L[-1]-ZAMS_L[-2])+ZAMS_T[-2]-(ZAMS_T[-1]-ZAMS_T[-2])/(ZAMS_L[-1]-ZAMS_L[-2])*ZAMS_L[-2]
    else:
        # in range: locate the bracketing segment and interpolate linearly
        # (the expressions are kept verbatim so results are bit-identical)
        for seg in range(len(ZAMS_L)-1):
            if (ZAMS_L[seg]<=TN_logL and ZAMS_L[seg+1]>=TN_logL):
                logTZAMS=TN_logL*(ZAMS_T[seg+1]-ZAMS_T[seg])/(ZAMS_L[seg+1]-ZAMS_L[seg])+ZAMS_T[seg]-(ZAMS_T[seg+1]-ZAMS_T[seg])/(ZAMS_L[seg+1]-ZAMS_L[seg])*ZAMS_L[seg]
                break
    return logTZAMS
def isochrone_grid(grid_age,grid_z,grid_v):
    """Return one isochrone of the requested kind as [column0, column1].

    grid_v selects the source grid: 'LTN' (logL vs normalised temperature
    TN), 'CMD' (magnitude vs colour), 'ZMS' (logL vs ZAMS log-temperature)
    or 'TEF' (raw stored columns); any other value returns None, as before.
    For 'LTN'/'ZMS' the stored [logL, logTeff] pairs are converted through
    logTN/logTZ (10**logTeff recovers the absolute temperature).

    Fix vs. original: the raw grid cell was re-converted with array() on
    every loop iteration (an accidental O(n^2) conversion); the array is
    now built once per call.
    """
    global isochronesLTN
    global isochronesCMD
    global isochronesZMS
    global isochronesTEF
    global age_values
    global z_values
    row=age_values.index(grid_age)
    col=z_values.index(grid_z)
    if (grid_v=='LTN'):
        iso=array(isochronesLTN[row][col])
        grid_TN=[logTN(10.0**iso[k,1] , iso[k,0]) for k in range(len(iso))]
        return [iso[:,0] , array(grid_TN)]
    elif (grid_v=='CMD'):
        iso=array(isochronesCMD[row][col])
        return [iso[:,0] , iso[:,1]]
    elif (grid_v=='ZMS'):
        iso=array(isochronesZMS[row][col])
        grid_TZ=[logTZ(10.0**iso[k,1] , iso[k,0]) for k in range(len(iso))]
        return [iso[:,0] , array(grid_TZ)]
    elif (grid_v=='TEF'):
        iso=array(isochronesTEF[row][col])
        return [iso[:,0] , iso[:,1]]
#-------------------------------------------------------
#-------------------END CALCULATIONS--------------------
#-------------------------------------------------------
#-------------------------------------------------------
#---------------------BINARY CHECK----------------------
#-------------------------------------------------------
# DO NOT USE !
# DO NOT USE !
# DO NOT USE !
def BinaryJob(dist,clrexc,metal,doname,filt,b,expcor):
    """Select probable single (non-binary) members of a cluster.

    Loads 'clusters/<doname>.txt' (one star per row: magnitude, colour),
    de-reddens the colour, converts every star to the logL-TN plane, fits a
    cubic TN(logL) to the main-sequence band (-1.5 < logL < 1.0), and keeps
    the stars whose TN residual lies above the histogram mode minus 0.02;
    stars outside the band are always kept.

    Returns [kept_row_indices, 0.0], or [[], 0.0] when fewer than six
    main-sequence stars are available.

    Marked "DO NOT USE" by the author (see banner above) -- kept for
    reference only.
    """
    # expcor should be either 0 or 1
    if (expcor==0):
        pass
    else:
        expcor=1
    # transformation from Z to Fe/H
    # metal=metal_transf(0,metal)
    # not necessary anymore
    # transformation factor between E(B-V) and the chosen photometric system colour
    factor_clrexc=clrexc_multiplier(dist,b,filt,expcor)
    # input stellar data
    fdata=loadtxt('clusters/'+str(doname)+'.txt', skiprows=1)
    # de-redden the colour column in place
    for i in range(len(fdata)):
        fdata[i][1]=fdata[i][1]-clrexc*factor_clrexc
    lumin0=[]
    tempe0=[]
    ident0=[]
    # transform each star to (logL, TN); keep only plausible temperatures
    for i in range(len(fdata)):
        lumintest=( logL( absmag(clrexc,fdata[i][0],dist,filt,b,expcor) , BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) ) )
        tempetest=( logTN( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ) )
        if (tempetest>-3.0 and tempetest<3.0):
            lumin0.append( lumintest )
            tempe0.append( tempetest )
            ident0.append(i)
    # restrict the fit to the main-sequence luminosity band
    tempeB=[]
    luminB=[]
    identB=[]
    for i in range(len(tempe0)):
        if (lumin0[i]<1.0 and lumin0[i]>-1.5):
            tempeB.append(tempe0[i])
            luminB.append(lumin0[i])
            identB.append(ident0[i])
    if (len(luminB)>5):
        # cubic fit TN(logL); residuals should separate the binary sequence
        binlow=polyfit(luminB,tempeB,3)
        tempeZ=[]
        for i in range(len(tempeB)):
            # evaluate the polynomial (highest power first, as polyfit returns)
            tempez=0.0
            for j in range(len(binlow)):
                tempez+=binlow[j]*luminB[i]**(len(binlow)-1-j)
            tempeZ.append(tempeB[i]-tempez)
        # skew was supposed to be a measure of the binarity, but it does not work
        #print(skew(tempeZ))
        saveh=histogram(tempeZ,bins=10)
        savea=[]   # bin centres
        saveb=[]   # bin counts
        for i in range(len(saveh[0])):
            savea.append(0.5*(saveh[1][i]+saveh[1][i+1]))
            saveb.append(saveh[0][i])
        # threshold: residual at the histogram mode minus 0.02
        # NOTE(review): 0.04*0.5 looks like half of an assumed 0.04 bin width -- confirm
        limrng=savea[saveb.index(max(saveb))]-0.04*0.5
        saveid=[]
        for i in range(len(tempeZ)):
            # if smaller than this, possibly a binary or an outlier
            if (tempeZ[i]>limrng):
                saveid.append(identB[i])
        # stars outside the main-sequence band are kept unconditionally
        for i in range(len(tempe0)):
            if (lumin0[i]>=1.0 or lumin0[i]<=-1.5):
                saveid.append(ident0[i])
        return [saveid,0.0]
    else:
        return [[],0.0]
#-------------------------------------------------------
#-----------------END BINARY CHECK----------------------
#-------------------------------------------------------
#-------------------------------------------------------
#---------------------ISOCHR CHECK----------------------
#-------------------------------------------------------
def DoJob(dist,clrexc,metal,doname,filt,b,expcor,bincor,binlist):
    """Fit the full isochrone grid to one cluster and return the best match.

    The cluster photometry is de-reddened, converted to the logL-TN plane,
    corrected with the per-cell temperature systematics (temp_corr_grid via
    funfind) and compared against every (age, Z) isochrone using LstSqr.

    Returns [best_age, best_Z, best_fit_value, LTN_data, CMD_data], where
    LTN_data = [TN, logL, iso_TN, iso_logL, corrections, iso_TZ, iso_Teff]
    and CMD_data bundles the per-star CMD/debug quantities.

    bincor=0 reads the whole file 'clusters/<doname>.txt'; bincor=1 reads
    '<doname>.txt' and uses only the row indices in binlist.
    """
    # isochrone information from the main body
    global isochronesLTN
    global isochronesCMD
    global isochronesZMS
    global isochronesTEF
    global age_values
    global z_values
    # expcor and bincor should be either 0 or 1 (1 only when starting from ext.maps)
    if (expcor==0):
        pass
    else:
        expcor=1
    if (bincor==0):
        pass
    else:
        bincor=1
    # transformation from Z to Fe/H, based on Pöhnl & Paunzen (2010)
    # metal=metal_transf(0,metal)
    # transformation factor between E(B-V) and the chosen photometric system colour
    factor_clrexc=clrexc_multiplier(dist,b,filt,expcor)
    # input stellar data and transform from CMD space to logL-TN space
    # if bincor=1 then the list of indices will be used for fdata instead of the whole list
    # only lumin0, tempe0 and count0 are necessary for the procedure, the rest is for debugging
    if (bincor==0):
        fdata=loadtxt('clusters/'+str(doname)+'.txt', skiprows=1)
        BpRp=[]
        # keep the observed colour, then de-redden the working copy in place
        for i in range(len(fdata)):
            BpRp.append(0.0+fdata[i][1])
            fdata[i][1]=fdata[i][1]-clrexc*factor_clrexc
        lumin0=[]
        tempe0=[]
        count0=[]
        Gmag0=[]
        Gmag1=[]
        Gmag2=[]
        BpRp0=[]
        BpRp1=[]
        AuxBC=[]
        AuxTT=[]
        AuxTZ=[]
        for i in range(len(fdata)):
            lumintest=( logL( absmag(clrexc,fdata[i][0],dist,filt,b,expcor) , BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) ) )
            tempetest=( logTN( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ) )
            # accept only stars inside the plausible TN / logL window
            if (tempetest>-3.0 and tempetest<3.0 and lumintest>-5.0 and lumintest<5.0):
                lumin0.append( lumintest )
                tempe0.append( tempetest )
                count0.append( 1.0 )
                Gmag0.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor))
                Gmag1.append(fdata[i][0])
                Gmag2.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor)+5*log10(dist)-5.0)
                BpRp0.append(fdata[i][1])
                BpRp1.append(BpRp[i])
                AuxBC.append(BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))
                AuxTT.append(Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))
                AuxTZ.append(logTZ( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ))
    else:
        # NOTE(review): this branch reads '<doname>.txt' from the working
        # directory, not 'clusters/...' as above -- confirm that is intended
        fdata=loadtxt(str(doname)+'.txt', skiprows=1)
        BpRp=[]
        for i in binlist:
            BpRp.append(0.0+fdata[i][1])
            fdata[i][1]=fdata[i][1]-clrexc*factor_clrexc
        lumin0=[]
        tempe0=[]
        count0=[]
        Gmag0=[]
        Gmag1=[]
        Gmag2=[]
        BpRp0=[]
        BpRp1=[]
        AuxBC=[]
        AuxTT=[]
        AuxTZ=[]
        for i in binlist:
            lumintest=( logL( absmag(clrexc,fdata[i][0],dist,filt,b,expcor) , BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) ) )
            tempetest=( logTN( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ) )
            if (tempetest>-3.0 and tempetest<3.0 and lumintest>-5.0 and lumintest<5.0):
                lumin0.append( lumintest )
                tempe0.append( tempetest )
                count0.append( 1.0 )
                Gmag0.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor))
                Gmag1.append(fdata[i][0])
                Gmag2.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor)+5*log10(dist)-5.0)
                BpRp0.append(fdata[i][1])
                BpRp1.append(BpRp[i])
                AuxBC.append(BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))
                AuxTT.append(Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))
                AuxTZ.append(logTZ( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ))
    # finding the best fit in isochrone grid
    fitI=-1
    fitJ=-1
    fitX=1.0e16
    for i in range(len(age_values)):
        for j in range(len(z_values)):
            tempe1=[]
            # apply TN systematic corrections, fitting section
            for l in range(len(tempe0)):
                tempe1.append(tempe0[l]+funfind(lumin0[l],temp_corr_grid[i][j][0],temp_corr_grid[i][j][1]))
            fitvalues=LstSqr(lumin0,tempe1,count0,isochronesLTN[i][j][0],isochronesLTN[i][j][1])
            if (fitvalues<fitX):
                fitX=fitvalues
                fitI=0+i
                fitJ=0+j
    # apply TN systematic corrections, results section
    AuxSY=[]
    for l in range(len(tempe0)):
        AuxSY.append(funfind(lumin0[l],temp_corr_grid[fitI][fitJ][0],temp_corr_grid[fitI][fitJ][1]))
        tempe0[l]+=funfind(lumin0[l],temp_corr_grid[fitI][fitJ][0],temp_corr_grid[fitI][fitJ][1])
    return([age_values[fitI],z_values[fitJ],fitX,[tempe0,lumin0,isochronesLTN[fitI][fitJ][1],isochronesLTN[fitI][fitJ][0],AuxSY,isochronesZMS[fitI][fitJ][1],isochronesTEF[fitI][fitJ][1]],[BpRp0,Gmag0,isochronesCMD[fitI][fitJ][1],isochronesCMD[fitI][fitJ][0],BpRp1,Gmag1,Gmag2,AuxBC,AuxTT,AuxTZ]])
#-------------------------------------------------------
#-----------------END ISOCHR CHECK----------------------
#-------------------------------------------------------
#<<<<---------------------- ------------------- ---------------------->>>>
#<<<<---------------------- ------------------- ---------------------->>>>
#<<<<---------------------- PROGRAM STARTS HERE ---------------------->>>>
#<<<<---------------------- ------------------- ---------------------->>>>
#<<<<---------------------- ------------------- ---------------------->>>>
# only for testing
debugTest=False
# initialisation, loads photometric system, grid spacing and iteration numbers
photosystem,age_step,z_step,Nredd,Niter,redAdj=initialise()
print('Loading list of clusters ...')
# read the master cluster list; fall back to the script's own directory
# when the current working directory does not contain clusters/
try:
    pathfile=os.getcwd()
    os.chdir(pathfile)
    with open("clusters/_complete.txt","r") as f_data:
        all_data=[x.split() for x in f_data.readlines()]
    dataini=array([list(map(str,x)) for x in all_data[1:]])
except FileNotFoundError:
    pathfile=os.path.dirname(__file__)
    os.chdir(pathfile)
    with open("clusters/_complete.txt","r") as f_data:
        all_data=[x.split() for x in f_data.readlines()]
    dataini=array([list(map(str,x)) for x in all_data[1:]])
# per-cluster data: column 0 name, column 3 initial distance, column 4
# initial E(B-V), column 1 parameter b (presumably galactic latitude,
# used by clrexc_multiplier -- confirm against the file header)
clust_list=[]
ini_D_list=[]
ini_E_list=[]
exp_A_list=[]
exp_Z_list=[]
par_b_list=[]
for i in range(len(dataini)):
    clust_list.append(dataini[i][0])
    ini_D_list.append(float(dataini[i][3]))
    ini_E_list.append(float(dataini[i][4]))
    par_b_list.append(float(dataini[i][1]))
    # placeholders; expected age/Z are not read from the list
    exp_A_list.append(-999)
    exp_Z_list.append(-999)
print('Preparing isochrone grid ...')
# load ZAMS for the calculations
with open("ZAMS_014.txt","r") as f_data:
    all_data=[x.split() for x in f_data.readlines()]
    ZAMS=array([list(map(float,x)) for x in all_data[0:]])
# preparing isochrone grid (13 header lines skipped)
with open("isochrones"+photosystem+".txt","r") as f_data:
    all_data=[x.split() for x in f_data.readlines()]
    isochrones_complete=array([list(map(str,x)) for x in all_data[13:]])
# release the raw text buffers
f_data=[]
all_data=[]
# first, create a list of ages and metallicities available
age_values=[6.6]
# age_step=0.2
age_last=10.0
# NOTE(review): relies on round(...,1)/round(...,3) hitting the end value
# exactly; a step that does not divide the range would loop forever
while (age_values[-1]!=age_last):
    age_values.append(round(age_values[-1]+age_step , 1))
z_values=[0.005]
# z_step=0.005
z_last=0.040
while (z_values[-1]!=z_last):
    z_values.append(round(z_values[-1]+z_step , 3))
# create the grid, using age for rows and metallicity for columns
isochronesLTN=[]
isochronesCMD=[]
isochronesZMS=[]
isochronesTEF=[]
for i in range(len(age_values)):
    isohelp1=[]
    isohelp2=[]
    isohelp3=[]
    isohelp4=[]
    for j in range(len(z_values)):
        isohelp1.append([])
        isohelp2.append([])
        isohelp3.append([])
        isohelp4.append([])
    isochronesLTN.append(isohelp1)
    isochronesCMD.append(isohelp2)
    isochronesZMS.append(isohelp3)
    isochronesTEF.append(isohelp4)
# fill in the grid from the loaded data (isochrones_complete)
for i in range(len(isochrones_complete)):
    if (age_values.count(round(float(isochrones_complete[i][2]),1))==1 and z_values.count(round(float(isochrones_complete[i][0]),3))==1):
        age_idx=age_values.index(round(float(isochrones_complete[i][2]),1))
        z_idx=z_values.index(round(float(isochrones_complete[i][0]),3))
        # columns 6/7 feed the LTN/ZMS/TEF grids (logL and log Teff,
        # judging by isochrone_grid's 10** conversion -- confirm)
        isochronesLTN[age_idx][z_idx].append([float(isochrones_complete[i][6]) , float(isochrones_complete[i][7])])
        isochronesZMS[age_idx][z_idx].append([float(isochrones_complete[i][6]) , float(isochrones_complete[i][7])])
        isochronesTEF[age_idx][z_idx].append([float(isochrones_complete[i][6]) , float(isochrones_complete[i][7])])
        # CMD columns depend on the photometric system: [magnitude, colour]
        if (photosystem=='G'):
            isochronesCMD[age_idx][z_idx].append([float(isochrones_complete[i][28]) , (float(isochrones_complete[i][29])-float(isochrones_complete[i][30]))])
        elif (photosystem=='2'):
            isochronesCMD[age_idx][z_idx].append([float(isochrones_complete[i][28]) , (float(isochrones_complete[i][28])-float(isochrones_complete[i][30]))])
        elif (photosystem=='J'):
            isochronesCMD[age_idx][z_idx].append([float(isochrones_complete[i][30]) , (float(isochrones_complete[i][29])-float(isochrones_complete[i][30]))])
isochrones_complete=[]
# transform isochrones to the normalised grid
for main_i in range(len(age_values)):
    for main_j in range(len(z_values)):
        isochronesLTN[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'LTN')
        isochronesCMD[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'CMD')
        isochronesZMS[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'ZMS')
        isochronesTEF[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'TEF')
# prepare grid for corrected systematics for temperature calibration
temp_corr_grid=[]
for main_i in range(len(age_values)):
    temp_corr_help=[]
    for main_j in range(len(z_values)):
        temp_corr_help.append(sys_temp(age_values[main_i],z_values[main_j],photosystem))
    temp_corr_grid.append(temp_corr_help)
print('Starting main procedure ...\n')
start_time = time.time()
# main loop: fit every cluster in the list and log/plot the best results
for cc in range(len(clust_list)):
    try:
        start_time_1 = time.time()
        # starting values (colour excess is E(B-V), this is recalculated to whatever system
        # using the factor_clrexc)
        iniD=ini_D_list[cc]
        iniE=ini_E_list[cc]
        iniZ=0.014
        clustname=clust_list[cc]+'_'+photosystem
        # numbX = number of iterations of given quantity
        # rangX = range of itereations centred around the ini value
        # valX = current value in iteration
        numbD=1
        rangD=0.8*iniD
        if (iniE<0.0 or Nredd==0):
            # reddening unknown: scan a fixed ladder of E(B-V) values
            numbE=10
            rangE=[0.010,0.040,0.080,0.125,0.250,0.500,0.750,1.000,1.500,2.000]
        elif (Nredd==1):
            numbE=0+Nredd
            rangE=[iniE]
        else:
            # Nredd values spread evenly within +-redAdj*iniE around iniE
            numbE=0+Nredd
            rangE=[]
            for main_i in range(numbE):
                rangE.append(iniE-redAdj*iniE+2*redAdj*iniE*main_i/float(numbE-1))
        # start calculating fits for the grid of parameters, assume no binaries
        valD=iniD-0.0*rangD
        result_grid=[]
        for main_i in range(numbD):
            for main_j in range(numbE):
                valE=rangE[main_j]
                final_z=0.000+iniZ
                res_ini=0.000
                check_limit=0
                # iterate DoJob until the metallicity estimate converges
                # (to 3 decimals) or Niter rounds are used up
                while ((round(res_ini,3)!=round(final_z,3)) and check_limit<Niter):
                    res_ini=0.000+final_z
                    final_age,final_z,final_fit,final_data,final_data2 = DoJob(valD,valE,res_ini,clustname,photosystem,par_b_list[cc],0,0,[])
                    check_limit+=1
                result_grid.append([valD,valE,final_age,final_z,final_fit,final_data,final_data2,check_limit])
                print(check_limit)
            valD+=rangD/float(numbD-1+1)
        # results for all reddening values are sorted are written in a logfile
        print(clust_list[cc])
        result_grid=array(result_grid)
        # sort ascending by the fit value (column 4); best fit first
        sorted_result_grid=result_grid[result_grid[:,4].argsort()]
        print('\n')
        fsave = open('finished/_logfile.txt', "a+")
        fsave.write('Cluster: %s \n' % (clust_list[cc]))
        fsave.write('Inputs: %s ; %f ; %f ; %d ; %f ; %d \n' % (photosystem,age_step,z_step,Nredd,redAdj,Niter))
        fsave.write('------------------------------------------------------------------------ \n')
        for main_i in range(len(sorted_result_grid)):
            fsave.write('Parameters: %d %.3f %.1f %.3f ..... fit:%.7f iter:%d \n' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4],sorted_result_grid[main_i][7]))
        fsave.write('# FINISHED (%.3f min) \n\n' % ((time.time() - start_time_1)/60.0))
        fsave.close()
        # the three best results are plotted in CMD and LTN diagrams
        # if debug mode is on, additional data for individual points are returned for the three fits
        fitcurves=['r-','g--','b:']
        fitpoints=['ko','ko','ko']
        plt.figure(figsize=(12,6))
        for main_i in range(len(fitcurves)):
            # top row of subplots: logL-TN plane with the fitted isochrone
            try:
                plt.subplot(2,3,main_i+1)
                plt.plot(sorted_result_grid[main_i][5][0],sorted_result_grid[main_i][5][1],fitpoints[main_i],ms=4.0,alpha=0.2)
                plt.plot(sorted_result_grid[main_i][5][2],sorted_result_grid[main_i][5][3],fitcurves[main_i],alpha=0.6)
                if (main_i==0):
                    plt.xlabel('TN')
                    plt.ylabel('log L')
                else:
                    plt.xlabel('TN')
                #plt.xlim(-0.8,0.2)
                #plt.ylim(-2.5,4.0)
                plt.xlim( min(sorted_result_grid[main_i][5][0]) - 0.25*(max(sorted_result_grid[main_i][5][0])-min(sorted_result_grid[main_i][5][0])) , max(sorted_result_grid[main_i][5][0]) + 0.25*(max(sorted_result_grid[main_i][5][0])-min(sorted_result_grid[main_i][5][0])) )
                plt.ylim( min(sorted_result_grid[main_i][5][1]) - 0.05*(max(sorted_result_grid[main_i][5][1])-min(sorted_result_grid[main_i][5][1])) , max(sorted_result_grid[main_i][5][1]) + 0.05*(max(sorted_result_grid[main_i][5][1])-min(sorted_result_grid[main_i][5][1])) )
                plt.title('%d ; %.3f ; %.1f ; %.3f ... %.6f' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))
                plt.locator_params(axis='x',nbins=7)
                if (debugTest):
                    # dump the fitted isochrone points for offline inspection
                    fsave = open('finished/'+str(clustname)+'_aux_R'+str(main_i+1)+'_isoch.txt', "w+")
                    fsave.write('Cluster: %s \n' % (clustname))
                    fsave.write('Parameters: %d ; %.3f ; %.1f ; %.3f ... %.6f \n' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))
                    fsave.write('Inputs: %s ; %f ; %f ; %d ; %f ; %d \n' % (photosystem,age_step,z_step,Nredd,redAdj,Niter))
                    fsave.write('---------------------------------------------------------------- \n')
                    fsave.write('%7s %7s %7s %7s \n' % ('T_eff','T_ZAMS','T_N','logL'))
                    for main_j in range(len(sorted_result_grid[main_i][5][2])):
                        fsave.write('%7.4f %7.4f %7.4f %7.3f \n' % (sorted_result_grid[main_i][5][6][main_j],sorted_result_grid[main_i][5][5][main_j],sorted_result_grid[main_i][5][2][main_j],sorted_result_grid[main_i][5][3][main_j]))
                    fsave.close()
            except IndexError:
                # fewer than three results is expected for Nredd==1
                if ((Nredd==1 and main_i>0)==False):
                    print('err')
            # bottom row of subplots: colour-magnitude diagram
            try:
                plt.subplot(2,3,main_i+4)
                plt.plot(sorted_result_grid[main_i][6][0],sorted_result_grid[main_i][6][1],fitpoints[main_i],ms=4.0,alpha=0.2)
                plt.plot(sorted_result_grid[main_i][6][2],sorted_result_grid[main_i][6][3],fitcurves[main_i],alpha=0.6)
                if (main_i==0):
                    if (photosystem=='G'):
                        plt.xlabel('(BP-RP)_0 [mag]')
                        plt.ylabel('M_G [mag]')
                    elif (photosystem=='2'):
                        plt.xlabel('(J-Ks)_0 [mag]')
                        plt.ylabel('M_J [mag]')
                    elif (photosystem=='J'):
                        plt.xlabel('(B-V)_0 [mag]')
                        plt.ylabel('M_V [mag]')
                else:
                    if (photosystem=='G'):
                        plt.xlabel('(BP-RP)_0 [mag]')
                    elif (photosystem=='2'):
                        plt.xlabel('(J-Ks)_0 [mag]')
                    elif (photosystem=='J'):
                        plt.xlabel('(B-V)_0 [mag]')
                # plt.xlim(-0.8,4.0)
                # plt.ylim(15,-5)
                # y-limits reversed on purpose: brighter magnitudes plot upward
                plt.xlim( min(sorted_result_grid[main_i][6][0]) - 0.10*(max(sorted_result_grid[main_i][6][0])-min(sorted_result_grid[main_i][6][0])) , max(sorted_result_grid[main_i][6][0]) + 0.10*(max(sorted_result_grid[main_i][6][0])-min(sorted_result_grid[main_i][6][0])) )
                plt.ylim( max(sorted_result_grid[main_i][6][1]) + 0.10*(max(sorted_result_grid[main_i][6][1])-min(sorted_result_grid[main_i][6][1])) , min(sorted_result_grid[main_i][6][1]) - 0.10*(max(sorted_result_grid[main_i][6][1])-min(sorted_result_grid[main_i][6][1])) )
                #plt.gca().invert_yaxis()
                #plt.title('%d ; %.3f ; %.1f ; %.3f ... %.6f' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))
                if (debugTest):
                    # dump the per-star debug quantities for this fit
                    fsave = open('finished/'+str(clustname)+'_aux_R'+str(main_i+1)+'_clust.txt', "w+")
                    fsave.write('Cluster: %s \n' % (clustname))
                    fsave.write('Parameters: %d ; %.3f ; %.1f ; %.3f ... %.6f \n' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))
                    fsave.write('Inputs: %s ; %f ; %f ; %d ; %f ; %d \n' % (photosystem,age_step,z_step,Nredd,redAdj,Niter))
                    fsave.write('---------------------------------------------------------------- \n')
                    fsave.write('%6s %6s %6s %6s %6s %6s %7s %7s %7s %7s %7s \n' % ('Color','Color0','Mag','Mag0','AbsMag','BC_Mag','T_eff','T_ZAMS','T_corr','T_N','logL'))
                    for main_j in range(len(sorted_result_grid[main_i][5][0])):
                        fsave.write('%6.3f %6.3f %6.3f %6.3f %6.3f %6.3f %7.4f %7.4f %7.4f %7.4f %7.3f \n' % (sorted_result_grid[main_i][6][4][main_j],sorted_result_grid[main_i][6][0][main_j],sorted_result_grid[main_i][6][5][main_j],sorted_result_grid[main_i][6][6][main_j],sorted_result_grid[main_i][6][1][main_j],sorted_result_grid[main_i][6][7][main_j],log10(sorted_result_grid[main_i][6][8][main_j]),sorted_result_grid[main_i][6][9][main_j],sorted_result_grid[main_i][5][4][main_j],sorted_result_grid[main_i][5][0][main_j],sorted_result_grid[main_i][5][1][main_j]))
                    fsave.close()
            except IndexError:
                if ((Nredd==1 and main_i>0)==False):
                    print('err')
        plt.tight_layout()
        plt.savefig('finished/'+str(clustname)+'.png',dpi=300,bbox_inches="tight")
        plt.close()
    except OSError:
        # exception can be encountered if the list of clusters does not match the provided data files
        # names of the clusters should coincide with the names of the data files (up to the photo.system designation)
        print('no file: %s' % (clustname))
isochrones=[]
print('\n')
print('Finished!')
print("Runtime: %s min" % ((time.time() - start_time)/60.0))
import matplotlib.pyplot as plt
import time
import os
import warnings
warnings.filterwarnings("ignore", category=VisibleDeprecationWarning)
from metalcode_calib_tempe import Teff
from metalcode_calib_tempe import BolCorBV
from metalcode_calib_absmg import absmag
from metalcode_calib_clrex import clrexc_multiplier
from metalcode_calc_lstsqr import LstSqr
def initialise():
print('Initialisation ...')
inputing=True
list_vals=['G','J','2']
while inputing:
photosystem=input(' -- Pick photometric system (G,2,J): ')
if (photosystem in list_vals):
inputing=False
inputing=True
list_vals=[0.1,0.2]
while inputing:
try:
age_step=float(input(' -- Pick grid spacing, age (0.1,0.2): '))
if (age_step in list_vals):
inputing=False
except ValueError:
pass
inputing=True
list_vals=[0.005]
while inputing:
try:
z_step=float(input(' -- Pick grid spacing, Z (0.005): '))
if (z_step in list_vals):
inputing=False
except ValueError:
pass
inputing=True
while inputing:
try:
Nredd=int(input(' -- Nredd (3): '))
if ((Nredd == 0) or (Nredd % 2 == 1)):
inputing=False
except ValueError:
pass
inputing=True
list_vals=[0.005]
while inputing:
try:
redAdj=float(input(' -- Reddening range (0.0 .. 1.0): '))
if (redAdj>0.0 and redAdj<1.0):
inputing=False
except ValueError:
pass
inputing=True
while inputing:
try:
Niter=int(input(' -- Niter (6): '))
if (Niter >= 3):
inputing=False
except ValueError:
pass
return (photosystem,age_step,z_step,Nredd,Niter,redAdj)
def sys_temp(sys_age,sys_z,sys_photosystem):
global isochronesLTN
global isochronesCMD
global age_values
global z_values
sys_q1=[]
sys_q2=[]
sys_i=age_values.index(round(sys_age,1))
sys_j=z_values.index(round(sys_z,3))
sys_b,sys_a=isochronesLTN[sys_i][sys_j]
sys_y,sys_x=isochronesCMD[sys_i][sys_j]
sys_xx=[]
sys_yy=[]
sys_aa=[]
sys_bb=[]
for sys_k in range(len(sys_x)):
sys_y0=logL( sys_y[sys_k] , BolCorBV(sys_x[sys_k],sys_y[sys_k],z_values[sys_j],sys_photosystem) )
sys_x0=logTN( Teff(sys_x[sys_k],sys_y[sys_k],z_values[sys_j],sys_photosystem) , sys_y0 )
if (sys_x0>-3 and sys_x0<3 and sys_y0>-5 and sys_y0<5):
sys_yy.append(sys_y0)
sys_xx.append(sys_x0)
sys_aa.append(sys_a[sys_k])
sys_bb.append(sys_b[sys_k])
sys_q2.append(sys_a[sys_k]-sys_x0)
sys_q1.append(sys_y0)
sys_qx=sorted(sys_q1)
sys_qy=[]
for sys_i in range(len(sys_qx)):
sys_qy.append(sys_q2[sys_q1.index(sys_qx[sys_i])])
sys_q3=[]
sys_q4=[]
for sys_j in range(35):
sys_qq=[]
for sys_i in range(len(sys_qx)):
if (sys_qx[sys_i]>(-1.0+sys_j*0.2) and sys_qx[sys_i]<=(-1.0+(sys_j+1)*0.2)):
sys_qq.append(sys_qy[sys_i])
if (len(sys_qq)>0):
sys_q3.append((-1.0+(sys_j+0.5)*0.2))
sys_q4.append(mean(sys_qq))
return [sys_q3,sys_q4]
def funfind(funv,funq3,funq4):
funw=0.0
funt=True
funi=0
while funt:
if (funq3[funi]<=funv and funq3[funi+1]>=funv):
funt=False
funqa=(funq4[funi]-funq4[funi+1])/(funq3[funi]-funq3[funi+1])
funqb=funq4[funi]-funqa*funq3[funi]
funw=funqa*funv+funqb
else:
funi+=1
if (funi==len(funq3)-1):
funt=False
return funw
def logL(logL_V,logL_BC):
return (1.896-0.4*(logL_V+logL_BC))
def logTN(TN_Teff,TN_logL):
global ZAMS
logTZAMS=-9999.9
ZAMS_T=list(ZAMS[:,0])
ZAMS_L=list(ZAMS[:,1])
if (TN_logL>=ZAMS_L[0] and TN_logL<=ZAMS_L[-1]):
TN_i=0
TN_found=0
while (TN_found==0):
if ((TN_logL>=ZAMS_L[TN_i]) and (TN_logL<=ZAMS_L[TN_i+1])):
logTZAMS=TN_logL*(ZAMS_T[TN_i+1]-ZAMS_T[TN_i])/(ZAMS_L[TN_i+1]-ZAMS_L[TN_i])+ZAMS_T[TN_i]-(ZAMS_T[TN_i+1]-ZAMS_T[TN_i])/(ZAMS_L[TN_i+1]-ZAMS_L[TN_i])*ZAMS_L[TN_i]
TN_found=1
elif (TN_i<len(ZAMS_T)-1):
TN_i+=1
else:
TN_found=1
elif (TN_logL<ZAMS_L[0]):
logTZAMS=TN_logL*(ZAMS_T[1]-ZAMS_T[0])/(ZAMS_L[1]-ZAMS_L[0])+ZAMS_T[0]-(ZAMS_T[1]-ZAMS_T[0])/(ZAMS_L[1]-ZAMS_L[0])*ZAMS_L[0]
else:
logTZAMS=TN_logL*(ZAMS_T[-1]-ZAMS_T[-2])/(ZAMS_L[-1]-ZAMS_L[-2])+ZAMS_T[-2]-(ZAMS_T[-1]-ZAMS_T[-2])/(ZAMS_L[-1]-ZAMS_L[-2])*ZAMS_L[-2]
return log10(TN_Teff)-logTZAMS
def logTZ(TN_Teff,TN_logL):
global ZAMS
logTZAMS=-9999.9
ZAMS_T=list(ZAMS[:,0])
ZAMS_L=list(ZAMS[:,1])
if (TN_logL>=ZAMS_L[0] and TN_logL<=ZAMS_L[-1]):
TN_i=0
TN_found=0
while (TN_found==0):
if ((TN_logL>=ZAMS_L[TN_i]) and (TN_logL<=ZAMS_L[TN_i+1])):
logTZAMS=TN_logL*(ZAMS_T[TN_i+1]-ZAMS_T[TN_i])/(ZAMS_L[TN_i+1]-ZAMS_L[TN_i])+ZAMS_T[TN_i]-(ZAMS_T[TN_i+1]-ZAMS_T[TN_i])/(ZAMS_L[TN_i+1]-ZAMS_L[TN_i])*ZAMS_L[TN_i]
TN_found=1
elif (TN_i<len(ZAMS_T)-1):
TN_i+=1
else:
TN_found=1
elif (TN_logL<ZAMS_L[0]):
logTZAMS=TN_logL*(ZAMS_T[1]-ZAMS_T[0])/(ZAMS_L[1]-ZAMS_L[0])+ZAMS_T[0]-(ZAMS_T[1]-ZAMS_T[0])/(ZAMS_L[1]-ZAMS_L[0])*ZAMS_L[0]
else:
logTZAMS=TN_logL*(ZAMS_T[-1]-ZAMS_T[-2])/(ZAMS_L[-1]-ZAMS_L[-2])+ZAMS_T[-2]-(ZAMS_T[-1]-ZAMS_T[-2])/(ZAMS_L[-1]-ZAMS_L[-2])*ZAMS_L[-2]
return logTZAMS
def isochrone_grid(grid_age,grid_z,grid_v):
global isochronesLTN
global isochronesCMD
global isochronesZMS
global isochronesTEF
global age_values
global z_values
if (grid_v=='LTN'):
grid_iso=isochronesLTN[age_values.index(grid_age)][z_values.index(grid_z)]
grid_TN=[]
for grid_i in range(len(array(grid_iso)[:,1])):
grid_TN.append(logTN(10.0**array(grid_iso)[grid_i,1] , array(grid_iso)[grid_i,0]))
return [array(grid_iso)[:,0] , array(grid_TN)]
elif (grid_v=='CMD'):
grid_iso=isochronesCMD[age_values.index(grid_age)][z_values.index(grid_z)]
return [array(grid_iso)[:,0] , array(grid_iso)[:,1]]
elif (grid_v=='ZMS'):
grid_iso=isochronesZMS[age_values.index(grid_age)][z_values.index(grid_z)]
grid_TZ=[]
for grid_i in range(len(array(grid_iso)[:,1])):
grid_TZ.append(logTZ(10.0**array(grid_iso)[grid_i,1] , array(grid_iso)[grid_i,0]))
return [array(grid_iso)[:,0] , array(grid_TZ)]
elif (grid_v=='TEF'):
grid_iso=isochronesTEF[age_values.index(grid_age)][z_values.index(grid_z)]
return [array(grid_iso)[:,0] , array(grid_iso)[:,1]]
def BinaryJob(dist,clrexc,metal,doname,filt,b,expcor):
if (expcor==0):
pass
else:
expcor=1
factor_clrexc=clrexc_multiplier(dist,b,filt,expcor)
fdata=loadtxt('clusters/'+str(doname)+'.txt', skiprows=1)
for i in range(len(fdata)):
fdata[i][1]=fdata[i][1]-clrexc*factor_clrexc
lumin0=[]
tempe0=[]
ident0=[]
for i in range(len(fdata)):
lumintest=( logL( absmag(clrexc,fdata[i][0],dist,filt,b,expcor) , BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) ) )
tempetest=( logTN( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ) )
if (tempetest>-3.0 and tempetest<3.0):
lumin0.append( lumintest )
tempe0.append( tempetest )
ident0.append(i)
tempeB=[]
luminB=[]
identB=[]
for i in range(len(tempe0)):
if (lumin0[i]<1.0 and lumin0[i]>-1.5):
tempeB.append(tempe0[i])
luminB.append(lumin0[i])
identB.append(ident0[i])
if (len(luminB)>5):
binlow=polyfit(luminB,tempeB,3)
tempeZ=[]
for i in range(len(tempeB)):
tempez=0.0
for j in range(len(binlow)):
tempez+=binlow[j]*luminB[i]**(len(binlow)-1-j)
tempeZ.append(tempeB[i]-tempez)
saveh=histogram(tempeZ,bins=10)
savea=[]
saveb=[]
for i in range(len(saveh[0])):
savea.append(0.5*(saveh[1][i]+saveh[1][i+1]))
saveb.append(saveh[0][i])
limrng=savea[saveb.index(max(saveb))]-0.04*0.5
saveid=[]
for i in range(len(tempeZ)):
if (tempeZ[i]>limrng):
saveid.append(identB[i])
for i in range(len(tempe0)):
if (lumin0[i]>=1.0 or lumin0[i]<=-1.5):
saveid.append(ident0[i])
return [saveid,0.0]
else:
return [[],0.0]
def DoJob(dist,clrexc,metal,doname,filt,b,expcor,bincor,binlist):
    """Fit one cluster against the whole (age, Z) isochrone grid.

    Converts every cluster star to the (T_N, logL) plane, applies the
    photometric-system temperature correction of each grid point, evaluates
    the least-squares distance to each isochrone and returns the best match.

    Parameters (inferred from usage -- TODO confirm):
      dist/clrexc/metal/filt/b/expcor -- as in BinaryJob().
      doname  -- cluster name; with bincor==0 read from 'clusters/<doname>.txt',
                 otherwise from '<doname>.txt'.
      bincor  -- binary-correction switch (coerced to 0/1); selects which file
                 to read and whether *binlist* restricts the rows used.
      binlist -- row indices of probable single stars (only used when bincor!=0).

    Returns [best_age, best_Z, best_fit, LTN-plane plot data, CMD plot data].
    """
    global isochronesLTN
    global isochronesCMD
    global isochronesZMS
    global isochronesTEF
    global age_values
    global z_values
    # Coerce both switches to strict 0/1 flags.
    if (expcor==0):
        pass
    else:
        expcor=1
    if (bincor==0):
        pass
    else:
        bincor=1
    factor_clrexc=clrexc_multiplier(dist,b,filt,expcor)
    if (bincor==0):
        # Use every star in the cluster file.
        fdata=loadtxt('clusters/'+str(doname)+'.txt', skiprows=1)
        # Keep the observed (reddened) colour before de-reddening in place.
        BpRp=[]
        for i in range(len(fdata)):
            BpRp.append(0.0+fdata[i][1])
            fdata[i][1]=fdata[i][1]-clrexc*factor_clrexc
        lumin0=[]
        tempe0=[]
        count0=[]
        Gmag0=[]
        Gmag1=[]
        Gmag2=[]
        BpRp0=[]
        BpRp1=[]
        AuxBC=[]
        AuxTT=[]
        AuxTZ=[]
        # Per-star physical quantities; only plausible values are kept.
        for i in range(len(fdata)):
            lumintest=( logL( absmag(clrexc,fdata[i][0],dist,filt,b,expcor) , BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) ) )
            tempetest=( logTN( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ) )
            if (tempetest>-3.0 and tempetest<3.0 and lumintest>-5.0 and lumintest<5.0):
                lumin0.append( lumintest )
                tempe0.append( tempetest )
                count0.append( 1.0 )
                Gmag0.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor))
                Gmag1.append(fdata[i][0])
                Gmag2.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor)+5*log10(dist)-5.0)
                BpRp0.append(fdata[i][1])
                BpRp1.append(BpRp[i])
                AuxBC.append(BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))
                AuxTT.append(Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))
                AuxTZ.append(logTZ( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ))
    else:
        # Binary-corrected run: same pipeline, but only the rows in *binlist*
        # and the file is read from the working directory (no 'clusters/').
        fdata=loadtxt(str(doname)+'.txt', skiprows=1)
        BpRp=[]
        for i in binlist:
            BpRp.append(0.0+fdata[i][1])
            fdata[i][1]=fdata[i][1]-clrexc*factor_clrexc
        lumin0=[]
        tempe0=[]
        count0=[]
        Gmag0=[]
        Gmag1=[]
        Gmag2=[]
        BpRp0=[]
        BpRp1=[]
        AuxBC=[]
        AuxTT=[]
        AuxTZ=[]
        for i in binlist:
            lumintest=( logL( absmag(clrexc,fdata[i][0],dist,filt,b,expcor) , BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) ) )
            tempetest=( logTN( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ) )
            if (tempetest>-3.0 and tempetest<3.0 and lumintest>-5.0 and lumintest<5.0):
                lumin0.append( lumintest )
                tempe0.append( tempetest )
                count0.append( 1.0 )
                Gmag0.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor))
                Gmag1.append(fdata[i][0])
                Gmag2.append(absmag(clrexc,fdata[i][0],dist,filt,b,expcor)+5*log10(dist)-5.0)
                BpRp0.append(fdata[i][1])
                BpRp1.append(BpRp[i])
                AuxBC.append(BolCorBV(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))
                AuxTT.append(Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt))
                AuxTZ.append(logTZ( Teff(fdata[i][1],absmag(clrexc,fdata[i][0],dist,filt,b,expcor),metal,filt) , lumintest ))
    # Grid search: best (age index, Z index) by least-squares fit quality.
    fitI=-1
    fitJ=-1
    fitX=1.0e16
    for i in range(len(age_values)):
        for j in range(len(z_values)):
            # Apply the grid point's systematic temperature correction.
            tempe1=[]
            for l in range(len(tempe0)):
                tempe1.append(tempe0[l]+funfind(lumin0[l],temp_corr_grid[i][j][0],temp_corr_grid[i][j][1]))
            fitvalues=LstSqr(lumin0,tempe1,count0,isochronesLTN[i][j][0],isochronesLTN[i][j][1])
            if (fitvalues<fitX):
                fitX=fitvalues
                fitI=0+i
                fitJ=0+j
    # Bake the winning correction into tempe0 and record it for the output.
    AuxSY=[]
    for l in range(len(tempe0)):
        AuxSY.append(funfind(lumin0[l],temp_corr_grid[fitI][fitJ][0],temp_corr_grid[fitI][fitJ][1]))
        tempe0[l]+=funfind(lumin0[l],temp_corr_grid[fitI][fitJ][0],temp_corr_grid[fitI][fitJ][1])
    return([age_values[fitI],z_values[fitJ],fitX,[tempe0,lumin0,isochronesLTN[fitI][fitJ][1],isochronesLTN[fitI][fitJ][0],AuxSY,isochronesZMS[fitI][fitJ][1],isochronesTEF[fitI][fitJ][1]],[BpRp0,Gmag0,isochronesCMD[fitI][fitJ][1],isochronesCMD[fitI][fitJ][0],BpRp1,Gmag1,Gmag2,AuxBC,AuxTT,AuxTZ]])
# --- configuration and cluster list -------------------------------------
debugTest=False
# User-selected photometric system, grid step sizes and iteration limits.
photosystem,age_step,z_step,Nredd,Niter,redAdj=initialise()
print('Loading list of clusters ...')
# Read the master cluster table; if the current working directory does not
# contain 'clusters/', retry relative to the script's own directory.
try:
    pathfile=os.getcwd()
    os.chdir(pathfile)
    with open("clusters/_complete.txt","r") as f_data:
        all_data=[x.split() for x in f_data.readlines()]
    dataini=array([list(map(str,x)) for x in all_data[1:]])
except FileNotFoundError:
    pathfile=os.path.dirname(__file__)
    os.chdir(pathfile)
    with open("clusters/_complete.txt","r") as f_data:
        all_data=[x.split() for x in f_data.readlines()]
    dataini=array([list(map(str,x)) for x in all_data[1:]])
# Per-cluster inputs pulled from the table columns (by position; presumably
# 0=name, 1=latitude b, 3=initial distance, 4=initial reddening -- TODO
# confirm against clusters/_complete.txt header). exp_* are placeholders.
clust_list=[]
ini_D_list=[]
ini_E_list=[]
exp_A_list=[]
exp_Z_list=[]
par_b_list=[]
for i in range(len(dataini)):
    clust_list.append(dataini[i][0])
    ini_D_list.append(float(dataini[i][3]))
    ini_E_list.append(float(dataini[i][4]))
    par_b_list.append(float(dataini[i][1]))
    exp_A_list.append(-999)
    exp_Z_list.append(-999)
# --- isochrone grid preparation -----------------------------------------
print('Preparing isochrone grid ...')
with open("ZAMS_014.txt","r") as f_data:
    all_data=[x.split() for x in f_data.readlines()]
ZAMS=array([list(map(float,x)) for x in all_data[0:]])
# Raw isochrone table for the chosen photometric system (13 header lines).
with open("isochrones"+photosystem+".txt","r") as f_data:
    all_data=[x.split() for x in f_data.readlines()]
isochrones_complete=array([list(map(str,x)) for x in all_data[13:]])
# Free the raw text buffers.
f_data=[]
all_data=[]
# Build the log(age) and Z axes. NOTE(review): the loops stop only on exact
# equality with the rounded end value -- assumes age_step / z_step divide the
# range evenly, otherwise they never terminate; confirm with initialise().
age_values=[6.6]
age_last=10.0
while (age_values[-1]!=age_last):
    age_values.append(round(age_values[-1]+age_step , 1))
z_values=[0.005]
z_last=0.040
while (z_values[-1]!=z_last):
    z_values.append(round(z_values[-1]+z_step , 3))
# Empty [age][Z] grids for the four isochrone representations.
isochronesLTN=[]
isochronesCMD=[]
isochronesZMS=[]
isochronesTEF=[]
for i in range(len(age_values)):
    isohelp1=[]
    isohelp2=[]
    isohelp3=[]
    isohelp4=[]
    for j in range(len(z_values)):
        isohelp1.append([])
        isohelp2.append([])
        isohelp3.append([])
        isohelp4.append([])
    isochronesLTN.append(isohelp1)
    isochronesCMD.append(isohelp2)
    isochronesZMS.append(isohelp3)
    isochronesTEF.append(isohelp4)
# Distribute each table row into its (age, Z) cell. Columns 6/7 feed the
# theoretical planes; 28-30 are the photometric magnitudes (combination
# depends on the photometric system).
for i in range(len(isochrones_complete)):
    if (age_values.count(round(float(isochrones_complete[i][2]),1))==1 and z_values.count(round(float(isochrones_complete[i][0]),3))==1):
        age_idx=age_values.index(round(float(isochrones_complete[i][2]),1))
        z_idx=z_values.index(round(float(isochrones_complete[i][0]),3))
        isochronesLTN[age_idx][z_idx].append([float(isochrones_complete[i][6]) , float(isochrones_complete[i][7])])
        isochronesZMS[age_idx][z_idx].append([float(isochrones_complete[i][6]) , float(isochrones_complete[i][7])])
        isochronesTEF[age_idx][z_idx].append([float(isochrones_complete[i][6]) , float(isochrones_complete[i][7])])
        if (photosystem=='G'):
            isochronesCMD[age_idx][z_idx].append([float(isochrones_complete[i][28]) , (float(isochrones_complete[i][29])-float(isochrones_complete[i][30]))])
        elif (photosystem=='2'):
            isochronesCMD[age_idx][z_idx].append([float(isochrones_complete[i][28]) , (float(isochrones_complete[i][28])-float(isochrones_complete[i][30]))])
        elif (photosystem=='J'):
            isochronesCMD[age_idx][z_idx].append([float(isochrones_complete[i][30]) , (float(isochrones_complete[i][29])-float(isochrones_complete[i][30]))])
isochrones_complete=[]
# Replace each raw cell with the processed 2-column arrays from isochrone_grid().
for main_i in range(len(age_values)):
    for main_j in range(len(z_values)):
        isochronesLTN[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'LTN')
        isochronesCMD[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'CMD')
        isochronesZMS[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'ZMS')
        isochronesTEF[main_i][main_j]=isochrone_grid(age_values[main_i],z_values[main_j],'TEF')
# Systematic temperature-correction curves per (age, Z) grid point.
temp_corr_grid=[]
for main_i in range(len(age_values)):
    temp_corr_help=[]
    for main_j in range(len(z_values)):
        temp_corr_help.append(sys_temp(age_values[main_i],z_values[main_j],photosystem))
    temp_corr_grid.append(temp_corr_help)
print('Starting main procedure ...\n')
start_time = time.time()
for cc in range(len(clust_list)):
try:
start_time_1 = time.time()
iniD=ini_D_list[cc]
iniE=ini_E_list[cc]
iniZ=0.014
clustname=clust_list[cc]+'_'+photosystem
numbD=1
rangD=0.8*iniD
if (iniE<0.0 or Nredd==0):
numbE=10
rangE=[0.010,0.040,0.080,0.125,0.250,0.500,0.750,1.000,1.500,2.000]
elif (Nredd==1):
numbE=0+Nredd
rangE=[iniE]
else:
numbE=0+Nredd
rangE=[]
for main_i in range(numbE):
rangE.append(iniE-redAdj*iniE+2*redAdj*iniE*main_i/float(numbE-1))
valD=iniD-0.0*rangD
result_grid=[]
for main_i in range(numbD):
for main_j in range(numbE):
valE=rangE[main_j]
final_z=0.000+iniZ
res_ini=0.000
check_limit=0
while ((round(res_ini,3)!=round(final_z,3)) and check_limit<Niter):
res_ini=0.000+final_z
final_age,final_z,final_fit,final_data,final_data2 = DoJob(valD,valE,res_ini,clustname,photosystem,par_b_list[cc],0,0,[])
check_limit+=1
result_grid.append([valD,valE,final_age,final_z,final_fit,final_data,final_data2,check_limit])
print(check_limit)
valD+=rangD/float(numbD-1+1)
print(clust_list[cc])
result_grid=array(result_grid)
sorted_result_grid=result_grid[result_grid[:,4].argsort()]
print('\n')
fsave = open('finished/_logfile.txt', "a+")
fsave.write('Cluster: %s \n' % (clust_list[cc]))
fsave.write('Inputs: %s ; %f ; %f ; %d ; %f ; %d \n' % (photosystem,age_step,z_step,Nredd,redAdj,Niter))
fsave.write('------------------------------------------------------------------------ \n')
for main_i in range(len(sorted_result_grid)):
fsave.write('Parameters: %d %.3f %.1f %.3f ..... fit:%.7f iter:%d \n' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4],sorted_result_grid[main_i][7]))
fsave.write('# FINISHED (%.3f min) \n\n' % ((time.time() - start_time_1)/60.0))
fsave.close()
fitcurves=['r-','g--','b:']
fitpoints=['ko','ko','ko']
plt.figure(figsize=(12,6))
for main_i in range(len(fitcurves)):
try:
plt.subplot(2,3,main_i+1)
plt.plot(sorted_result_grid[main_i][5][0],sorted_result_grid[main_i][5][1],fitpoints[main_i],ms=4.0,alpha=0.2)
plt.plot(sorted_result_grid[main_i][5][2],sorted_result_grid[main_i][5][3],fitcurves[main_i],alpha=0.6)
if (main_i==0):
plt.xlabel('TN')
plt.ylabel('log L')
else:
plt.xlabel('TN')
plt.xlim( min(sorted_result_grid[main_i][5][0]) - 0.25*(max(sorted_result_grid[main_i][5][0])-min(sorted_result_grid[main_i][5][0])) , max(sorted_result_grid[main_i][5][0]) + 0.25*(max(sorted_result_grid[main_i][5][0])-min(sorted_result_grid[main_i][5][0])) )
plt.ylim( min(sorted_result_grid[main_i][5][1]) - 0.05*(max(sorted_result_grid[main_i][5][1])-min(sorted_result_grid[main_i][5][1])) , max(sorted_result_grid[main_i][5][1]) + 0.05*(max(sorted_result_grid[main_i][5][1])-min(sorted_result_grid[main_i][5][1])) )
plt.title('%d ; %.3f ; %.1f ; %.3f ... %.6f' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))
plt.locator_params(axis='x',nbins=7)
if (debugTest):
fsave = open('finished/'+str(clustname)+'_aux_R'+str(main_i+1)+'_isoch.txt', "w+")
fsave.write('Cluster: %s \n' % (clustname))
fsave.write('Parameters: %d ; %.3f ; %.1f ; %.3f ... %.6f \n' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))
fsave.write('Inputs: %s ; %f ; %f ; %d ; %f ; %d \n' % (photosystem,age_step,z_step,Nredd,redAdj,Niter))
fsave.write('---------------------------------------------------------------- \n')
fsave.write('%7s %7s %7s %7s \n' % ('T_eff','T_ZAMS','T_N','logL'))
for main_j in range(len(sorted_result_grid[main_i][5][2])):
fsave.write('%7.4f %7.4f %7.4f %7.3f \n' % (sorted_result_grid[main_i][5][6][main_j],sorted_result_grid[main_i][5][5][main_j],sorted_result_grid[main_i][5][2][main_j],sorted_result_grid[main_i][5][3][main_j]))
fsave.close()
except IndexError:
if ((Nredd==1 and main_i>0)==False):
print('err')
try:
plt.subplot(2,3,main_i+4)
plt.plot(sorted_result_grid[main_i][6][0],sorted_result_grid[main_i][6][1],fitpoints[main_i],ms=4.0,alpha=0.2)
plt.plot(sorted_result_grid[main_i][6][2],sorted_result_grid[main_i][6][3],fitcurves[main_i],alpha=0.6)
if (main_i==0):
if (photosystem=='G'):
plt.xlabel('(BP-RP)_0 [mag]')
plt.ylabel('M_G [mag]')
elif (photosystem=='2'):
plt.xlabel('(J-Ks)_0 [mag]')
plt.ylabel('M_J [mag]')
elif (photosystem=='J'):
plt.xlabel('(B-V)_0 [mag]')
plt.ylabel('M_V [mag]')
else:
if (photosystem=='G'):
plt.xlabel('(BP-RP)_0 [mag]')
elif (photosystem=='2'):
plt.xlabel('(J-Ks)_0 [mag]')
elif (photosystem=='J'):
plt.xlabel('(B-V)_0 [mag]')
plt.xlim( min(sorted_result_grid[main_i][6][0]) - 0.10*(max(sorted_result_grid[main_i][6][0])-min(sorted_result_grid[main_i][6][0])) , max(sorted_result_grid[main_i][6][0]) + 0.10*(max(sorted_result_grid[main_i][6][0])-min(sorted_result_grid[main_i][6][0])) )
plt.ylim( max(sorted_result_grid[main_i][6][1]) + 0.10*(max(sorted_result_grid[main_i][6][1])-min(sorted_result_grid[main_i][6][1])) , min(sorted_result_grid[main_i][6][1]) - 0.10*(max(sorted_result_grid[main_i][6][1])-min(sorted_result_grid[main_i][6][1])) )
if (debugTest):
fsave = open('finished/'+str(clustname)+'_aux_R'+str(main_i+1)+'_clust.txt', "w+")
fsave.write('Cluster: %s \n' % (clustname))
fsave.write('Parameters: %d ; %.3f ; %.1f ; %.3f ... %.6f \n' % (int(sorted_result_grid[main_i][0]),sorted_result_grid[main_i][1],sorted_result_grid[main_i][2],sorted_result_grid[main_i][3],sorted_result_grid[main_i][4]))
fsave.write('Inputs: %s ; %f ; %f ; %d ; %f ; %d \n' % (photosystem,age_step,z_step,Nredd,redAdj,Niter))
fsave.write('---------------------------------------------------------------- \n')
fsave.write('%6s %6s %6s %6s %6s %6s %7s %7s %7s %7s %7s \n' % ('Color','Color0','Mag','Mag0','AbsMag','BC_Mag','T_eff','T_ZAMS','T_corr','T_N','logL'))
for main_j in range(len(sorted_result_grid[main_i][5][0])):
fsave.write('%6.3f %6.3f %6.3f %6.3f %6.3f %6.3f %7.4f %7.4f %7.4f %7.4f %7.3f \n' % (sorted_result_grid[main_i][6][4][main_j],sorted_result_grid[main_i][6][0][main_j],sorted_result_grid[main_i][6][5][main_j],sorted_result_grid[main_i][6][6][main_j],sorted_result_grid[main_i][6][1][main_j],sorted_result_grid[main_i][6][7][main_j],log10(sorted_result_grid[main_i][6][8][main_j]),sorted_result_grid[main_i][6][9][main_j],sorted_result_grid[main_i][5][4][main_j],sorted_result_grid[main_i][5][0][main_j],sorted_result_grid[main_i][5][1][main_j]))
fsave.close()
except IndexError:
if ((Nredd==1 and main_i>0)==False):
print('err')
plt.tight_layout()
plt.savefig('finished/'+str(clustname)+'.png',dpi=300,bbox_inches="tight")
plt.close()
except OSError:
print('no file: %s' % (clustname))
isochrones=[]
print('\n')
print('Finished!')
print("Runtime: %s min" % ((time.time() - start_time)/60.0)) | true | true |
1c326c1be1fa65ae94e336626f666e99c0ffbc37 | 2,281 | py | Python | dialogue-engine/test/programytest/parser/template/graph_tests/test_program.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 104 | 2020-03-30T09:40:00.000Z | 2022-03-06T22:34:25.000Z | dialogue-engine/test/programytest/parser/template/graph_tests/test_program.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 25 | 2020-06-12T01:36:35.000Z | 2022-02-19T07:30:44.000Z | dialogue-engine/test/programytest/parser/template/graph_tests/test_program.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 10 | 2020-04-02T23:43:56.000Z | 2021-05-14T13:47:01.000Z | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.program import TemplateProgramNode
from programy.parser.exceptions import ParserException
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphProgramTests(TemplateGraphTestClient):
    """Graph-parser tests for the AIML ``<program />`` template node."""

    def test_program_node_from_xml(self):
        # A bare <program /> must parse into a TemplateProgramNode attached
        # as the single child of the template root node.
        xml = ET.fromstring("""
			<template>
				<program />
			</template>
			""")
        parsed_root = self._graph.parse_template_expression(xml)
        self.assertIsNotNone(parsed_root)
        self.assertIsInstance(parsed_root, TemplateNode)
        self.assertIsNotNone(parsed_root.children)
        self.assertEqual(1, len(parsed_root.children))
        child = parsed_root.children[0]
        self.assertIsNotNone(child)
        self.assertIsInstance(child, TemplateProgramNode)

    def test_program_with_childrenl(self):
        # <program> takes no text content; the parser must reject it.
        xml = ET.fromstring("""
			<template>
				<program>Text</program>
			</template>
			""")
        with self.assertRaises(ParserException):
            self._graph.parse_template_expression(xml)
| 44.72549 | 126 | 0.737396 | import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.program import TemplateProgramNode
from programy.parser.exceptions import ParserException
from programytest.parser.template.graph_tests.graph_test_client import TemplateGraphTestClient
class TemplateGraphProgramTests(TemplateGraphTestClient):
def test_program_node_from_xml(self):
template = ET.fromstring("""
<template>
<program />
</template>
""")
root = self._graph.parse_template_expression(template)
self.assertIsNotNone(root)
self.assertIsInstance(root, TemplateNode)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 1)
node = root.children[0]
self.assertIsNotNone(node)
self.assertIsInstance(node, TemplateProgramNode)
def test_program_with_childrenl(self):
template = ET.fromstring("""
<template>
<program>Text</program>
</template>
""")
with self.assertRaises(ParserException):
self._graph.parse_template_expression(template)
| true | true |
1c326c78d52342aca0f9514217bc8d7c8987413f | 685 | py | Python | ynab_sdk/api/accounts.py | csadorf/ynab-sdk-python | 845d3798e44bbc27261f73975089d8e19366e8e8 | [
"Apache-2.0"
] | 19 | 2019-05-04T00:26:31.000Z | 2021-09-01T07:31:20.000Z | ynab_sdk/api/accounts.py | csadorf/ynab-sdk-python | 845d3798e44bbc27261f73975089d8e19366e8e8 | [
"Apache-2.0"
] | 100 | 2019-04-25T00:27:57.000Z | 2022-03-31T15:24:45.000Z | ynab_sdk/api/accounts.py | csadorf/ynab-sdk-python | 845d3798e44bbc27261f73975089d8e19366e8e8 | [
"Apache-2.0"
] | 7 | 2019-11-13T06:30:06.000Z | 2022-03-25T15:37:53.000Z | from ynab_sdk.api.models.responses.account import AccountResponse
from ynab_sdk.api.models.responses.accounts import AccountsResponse
from ynab_sdk.utils.clients.base_client import BaseClient
class AccountsApi:
    """Thin wrapper around the YNAB ``/budgets/{id}/accounts`` endpoints."""

    def __init__(self, client: BaseClient):
        # HTTP client used for all requests against the YNAB REST API.
        self.client = client

    def get_accounts(self, budget_id: str) -> AccountsResponse:
        """Fetch every account belonging to the given budget."""
        endpoint = f"/budgets/{budget_id}/accounts"
        payload = self.client.get(endpoint)
        return AccountsResponse.from_dict(payload)

    def get_account(self, budget_id: str, account_id: str) -> AccountResponse:
        """Fetch a single account of the given budget by its id."""
        endpoint = f"/budgets/{budget_id}/accounts/{account_id}"
        payload = self.client.get(endpoint)
        return AccountResponse.from_dict(payload)
| 40.294118 | 81 | 0.753285 | from ynab_sdk.api.models.responses.account import AccountResponse
from ynab_sdk.api.models.responses.accounts import AccountsResponse
from ynab_sdk.utils.clients.base_client import BaseClient
class AccountsApi:
def __init__(self, client: BaseClient):
self.client = client
def get_accounts(self, budget_id: str) -> AccountsResponse:
response = self.client.get(f"/budgets/{budget_id}/accounts")
return AccountsResponse.from_dict(response)
def get_account(self, budget_id: str, account_id: str) -> AccountResponse:
response = self.client.get(f"/budgets/{budget_id}/accounts/{account_id}")
return AccountResponse.from_dict(response)
| true | true |
1c326d2cf6bfa0dfe105f44ccdd7ffff3833f586 | 8,432 | py | Python | Part2/build_model.py | KernelA/ai-lab-test | 3dda3d7879ec77a37fd70be3da8d92cf0a74321c | [
"MIT"
] | null | null | null | Part2/build_model.py | KernelA/ai-lab-test | 3dda3d7879ec77a37fd70be3da8d92cf0a74321c | [
"MIT"
] | null | null | null | Part2/build_model.py | KernelA/ai-lab-test | 3dda3d7879ec77a37fd70be3da8d92cf0a74321c | [
"MIT"
] | null | null | null | import gzip
import json
import logging
import os
import pickle
import re
import string
import unicodedata
import sys
from typing import Dict, Any
from nltk.tokenize import TweetTokenizer
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
FORMAT = "%(asctime)s %(name)s [%(levelname)s] {%(funcName)s} %(message)s"
logging.basicConfig(format=FORMAT)
RUSSIAN_LETTERS = frozenset("абвгдежзийклмнопрстуфхцчшщъыьэюя")
PUNCTS = frozenset(string.punctuation)
PUNCT_WITHOUT_DASH_REGEX = "[{}]+".format("".join(PUNCTS - set("-")))
PUNCT_REGEX = f"[{string.punctuation}]+"
REPLACE_REGEX = "\[(.+?)\]"
REPLACE_TARGET = {"male": 1, "female": -1}
DUMP_DIR = "dumps"
if not os.path.exists(os.path.join(".", DUMP_DIR)):
os.mkdir(os.path.join(".", DUMP_DIR))
AUTHOR_GENDERS_DUMP = os.path.join(".", DUMP_DIR, "author-genders.pickle")
AUTHOR_TEST_DUMP = os.path.join(".", DUMP_DIR, "author-tests.pickle")
def save_pickle_dump(path_to_dump: str, obj):
    """Serialize *obj* to *path_to_dump* with pickle, logging the target path."""
    LOGGER.info("Save a dump to: {}".format(path_to_dump))
    with open(path_to_dump, "wb") as sink:
        pickle.dump(obj, sink)
def load_pickle_dump(path_to_dump: str):
    """Deserialize and return the object pickled at *path_to_dump*."""
    LOGGER.info("Load a dump from: {}".format(path_to_dump))
    with open(path_to_dump, "rb") as source:
        return pickle.load(source)
def load_genders(path_to_gzip: str) -> Dict[int, str]:
    """Return the author-id -> gender mapping, using a pickle cache if present.

    On a cache miss the gzipped jsonlines file is parsed, the mapping is
    pickled for later runs, and summary statistics are logged.
    """
    # Fast path: a previous run already cached the mapping.
    if os.path.exists(AUTHOR_GENDERS_DUMP):
        return load_pickle_dump(AUTHOR_GENDERS_DUMP)
    with gzip.open(path_to_gzip, "rt", encoding="utf-8") as gz_file:
        records = tuple(map(json.loads, gz_file.readlines()))
    genders = {record["author"]: record["gender"] for record in records}
    save_pickle_dump(AUTHOR_GENDERS_DUMP, genders)
    LOGGER.info("Total authors: {}".format(len(records)))
    LOGGER.info("Total unique authors: {}".format(len(genders)))
    LOGGER.info("Total count = unique count {}".format(len(genders) == len(records)))
    return genders
def load_test_authors(path_to_gzip: str) -> set:
    """Return the set of author ids from the private (test) jsonlines file.

    Results are cached via pickle so repeated runs skip the gzip parse.
    """
    if os.path.exists(AUTHOR_TEST_DUMP):
        return load_pickle_dump(AUTHOR_TEST_DUMP)
    with gzip.open(path_to_gzip, "rt", encoding="utf-8") as gz_file:
        authors = {json.loads(raw)["author"] for raw in gz_file.readlines()}
    save_pickle_dump(AUTHOR_TEST_DUMP, authors)
    return authors
def extract_features(author_posts: Dict[str, Any]) -> Dict[str, float]:
    """Compute stylometric features from one author's tokenised posts.

    *author_posts* must contain "author" (id, copied through) and "words"
    (iterable of tokens, each either a word or a pure punctuation run as
    produced by is_word_or_punct()). Returns counts plus ratios normalised
    by total characters / punctuation / words respectively.
    """
    words = author_posts["words"]
    # In-place division of *keys* by *norm_key*; a zero denominator yields 0.
    def normalize(features_dict: Dict[str, float], *keys, norm_key: str):
        for key in keys:
            features_dict[key] = 0 if features_dict[norm_key] == 0 else features_dict[key] / features_dict[norm_key]
    # Raw accumulators; "ratio_*" hold counts until normalize() runs below.
    features = {
        "tot_char": 0,
        "tot_punct": 0,
        "ratio_lc": 0,
        "ratio_uc": 0,
        "ratio_comma": 0,
        "ratio_colon": 0,
        "ratio_semicolon": 0,
        "ratio_question": 0,
        "ratio_exclam": 0,
        "ratio_period": 0,
        "ratio_left_brace": 0,
        "ratio_right_brace": 0,
        "tot_words": 0,
        "avg_char_per_word": 0
    }
    features["author"] = author_posts["author"]
    for word in words:
        features["tot_char"] += len(word)
        # Word tokens: count lower/upper-case letters and word length.
        if re.fullmatch(f"[a-я][a-я-]*[a-я]", word, re.IGNORECASE) is not None:
            for char in word:
                if char.isalpha():
                    if char.islower():
                        features["ratio_lc"] += 1
                    else:
                        features["ratio_uc"] += 1
            features["tot_words"] += 1
            features["avg_char_per_word"] += len(word)
        else:
            # Punctuation tokens: count each mark by type.
            for char in word:
                if char in PUNCTS:
                    features["tot_punct"] += 1
                    if char == "(":
                        features["ratio_left_brace"] += 1
                    elif char == ")":
                        features["ratio_right_brace"] += 1
                    elif char == ",":
                        features["ratio_comma"] += 1
                    elif char == ":":
                        features["ratio_colon"] += 1
                    elif char == ";":
                        features["ratio_semicolon"] += 1
                    elif char == ".":
                        features["ratio_period"] += 1
                    elif char == "?":
                        features["ratio_question"] += 1
                    elif char == "!":
                        features["ratio_exclam"] += 1
    # NOTE(review): this pair of clamped subtractions apparently keeps only
    # unpaired braces (smileys); the second line uses the already-updated
    # right-brace value, so left braces are reduced by the ')' surplus --
    # verify this matches the intended definition.
    features["ratio_right_brace"] = features["ratio_right_brace"] - features["ratio_left_brace"]
    if features["ratio_right_brace"] < 0:
        features["ratio_right_brace"] = 0
    features["ratio_left_brace"] = features["ratio_left_brace"] - features["ratio_right_brace"]
    if features["ratio_left_brace"] < 0:
        features["ratio_left_brace"] = 0
    # Turn raw counts into ratios over their respective totals.
    normalize(features, "ratio_lc", "ratio_uc", norm_key="tot_char")
    normalize(features, "ratio_comma", "ratio_colon", "ratio_semicolon", "ratio_question",
              "ratio_exclam", "ratio_period", "ratio_left_brace", "ratio_right_brace", norm_key="tot_punct")
    normalize(features, "avg_char_per_word", norm_key="tot_words")
    return features
def is_word_or_punct(word: str) -> bool:
    """Return True when *word* is a plain word or a pure punctuation run.

    A "word" is two or more letters with optional interior dashes; a
    punctuation token matches PUNCT_REGEX. Everything else (digits, mixed
    tokens, single letters) is rejected.
    """
    # Words: letter, optional letters/dashes, letter.
    if re.fullmatch(f"[a-я][a-я-]*[a-я]", word, re.IGNORECASE) is not None:
        return True
    # Pure punctuation runs.
    if re.fullmatch(PUNCT_REGEX, word, re.IGNORECASE) is not None:
        return True
    # Bug fix: the original ended with a bare ``False`` expression (no
    # ``return``), so the fall-through implicitly returned None. Return an
    # explicit bool instead; callers relying on truthiness are unaffected.
    return False
def extract_test_train_features(path_to_compressed_file: str,
                                path_to_features_train: str,
                                path_to_features_test: str,
                                authors_with_genders: Dict[int, str],
                                authors_test: set):
    """Stream posts from a gzipped jsonlines file into Vowpal Wabbit files.

    Each post by a known author is tokenised, cleaned and converted by
    extract_features() into one VW line of the form
    ``<target> |num k:v ... |add author=<id>``. Posts by labelled authors
    with at least 5 clean tokens go to the train file (target +-1 by
    gender); posts by test authors go to the test file (dummy target 1).
    """
    tokenizer = TweetTokenizer(preserve_case=True, reduce_len=True)
    with gzip.open(path_to_compressed_file, "rt", encoding="utf-8") as file_in\
    , open(path_to_features_train, "w", encoding="utf-8") as file_train\
    , open(path_to_features_test, "w", encoding="utf-8") as file_test:
        for i, line in enumerate(map(json.loads, file_in)):
            author_id = line["author"]
            if author_id in authors_with_genders or author_id in authors_test:
                # Normalise text: unify "ё", strip combining accents (NFD),
                # drop bracketed markup, then tokenise and keep only clean
                # word/punctuation tokens.
                str_line = line["text"].strip().replace("ё", "е")
                str_line = "".join(char for char in unicodedata.normalize("NFD", str_line) if unicodedata.category(char) != "Mn")
                str_line = re.sub(REPLACE_REGEX, "", str_line)
                cleaned_words = tuple(filter(is_word_or_punct, tokenizer.tokenize(str_line)))
                features = extract_features({"author": author_id, "words": cleaned_words})
                # All numeric features; "author" travels in the |add namespace.
                features_line = ("{key}:{val}".format(key=key, val=features[key]) for key in features if
                                 key != "gender" and key != "author")
                target = -1
                if author_id in authors_with_genders and 5 <= len(cleaned_words):
                    target = REPLACE_TARGET[authors_with_genders[author_id]]
                elif author_id in authors_test:
                    target = 1
                res_line = "{} |num {} |add author={}\n".format(target, " ".join(features_line), author_id)
                # Same predicates as above decide the destination file; short
                # labelled posts (<5 tokens) are dropped entirely.
                if author_id in authors_with_genders and 5 <= len(cleaned_words):
                    file_train.write(res_line)
                elif author_id in authors_test:
                    file_test.write(res_line)
if __name__ == "__main__":
    # Labelled (public) author genders are mandatory input.
    path_to_genders_raw = os.path.join(".", "public.jsonlines.gz")
    if not os.path.exists(path_to_genders_raw):
        print("Path '{}' does not exist.".format(path_to_genders_raw))
        sys.exit(1)
    author_with_genders = load_genders(path_to_genders_raw)
    # Unlabelled (private) author ids form the test split.
    path_to_authors_test = os.path.join(".", "private.jsonlines.gz")
    if not os.path.exists(path_to_authors_test):
        print("Path '{}' does not exist.".format(path_to_authors_test))
        sys.exit(1)
    test_authors = load_test_authors(path_to_authors_test)
    # Raw posts to featurise.
    path_to_post_raw = os.path.join(".", "messages.jsonlines.gz")
    if not os.path.exists(path_to_post_raw):
        print("Path '{}' does not exist.".format(path_to_post_raw))
        sys.exit(1)
    # Emit Vowpal Wabbit train/test feature files next to the inputs.
    path_to_train_features = os.path.join(".", "train.vw")
    path_to_test_features = os.path.join(".", "test.vw")
    extract_test_train_features(path_to_post_raw, path_to_train_features, path_to_test_features
                                , author_with_genders, test_authors)
| 35.280335 | 129 | 0.603534 | import gzip
import json
import logging
import os
import pickle
import re
import string
import unicodedata
import sys
from typing import Dict, Any
from nltk.tokenize import TweetTokenizer
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.DEBUG)
FORMAT = "%(asctime)s %(name)s [%(levelname)s] {%(funcName)s} %(message)s"
logging.basicConfig(format=FORMAT)
RUSSIAN_LETTERS = frozenset("абвгдежзийклмнопрстуфхцчшщъыьэюя")
PUNCTS = frozenset(string.punctuation)
PUNCT_WITHOUT_DASH_REGEX = "[{}]+".format("".join(PUNCTS - set("-")))
PUNCT_REGEX = f"[{string.punctuation}]+"
REPLACE_REGEX = "\[(.+?)\]"
REPLACE_TARGET = {"male": 1, "female": -1}
DUMP_DIR = "dumps"
if not os.path.exists(os.path.join(".", DUMP_DIR)):
os.mkdir(os.path.join(".", DUMP_DIR))
AUTHOR_GENDERS_DUMP = os.path.join(".", DUMP_DIR, "author-genders.pickle")
AUTHOR_TEST_DUMP = os.path.join(".", DUMP_DIR, "author-tests.pickle")
def save_pickle_dump(path_to_dump: str, obj):
LOGGER.info("Save a dump to: {}".format(path_to_dump))
with open(path_to_dump, "wb") as dump_file:
pickle.dump(obj, dump_file)
def load_pickle_dump(path_to_dump: str):
LOGGER.info("Load a dump from: {}".format(path_to_dump))
with open(path_to_dump, "rb") as dump_file:
obj = pickle.load(dump_file)
return obj
def load_genders(path_to_gzip: str) -> Dict[int, str]:
if os.path.exists(AUTHOR_GENDERS_DUMP):
return load_pickle_dump(AUTHOR_GENDERS_DUMP)
else:
with gzip.open(path_to_gzip, "rt", encoding="utf-8") as file:
lines = tuple(map(json.loads, file.readlines()))
res = {line["author"]: line["gender"] for line in lines}
save_pickle_dump(AUTHOR_GENDERS_DUMP, res)
LOGGER.info("Total authors: {}".format(len(lines)))
LOGGER.info("Total unique authors: {}".format(len(res)))
LOGGER.info("Total count = unique count {}".format(len(res) == len(lines)))
return res
def load_test_authors(path_to_gzip: str) -> set:
if os.path.exists(AUTHOR_TEST_DUMP):
return load_pickle_dump(AUTHOR_TEST_DUMP)
else:
with gzip.open(path_to_gzip, "rt", encoding="utf-8") as file:
lines = map(json.loads, file.readlines())
res = {line["author"] for line in lines}
save_pickle_dump(AUTHOR_TEST_DUMP, res)
return res
def extract_features(author_posts: Dict[str, Any]) -> Dict[str, float]:
words = author_posts["words"]
def normalize(features_dict: Dict[str, float], *keys, norm_key: str):
for key in keys:
features_dict[key] = 0 if features_dict[norm_key] == 0 else features_dict[key] / features_dict[norm_key]
features = {
"tot_char": 0,
"tot_punct": 0,
"ratio_lc": 0,
"ratio_uc": 0,
"ratio_comma": 0,
"ratio_colon": 0,
"ratio_semicolon": 0,
"ratio_question": 0,
"ratio_exclam": 0,
"ratio_period": 0,
"ratio_left_brace": 0,
"ratio_right_brace": 0,
"tot_words": 0,
"avg_char_per_word": 0
}
features["author"] = author_posts["author"]
for word in words:
features["tot_char"] += len(word)
if re.fullmatch(f"[a-я][a-я-]*[a-я]", word, re.IGNORECASE) is not None:
for char in word:
if char.isalpha():
if char.islower():
features["ratio_lc"] += 1
else:
features["ratio_uc"] += 1
features["tot_words"] += 1
features["avg_char_per_word"] += len(word)
else:
for char in word:
if char in PUNCTS:
features["tot_punct"] += 1
if char == "(":
features["ratio_left_brace"] += 1
elif char == ")":
features["ratio_right_brace"] += 1
elif char == ",":
features["ratio_comma"] += 1
elif char == ":":
features["ratio_colon"] += 1
elif char == ";":
features["ratio_semicolon"] += 1
elif char == ".":
features["ratio_period"] += 1
elif char == "?":
features["ratio_question"] += 1
elif char == "!":
features["ratio_exclam"] += 1
features["ratio_right_brace"] = features["ratio_right_brace"] - features["ratio_left_brace"]
if features["ratio_right_brace"] < 0:
features["ratio_right_brace"] = 0
features["ratio_left_brace"] = features["ratio_left_brace"] - features["ratio_right_brace"]
if features["ratio_left_brace"] < 0:
features["ratio_left_brace"] = 0
normalize(features, "ratio_lc", "ratio_uc", norm_key="tot_char")
normalize(features, "ratio_comma", "ratio_colon", "ratio_semicolon", "ratio_question",
"ratio_exclam", "ratio_period", "ratio_left_brace", "ratio_right_brace", norm_key="tot_punct")
normalize(features, "avg_char_per_word", norm_key="tot_words")
return features
def is_word_or_punct(word: str):
if re.fullmatch(f"[a-я][a-я-]*[a-я]", word, re.IGNORECASE) is not None:
return True
elif re.fullmatch(PUNCT_REGEX, word, re.IGNORECASE) is not None:
return True
else:
False
def extract_test_train_features(path_to_compressed_file: str,
                                path_to_features_train: str,
                                path_to_features_test: str,
                                authors_with_genders: Dict[int, str],
                                authors_test: set):
    """Stream gzipped JSON-lines posts and write Vowpal-Wabbit feature files.

    Each input line is a JSON object with at least "author" and "text".
    Posts by authors with a known gender go to the train file, posts by
    authors in the test set go to the test file, written in VW format:
    "<target> |num <feature:value ...> |add author=<id>".
    """
    # preserve_case matters: extract_features counts upper/lower-case chars.
    tokenizer = TweetTokenizer(preserve_case=True, reduce_len=True)
    with gzip.open(path_to_compressed_file, "rt", encoding="utf-8") as file_in\
            , open(path_to_features_train, "w", encoding="utf-8") as file_train\
            , open(path_to_features_test, "w", encoding="utf-8") as file_test:
        for i, line in enumerate(map(json.loads, file_in)):
            author_id = line["author"]
            if author_id in authors_with_genders or author_id in authors_test:
                # Normalize: unify "ё"->"е", strip combining marks (NFD +
                # dropping category Mn), then delete whatever the
                # module-level REPLACE_REGEX matches.
                str_line = line["text"].strip().replace("ё", "е")
                str_line = "".join(char for char in unicodedata.normalize("NFD", str_line) if unicodedata.category(char) != "Mn")
                str_line = re.sub(REPLACE_REGEX, "", str_line)
                cleaned_words = tuple(filter(is_word_or_punct, tokenizer.tokenize(str_line)))
                features = extract_features({"author": author_id, "words": cleaned_words})
                # Lazily formatted "name:value" pairs, excluding label columns.
                features_line = ("{key}:{val}".format(key=key, val=features[key]) for key in features if
                                 key != "gender" and key != "author")
                # Target: gender label for train rows, constant 1 for test
                # rows, -1 otherwise (such rows are never written below).
                target = -1
                if author_id in authors_with_genders and 5 <= len(cleaned_words):
                    # Training samples require at least 5 cleaned tokens.
                    target = REPLACE_TARGET[authors_with_genders[author_id]]
                elif author_id in authors_test:
                    target = 1
                res_line = "{} |num {} |add author={}\n".format(target, " ".join(features_line), author_id)
                if author_id in authors_with_genders and 5 <= len(cleaned_words):
                    file_train.write(res_line)
                elif author_id in authors_test:
                    file_test.write(res_line)
if __name__ == "__main__":
    # The three input archives must already sit in the working directory;
    # the path-check-or-exit pattern was repeated three times, so it is
    # factored into a small helper (behavior and messages unchanged).
    def _require_path(filename: str) -> str:
        """Return "./<filename>", exiting with status 1 if it is missing."""
        path = os.path.join(".", filename)
        if not os.path.exists(path):
            print("Path '{}' does not exist.".format(path))
            sys.exit(1)
        return path

    author_with_genders = load_genders(_require_path("public.jsonlines.gz"))
    test_authors = load_test_authors(_require_path("private.jsonlines.gz"))
    path_to_post_raw = _require_path("messages.jsonlines.gz")
    path_to_train_features = os.path.join(".", "train.vw")
    path_to_test_features = os.path.join(".", "test.vw")
    extract_test_train_features(path_to_post_raw, path_to_train_features,
                                path_to_test_features, author_with_genders,
                                test_authors)
| true | true |
1c326d45a20d612aabe7081c9482a2bf8c5d9ead | 377 | py | Python | mainApp/migrations/0008_auto_20191221_1544.py | GauravJain98/Softwars | 10625a90599f834f785a64586bd736dec84fd25b | [
"MIT"
] | null | null | null | mainApp/migrations/0008_auto_20191221_1544.py | GauravJain98/Softwars | 10625a90599f834f785a64586bd736dec84fd25b | [
"MIT"
] | 5 | 2021-03-19T12:47:20.000Z | 2021-09-22T19:42:53.000Z | mainApp/migrations/0008_auto_20191221_1544.py | GauravJain98/Softwars | 10625a90599f834f785a64586bd736dec84fd25b | [
"MIT"
] | null | null | null | # Generated by Django 3.0 on 2019-12-21 15:44
from django.db import migrations
class Migration(migrations.Migration):
    """Rename ProblemStatement.youtube_link to ProblemStatement.youtube."""

    # Must be applied after the previous mainApp migration.
    dependencies = [
        ('mainApp', '0007_auto_20191215_2009'),
    ]

    operations = [
        # Pure column rename; no data is transformed.
        migrations.RenameField(
            model_name='problemstatement',
            old_name='youtube_link',
            new_name='youtube',
        ),
    ]
| 20.944444 | 47 | 0.604775 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mainApp', '0007_auto_20191215_2009'),
]
operations = [
migrations.RenameField(
model_name='problemstatement',
old_name='youtube_link',
new_name='youtube',
),
]
| true | true |
1c326e4b8b37ac6f1f9cb4aae8e43bdf93dddc0a | 642 | py | Python | multiProcess/threads/multilock1.py | bruce1408/detectron2_modify | 815df8c3cd68b1450e039fbafc27c6ab302d620b | [
"Apache-2.0"
] | null | null | null | multiProcess/threads/multilock1.py | bruce1408/detectron2_modify | 815df8c3cd68b1450e039fbafc27c6ab302d620b | [
"Apache-2.0"
] | null | null | null | multiProcess/threads/multilock1.py | bruce1408/detectron2_modify | 815df8c3cd68b1450e039fbafc27c6ab302d620b | [
"Apache-2.0"
] | null | null | null | import threading
import time
num = 100
def fun_sub():
    """Decrement the shared counter ``num`` by one under ``lock``.

    The read-sleep-write sequence is deliberately non-atomic so that,
    without the lock, concurrent workers would lose updates; holding the
    module-level lock makes the decrement safe.

    Bug fix: the status message used the module-level loop variable ``t``,
    which the spawning loop keeps rebinding while workers run, so it
    usually printed the name of the most recently *started* thread.  It
    now reports the current thread's own name.
    """
    global num
    with lock:  # acquire/release the module-level synchronization lock
        print('----加锁----')
        print('现在操作共享资源的线程名字是:', threading.current_thread().name)
        num2 = num
        time.sleep(0.001)  # widen the race window so the demo is meaningful
        num = num2-1
    print('----释放锁----')
if __name__ == '__main__':
    # Spawn 100 workers that each decrement the shared counter (num = 100)
    # exactly once; with the lock in place the final value is exactly 0.
    print('开始测试同步锁 at %s' % time.ctime())
    lock = threading.Lock()  # create the one synchronization lock shared by all workers
    thread_list = []
    for thread in range(100):
        t = threading.Thread(target=fun_sub)
        t.start()
        thread_list.append(t)
    for t in thread_list:
        t.join()  # main thread finishes only after every worker has ended
    print('num is %d' % num)
    print('结束测试同步锁 at %s' % time.ctime())
| 18.882353 | 44 | 0.565421 | import threading
import time
num = 100
def fun_sub():
global num
lock.acquire()
print('----加锁----')
print('现在操作共享资源的线程名字是:', t.name)
num2 = num
time.sleep(0.001)
num = num2-1
lock.release()
print('----释放锁----')
if __name__ == '__main__':
print('开始测试同步锁 at %s' % time.ctime())
lock = threading.Lock()
thread_list = []
for thread in range(100):
t = threading.Thread(target=fun_sub)
t.start()
thread_list.append(t)
for t in thread_list:
t.join()
print('num is %d' % num)
print('结束测试同步锁 at %s' % time.ctime())
| true | true |
1c32706216a59ee6d52a4376463befff7c87cc1c | 1,286 | py | Python | test_app.py | MLH-Fellowship/0.5.1-howDoIDiscord | 5bb52e17635f6742dbccd8044435dddd82a3bb93 | [
"MIT"
] | 2 | 2020-06-05T17:31:17.000Z | 2020-06-05T21:32:48.000Z | test_app.py | MLH-Fellowship/0.5.1-howDoIDiscord | 5bb52e17635f6742dbccd8044435dddd82a3bb93 | [
"MIT"
] | 13 | 2020-06-02T15:34:43.000Z | 2020-06-23T23:25:25.000Z | test_app.py | MLH-Fellowship/0.5.1-howDoIDiscord | 5bb52e17635f6742dbccd8044435dddd82a3bb93 | [
"MIT"
] | null | null | null | import pytest
import requests
import json
import urllib.parse
import subprocess
def callHowDoI(testQuery):
    """POST *testQuery* to the local /test endpoint and return its JSON "status".

    Shells out to the HTTPie CLI instead of using `requests` (the
    commented-out line is the earlier requests-based attempt).
    """
    # x = requests.post("http://localhost:5000/test", params={"testquery": testQuery})
    # shell=True with interpolation is tolerable here only because the
    # query is percent-quoted first.
    cmd = "http -f POST http://localhost:5000/test?testquery={}".format(urllib.parse.quote(testQuery))
    res = subprocess.check_output(cmd, shell=True)
    # get rid of the b' on the front of the string
    res = res.decode('utf-8')
    jsonRes = json.loads(res)
    return jsonRes["status"]
def test_func():
    """End-to-end check of the /test endpoint for valid and invalid queries."""
    # (query, expected-to-fail) pairs; failures must report "error",
    # everything else "success".
    cases = [
        ("howdoi get gmail", False),
        ("howdoi learn java", False),
        ("howdoi compile c code", False),
        ("howdii run faster", True),
        ("howdoi exit vim", False),
        ("when is half life 3 coming out", True),
        ("howdoi install gentoo", False),
        ("h``owdoi love vim", True),
        ("-ho[wdoi love vim", True),
    ]
    for query, should_fail in cases:
        expected = "error" if should_fail else "success"
        assert callHowDoI(query) == expected
| 34.756757 | 102 | 0.618974 | import pytest
import requests
import json
import urllib.parse
import subprocess
def callHowDoI(testQuery):
cmd = "http -f POST http://localhost:5000/test?testquery={}".format(urllib.parse.quote(testQuery))
res = subprocess.check_output(cmd, shell=True)
res = res.decode('utf-8')
jsonRes = json.loads(res)
return jsonRes["status"]
def test_func():
testQueries = [
{"query":"howdoi get gmail","shouldFail": False},
{"query":"howdoi learn java","shouldFail": False},
{"query":"howdoi compile c code","shouldFail": False},
{"query":"howdii run faster","shouldFail": True},
{"query":"howdoi exit vim","shouldFail": False},
{"query":"when is half life 3 coming out","shouldFail": True},
{"query":"howdoi install gentoo","shouldFail": False},
{"query":"h``owdoi love vim","shouldFail": True},
{"query":"-ho[wdoi love vim","shouldFail": True}
]
for test in testQueries:
response = callHowDoI(test["query"])
if test["shouldFail"]:
assert response == "error"
else:
assert response == "success"
| true | true |
1c3270da277a227601ccd6d01a1407d95545ca58 | 1,842 | py | Python | PygameDeepRLAgent/A3CBootcampGame/ShootingGrounds/Targets.py | Muff1nz/PygameDeepRLAgent | c3e53de99ff847357e27048843ea01fe92fbfd38 | [
"MIT"
] | null | null | null | PygameDeepRLAgent/A3CBootcampGame/ShootingGrounds/Targets.py | Muff1nz/PygameDeepRLAgent | c3e53de99ff847357e27048843ea01fe92fbfd38 | [
"MIT"
] | 4 | 2017-08-25T08:54:05.000Z | 2017-10-26T09:15:18.000Z | PygameDeepRLAgent/A3CBootcampGame/ShootingGrounds/Targets.py | Muff1nz/PygameDeepRLAgent | c3e53de99ff847357e27048843ea01fe92fbfd38 | [
"MIT"
] | 1 | 2019-12-10T01:11:41.000Z | 2019-12-10T01:11:41.000Z | import random
import numpy as np
from A3CBootcampGame.Actor import Actor
class TargetHandler:
    """Owns the single shoot-able target: spawning, drawing and resets."""

    def __init__(self, settings, player):
        self.settings = settings
        self.player = player
        self.target = Target(settings, [0, 0], "./Assets/Enemy.png")
        self.foodSpawnRate = 30  # NOTE(review): unused in this class — confirm before removing
        # Targets spawn inside the middle 80% of the playing field.
        self.spawnRange = [settings.gameRes * 0.1, settings.gameRes * 0.9]
        self.rng = random.Random()
        self.spawnTarget()

    def update(self, timeStep):
        """Respawn the target once it has been deactivated (shot)."""
        if not self.target.active:
            self.spawnTarget()

    def randomPos(self):
        """Return a uniformly random spawn position inside spawnRange.

        Bug fix: gameRes * 0.1 is a float and random.randrange raises
        ValueError for non-integer arguments on Python 3, so spawning
        crashed; the bounds are truncated to ints first.
        """
        low, high = int(self.spawnRange[0]), int(self.spawnRange[1])
        return np.array([self.rng.randrange(low, high),
                         self.rng.randrange(low, high)])

    def spawnTarget(self):
        """Place the target at a random spot that does not overlap the player."""
        while True:
            pos = self.randomPos()
            self.target.spawn(pos)
            if not self.boxCollision(self.target, self.player):
                break

    def boxCollision(self, box1, box2):
        """Axis-aligned square overlap test; boxes expose .pos and .size.

        Touching edges count as no collision (strict overlap required).
        """
        if (box1.pos[1] + box1.size <= box2.pos[1] or
                box1.pos[1] >= box2.pos[1] + box2.size or
                box1.pos[0] + box1.size <= box2.pos[0] or
                box1.pos[0] >= box2.pos[0] + box2.size):
            return False
        return True

    def draw(self, screen):
        self.target.draw(screen)

    def reset(self):
        # Deactivating forces a respawn on the next update().
        self.target.active = False
class Target(Actor):
    """A stationary enemy sprite that deactivates when hit by a bullet."""

    def __init__(self, settings, pos, spritePath):
        # 0.1 is passed through to Actor — presumably the sprite size as a
        # fraction of the game resolution; confirm against Actor.__init__.
        super(Target, self).__init__(settings, spritePath, 0.1)
        self.type = "target"  # collision code dispatches on this tag
        self.active = True
        self.pos = pos

    def spawn(self, pos):
        """Reactivate the target at a new position."""
        self.pos = pos
        self.active = True

    def draw(self, screen):
        screen.blit(self.sprite, self.pos)

    def onBoxCollision(self, other):
        # A bullet hit deactivates the target; TargetHandler respawns it.
        if other.type == "bullet":
            self.active = False
| 28.338462 | 84 | 0.582519 | import random
import numpy as np
from A3CBootcampGame.Actor import Actor
class TargetHandler:
def __init__(self, settings, player):
self.settings = settings
self.player = player
self.target = Target(settings, [0,0], "./Assets/Enemy.png")
self.foodSpawnRate = 30
self.spawnRange = [settings.gameRes*0.1, settings.gameRes*0.9]
self.rng = random.Random()
self.spawnTarget()
def update(self, timeStep):
if not self.target.active:
self.spawnTarget()
def randomPos(self):
pos = np.array([self.rng.randrange(self.spawnRange[0], self.spawnRange[1]),
self.rng.randrange(self.spawnRange[0], self.spawnRange[1])])
return pos
def spawnTarget(self):
while True:
pos = self.randomPos()
self.target.spawn(pos)
if not self.boxCollision(self.target, self.player):
break
def boxCollision(self, box1, box2):
if (box1.pos[1] + box1.size <= box2.pos[1] or
box1.pos[1] >= box2.pos[1] + box2.size or
box1.pos[0] + box1.size <= box2.pos[0] or
box1.pos[0] >= box2.pos[0] + box2.size):
return False
return True
def draw(self, screen):
self.target.draw(screen)
def reset(self):
self.target.active = False
class Target(Actor):
def __init__(self, settings, pos, spritePath):
super(Target, self).__init__(settings, spritePath, 0.1)
self.type = "target"
self.active = True
self.pos = pos
def spawn(self, pos):
self.pos = pos
self.active = True
def draw(self, screen):
screen.blit(self.sprite, self.pos)
def onBoxCollision(self, other):
if other.type == "bullet":
self.active = False
| true | true |
1c3272ddf3297908c116e0893bea7b92b2baba48 | 743 | py | Python | pzdump.py | kavol/pzdump | 2e7c57a304b7ad2780aff26b26a710634622dfb8 | [
"MIT"
] | null | null | null | pzdump.py | kavol/pzdump | 2e7c57a304b7ad2780aff26b26a710634622dfb8 | [
"MIT"
] | null | null | null | pzdump.py | kavol/pzdump | 2e7c57a304b7ad2780aff26b26a710634622dfb8 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import argparse
import sys
# Command-line interface mimicking zdump-style timezone dump tools.
parser = argparse.ArgumentParser(description="A tool to dump timezone data.")
parser.add_argument("-V", "--version", help="output version information and exit",
                    action="store_true")
parser.add_argument("-v", "--verbose", help="print lowest and highest possible time and time discontinuities",
                    action="store_true")
# -c takes exactly two values: the low and high year bounding verbose output.
parser.add_argument("-c", "--cutoff", help="cut off verbose output by the given range",
                    nargs=2, metavar=("loyear", "hiyear"))
# NOTE(review): nargs="*" combined with action="append" makes args.zonename
# a nested list ([[...]]) rather than a flat one — confirm this is intended.
parser.add_argument("zonename", help="time zone(s) to dump",
                    nargs="*", action="append")
args = parser.parse_args()
if args.version:
    print("pzdump 0.00")
    sys.exit()
| 35.380952 | 110 | 0.644684 |
import argparse
import sys
parser = argparse.ArgumentParser(description="A tool to dump timezone data.")
parser.add_argument("-V", "--version", help="output version information and exit",
action="store_true")
parser.add_argument("-v", "--verbose", help="print lowest and highest possible time and time discontinuities",
action="store_true")
parser.add_argument("-c", "--cutoff", help="cut off verbose output by the given range",
nargs=2, metavar=("loyear", "hiyear"))
parser.add_argument("zonename", help="time zone(s) to dump",
nargs="*", action="append")
args = parser.parse_args()
if args.version:
print("pzdump 0.00")
sys.exit()
| true | true |
1c32733ec0f61da2f97776ffd0055187cdc56853 | 146 | py | Python | yc66/104.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | yc66/104.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | yc66/104.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | S = input()
result = 1
for s in S:
if s == 'L':
result = result * 2
elif s == 'R':
result = result * 2 + 1
print(result)
| 14.6 | 31 | 0.465753 | S = input()
result = 1
for s in S:
if s == 'L':
result = result * 2
elif s == 'R':
result = result * 2 + 1
print(result)
| true | true |
1c3273449f87ead7feafe7ed121958c401918fa7 | 7,903 | py | Python | model.py | haoyu0831/Covid-Mobility-Network-Analysis | 8464b0a25db03585219c1fc6d8e257a9ed826628 | [
"MIT"
] | 1 | 2020-09-23T06:08:20.000Z | 2020-09-23T06:08:20.000Z | model.py | hhe-bot/Covid-Mobility-Network-Analysis | 8464b0a25db03585219c1fc6d8e257a9ed826628 | [
"MIT"
] | 1 | 2020-09-23T06:10:29.000Z | 2020-09-25T15:41:33.000Z | model.py | haoyu0831/Covid-Mobility-Network-Analysis | 8464b0a25db03585219c1fc6d8e257a9ed826628 | [
"MIT"
] | null | null | null | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
'''
This function is to generate a graph with data produced by read_file.py
'''
def generate_d_network(dest_cbgs):
    """Build a directed graph from a {(origin, destination): flow} mapping.

    Each key is an (origin, destination) pair; its value becomes the
    edge's ``weight`` attribute.
    """
    graph = nx.DiGraph()
    for (origin, destination), flow in dest_cbgs.items():
        graph.add_edge(origin, destination, weight=flow)
    return graph
'''
This function has almost same function as above but generate a undirected Graph
'''
def generate_network(dest_cbgs):
    """Build an undirected graph, summing flows over both edge directions.

    When both (u, v) and (v, u) appear in dest_cbgs their weights are
    accumulated onto the single undirected edge.
    """
    graph = nx.Graph()
    for u, v in dest_cbgs.keys():
        flow = dest_cbgs[u, v]
        if graph.has_edge(u, v):
            flow += graph.edges[u, v]['weight']
        graph.add_edge(u, v, weight=flow)
    return graph
'''
this function is to generate percolation step of undirected network with threshold
'''
def generate_network_threshold(g, threshold=0):
    """Percolation step: copy g keeping only edges with weight >= threshold.

    All nodes are preserved even when they end up isolated.
    """
    pruned = nx.Graph()
    pruned.add_nodes_from(g.nodes)
    for u, v in list(g.edges):
        w = g.edges[u, v]['weight']
        if w >= threshold:
            pruned.add_edge(u, v, weight=w)
    return pruned
'''
this function is to generate percolation step of directed network with threshold
'''
def generate_d_network_threshold(g, threshold=0):
    """Percolation step for the directed network at the given threshold.

    NOTE(review): despite the name this builds an undirected nx.Graph,
    drops edge weights, and (unlike generate_network_threshold) does not
    preserve isolated nodes — confirm whether nx.DiGraph was intended.
    """
    new_g = nx.Graph()
    edge_list = list(g.edges)
    for i, j in edge_list:
        if g.edges[i, j]['weight'] >= threshold:
            new_g.add_edge(i, j)
    return new_g
'''
This function is find max weight of a graph
'''
def max_weight(g):
    """Return the largest edge weight in g, or 0 for an edgeless graph.

    Negative weights are floored at 0, matching the original running-max
    that starts from 0.
    """
    return max([0] + [g.edges[u, v]['weight'] for u, v in g.edges])
'''
This function is to return the number of elements in the largest and second largest SCC
'''
def num_g_sg(scc):
    """Return the sizes of the largest and second-largest components.

    Missing components count as size 0.
    """
    sizes = sorted(map(len, scc), reverse=True)
    if not sizes:
        return 0, 0
    if len(sizes) == 1:
        return sizes[0], 0
    return sizes[0], sizes[1]
'''
This function finds the largest and second largest before the largest value
'''
def l_sl_value(li):
    """Return (l, sl): l is the index of the first maximum of li, and sl
    is the index of the first maximum of the prefix li[:l] (0 when the
    prefix is empty or li itself is empty)."""
    if not li:
        return 0, 0
    l = li.index(max(li))
    prefix = li[:l]
    sl = prefix.index(max(prefix)) if prefix else 0
    return l, sl
'''
This function is to calculate the number of elements in largest and second largest SCC changing with thresholds
'''
def calc_g_sg(g, start, interval, d1=None, final=100):
    """Sweep percolation thresholds and track component statistics.

    Starting from `start`, the threshold grows (by 1 below 100, then by
    `interval`) until the giant component holds fewer than len(g)/final
    nodes or shrinks to a single node.  Returns numpy arrays of:
    thresholds, giant and second-giant component sizes, mean size of the
    non-giant components, device counts in the two largest components
    (only populated when d1, a {node: device_count} map, is given), and
    the number of surviving edges at each step.
    """
    node_size = len(g.nodes())
    tmp_g = node_size
    tmp_t = start
    thresholds = []
    num_g = []
    num_sg = []
    dev_g = []
    dev_sg = []
    num_rest = []
    lens=[]
    while tmp_g > node_size/final and tmp_g != 1:
        tmp_n = generate_network_threshold(g, tmp_t)
        lens.append(len(tmp_n.edges))
        scc = sorted(list(nx.connected_components(tmp_n)), key=len, reverse=True)
        tmp_g, tmp_sg = num_g_sg(scc)
        num_g.append(tmp_g)
        num_sg.append(tmp_sg)
        if len(scc) < 2:
            num_rest.append(0)
        else:
            # mean size of everything except the giant component
            num_rest.append(sum(map(len, scc[1:]))/(len(scc)-1))
        thresholds.append(tmp_t)
        if final != 100 and tmp_t > 20:
            # early stop when a non-default percolation depth is requested
            break
        if interval > 1 and tmp_t < 100:
            tmp_t += 1
        else:
            tmp_t += interval
        if d1 is None:
            continue
        if len(scc) != 0:
            dev_g.append(sum_device(scc[0], d1))
            if len(scc) == 1:
                dev_sg.append(0)
            else:
                dev_sg.append(sum_device(scc[1], d1))
        else:
            dev_sg.append(0)
            dev_g.append(0)
    return np.array(thresholds), np.array(num_g), np.array(num_sg), np.array(num_rest), np.array(dev_g), np.array(dev_sg), np.array(lens)
'''
This function calculate the sum of device in GC and SGC
'''
def sum_device(nodes, d1):
    """Sum the device counts d1[n] over the nodes present in d1."""
    return sum(d1[node] for node in nodes if node in d1)
'''
This function is to find the bottleneck by analyzing the threshold around when the second SCC is the largest
'''
def calc_bottleneck(g, thresholds, num_sg):
    """Find bottleneck edges at the threshold where the second component peaks.

    bn_weight_b is the threshold at which num_sg (size of the second
    largest component) is maximal.  Bottlenecks are edges of g whose
    weight lies in the last sweep step (bn_weight_b - interval,
    bn_weight_b] and which link the largest and second-largest components
    of the percolated graph.  Returns (bottleneck_edges, bn_weight_b).

    Robustness fix: when the percolated graph has fewer than two
    components there is no second component, so an empty list is returned
    instead of raising IndexError (mirroring calc_bottleneck_c).
    """
    max_index = [i for i, j in enumerate(num_sg) if j == max(num_sg)][0]
    bn_weight_b = thresholds[max_index]
    interval = thresholds[1] - thresholds[0]
    bn = []
    G_sg_largest = generate_network_threshold(g, bn_weight_b)
    # Directed graphs are decomposed into strongly connected components.
    if type(G_sg_largest) == nx.classes.digraph.DiGraph:
        scc = list(nx.strongly_connected_components(G_sg_largest))
    else:
        scc = list(nx.connected_components(G_sg_largest))
    scc.sort(key=len)
    if len(scc) < 2:
        # No second component -> no bottleneck candidates.
        return bn, bn_weight_b
    scc_sg_largest = scc[-1]
    scc_sg_s_largest = scc[-2]
    for i, j in g.edges():
        if bn_weight_b - interval < g.edges[(i, j)]['weight'] <= bn_weight_b:
            if (i in scc_sg_largest and j in scc_sg_s_largest) or (j in scc_sg_largest and i in scc_sg_s_largest):
                bn.append((i, j))
    return bn, bn_weight_b
'''
This function is to find the bottleneck by analyzing the threshold around when the second SCC is the largest
'''
def calc_bottleneck_c(g, thresholds, qc):
    """Bottleneck edges at an explicitly supplied critical threshold qc.

    Same idea as calc_bottleneck, but the threshold qc is given directly;
    `thresholds` is only used to recover the sweep step size.  Returns a
    set of edges (empty when only one component remains).
    """
    interval = thresholds[1] - thresholds[0]
    bn = set()
    G_sg_largest = generate_network_threshold(g, qc)
    # Directed graphs are decomposed into strongly connected components.
    if type(G_sg_largest) == nx.classes.digraph.DiGraph:
        scc = list(nx.strongly_connected_components(G_sg_largest))
    else:
        scc = list(nx.connected_components(G_sg_largest))
    scc.sort(key=len)
    scc_sg_largest = scc[-1]
    if len(scc) == 1:
        return set()
    scc_sg_s_largest = scc[-2]
    for i, j in g.edges():
        # Edge weight must fall inside the last percolation step before qc,
        # and the edge must bridge the two largest components.
        if qc - interval < g.edges[(i, j)]['weight'] <= qc:
            if (i in scc_sg_largest and j in scc_sg_s_largest) or (j in scc_sg_largest and i in scc_sg_s_largest):
                bn.add((i, j))
    return bn
'''
This function calculates the total flux of a graph
'''
def total_flux(g):
    """Return the sum of all edge weights in g."""
    return sum(g.edges[edge]['weight'] for edge in g.edges())
'''
This function returns latitude and longitude of a point
'''
def get_xy(pt):
    """Return [x, y] for a point-like object exposing .x and .y attributes."""
    coords = [pt.x, pt.y]
    return coords
# file = 'data/01/01/2020-01-01-social-distancing.csv.gz'
# G = generate_network(*read_file(file, 25), 10)
# print(num_g_sg(G))
# def bottlenecks(self):
# g_perco_b = generate_network_threshold(self.g, self.qc - .25)
# s_cc = sorted(list(nx.connected_components(self.g_perco)), key=len, reverse=True)[1]
# l_cc = sorted(list(nx.connected_components(g_perco_b)), key=len, reverse=True)[0]
# l_cc = l_cc.difference(s_cc)
#
# bc = set()
#
# for i, j in g_perco_b.edges():
# if self.qc - .25 <= g_perco_b.edges[i, j]['weight'] < self.qc:
# if (i in s_cc and j in l_cc) or (i in l_cc and j in s_cc):
# bc.add((i, j))
#
# return bc
def calc_bn_set_diff(g_b, g):
    """Bottleneck edges as the difference between two percolation snapshots.

    g_b is the graph one threshold step before the split, g the graph
    after.  Candidate bottlenecks are edges of g_b's giant component that
    vanished in g and connect g's two largest components (g_1, g_2) —
    either directly, or indirectly as a pair of vanished edges (one
    touching g_1, one touching g_2) that share an endpoint.

    Bug fix: the direct-bridge test or-ed the same clause with itself
    ("i in g_2 and j in g_1" twice); the second clause now checks the
    opposite orientation so bridges are caught regardless of endpoint
    order.
    """
    bn = set()
    # Giant component of the "before" graph and its edge set.
    g_b_1 = g_b.subgraph(sorted(list(nx.connected_components(g_b)), key=len, reverse=True)[0])
    g_b_link = set(g_b_1.edges())
    tmp = sorted(list(nx.connected_components(g)), key=len, reverse=True)
    g_1, g_2 = tmp[0], tmp[1]
    tmp_0 = set()  # vanished edges touching g_1
    tmp_1 = set()  # vanished edges touching g_2
    for i, j in g_b_link.difference(set(g.edges())):
        # Skip edges internal to either surviving component.
        if (i in g_1 and j in g_1) or (i in g_2 and j in g_2):
            continue
        # Skip dangling edges: a degree-1 endpoint cannot be a bottleneck.
        if g_b_1.degree(i) == 1 or g_b_1.degree(j) == 1:
            continue
        # Direct bridge between the two components (either orientation).
        if (i in g_2 and j in g_1) or (i in g_1 and j in g_2):
            bn.add((i, j))
            continue
        if (i in g_1) or (j in g_1):
            tmp_0.add((i, j))
        if (i in g_2) or (j in g_2):
            tmp_1.add((i, j))
    # Pair up indirect bridges that share an endpoint.
    for i, j in tmp_0:
        for k in tmp_1:
            if i in k or j in k:
                bn.add((i, j))
                bn.add(k)
    return bn
def select(dic, num):
    """Return the keys of dic, stringified, whose value strictly exceeds num."""
    return {str(key) for key, value in dic.items() if value > num}
| 23.520833 | 137 | 0.592307 | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
def generate_d_network(dest_cbgs):
G = nx.DiGraph()
for i in dest_cbgs:
G.add_edge(*i, weight=dest_cbgs[i])
return G
def generate_network(dest_cbgs):
G = nx.Graph()
for i, j in dest_cbgs.keys():
if (i, j) not in G.edges:
G.add_edge(i, j, weight=dest_cbgs[i, j])
else:
weight = dest_cbgs[i, j] + G.edges[i, j]['weight']
G.add_edge(i, j, weight=weight)
return G
def generate_network_threshold(g, threshold=0):
new_g = nx.Graph()
new_g.add_nodes_from(g.nodes)
edge_list = list(g.edges)
for i, j in edge_list:
weight = g.edges[i, j]['weight']
if weight >= threshold:
new_g.add_edge(i, j, weight=weight)
return new_g
def generate_d_network_threshold(g, threshold=0):
new_g = nx.Graph()
edge_list = list(g.edges)
for i, j in edge_list:
if g.edges[i, j]['weight'] >= threshold:
new_g.add_edge(i, j)
return new_g
def max_weight(g):
m_weight = 0
for i in g.edges:
weight = g.edges[i[0], i[1]]['weight']
if weight > m_weight:
m_weight = weight
return m_weight
def num_g_sg(scc):
len_scc = list(map(len, scc))
len_scc.sort()
len_scc.reverse()
if len(len_scc) == 0:
return 0, 0
elif len(len_scc) == 1:
return len_scc[0], 0
else:
return len_scc[0], len_scc[1]
def l_sl_value(li):
if len(li) == 0:
return 0,0
l = [i for i, j in enumerate(li) if j == max(li)][0]
sublist = li[:l]
if l == 0:
sl = 0
else:
sl = [i for i, j in enumerate(sublist) if j == max(sublist)][0]
return l, sl
def calc_g_sg(g, start, interval, d1=None, final=100):
node_size = len(g.nodes())
tmp_g = node_size
tmp_t = start
thresholds = []
num_g = []
num_sg = []
dev_g = []
dev_sg = []
num_rest = []
lens=[]
while tmp_g > node_size/final and tmp_g != 1:
tmp_n = generate_network_threshold(g, tmp_t)
lens.append(len(tmp_n.edges))
scc = sorted(list(nx.connected_components(tmp_n)), key=len, reverse=True)
tmp_g, tmp_sg = num_g_sg(scc)
num_g.append(tmp_g)
num_sg.append(tmp_sg)
if len(scc) < 2:
num_rest.append(0)
else:
num_rest.append(sum(map(len, scc[1:]))/(len(scc)-1))
thresholds.append(tmp_t)
if final != 100 and tmp_t > 20:
break
if interval > 1 and tmp_t < 100:
tmp_t += 1
else:
tmp_t += interval
if d1 is None:
continue
if len(scc) != 0:
dev_g.append(sum_device(scc[0], d1))
if len(scc) == 1:
dev_sg.append(0)
else:
dev_sg.append(sum_device(scc[1], d1))
else:
dev_sg.append(0)
dev_g.append(0)
return np.array(thresholds), np.array(num_g), np.array(num_sg), np.array(num_rest), np.array(dev_g), np.array(dev_sg), np.array(lens)
def sum_device(nodes, d1):
s = 0
for i in nodes:
if i in d1.keys():
s += d1[i]
return s
def calc_bottleneck(g, thresholds, num_sg):
max_index = [i for i, j in enumerate(num_sg) if j == max(num_sg)][0]
bn_weight_b = thresholds[max_index]
interval = thresholds[1] - thresholds[0]
bn = []
G_sg_largest = generate_network_threshold(g, bn_weight_b)
if type(G_sg_largest) == nx.classes.digraph.DiGraph:
scc = list(nx.strongly_connected_components(G_sg_largest))
else:
scc = list(nx.connected_components(G_sg_largest))
scc.sort(key=len)
scc_sg_largest = scc[-1]
scc_sg_s_largest = scc[-2]
for i, j in g.edges():
if bn_weight_b - interval < g.edges[(i, j)]['weight'] <= bn_weight_b:
if (i in scc_sg_largest and j in scc_sg_s_largest) or (j in scc_sg_largest and i in scc_sg_s_largest):
bn.append((i, j))
return bn, bn_weight_b
def calc_bottleneck_c(g, thresholds, qc):
interval = thresholds[1] - thresholds[0]
bn = set()
G_sg_largest = generate_network_threshold(g, qc)
if type(G_sg_largest) == nx.classes.digraph.DiGraph:
scc = list(nx.strongly_connected_components(G_sg_largest))
else:
scc = list(nx.connected_components(G_sg_largest))
scc.sort(key=len)
scc_sg_largest = scc[-1]
if len(scc) == 1:
return set()
scc_sg_s_largest = scc[-2]
for i, j in g.edges():
if qc - interval < g.edges[(i, j)]['weight'] <= qc:
if (i in scc_sg_largest and j in scc_sg_s_largest) or (j in scc_sg_largest and i in scc_sg_s_largest):
bn.add((i, j))
return bn
def total_flux(g):
flux = 0
for i in g.edges():
flux += g.edges[i]['weight']
return flux
def get_xy(pt):
return [pt.x, pt.y]
def calc_bn_set_diff(g_b, g):
bn = set()
g_b_1 = g_b.subgraph(sorted(list(nx.connected_components(g_b)), key=len, reverse=True)[0])
g_b_link = set(g_b_1.edges())
tmp = sorted(list(nx.connected_components(g)), key=len, reverse=True)
g_1, g_2 = tmp[0], tmp[1]
tmp_0 = set()
tmp_1 = set()
for i, j in g_b_link.difference(set(g.edges())):
if (i in g_1 and j in g_1) or (i in g_2 and j in g_2):
continue
if g_b_1.degree(i) == 1 or g_b_1.degree(j) == 1:
continue
if (i in g_2 and j in g_1) or (i in g_2 and j in g_1):
bn.add((i,j))
continue
if (i in g_1) or (j in g_1):
tmp_0.add((i,j))
if (i in g_2) or (j in g_2):
tmp_1.add((i,j))
for i,j in tmp_0:
for k in tmp_1:
if i in k or j in k:
bn.add((i,j))
bn.add(k)
return bn
def select(dic, num):
tmp = set()
for i in dic.keys():
if dic[i] > num:
tmp.add(str(i))
return tmp
| true | true |
1c32746489974f9a1ebb5aee32cae1eeb38fb63b | 2,325 | py | Python | python/expl_impl_euler_optim.py | itpplasma/SIMPLE | 6981791e0a7889647ac5c006325ac951811c2f36 | [
"MIT"
] | 1 | 2020-11-18T14:58:27.000Z | 2020-11-18T14:58:27.000Z | python/expl_impl_euler_optim.py | landreman/SIMPLE | 77722c2479b4a064b99d0e2a58ef7749ce157c07 | [
"MIT"
] | 6 | 2019-10-25T07:52:00.000Z | 2021-11-16T13:19:04.000Z | python/expl_impl_euler_optim.py | landreman/SIMPLE | 77722c2479b4a064b99d0e2a58ef7749ce157c07 | [
"MIT"
] | 2 | 2021-11-05T18:55:09.000Z | 2022-03-23T06:27:04.000Z | """
Created: 2018-08-08
Modified: 2019-03-07
Author: Christopher Albert <albert@alumni.tugraz.at>
"""
from numpy import array, zeros, arange
from scipy.interpolate import lagrange
import common
from common import f, r0, th0, ph0, pph0, timesteps, get_val, get_der2, newton1
from plotting import plot_orbit, plot_cost_function_jac
steps_per_bounce = 8
dt, nt = timesteps(steps_per_bounce, nbounce = 100)
nlag = 1 # order of Lagrange extrapolation
z = zeros([3,nt+1])
z[:,0] = [r0, th0, ph0]
Hplot = zeros(nt+1) # Hamiltonian for plotting
[Hplot[0], pth, vpar] = get_val(array([r0,th0,ph0,pph0]))
def F(r, q, pthold):
global H, dHdx, dHdpph, pth, dpthdx, vpar, dvpardx, \
d2pthdx2, d2pthdpphdz, d2Hdx2, d2Hdpphdz, \
d2vpardx2, d2vpardpphdz
[H, pth, vpar, dHdx, dHdpph, dpthdx, dpthdpph, dvpardx, dvpardpph, d2pthdx2,
d2pthdpphdz, d2Hdx2, d2Hdpphdz, d2vpardx2, d2vpardpphdz] = get_der2(
array([r[0],q[0],q[1],pph0]))
ret = dpthdx[0]*(pth-pthold) - dt*(dHdx[0]*dpthdx[1]-dHdx[1]*dpthdx[0])
jac = d2pthdx2[0]*(pth-pthold) + dpthdx[0]**2 - dt*(
d2Hdx2[0]*dpthdx[1]-d2pthdx2[0]*dHdx[1]
+dHdx[0]*d2pthdx2[3]-dpthdx[0]*d2Hdx2[3])
return ret, [jac]
#%%
common.neval = 0
from time import time
tic = time()
nbounce = 0
for kt in range(nt):
pthold = pth
# Initialize via Lagrange extrapolation
if(kt>=nlag):
extrapr = lagrange(arange(-nlag,1), z[0,kt-nlag:kt+1])
r0 = extrapr(1)
else:
r0 = z[0,kt]
sol = newton1(F, r0, rtol=1e-7, atol=1e-15, args=(z[1:,kt], pthold))
z[0,kt+1] = sol.x
dHdx[0] = dHdx[0] + (sol.x[0] - sol.xold[0])*d2Hdx2[0]
dpthdx[0] = dpthdx[0] + (sol.x[0] - sol.xold[0])*d2pthdx2[0]
vpar = vpar + (sol.x[0] - sol.xold[0])*dvpardx[0]
f.B = f.B + (sol.x[0] - sol.xold[0])*f.dB[0]
f.hph = f.hph + (sol.x[0] - sol.xold[0])*f.dhph[0]
pth = pth + (sol.x[0] - sol.xold[0])*dpthdx[0]
H = H + (sol.x[0] - sol.xold[0])*dHdx[0]
z[1,kt+1] = z[1,kt] + dt*dHdx[0]/dpthdx[0]
z[2,kt+1] = z[2,kt] + dt*vpar/f.hph
Hplot[kt+1] = H
print('Field evaluations: {}'.format(common.neval))
print('Time taken: {}'.format(time()-tic))
plot_orbit(z)
plot_cost_function_jac(F, z[:,-2], z[:,-1], pthold)
| 29.43038 | 82 | 0.594839 |
from numpy import array, zeros, arange
from scipy.interpolate import lagrange
import common
from common import f, r0, th0, ph0, pph0, timesteps, get_val, get_der2, newton1
from plotting import plot_orbit, plot_cost_function_jac
steps_per_bounce = 8
dt, nt = timesteps(steps_per_bounce, nbounce = 100)
nlag = 1
z = zeros([3,nt+1])
z[:,0] = [r0, th0, ph0]
Hplot = zeros(nt+1)
[Hplot[0], pth, vpar] = get_val(array([r0,th0,ph0,pph0]))
def F(r, q, pthold):
global H, dHdx, dHdpph, pth, dpthdx, vpar, dvpardx, \
d2pthdx2, d2pthdpphdz, d2Hdx2, d2Hdpphdz, \
d2vpardx2, d2vpardpphdz
[H, pth, vpar, dHdx, dHdpph, dpthdx, dpthdpph, dvpardx, dvpardpph, d2pthdx2,
d2pthdpphdz, d2Hdx2, d2Hdpphdz, d2vpardx2, d2vpardpphdz] = get_der2(
array([r[0],q[0],q[1],pph0]))
ret = dpthdx[0]*(pth-pthold) - dt*(dHdx[0]*dpthdx[1]-dHdx[1]*dpthdx[0])
jac = d2pthdx2[0]*(pth-pthold) + dpthdx[0]**2 - dt*(
d2Hdx2[0]*dpthdx[1]-d2pthdx2[0]*dHdx[1]
+dHdx[0]*d2pthdx2[3]-dpthdx[0]*d2Hdx2[3])
return ret, [jac]
common.neval = 0
from time import time
tic = time()
nbounce = 0
for kt in range(nt):
pthold = pth
if(kt>=nlag):
extrapr = lagrange(arange(-nlag,1), z[0,kt-nlag:kt+1])
r0 = extrapr(1)
else:
r0 = z[0,kt]
sol = newton1(F, r0, rtol=1e-7, atol=1e-15, args=(z[1:,kt], pthold))
z[0,kt+1] = sol.x
dHdx[0] = dHdx[0] + (sol.x[0] - sol.xold[0])*d2Hdx2[0]
dpthdx[0] = dpthdx[0] + (sol.x[0] - sol.xold[0])*d2pthdx2[0]
vpar = vpar + (sol.x[0] - sol.xold[0])*dvpardx[0]
f.B = f.B + (sol.x[0] - sol.xold[0])*f.dB[0]
f.hph = f.hph + (sol.x[0] - sol.xold[0])*f.dhph[0]
pth = pth + (sol.x[0] - sol.xold[0])*dpthdx[0]
H = H + (sol.x[0] - sol.xold[0])*dHdx[0]
z[1,kt+1] = z[1,kt] + dt*dHdx[0]/dpthdx[0]
z[2,kt+1] = z[2,kt] + dt*vpar/f.hph
Hplot[kt+1] = H
print('Field evaluations: {}'.format(common.neval))
print('Time taken: {}'.format(time()-tic))
plot_orbit(z)
plot_cost_function_jac(F, z[:,-2], z[:,-1], pthold)
| true | true |
1c3274ff2d683c03bfa991e87a2db167033125f3 | 24,663 | py | Python | sklearn/dummy.py | cperreault11/scikit-learn | 0b78cb00e69109f498c326ad84953954e349d11f | [
"BSD-3-Clause"
] | null | null | null | sklearn/dummy.py | cperreault11/scikit-learn | 0b78cb00e69109f498c326ad84953954e349d11f | [
"BSD-3-Clause"
] | null | null | null | sklearn/dummy.py | cperreault11/scikit-learn | 0b78cb00e69109f498c326ad84953954e349d11f | [
"BSD-3-Clause"
] | null | null | null | # Author: Mathieu Blondel <mathieu@mblondel.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .base import MultiOutputMixin
from .utils import check_random_state
from .utils import deprecated
from .utils.validation import _num_samples
from .utils.validation import check_array
from .utils.validation import check_consistent_length
from .utils.validation import check_is_fitted, _check_sample_weight
from .utils.random import _random_choice_csc
from .utils.stats import _weighted_percentile
from .utils.multiclass import class_distribution
class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator):
"""DummyClassifier makes predictions that ignore the input features.
This classifier serves as a simple baseline to compare against other more
complex classifiers.
The specific behavior of the baseline is selected with the `strategy`
parameter.
All strategies make predictions that ignore the input feature values passed
as the `X` argument to `fit` and `predict`. The predictions, however,
typically depend on values observed in the `y` parameter passed to `fit`.
Note that the "stratified" and "uniform" strategies lead to
non-deterministic predictions that can be rendered deterministic by setting
the `random_state` parameter if needed. The other strategies are naturally
deterministic and, once fit, always return a the same constant prediction
for any value of `X`.
Read more in the :ref:`User Guide <dummy_estimators>`.
.. versionadded:: 0.13
Parameters
----------
strategy : {"most_frequent", "prior", "stratified", "uniform", \
"constant"}, default="prior"
Strategy to use to generate predictions.
* "most_frequent": the `predict` method always returns the most
frequent class label in the observed `y` argument passed to `fit`.
The `predict_proba` method returns the matching one-hot encoded
vector.
* "prior": the `predict` method always returns the most frequent
class label in the observed `y` argument passed to `fit` (like
"most_frequent"). ``predict_proba`` always returns the empirical
class distribution of `y` also known as the empirical class prior
distribution.
* "stratified": the `predict_proba` method randomly samples one-hot
vectors from a multinomial distribution parametrized by the empirical
class prior probabilities.
The `predict` method returns the class label which got probability
one in the one-hot vector of `predict_proba`.
Each sampled row of both methods is therefore independent and
identically distributed.
* "uniform": generates predictions uniformly at random from the list
of unique classes observed in `y`, i.e. each class has equal
probability.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
class.
.. versionchanged:: 0.24
The default value of `strategy` has changed to "prior" in version
0.24.
random_state : int, RandomState instance or None, default=None
Controls the randomness to generate the predictions when
``strategy='stratified'`` or ``strategy='uniform'``.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
constant : int or str or array-like of shape (n_outputs,), default=None
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of such arrays
Unique class labels observed in `y`. For multi-output classification
problems, this attribute is a list of arrays as each output has an
independent set of possible classes.
n_classes_ : int or list of int
Number of label for each output.
class_prior_ : ndarray of shape (n_classes,) or list of such arrays
Frequency of each class observed in `y`. For multioutput classification
problems, this is computed independently for each output.
n_outputs_ : int
Number of outputs.
n_features_in_ : `None`
Always set to `None`.
.. versionadded:: 0.24
.. deprecated:: 1.0
Will be removed in 1.0
sparse_output_ : bool
True if the array returned from predict is to be in sparse CSC format.
Is automatically set to True if the input `y` is passed in sparse
format.
See Also
--------
DummyRegressor : Regressor that makes predictions using simple rules.
Examples
--------
>>> import numpy as np
>>> from sklearn.dummy import DummyClassifier
>>> X = np.array([-1, 1, 1, 1])
>>> y = np.array([0, 1, 1, 1])
>>> dummy_clf = DummyClassifier(strategy="most_frequent")
>>> dummy_clf.fit(X, y)
DummyClassifier(strategy='most_frequent')
>>> dummy_clf.predict(X)
array([1, 1, 1, 1])
>>> dummy_clf.score(X, y)
0.75
"""
    def __init__(self, *, strategy="prior", random_state=None, constant=None):
        # Parameters are stored unmodified; validation is deferred to fit(),
        # following scikit-learn's estimator convention.
        self.strategy = strategy
        self.random_state = random_state
        self.constant = constant
    def fit(self, X, y, sample_weight=None):
        """Fit the baseline classifier.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            Target values.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.
        Returns
        -------
        self : object
            Returns the instance itself.
        """
        # Reject unknown strategies up front, before touching the data.
        allowed_strategies = (
            "most_frequent",
            "stratified",
            "uniform",
            "constant",
            "prior",
        )
        if self.strategy not in allowed_strategies:
            raise ValueError(
                "Unknown strategy type: %s, expected one of %s."
                % (self.strategy, allowed_strategies)
            )
        # Freeze the validated strategy; predict/predict_proba read the
        # private copy so a later change to self.strategy has no effect.
        self._strategy = self.strategy
        if self._strategy == "uniform" and sp.issparse(y):
            # "uniform" needs a dense view of the targets; warn since this
            # negates any memory benefit of the sparse representation.
            y = y.toarray()
            warnings.warn(
                "A local copy of the target data has been converted "
                "to a numpy array. Predicting on sparse target data "
                "with the uniform strategy would not save memory "
                "and would be slower.",
                UserWarning,
            )
        self.sparse_output_ = sp.issparse(y)
        if not self.sparse_output_:
            y = np.asarray(y)
            y = np.atleast_1d(y)
        if y.ndim == 1:
            # Normalize to 2-D so single- and multi-output share one code path.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        check_consistent_length(X, y)
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)
        if self._strategy == "constant":
            # The "constant" strategy needs one user-supplied value per output.
            if self.constant is None:
                raise ValueError(
                    "Constant target value has to be specified "
                    "when the constant strategy is used."
                )
            else:
                constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
                if constant.shape[0] != self.n_outputs_:
                    raise ValueError(
                        "Constant target value should have shape (%d, 1)."
                        % self.n_outputs_
                    )
        # Per-output class labels, class counts and (weighted) class priors.
        (self.classes_, self.n_classes_, self.class_prior_) = class_distribution(
            y, sample_weight
        )
        if self._strategy == "constant":
            for k in range(self.n_outputs_):
                if not any(constant[k][0] == c for c in self.classes_[k]):
                    # Checking in case of constant strategy if the constant
                    # provided by the user is in y.
                    err_msg = (
                        "The constant target value must be present in "
                        "the training data. You provided constant={}. "
                        "Possible values are: {}.".format(
                            self.constant, list(self.classes_[k])
                        )
                    )
                    raise ValueError(err_msg)
        if self.n_outputs_ == 1:
            # Unwrap the single-output case so the public attributes are
            # scalars/arrays rather than one-element lists.
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
            self.class_prior_ = self.class_prior_[0]
        return self
    def predict(self, X):
        """Perform classification on test vectors X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test data.
        Returns
        -------
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            Predicted target values for X.
        """
        check_is_fitted(self)
        # numpy random_state expects Python int and not long as size argument
        # under Windows
        n_samples = _num_samples(X)
        rs = check_random_state(self.random_state)
        n_classes_ = self.n_classes_
        classes_ = self.classes_
        class_prior_ = self.class_prior_
        constant = self.constant
        if self.n_outputs_ == 1:
            # Get same type even for self.n_outputs_ == 1
            n_classes_ = [n_classes_]
            classes_ = [classes_]
            class_prior_ = [class_prior_]
            constant = [constant]
        # Compute probability only once
        if self._strategy == "stratified":
            proba = self.predict_proba(X)
            if self.n_outputs_ == 1:
                proba = [proba]
        if self.sparse_output_:
            # Sparse path: build a CSC matrix of sampled/constant labels.
            class_prob = None
            if self._strategy in ("most_frequent", "prior"):
                classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
            elif self._strategy == "stratified":
                class_prob = class_prior_
            elif self._strategy == "uniform":
                raise ValueError(
                    "Sparse target prediction is not "
                    "supported with the uniform strategy"
                )
            elif self._strategy == "constant":
                classes_ = [np.array([c]) for c in constant]
            y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state)
        else:
            # Dense path: one branch per strategy, each producing an
            # (n_samples, n_outputs) array.
            if self._strategy in ("most_frequent", "prior"):
                y = np.tile(
                    [
                        classes_[k][class_prior_[k].argmax()]
                        for k in range(self.n_outputs_)
                    ],
                    [n_samples, 1],
                )
            elif self._strategy == "stratified":
                y = np.vstack(
                    [
                        classes_[k][proba[k].argmax(axis=1)]
                        for k in range(self.n_outputs_)
                    ]
                ).T
            elif self._strategy == "uniform":
                ret = [
                    classes_[k][rs.randint(n_classes_[k], size=n_samples)]
                    for k in range(self.n_outputs_)
                ]
                y = np.vstack(ret).T
            elif self._strategy == "constant":
                y = np.tile(self.constant, (n_samples, 1))
        if self.n_outputs_ == 1:
            # Collapse the single-output column back to a 1-D array.
            y = np.ravel(y)
        return y
def predict_proba(self, X, uncertainty=[]):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test data.
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list of such arrays
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
check_is_fitted(self)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self._strategy == "most_frequent":
ind = class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self._strategy == "prior":
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self._strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
out = out.astype(np.float64)
elif self._strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self._strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1:
P = P[0]
return P
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, object with finite length or shape}
Training data.
Returns
-------
P : ndarray of shape (n_samples, n_classes) or list of such arrays
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
def _more_tags(self):
return {
"poor_score": True,
"no_validation": True,
"_xfail_checks": {
"check_methods_subset_invariance": "fails for the predict method",
"check_methods_sample_order_invariance": "fails for the predict method",
},
}
def score(self, X, y, sample_weight=None):
"""Return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Test samples. Passing None as test samples gives the same result
as passing real test samples, since DummyClassifier
operates independently of the sampled observations.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
if X is None:
X = np.zeros(shape=(len(y), 1))
return super().score(X, y, sample_weight)
    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "`n_features_in_` is deprecated in 1.0 and will be removed in 1.2."
    )
    @property
    def n_features_in_(self):
        # The dummy estimator ignores the input features, so there is no
        # meaningful feature count to report; always None once fitted.
        check_is_fitted(self)
        return None
class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
"""Regressor that makes predictions using simple rules.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
.. versionadded:: 0.13
Parameters
----------
strategy : {"mean", "median", "quantile", "constant"}, default="mean"
Strategy to use to generate predictions.
* "mean": always predicts the mean of the training set
* "median": always predicts the median of the training set
* "quantile": always predicts a specified quantile of the training set,
provided with the quantile parameter.
* "constant": always predicts a constant value that is provided by
the user.
constant : int or float or array-like of shape (n_outputs,), default=None
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
quantile : float in [0.0, 1.0], default=None
The quantile to predict using the "quantile" strategy. A quantile of
0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the
maximum.
Attributes
----------
constant_ : ndarray of shape (1, n_outputs)
Mean or median or quantile of the training targets or constant value
given by the user.
n_features_in_ : `None`
Always set to `None`.
.. versionadded:: 0.24
.. deprecated:: 1.0
Will be removed in 1.0
n_outputs_ : int
Number of outputs.
See Also
--------
DummyClassifier: Classifier that makes predictions using simple rules.
Examples
--------
>>> import numpy as np
>>> from sklearn.dummy import DummyRegressor
>>> X = np.array([1.0, 2.0, 3.0, 4.0])
>>> y = np.array([2.0, 3.0, 5.0, 10.0])
>>> dummy_regr = DummyRegressor(strategy="mean")
>>> dummy_regr.fit(X, y)
DummyRegressor()
>>> dummy_regr.predict(X)
array([5., 5., 5., 5.])
>>> dummy_regr.score(X, y)
0.0
"""
    def __init__(self, *, strategy="mean", constant=None, quantile=None):
        # Parameters are stored unmodified; validation is deferred to fit(),
        # following scikit-learn's estimator convention.
        self.strategy = strategy
        self.constant = constant
        self.quantile = quantile
    def fit(self, X, y, sample_weight=None):
        """Fit the random regressor.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training data.
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            Target values.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.
        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Reject unknown strategies up front, before touching the data.
        allowed_strategies = ("mean", "median", "quantile", "constant")
        if self.strategy not in allowed_strategies:
            raise ValueError(
                "Unknown strategy type: %s, expected one of %s."
                % (self.strategy, allowed_strategies)
            )
        y = check_array(y, ensure_2d=False, input_name="y")
        if len(y) == 0:
            raise ValueError("y must not be empty.")
        if y.ndim == 1:
            # Normalize to 2-D so single- and multi-output share one code path.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        check_consistent_length(X, y, sample_weight)
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)
        # Each strategy reduces y (per output column) to a single constant.
        if self.strategy == "mean":
            self.constant_ = np.average(y, axis=0, weights=sample_weight)
        elif self.strategy == "median":
            if sample_weight is None:
                self.constant_ = np.median(y, axis=0)
            else:
                # Weighted median = weighted 50th percentile, per output.
                self.constant_ = [
                    _weighted_percentile(y[:, k], sample_weight, percentile=50.0)
                    for k in range(self.n_outputs_)
                ]
        elif self.strategy == "quantile":
            if self.quantile is None or not np.isscalar(self.quantile):
                raise ValueError(
                    "Quantile must be a scalar in the range [0.0, 1.0], but got %s."
                    % self.quantile
                )
            # np.percentile expects a value in [0, 100].
            percentile = self.quantile * 100.0
            if sample_weight is None:
                self.constant_ = np.percentile(y, axis=0, q=percentile)
            else:
                self.constant_ = [
                    _weighted_percentile(y[:, k], sample_weight, percentile=percentile)
                    for k in range(self.n_outputs_)
                ]
        elif self.strategy == "constant":
            if self.constant is None:
                raise TypeError(
                    "Constant target value has to be specified "
                    "when the constant strategy is used."
                )
            self.constant = check_array(
                self.constant,
                accept_sparse=["csr", "csc", "coo"],
                ensure_2d=False,
                ensure_min_samples=0,
            )
            if self.n_outputs_ != 1 and self.constant.shape[0] != y.shape[1]:
                raise ValueError(
                    "Constant target value should have shape (%d, 1)." % y.shape[1]
                )
            self.constant_ = self.constant
        # Store as a (1, n_outputs) row so predict() can broadcast it.
        self.constant_ = np.reshape(self.constant_, (1, -1))
        return self
    def predict(self, X, return_std=False):
        """Perform prediction on test vectors X.
        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Test data. Only its length is used; the feature values are
            ignored.
        return_std : bool, default=False
            Whether to return the standard deviation of posterior prediction.
            All zeros in this case.
            .. versionadded:: 0.20
        Returns
        -------
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            Predicted target values for X.
        y_std : array-like of shape (n_samples,) or (n_samples, n_outputs)
            Standard deviation of predictive distribution of query points.
        """
        check_is_fitted(self)
        n_samples = _num_samples(X)
        # Broadcast the fitted constant to every sample; the dtype is taken
        # from the constant so e.g. integer constants stay integers.
        y = np.full(
            (n_samples, self.n_outputs_),
            self.constant_,
            dtype=np.array(self.constant_).dtype,
        )
        # The "posterior" is a point mass, hence zero standard deviation.
        y_std = np.zeros((n_samples, self.n_outputs_))
        if self.n_outputs_ == 1:
            y = np.ravel(y)
            y_std = np.ravel(y_std)
        return (y, y_std) if return_std else y
def _more_tags(self):
return {"poor_score": True, "no_validation": True}
def score(self, X, y, sample_weight=None):
"""Return the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as `(1 - u/v)`, where `u` is the
residual sum of squares `((y_true - y_pred) ** 2).sum()` and `v` is the
total sum of squares `((y_true - y_true.mean()) ** 2).sum()`. The best
possible score is 1.0 and it can be negative (because the model can be
arbitrarily worse). A constant model that always predicts the expected
value of y, disregarding the input features, would get a R^2 score of
0.0.
Parameters
----------
X : None or array-like of shape (n_samples, n_features)
Test samples. Passing None as test samples gives the same result
as passing real test samples, since `DummyRegressor`
operates independently of the sampled observations.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
R^2 of `self.predict(X)` wrt. y.
"""
if X is None:
X = np.zeros(shape=(len(y), 1))
return super().score(X, y, sample_weight)
    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "`n_features_in_` is deprecated in 1.0 and will be removed in 1.2."
    )
    @property
    def n_features_in_(self):
        # The dummy estimator ignores the input features, so there is no
        # meaningful feature count to report; always None once fitted.
        check_is_fitted(self)
        return None
| 34.785614 | 88 | 0.575031 |
import warnings
import numpy as np
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .base import MultiOutputMixin
from .utils import check_random_state
from .utils import deprecated
from .utils.validation import _num_samples
from .utils.validation import check_array
from .utils.validation import check_consistent_length
from .utils.validation import check_is_fitted, _check_sample_weight
from .utils.random import _random_choice_csc
from .utils.stats import _weighted_percentile
from .utils.multiclass import class_distribution
class DummyClassifier(MultiOutputMixin, ClassifierMixin, BaseEstimator):
def __init__(self, *, strategy="prior", random_state=None, constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y, sample_weight=None):
allowed_strategies = (
"most_frequent",
"stratified",
"uniform",
"constant",
"prior",
)
if self.strategy not in allowed_strategies:
raise ValueError(
"Unknown strategy type: %s, expected one of %s."
% (self.strategy, allowed_strategies)
)
self._strategy = self.strategy
if self._strategy == "uniform" and sp.issparse(y):
y = y.toarray()
warnings.warn(
"A local copy of the target data has been converted "
"to a numpy array. Predicting on sparse target data "
"with the uniform strategy would not save memory "
"and would be slower.",
UserWarning,
)
self.sparse_output_ = sp.issparse(y)
if not self.sparse_output_:
y = np.asarray(y)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self._strategy == "constant":
if self.constant is None:
raise ValueError(
"Constant target value has to be specified "
"when the constant strategy is used."
)
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError(
"Constant target value should have shape (%d, 1)."
% self.n_outputs_
)
(self.classes_, self.n_classes_, self.class_prior_) = class_distribution(
y, sample_weight
)
if self._strategy == "constant":
for k in range(self.n_outputs_):
if not any(constant[k][0] == c for c in self.classes_[k]):
err_msg = (
"The constant target value must be present in "
"the training data. You provided constant={}. "
"Possible values are: {}.".format(
self.constant, list(self.classes_[k])
)
)
raise ValueError(err_msg)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
check_is_fitted(self)
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
if self._strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self._strategy in ("most_frequent", "prior"):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self._strategy == "stratified":
class_prob = class_prior_
elif self._strategy == "uniform":
raise ValueError(
"Sparse target prediction is not "
"supported with the uniform strategy"
)
elif self._strategy == "constant":
classes_ = [np.array([c]) for c in constant]
y = _random_choice_csc(n_samples, classes_, class_prob, self.random_state)
else:
if self._strategy in ("most_frequent", "prior"):
y = np.tile(
[
classes_[k][class_prior_[k].argmax()]
for k in range(self.n_outputs_)
],
[n_samples, 1],
)
elif self._strategy == "stratified":
y = np.vstack(
[
classes_[k][proba[k].argmax(axis=1)]
for k in range(self.n_outputs_)
]
).T
elif self._strategy == "uniform":
ret = [
classes_[k][rs.randint(n_classes_[k], size=n_samples)]
for k in range(self.n_outputs_)
]
y = np.vstack(ret).T
elif self._strategy == "constant":
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1:
y = np.ravel(y)
return y
def predict_proba(self, X, uncertainty=[]):
check_is_fitted(self)
n_samples = _num_samples(X)
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self._strategy == "most_frequent":
ind = class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self._strategy == "prior":
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self._strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
out = out.astype(np.float64)
elif self._strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self._strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1:
P = P[0]
return P
def predict_log_proba(self, X):
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
def _more_tags(self):
return {
"poor_score": True,
"no_validation": True,
"_xfail_checks": {
"check_methods_subset_invariance": "fails for the predict method",
"check_methods_sample_order_invariance": "fails for the predict method",
},
}
def score(self, X, y, sample_weight=None):
if X is None:
X = np.zeros(shape=(len(y), 1))
return super().score(X, y, sample_weight)
@deprecated(
"`n_features_in_` is deprecated in 1.0 and will be removed in 1.2."
)
@property
def n_features_in_(self):
check_is_fitted(self)
return None
class DummyRegressor(MultiOutputMixin, RegressorMixin, BaseEstimator):
def __init__(self, *, strategy="mean", constant=None, quantile=None):
self.strategy = strategy
self.constant = constant
self.quantile = quantile
def fit(self, X, y, sample_weight=None):
allowed_strategies = ("mean", "median", "quantile", "constant")
if self.strategy not in allowed_strategies:
raise ValueError(
"Unknown strategy type: %s, expected one of %s."
% (self.strategy, allowed_strategies)
)
y = check_array(y, ensure_2d=False, input_name="y")
if len(y) == 0:
raise ValueError("y must not be empty.")
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y, sample_weight)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
if self.strategy == "mean":
self.constant_ = np.average(y, axis=0, weights=sample_weight)
elif self.strategy == "median":
if sample_weight is None:
self.constant_ = np.median(y, axis=0)
else:
self.constant_ = [
_weighted_percentile(y[:, k], sample_weight, percentile=50.0)
for k in range(self.n_outputs_)
]
elif self.strategy == "quantile":
if self.quantile is None or not np.isscalar(self.quantile):
raise ValueError(
"Quantile must be a scalar in the range [0.0, 1.0], but got %s."
% self.quantile
)
percentile = self.quantile * 100.0
if sample_weight is None:
self.constant_ = np.percentile(y, axis=0, q=percentile)
else:
self.constant_ = [
_weighted_percentile(y[:, k], sample_weight, percentile=percentile)
for k in range(self.n_outputs_)
]
elif self.strategy == "constant":
if self.constant is None:
raise TypeError(
"Constant target value has to be specified "
"when the constant strategy is used."
)
self.constant = check_array(
self.constant,
accept_sparse=["csr", "csc", "coo"],
ensure_2d=False,
ensure_min_samples=0,
)
if self.n_outputs_ != 1 and self.constant.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have shape (%d, 1)." % y.shape[1]
)
self.constant_ = self.constant
self.constant_ = np.reshape(self.constant_, (1, -1))
return self
def predict(self, X, return_std=False):
check_is_fitted(self)
n_samples = _num_samples(X)
y = np.full(
(n_samples, self.n_outputs_),
self.constant_,
dtype=np.array(self.constant_).dtype,
)
y_std = np.zeros((n_samples, self.n_outputs_))
if self.n_outputs_ == 1:
y = np.ravel(y)
y_std = np.ravel(y_std)
return (y, y_std) if return_std else y
def _more_tags(self):
return {"poor_score": True, "no_validation": True}
def score(self, X, y, sample_weight=None):
if X is None:
X = np.zeros(shape=(len(y), 1))
return super().score(X, y, sample_weight)
@deprecated(
"`n_features_in_` is deprecated in 1.0 and will be removed in 1.2."
)
@property
def n_features_in_(self):
check_is_fitted(self)
return None
| true | true |
1c32751133d80d21b279bcbbf1b75fcc0b098f72 | 461 | py | Python | poco/drivers/std/screen.py | HBoPRC/Poco | c8b0dc5362db45ff7a8397eebb0c52d9047f4b67 | [
"Apache-2.0"
] | null | null | null | poco/drivers/std/screen.py | HBoPRC/Poco | c8b0dc5362db45ff7a8397eebb0c52d9047f4b67 | [
"Apache-2.0"
] | null | null | null | poco/drivers/std/screen.py | HBoPRC/Poco | c8b0dc5362db45ff7a8397eebb0c52d9047f4b67 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
from poco.sdk.interfaces.screen import ScreenInterface
from poco.utils.simplerpc.utils import sync_wrapper
class StdScreen(ScreenInterface):
    """Screen interface implementation backed by a remote RPC client.

    Both queries are forwarded to the remote side via ``self.client.call``
    (presumably a simplerpc connection — confirm with the driver setup code).
    """
    def __init__(self, client):
        super(StdScreen, self).__init__()
        # RPC client used to dispatch screen-related requests.
        self.client = client
    @sync_wrapper
    def getScreen(self, width):
        # Request a screenshot; `width` is forwarded as the RPC argument
        # (assumed to be the desired image width — TODO confirm).
        return self.client.call("Screenshot", width)
    @sync_wrapper
    def getPortSize(self):
        # Query the remote screen/viewport size.
        return self.client.call("GetScreenSize")
| 24.263158 | 54 | 0.707158 |
from poco.sdk.interfaces.screen import ScreenInterface
from poco.utils.simplerpc.utils import sync_wrapper
class StdScreen(ScreenInterface):
    """Screen interface implementation backed by a remote RPC client.

    Both queries are forwarded to the remote side via ``self.client.call``
    (presumably a simplerpc connection — confirm with the driver setup code).
    """
    def __init__(self, client):
        super(StdScreen, self).__init__()
        # RPC client used to dispatch screen-related requests.
        self.client = client
    @sync_wrapper
    def getScreen(self, width):
        # Request a screenshot; `width` is forwarded as the RPC argument
        # (assumed to be the desired image width — TODO confirm).
        return self.client.call("Screenshot", width)
    @sync_wrapper
    def getPortSize(self):
        # Query the remote screen/viewport size.
        return self.client.call("GetScreenSize")
| true | true |
1c327693cb4b39dc20e869248ee2fa581293cdcd | 214 | py | Python | testing/test.py | matthew-cheney/crowd-sorting-single-threaded | f32f05641821f5770dd95787c459888101b93d05 | [
"MIT"
] | 1 | 2019-11-30T07:59:25.000Z | 2019-11-30T07:59:25.000Z | testing/test.py | mchen95/crowd-sorting | f32f05641821f5770dd95787c459888101b93d05 | [
"MIT"
] | 2 | 2019-10-14T17:16:46.000Z | 2019-10-21T23:14:32.000Z | testing/test.py | matthew-cheney/crowd-sorting-single-threaded | f32f05641821f5770dd95787c459888101b93d05 | [
"MIT"
] | null | null | null | import unittest
class Template(unittest.TestCase):
    """Minimal unittest skeleton intended as a copy-paste starting point."""

    def setUp(self):
        """No fixtures are required for this template."""

    def test_basic(self):
        """Smoke test proving the unittest harness itself runs."""
        self.assertTrue(True)
        self.assertFalse(False)

    def tearDown(self):
        """Nothing to tear down."""
| 15.285714 | 34 | 0.621495 | import unittest
class Template(unittest.TestCase):
    """Minimal unittest skeleton intended as a copy-paste starting point."""

    def setUp(self):
        """No fixtures are required for this template."""

    def test_basic(self):
        """Smoke test proving the unittest harness itself runs."""
        self.assertTrue(True)
        self.assertFalse(False)

    def tearDown(self):
        """Nothing to tear down."""
| true | true |
1c327792cbf8c219d3adddc8ebbd0b6ee84b5c8a | 32,170 | py | Python | dqc_testsuite_xule.py | altova/sec-edgar-tools | d9d2e66963d1a357f395fc6b022eee30e4a869c5 | [
"Apache-2.0"
] | 9 | 2015-06-10T15:52:45.000Z | 2022-03-23T20:43:38.000Z | dqc_testsuite_xule.py | altova/sec-edgar-tools | d9d2e66963d1a357f395fc6b022eee30e4a869c5 | [
"Apache-2.0"
] | 2 | 2016-12-02T15:16:34.000Z | 2022-03-23T20:46:27.000Z | dqc_testsuite_xule.py | altova/sec-edgar-tools | d9d2e66963d1a357f395fc6b022eee30e4a869c5 | [
"Apache-2.0"
] | 10 | 2016-03-05T21:16:14.000Z | 2022-02-01T08:58:50.000Z | # Copyright 2015-2018 Altova GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__copyright__ = 'Copyright 2015-2019 Altova GmbH'
__license__ = 'http://www.apache.org/licenses/LICENSE-2.0'
# Executes the XBRL US Data Quality Committee conformance test suite.
#
# This script drives Altova RaptorXML+XBRL to execute the DQC test suite files in
# https://github.com/DataQualityCommittee/dqc_us_rules/tree/v6.1.2/tests/test_suite/DQC_Testcases_Release_All_V6.zip.
# See https://github.com/DataQualityCommittee/dqc_us_rules/tree/v6.1.2/tests/test_suite for more information.
#
# Example usage:
#
# Show available options
# raptorxmlxbrl script dqc_testsuite_xule.py -h
# Create a CSV summary file
# raptorxmlxbrl script dqc_testsuite_xule.py /path/to/DQC_Testcases_Release_All_V6/index.xml --log dqc_testsuite.log --csv-report dqc_testsuite.csv
# Create an XML summary file
# raptorxmlxbrl script dqc_testsuite_xule.py /path/to/DQC_Testcases_Release_All_V6/index.xml --log dqc_testsuite.log --xml-report dqc_testsuite.xml
# Run only specific testcases
# raptorxmlxbrl script dqc_testsuite_xule.py /path/to/DQC_Testcases_Release_All_V6/index.xml --log dqc_testsuite.log --csv-report dqc_testsuite.xml --testcase DQC_0004 DQC_0005
import argparse
import collections
import concurrent.futures
import datetime
import json
import logging
import multiprocessing
import os
import pickle
import re
import tempfile
import time
import urllib.parse
import urllib.request
import zipfile
from altova_api.v2 import xml, xsd, xbrl, beta, ProductInfo
xbrl.xule = beta.xbrl.xule
class ValidationError(Exception):
    """Exception signalling a validation failure detected by this script.

    The wrapped payload stays accessible via ``value`` and supplies the
    string form of the exception.
    """

    def __init__(self, value):
        # Keep the original payload so callers can inspect it.
        self.value = value

    def __str__(self):
        return str(self.value)
def setup_xule_processor(root_dir, catalog=None):
    """Create a XULE processor and load every ruleset listed in xule/rulesetMap.json.

    *root_dir* is the root of the dqc_us_rules checkout; *catalog* is an
    optional XML catalog used for URI resolution.  Rulesets that fail to load
    are logged and skipped.
    """
    xp = xbrl.xule.Processor(catalog=catalog)
    with open(os.path.join(root_dir, 'xule', 'rulesetMap.json')) as f:
        ruleset_map = json.load(f)
    for ns, path in ruleset_map.items():
        # Map values look like '.../dqc_us_rules/<rel>?query'; keep only the
        # part below dqc_us_rules and anchor it under root_dir.
        ruleset_path = os.path.join(root_dir, 'dqc_us_rules', path.split('?')[0].split('/dqc_us_rules/')[-1])
        logging.info('Loading ruleset %s', ruleset_path)
        try:
            xp.add_ruleset(ruleset_path, ns)
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logging.exception('Error loading ruleset %s', ruleset_path)
    return xp
def attr_val(elem, attr_name):
    """Return the value of attribute *attr_name* on element *elem*, or None if the attribute is absent.

    Prefers the schema-normalized value (available after schema validation)
    and falls back to the plain normalized value.
    """
    attr = elem.find_attribute(attr_name)
    if attr:
        val = attr.schema_normalized_value
        if val is None:
            val = attr.normalized_value
        return val
    return None
def attr_val_bool(elem, attr_name):
    """Return the boolean value of attribute *attr_name* on element *elem*, or None if the attribute is absent.

    Accepts the XML Schema boolean lexical forms '1' and 'true' (surrounding
    whitespace ignored); any other value yields False.
    """
    attr = elem.find_attribute(attr_name)
    return attr.normalized_value.strip() in ('1', 'true') if attr else None
def elem_val(elem):
    """Return the text value of element *elem*.

    Prefers the schema-normalized value; otherwise concatenates all
    character-data children.
    """
    val = elem.schema_normalized_value
    if val is not None:
        return val
    return ''.join(
        child.value
        for child in elem.children
        if isinstance(child, xml.CharDataInformationItem)
    )
def parse_variation(variation_elem):
    """Parse a <variation> element and return a dict with its meta-information.

    The returned dict has keys 'id', 'name', 'description', 'data'
    (instances/linkbases/schemas buckets plus the 'readMeFirst' entry point)
    and 'results' (blocked message codes and expected error counts).
    """
    variation = {
        'id': attr_val(variation_elem, 'id'),
    }
    for elem in variation_elem.element_children():
        if elem.local_name == 'name':
            variation['name'] = elem_val(elem)
        elif elem.local_name == 'description':
            # Keep the full XML content of the description (minus the start tag).
            variation['description'] = elem.serialize(omit_start_tag=True)
        elif elem.local_name == 'data':
            data = {
                'instances': [],
                'linkbases': [],
                'schemas': [],
            }
            for elem2 in elem.element_children():
                if elem2.local_name in ('instance', 'linkbase', 'schema'):
                    uri = elem_val(elem2)
                    # Absolutize against the element's base URI and sort into
                    # the matching bucket ('instances', 'linkbases', 'schemas').
                    data[elem2.local_name+'s'].append(urllib.parse.urljoin(elem2.base_uri, uri))
                    # The document flagged readMeFirst is the validation entry point.
                    if attr_val_bool(elem2, 'readMeFirst'):
                        data['readMeFirst'] = urllib.parse.urljoin(elem2.base_uri, uri)
                else:
                    logging.warning('Testcase file %s contains unknown <data> child element <%s>', elem2.document.uri, elem2.local_name)
            variation['data'] = data
        elif elem.local_name == 'results':
            results = {
                'blockedMessageCodes': attr_val(elem, 'blockedMessageCodes'),
                'errors': {}
            }
            for elem2 in elem.element_children():
                if elem2.local_name == 'error':
                    # Map error code -> expected severity and occurrence count.
                    results['errors'][elem_val(elem2)] = {
                        'severity': attr_val(elem2, 'severity'),
                        'count': int(attr_val(elem2, 'count'))
                    }
                elif elem2.local_name == 'result':
                    pass
                else:
                    logging.warning('Testcase file %s contains unknown <results> child element <%s>', elem2.document.uri, elem2.local_name)
            variation['results'] = results
        else:
            logging.warning('Testcase file %s contains unknown <variation> child element <%s>', elem.document.uri, elem.local_name)
    return variation
def load_testcase(testcase_uri):
    """Load a testcase file and return a dict with its meta-information.

    Raises ValidationError when the testcase document cannot be loaded.
    """
    logging.info('Loading testcase %s', testcase_uri)
    # Load the testcase file
    instance, log = xml.Instance.create_from_url(testcase_uri)
    # Check for any fatal errors
    if not instance:
        raise ValidationError('\n'.join(error.text for error in log))
    testcase_elem = instance.document_element
    testcase = {
        'uri': instance.uri,
    }
    # Iterate over all <testcase> child elements
    variations = []
    variation_ids = set()
    for elem in testcase_elem.element_children():
        if elem.local_name == 'creator':
            creator = {}
            for elem2 in elem.element_children():
                if elem2.local_name == 'name':
                    creator['name'] = elem_val(elem2)
                elif elem2.local_name == 'email':
                    creator['email'] = elem_val(elem2)
            testcase['creator'] = creator
        elif elem.local_name == 'number':
            testcase['number'] = elem_val(elem)
        elif elem.local_name == 'ruleIdentifier':
            testcase['ruleIdentifier'] = elem_val(elem)
        elif elem.local_name == 'description':
            testcase['description'] = elem.serialize(omit_start_tag=True)
        elif elem.local_name == 'ruleMessage':
            testcase['ruleMessage'] = elem_val(elem)
        elif elem.local_name == 'variation':
            variation = parse_variation(elem)
            variations.append(variation)
            if variation['id'] in variation_ids:
                logging.warning('Testcase file %s contains variations with duplicate id %s', testcase_uri, variation['id'])
            # BUGFIX: ids were never recorded, so the duplicate check above
            # could never fire.
            variation_ids.add(variation['id'])
        else:
            logging.warning('Testcase file %s contains unknown <testcase> child element <%s>', elem.document.uri, elem.local_name)
    testcase['variations'] = variations
    return testcase
def load_testsuite(index_uri):
    """Load the testcases listed in the testsuite index file and return a dict with all testcase meta-information.

    Raises ValidationError when the index document cannot be loaded.
    """
    logging.info('Start loading testsuite index %s', index_uri)
    start = time.time()
    # Load the testcase index file
    instance, log = xml.Instance.create_from_url(index_uri)
    # Check for any fatal errors
    if not instance:
        raise ValidationError('\n'.join(error.text for error in log))
    documentation_elem = instance.document_element
    testsuite = {
        'uri': instance.uri,
        'name': attr_val(documentation_elem, 'name'),
        'date': attr_val(documentation_elem, 'date')
    }
    # Iterate over all <testcase> child elements and parse the testcase file
    testcases = []
    for testcases_elem in documentation_elem.element_children():
        if testcases_elem.local_name == 'testcases':
            # @root is the common base directory of this group of testcases.
            root = urllib.parse.urljoin(testcases_elem.base_uri, attr_val(testcases_elem, 'root')+'/')
            for testcase_elem in testcases_elem.element_children():
                if testcase_elem.local_name == 'testcase':
                    # Get the value of the @uri attribute and make any relative uris absolute to the base uri
                    uri = urllib.parse.urljoin(root, attr_val(testcase_elem, 'uri'))
                    # Load the testcase file
                    testcases.append(load_testcase(uri))
    testsuite['testcases'] = testcases
    runtime = time.time() - start
    logging.info('Finished loading testsuite index %s in %fs', index_uri, runtime)
    return testsuite
def instance_name_from_zip(path):
    """Return the name of the SEC EDGAR instance document inside the zip archive at *path*.

    An instance file is identified by the EDGAR naming convention
    ``<name>-<yyyymmdd>.xml``.  Raises RuntimeError when the archive contains
    no such entry.
    """
    re_instance_name = re.compile(r'.+-\d{8}\.xml')
    # Close the archive deterministically instead of leaking the file handle.
    with zipfile.ZipFile(path) as archive:
        for name in archive.namelist():
            if re_instance_name.fullmatch(name):
                return name
    raise RuntimeError('Zip archive does not contain a valid SEC instance file.')
def get_uri_in_zip(zipURI, catalog):
    """Return a RaptorXML zip-scheme URI addressing the SEC instance inside *zipURI*."""
    if catalog is not None:
        zipURI = catalog.resolve_uri(zipURI)
    # Remote archives are fetched into a temporary file first; local ones are
    # used in place.
    if urllib.parse.urlparse(zipURI).scheme != 'file':
        local_path = tempfile.NamedTemporaryFile(suffix='.zip', delete=False).name
        logging.info('Downloading archive %s to %s', zipURI, local_path)
        urllib.request.urlretrieve(zipURI, local_path)
    else:
        local_path = file_uri_to_os_path(zipURI)
    return 'file:{0}%7Czip/{1}'.format(urllib.request.pathname2url(local_path), instance_name_from_zip(local_path))
def execute_variation(testcase, variation, xp, catalog, args):
    """Validate the variation's entry-point instance, run the XULE rules and
    compare the collected error counts against the expected results.

    Returns a ('PASS'|'FAIL', Counter of error code -> count) tuple.
    """
    logging.info('[%s] Start executing variation', variation['id'])
    if 'readMeFirst' in variation['data']:
        readMeFirstURI = variation['data']['readMeFirst']
        # Entry points packaged as zip archives are addressed via a zip-scheme
        # URI pointing at the instance inside the archive.
        uri = get_uri_in_zip(readMeFirstURI, catalog) if readMeFirstURI.endswith('.zip') else readMeFirstURI
    else:
        raise RuntimeError('Unknown entry point in variation %s' % variation['id'])
    logging.info('[%s] Validating instance %s', variation['id'], uri)
    instance, error_log = xbrl.Instance.create_from_url(uri, error_limit=500, catalog=catalog)
    error_counts = collections.Counter()
    # Plain XML/XBRL validation errors are counted under the pseudo-code 'other'.
    for error in error_log:
        if error.severity == xml.ErrorSeverity.ERROR:
            error_counts['other'] += 1
    if instance:
        # Execute the XULE rulesets; count ERROR results per rule, skipping
        # any codes the variation declares as blocked.
        for result in xp.execute(instance):
            if result.severity == xbrl.xule.Severity.ERROR:
                rule_name = result.effective_rule_name
                if variation['results']['blockedMessageCodes'] is None or rule_name not in variation['results']['blockedMessageCodes']:
                    error_counts[rule_name] += 1
    #if error_log.has_errors() and logging.getLogger().isEnabledFor(logging.DEBUG):
    #    logging.debug('[%s] Error log:\n%s', variation['id'], '\n'.join(error.text for error in error_log))
    # Pass iff every expected error code occurs exactly the expected number of
    # times; unexpected extra codes do not fail the variation (the report
    # writers surface them as warnings instead).
    passed = False if len(variation['results']['errors']) == 0 and len(error_counts) > 0 else True
    for code, error in variation['results']['errors'].items():
        if error['count'] != error_counts[code]:
            passed = False
    logging.info('[%s] Finished executing variation: %s, %s', variation['id'], 'PASS' if passed else 'FAIL', dict(error_counts))
    return 'PASS' if passed else 'FAIL', error_counts
def write_doc(path, content, mode="wb"):
    """Write *content* to *path* (opened with *mode*), creating parent directories as needed."""
    parent = os.path.dirname(path)
    if parent:
        # exist_ok avoids the exists()+makedirs() race of the previous version.
        os.makedirs(parent, exist_ok=True)
    # Context manager guarantees the file is closed even if write() raises.
    with open(path, mode) as f:
        f.write(content)
def download_doc(url, target):
    """Download *url* and store the raw bytes at *target* (parent dirs are created by write_doc)."""
    # Lazy %-style logging args (consistent with the rest of the file) instead
    # of eager string interpolation.
    logging.info('Downloading %s => %s', url, target)
    # Close the HTTP response deterministically.
    with urllib.request.urlopen(url) as response:
        content = response.read()
    write_doc(target, content, "wb")
def remote_uri_to_os_path(target_dir, uri):
    """Map a remote *uri* onto a local path under *target_dir* using a host/path mirror layout."""
    parts = urllib.parse.urlparse(uri)
    path = parts.path
    # Strip a single leading slash so the path joins below target_dir/host.
    if path.startswith("/"):
        path = path[1:]
    return os.path.join(target_dir, parts.netloc, path)
def is_remote(uri):
    """Return True when *uri* does not use the file: scheme (i.e. must be downloaded)."""
    return urllib.parse.urlparse(uri).scheme != "file"
def file_uri_to_os_path(uri):
    """Convert a file:// URI to a native filesystem path."""
    return urllib.request.url2pathname(urllib.parse.urlparse(uri).path)
def os_path_to_file_uri(path):
    """Convert a native filesystem path to a file-URI path component (no scheme prefix)."""
    return urllib.request.pathname2url(path)
def download_files_and_create_catalog(doc_uris, target_dir):
    """Mirror *doc_uris* below *target_dir* and write a catalog.xml mapping each remote URI to its local copy."""
    logging.info('Start downloading files and creating catalog')
    catalog_template = """<?xml version='1.0' encoding='UTF-8'?>
<catalog xmlns='urn:oasis:names:tc:entity:xmlns:xml:catalog' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' xsi:schemaLocation='urn:oasis:names:tc:entity:xmlns:xml:catalog Catalog.xsd'>
    %(mappings)s
</catalog>
"""
    uri_mapping_template = """<uri name="%(source)s" uri="%(target)s"/>"""
    lines = []
    catalog_path = os.path.join(target_dir, "catalog.xml")
    for uri in doc_uris:
        # Mirror each document under target_dir/<host>/<path>; record a
        # relative mapping so the generated catalog stays relocatable.
        target_abs = remote_uri_to_os_path(target_dir, uri)
        target_rel = os_path_to_file_uri(os.path.relpath(target_abs, target_dir))
        download_doc(uri, target_abs)
        lines.append(uri_mapping_template % {"source": uri, "target": target_rel})
    catalog_content = catalog_template % {"mappings": "\n    ".join(lines)}
    write_doc(catalog_path, catalog_content, "w")
    logging.info('Finished downloading files and creating catalog')
def execute_collect_remote_uris_of_variation(uri):
    """Load the XBRL instance at *uri* and return the set of remote URIs in its DTS (including *uri* itself)."""
    logging.info('Collecting remote URIs of instance %s', uri)
    uris = {uri}
    instance, error_log = xbrl.Instance.create_from_url(uri, error_limit=500)
    if instance is not None and instance.dts is not None:
        # Only documents that actually need downloading are of interest.
        uris.update(doc.uri for doc in instance.dts.documents if is_remote(doc.uri))
    return uris
def collect_remote_uris(testsuite, args):
    """Collect the remote URIs referenced by the selected testsuite variations.

    Returns a list of URIs that need to be mirrored locally: the static DQC
    rule resource files plus every remote document in the DTS of each selected
    variation instance.
    """
    logging.info('Start collecting remote files')
    remote_uris = set()
    # Static DQC rule resource files, pinned on both the master and v6
    # branches (replaces 16 copy-pasted .add() calls).
    resources = (
        'DQC_US_0011/dqc_0011.csv',
        'DQC_US_0015/dqc_15_concepts.csv',
        'DQC_US_0015/dqc_15_dei_concepts.csv',
        'DQC_US_0015/dqc_15_srt_concepts.csv',
        'DQC_US_0015/dqc_15_usgaap_2015_concepts.csv',
        'DQC_US_0015/dqc_15_usgaap_2016_concepts.csv',
        'DQC_US_0015/dqc_15_usgaap_2017_concepts.csv',
        'DQC_US_0015/dqc_15_usgaap_2018_concepts.csv',
    )
    for branch in ('master', 'v6'):
        for resource in resources:
            remote_uris.add('https://raw.githubusercontent.com/DataQualityCommittee/dqc_us_rules/%s/dqc_us_rules/resources/%s' % (branch, resource))
    with concurrent.futures.ThreadPoolExecutor(max_workers=args.max_workers) as executor:
        # Schedule remote-URI collection for every selected variation.
        futures = {}
        for testcase in testsuite['testcases']:
            if args.testcase_numbers and testcase['number'] not in args.testcase_numbers:
                continue
            if args.exclude_testcase_numbers and testcase['number'] in args.exclude_testcase_numbers:
                continue
            for variation in testcase['variations']:
                if args.variation_ids and variation['id'] not in args.variation_ids:
                    continue
                if 'readMeFirst' not in variation['data']:
                    continue
                readMeFirst = variation['data']['readMeFirst']
                if is_remote(readMeFirst):
                    futures[executor.submit(execute_collect_remote_uris_of_variation, readMeFirst)] = readMeFirst
        # Merge the per-variation URI sets as the futures complete.
        for future in concurrent.futures.as_completed(futures):
            variation_key = futures[future]
            try:
                remote_uris.update(future.result())
            except Exception:  # narrowed from bare except
                logging.exception('[%s] Exception raised during remote uri collection:', variation_key)
    logging.info('Finished collecting remote files')
    return list(remote_uris)
def execute_testsuite(testsuite, args):
    """Run all selected testcase variations in parallel.

    Returns (results, runtime) where *results* maps (testcase uri, variation
    id) -> (status, error_counts) and *runtime* is the wall time in seconds.
    """
    logging.info('Start executing %s variations in %d testcases', sum(len(testcase['variations']) for testcase in testsuite['testcases']), len(testsuite['testcases']))
    start = time.time()
    testsuite_path, testsuite_index = os.path.split(file_uri_to_os_path(testsuite['uri']))
    catalog_path = os.path.join(testsuite_path, "catalog.xml")
    catalog = None
    # Use a previously generated catalog (see --create-catalog) when present.
    if os.path.exists(catalog_path):
        catalog, error_log = xml.Catalog.create_from_url(catalog_path)
        # Check for any fatal errors
        if not catalog:
            raise ValidationError('\n'.join(error.text for error in error_log))
    # The testsuite index lives four directory levels below the checkout root
    # that contains the xule rulesetMap.json.
    xp = setup_xule_processor(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(args.uri)))), catalog)
    results = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=args.max_workers) as executor:
        # Schedule processing of all variations as futures
        futures = {}
        for testcase in testsuite['testcases']:
            if args.testcase_numbers and testcase['number'] not in args.testcase_numbers:
                continue
            if args.exclude_testcase_numbers and testcase['number'] in args.exclude_testcase_numbers:
                continue
            for variation in testcase['variations']:
                if args.variation_ids and variation['id'] not in args.variation_ids:
                    continue
                futures[executor.submit(execute_variation, testcase, variation, xp, catalog, args)] = (testcase['uri'], variation['id'])
        # Wait for all futures to finish
        for future in concurrent.futures.as_completed(futures):
            variation_key = futures[future]
            try:
                results[variation_key] = future.result()
            except Exception:  # narrowed from bare except: don't trap SystemExit/KeyboardInterrupt
                results[variation_key] = 'EXCEPTION', collections.Counter()
                logging.exception('[%s] Exception raised during testcase execution:', variation_key[1])
    runtime = time.time() - start
    logging.info('Finished executing testcase variations in %fs', runtime)
    return results, runtime
def calc_conformance(results):
    """Return (total, failed, conformance%) for a results dict of variation key -> (status, error_counts)."""
    statuses = [status for status, _ in results.values()]
    total = len(statuses)
    failed = total - statuses.count('PASS')
    # An empty result set counts as fully conformant.
    conformance = (total - failed) * 100 / total if total else 100
    return total, failed, conformance
def format_uri(uri, base, relative_uris):
    """Return *uri* relative to *base* when *relative_uris* is set and *uri* lives under *base*; otherwise unchanged."""
    if relative_uris and uri.startswith(base):
        # +1 also drops the separator following the base prefix.
        return uri[len(base) + 1:]
    return uri
def write_csv_report(path, testsuite, results, runtime, relative_uris):
    """Write the testsuite run results to a CSV file at *path*.

    Layout: a header row, one summary row for the whole run, then one row per
    testcase followed by one row per executed variation.
    """
    total, failed, conformance = calc_conformance(results)
    with open(path, 'w') as csvfile:
        testsuite_path, testsuite_index = os.path.split(testsuite['uri'])
        csvfile.write('Date,Total,Failed,Conformance,Runtime,Testsuite,Testcase,Variation,ReadMeFirst,Status,Actual,Expected,Blocked,Warnings\n')
        csvfile.write('"{:%Y-%m-%d %H:%M:%S}",{},{},{:.2f},{:.1f},{}\n'.format(datetime.datetime.now(), total, failed, conformance, runtime, testsuite['uri']))
        for testcase in testsuite['testcases']:
            csvfile.write(',,,,,,%s\n' % testcase['number'])
            for variation in testcase['variations']:
                variation_key = (testcase['uri'], variation['id'])
                if variation_key in results:
                    instance_uri = format_uri(variation['data']['readMeFirst'], testsuite_path, relative_uris)
                    status, error_counts = results[variation_key]
                    # Render error counts as 'NxCODE' tokens, sorted for stable output.
                    actual = ' '.join('%dx%s' % (count, code) for code, count in sorted(error_counts.items()))
                    expected = ' '.join('%dx%s' % (error['count'], code) for code, error in sorted(variation['results']['errors'].items()))
                    blocked = variation['results']['blockedMessageCodes'].replace('|', ' ') if variation['results']['blockedMessageCodes'] else ''
                    warnings = ''
                    # A PASS with extra error codes is flagged as a warning, not a failure.
                    if status == 'PASS' and len(variation['results']['errors']) != len(error_counts):
                        additional_errors = set(error_counts.keys()) - set(variation['results']['errors'])
                        warnings = 'Additional errors %s reported' % ' '.join(sorted(additional_errors))
                    csvfile.write(',,,,,,,{},{},{},{},{},{},{}\n'.format(variation['id'], instance_uri, status, actual, expected, blocked, warnings))
def xml_escape(str):
    """Escape '&', '<' and '"' for safe use in XML attribute values and text.

    '&' must be replaced FIRST: the previous order escaped '<' to '&lt;' and
    then re-escaped its ampersand, producing the broken '&amp;lt;'.
    """
    return str.replace('&', '&amp;').replace('<', '&lt;').replace('"', '&quot;')
def write_xml_report(path, testsuite, results, runtime, relative_uris):
    """Write the testsuite run results to an XML file at *path*.

    Produces a <testsuite> root with per-<testcase> and per-<variation>
    children; tabs/newlines are written explicitly for readable output.
    """
    total, failed, conformance = calc_conformance(results)
    with open(path, 'w') as xmlfile:
        testsuite_path, testsuite_index = os.path.split(testsuite['uri'])
        testsuite_uri = testsuite['uri'] if not relative_uris else testsuite_index
        xmlfile.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        xmlfile.write('<testsuite\n\txmlns="http://www.altova.com/testsuite/results"\n')
        if relative_uris:
            # With relative URIs, anchor all paths at the testsuite directory.
            xmlfile.write('\txml:base="{}/"\n'.format(testsuite_path))
        xmlfile.write('\turi="{}"\n\tname="{}"\n\ttotal="{}"\n\tfailed="{}"\n\tconformance="{}"\n\truntime="{}"\n\texecution-date="{:%Y-%m-%dT%H:%M:%S}"\n\tprocessor="Altova RaptorXML+XBRL Server">\n'.format(
            testsuite_uri, testsuite['name'], total, failed, conformance, runtime, datetime.datetime.now()))
        for testcase in testsuite['testcases']:
            testcase_uri = format_uri(testcase['uri'], testsuite_path, relative_uris)
            xmlfile.write('\t<testcase\n\t\turi="{}"\n\t\tnumber="{}"\n\t\truleIdentifier="{}">\n'.format(testcase_uri, testcase['number'], testcase['ruleIdentifier']))
            for variation in testcase['variations']:
                variation_key = (testcase['uri'], variation['id'])
                if variation_key in results:
                    instance_uri = format_uri(variation['data']['readMeFirst'], testsuite_path, relative_uris)
                    xmlfile.write('\t\t<variation\n\t\t\tid="{}"\n\t\t\tname="{}"\n\t\t\tinstance="{}">\n'.format(variation['id'], xml_escape(variation['name']), instance_uri))
                    status, error_counts = results[variation_key]
                    # Render error counts as 'NxCODE' tokens, sorted for stable output.
                    actual = ' '.join('%dx%s' % (count, code) for code, count in sorted(error_counts.items()))
                    expected = ' '.join('%dx%s' % (error['count'], code) for code, error in sorted(variation['results']['errors'].items()))
                    blocked = variation['results']['blockedMessageCodes'].replace('|', ' ') if variation['results']['blockedMessageCodes'] else ''
                    # A PASS with extra error codes gets an 'additional' attribute.
                    if status == 'PASS' and len(variation['results']['errors']) != len(error_counts):
                        additional_errors = ' '.join(sorted(set(error_counts.keys()) - set(variation['results']['errors'])))
                        xmlfile.write('\t\t\t<result\n\t\t\t\tstatus="{}"\n\t\t\t\tactual="{}"\n\t\t\t\texpected="{}"\n\t\t\t\tblocked="{}"\n\t\t\t\tadditional="{}"/>\n'.format(status,
                            actual, expected, blocked, additional_errors))
                    else:
                        xmlfile.write('\t\t\t<result\n\t\t\t\tstatus="{}"\n\t\t\t\tactual="{}"\n\t\t\t\texpected="{}"\n\t\t\t\tblocked="{}"/>\n'.format(status, actual, expected, blocked))
                    xmlfile.write('\t\t</variation>\n')
            xmlfile.write('\t</testcase>\n')
        xmlfile.write('</testsuite>\n')
def print_results(testsuite, results, runtime):
    """Print a testsuite run summary to the console.

    Failed variations are reported with actual vs. expected error counts;
    passing variations with additional error codes are reported as warnings.
    """
    total, failed, conformance = calc_conformance(results)
    for testcase in testsuite['testcases']:
        for variation in testcase['variations']:
            variation_key = (testcase['uri'], variation['id'])
            if variation_key in results:
                status, error_counts = results[variation_key]
                if status != 'PASS':
                    # Render error counts as 'NxCODE' tokens, sorted for stable output.
                    actual = ' '.join('%dx%s' % (count, code) for code, count in sorted(error_counts.items()))
                    expected = ' '.join('%dx%s' % (error['count'], code) for code, error in sorted(variation['results']['errors'].items()))
                    blocked = variation['results']['blockedMessageCodes'].replace('|', ' ') if variation['results']['blockedMessageCodes'] else ''
                    print('ERROR: Testcase %s, variation %s FAILED; actual [%s]; expected [%s]; blocked [%s]' % (testcase['number'], variation['id'], actual, expected, blocked))
                elif len(variation['results']['errors']) != len(error_counts):
                    # Passed but produced unexpected additional error codes.
                    additional_errors = set(error_counts.keys()) - set(variation['results']['errors'])
                    print('Warning: Testcase %s, variation %s had additional errors: [%s]' % (testcase['number'], variation['id'], ' '.join(sorted(additional_errors))))
    print('Conformance: %.2f%% (%d failed testcase variations out of %d)' % (conformance, failed, total))
def run_xbrl_testsuite(uri, args):
    """Load and execute the conformance testsuite at *uri*, then emit reports.

    Top-level driver: any exception is logged and the run aborts without
    propagating (this is the script's outermost error boundary).
    """
    try:
        testsuite = load_testsuite(uri)
        if args.create_catalog:
            # Mirror all remote documents locally and generate catalog.xml so
            # subsequent runs can execute offline.
            target_dir = os.path.dirname(file_uri_to_os_path(testsuite['uri']))
            remote_uris = collect_remote_uris(testsuite, args)
            download_files_and_create_catalog(remote_uris, target_dir)
        results, runtime = execute_testsuite(testsuite, args)
        logging.info('Start generating testsuite report')
        if args.csv_file:
            write_csv_report(args.csv_file, testsuite, results, runtime, args.relative_uris)
        if args.xml_file:
            write_xml_report(args.xml_file, testsuite, results, runtime, args.relative_uris)
        if not args.csv_file and not args.xml_file:
            print_results(testsuite, results, runtime)
        logging.info('Finished generating testsuite report')
    except Exception:  # narrowed from bare except: let SystemExit/KeyboardInterrupt through
        logging.exception('Testsuite run aborted with exception:')
def setup_logging(args):
    """Initialize the Python logging module from the parsed command-line args.

    With --log, all records at the chosen level go to the log file; without
    it, records are suppressed.  Warnings and errors are always echoed to the
    console in either case.
    """
    if args.log_file:
        levels = {'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG}
        logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', filename=args.log_file, filemode='w', level=levels[args.log_level])
    else:
        # No log file: swallow records instead of printing "no handler" noise.
        logging.getLogger().addHandler(logging.NullHandler())
    console = logging.StreamHandler()
    console.setLevel(logging.WARNING)
    console.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
    logging.getLogger().addHandler(console)
def parse_args():
    """Parse the command line and return the argparse namespace.

    See the module header for usage examples (report formats, testcase and
    variation filtering, worker count, catalog creation).
    """
    parser = argparse.ArgumentParser(description='Execute the XBRL US DQC conformance testsuite using Altova RaptorXML+XBRL')
    parser.add_argument('uri', metavar='INDEX', help='main testsuite index file')
    parser.add_argument('-l', '--log', metavar='LOG_FILE', dest='log_file', help='log output file')
    parser.add_argument('--log-level', metavar='LOG_LEVEL', dest='log_level', choices=['ERROR', 'WARNING', 'INFO', 'DEBUG'], default='INFO', help='log level (ERROR|WARNING|INFO|DEBUG)')
    parser.add_argument('--csv-report', metavar='CSV_FILE', dest='csv_file', help='write testsuite results to csv')
    parser.add_argument('--xml-report', metavar='XML_FILE', dest='xml_file', help='write testsuite results to xml')
    parser.add_argument('--relative-uris', dest='relative_uris', action='store_true', help='write testcase uris relative to testsuite index file')
    parser.add_argument('-t', '--testcase', metavar='TESTCASE_NUMBER', dest='testcase_numbers', nargs='*', help='limit execution to only this testcase number')
    parser.add_argument('--exclude-testcase', metavar='EXCLUDE_TESTCASE_NUMBER', dest='exclude_testcase_numbers', nargs='*', help='exclude execution of the given testcase number')
    parser.add_argument('-v', '--variation', metavar='VARIATION_ID', dest='variation_ids', nargs='*', help='limit execution to only this variation id')
    parser.add_argument('-w', '--workers', metavar='MAX_WORKERS', type=int, dest='max_workers', default=multiprocessing.cpu_count(), help='limit number of workers')
    parser.add_argument('--create-catalog', dest='create_catalog', action='store_true', help='download all remote files and create a catalog for them')
    return parser.parse_args()
def main():
    """Script entry point: parse arguments, configure logging, run the suite."""
    options = parse_args()
    setup_logging(options)
    run_xbrl_testsuite(options.uri, options)
if __name__ == '__main__':
    # Time the complete run (loading, execution, reporting) for the final log line.
    start = time.time()
    main()
    end = time.time()
    logging.info('Finished testsuite run in %fs', end-start)
| 51.226115 | 223 | 0.666491 |
__copyright__ = 'Copyright 2015-2019 Altova GmbH'
__license__ = 'http://www.apache.org/licenses/LICENSE-2.0'
import argparse
import collections
import concurrent.futures
import datetime
import json
import logging
import multiprocessing
import os
import pickle
import re
import tempfile
import time
import urllib.parse
import urllib.request
import zipfile
from altova_api.v2 import xml, xsd, xbrl, beta, ProductInfo
xbrl.xule = beta.xbrl.xule
class ValidationError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def setup_xule_processor(root_dir, catalog=None):
xp = xbrl.xule.Processor(catalog=catalog)
with open(os.path.join(root_dir,'xule','rulesetMap.json')) as f:
for ns, path in json.load(f).items():
ruleset_path = os.path.join(root_dir,'dqc_us_rules',path.split('?')[0].split('/dqc_us_rules/')[-1])
logging.info('Loading ruleset %s', ruleset_path)
try:
xp.add_ruleset(ruleset_path, ns)
except:
logging.exception('Error loading ruleset %s', ruleset_path)
return xp
def attr_val(elem, attr_name):
attr = elem.find_attribute(attr_name)
if attr:
val = attr.schema_normalized_value
if val is None:
val = attr.normalized_value
return val
return None
def attr_val_bool(elem, attr_name):
attr = elem.find_attribute(attr_name)
return attr.normalized_value.strip() in ('1', 'true') if attr else None
def elem_val(elem):
val = elem.schema_normalized_value
if val is None:
text = []
for child in elem.children:
if isinstance(child, xml.CharDataInformationItem):
text.append(child.value)
val = ''.join(text)
return val
def parse_variation(variation_elem):
variation = {
'id': attr_val(variation_elem, 'id'),
}
for elem in variation_elem.element_children():
if elem.local_name == 'name':
variation['name'] = elem_val(elem)
elif elem.local_name == 'description':
variation['description'] = elem.serialize(omit_start_tag=True)
elif elem.local_name == 'data':
data = {
'instances': [],
'linkbases': [],
'schemas': [],
}
for elem2 in elem.element_children():
if elem2.local_name in ('instance', 'linkbase', 'schema'):
uri = elem_val(elem2)
data[elem2.local_name+'s'].append(urllib.parse.urljoin(elem2.base_uri, uri))
if attr_val_bool(elem2, 'readMeFirst'):
data['readMeFirst'] = urllib.parse.urljoin(elem2.base_uri, uri)
else:
logging.warning('Testcase file %s contains unknown <data> child element <%s>', elem2.document.uri, elem2.local_name)
variation['data'] = data
elif elem.local_name == 'results':
results = {
'blockedMessageCodes': attr_val(elem, 'blockedMessageCodes'),
'errors': {}
}
for elem2 in elem.element_children():
if elem2.local_name == 'error':
results['errors'][elem_val(elem2)] = {
'severity': attr_val(elem2, 'severity'),
'count': int(attr_val(elem2, 'count'))
}
elif elem2.local_name == 'result':
pass
else:
logging.warning('Testcase file %s contains unknown <results> child element <%s>', elem2.document.uri, elem2.local_name)
variation['results'] = results
else:
logging.warning('Testcase file %s contains unknown <variation> child element <%s>', elem.document.uri, elem.local_name)
return variation
def load_testcase(testcase_uri):
logging.info('Loading testcase %s', testcase_uri)
instance, log = xml.Instance.create_from_url(testcase_uri)
if not instance:
raise ValidationError('\n'.join(error.text for error in log))
testcase_elem = instance.document_element
testcase = {
'uri': instance.uri,
}
variations = []
variation_ids = set()
for elem in testcase_elem.element_children():
if elem.local_name == 'creator':
creator = {}
for elem2 in elem.element_children():
if elem2.local_name == 'name':
creator['name'] = elem_val(elem2)
elif elem2.local_name == 'email':
creator['email'] = elem_val(elem2)
testcase['creator'] = creator
elif elem.local_name == 'number':
testcase['number'] = elem_val(elem)
elif elem.local_name == 'ruleIdentifier':
testcase['ruleIdentifier'] = elem_val(elem)
elif elem.local_name == 'description':
testcase['description'] = elem.serialize(omit_start_tag=True)
elif elem.local_name == 'ruleMessage':
testcase['ruleMessage'] = elem_val(elem)
elif elem.local_name == 'variation':
variation = parse_variation(elem)
variations.append(variation)
if variation['id'] in variation_ids:
logging.warning('Testcase file %s contains variations with duplicate id %s', testcase_uri, variation['id'])
else:
logging.warning('Testcase file %s contains unknown <testcase> child element <%s>', elem.document.uri, elem.local_name)
testcase['variations'] = variations
return testcase
def load_testsuite(index_uri):
logging.info('Start loading testsuite index %s', index_uri)
start = time.time()
instance, log = xml.Instance.create_from_url(index_uri)
if not instance:
raise ValidationError('\n'.join(error.text for error in log))
documentation_elem = instance.document_element
testsuite = {
'uri': instance.uri,
'name': attr_val(documentation_elem, 'name'),
'date': attr_val(documentation_elem, 'date')
}
testcases = []
for testcases_elem in documentation_elem.element_children():
if testcases_elem.local_name == 'testcases':
root = urllib.parse.urljoin(testcases_elem.base_uri, attr_val(testcases_elem, 'root')+'/')
for testcase_elem in testcases_elem.element_children():
if testcase_elem.local_name == 'testcase':
uri = urllib.parse.urljoin(root, attr_val(testcase_elem, 'uri'))
testcases.append(load_testcase(uri))
testsuite['testcases'] = testcases
runtime = time.time() - start
logging.info('Finished loading testsuite index %s in %fs', index_uri, runtime)
return testsuite
def instance_name_from_zip(path):
re_instance_name = re.compile(r'.+-\d{8}\.xml')
for name in zipfile.ZipFile(path).namelist():
if re_instance_name.fullmatch(name):
return name
raise RuntimeError('Zip archive does not contain a valid SEC instance file.')
def get_uri_in_zip(zipURI, catalog):
    """Return a RaptorXML zip-member uri for the SEC instance inside zipURI.

    Remote archives are first downloaded to a temporary file.  The returned
    uri uses the 'file:<path>%7Czip/<member>' convention to address a file
    inside a zip archive.
    """
    if catalog is not None:
        zipURI = catalog.resolve_uri(zipURI)
    urlparseResult = urllib.parse.urlparse(zipURI)
    if urlparseResult.scheme != 'file':
        # delete=False: the temp file must outlive this function so the
        # validator can read it later; it is not cleaned up here.
        tmpzip = tempfile.NamedTemporaryFile(suffix='.zip', delete=False).name
        logging.info('Downloading archive %s to %s', zipURI, tmpzip)
        urllib.request.urlretrieve(zipURI, tmpzip)
        zipPath = tmpzip
    else:
        zipPath = file_uri_to_os_path(zipURI)
    uri = 'file:{0}%7Czip/{1}'.format(urllib.request.pathname2url(zipPath), instance_name_from_zip(zipPath))
    return uri
def execute_variation(testcase, variation, xp, catalog, args):
    """Validate one testcase variation and compare errors against expectations.

    Returns ('PASS'|'FAIL', Counter of actual error codes).  XML/XBRL
    validation errors are counted under the pseudo code 'other'; XULE rule
    errors are counted per effective rule name unless the variation blocks
    that message code.
    """
    logging.info('[%s] Start executing variation', variation['id'])
    if 'readMeFirst' in variation['data']:
        readMeFirstURI = variation['data']['readMeFirst']
        uri = get_uri_in_zip(readMeFirstURI, catalog) if readMeFirstURI.endswith('.zip') else readMeFirstURI
    else:
        raise RuntimeError('Unknown entry point in variation %s' % variation['id'])
    logging.info('[%s] Validating instance %s', variation['id'], uri)
    instance, error_log = xbrl.Instance.create_from_url(uri, error_limit=500, catalog=catalog)
    error_counts = collections.Counter()
    for error in error_log:
        if error.severity == xml.ErrorSeverity.ERROR:
            error_counts['other'] += 1
    if instance:
        for result in xp.execute(instance):
            if result.severity == xbrl.xule.Severity.ERROR:
                rule_name = result.effective_rule_name
                if variation['results']['blockedMessageCodes'] is None or rule_name not in variation['results']['blockedMessageCodes']:
                    error_counts[rule_name] += 1
    # Fail when errors occurred although none were expected, or when any
    # expected error code has a count different from the observed one.
    passed = False if len(variation['results']['errors']) == 0 and len(error_counts) > 0 else True
    for code, error in variation['results']['errors'].items():
        if error['count'] != error_counts[code]:
            passed = False
    logging.info('[%s] Finished executing variation: %s, %s', variation['id'], 'PASS' if passed else 'FAIL', dict(error_counts))
    return 'PASS' if passed else 'FAIL', error_counts
def write_doc(path, content, mode="wb"):
    """Write content to path, creating missing parent directories first.

    Parameters
    ----------
    path: target file path.
    content: bytes or str, matching the open mode.
    mode: open() mode, "wb" by default.
    """
    directory = os.path.dirname(path)
    if directory:
        # exist_ok avoids the race between an exists() check and makedirs()
        # when several download threads write into the same directory tree.
        os.makedirs(directory, exist_ok=True)
    # Context manager guarantees the handle is closed even if write() raises
    # (the original leaked the handle on error).
    with open(path, mode) as f:
        f.write(content)
def download_doc(url, target):
    """Download url and store the response body at the local path target.

    NOTE(review): blocking fetch with no timeout — acceptable for this batch
    tool, but it hangs if the server stalls.
    """
    # Lazy %-style logging arguments instead of eager string interpolation.
    logging.info('Downloading %s => %s', url, target)
    # The HTTP response supports the context-manager protocol; close it
    # deterministically instead of leaking the connection.
    with urllib.request.urlopen(url) as response:
        content = response.read()
    write_doc(target, content, "wb")
def remote_uri_to_os_path(target_dir, uri):
    """Map a remote uri to a local mirror path below target_dir.

    The host name becomes the first directory component; the uri path
    (without its single leading slash) is appended below it.
    """
    parts = urllib.parse.urlparse(uri)
    relative_path = parts.path
    if relative_path.startswith("/"):
        relative_path = relative_path[1:]
    return os.path.join(target_dir, parts.netloc, relative_path)
def is_remote(uri):
    """Return True when uri does not use the file:// scheme.

    A bare relative path has an empty scheme and therefore also counts as
    remote — presumably callers only pass absolute uris; verify if reused.
    """
    scheme = urllib.parse.urlparse(uri).scheme
    return scheme != "file"
def file_uri_to_os_path(uri):
    """Translate a file:// uri into a native file system path."""
    # urlparse strips scheme and netloc; url2pathname decodes %-escapes.
    return urllib.request.url2pathname(urllib.parse.urlparse(uri).path)
def os_path_to_file_uri(path):
    """Percent-encode an OS path for use inside a uri.

    NOTE(review): despite the name, no 'file:' scheme is prefixed — callers
    assemble the final uri themselves.
    """
    quoted = urllib.request.pathname2url(path)
    return quoted
def download_files_and_create_catalog(doc_uris, target_dir):
    """Mirror doc_uris below target_dir and write an OASIS XML catalog.

    The catalog maps each original uri to its local (relative) copy so that
    later validation runs can work offline.
    """
    logging.info('Start downloading files and creating catalog')
    catalog_template = """<?xml version='1.0' encoding='UTF-8'?>
<catalog xmlns='urn:oasis:names:tc:entity:xmlns:xml:catalog' xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' xsi:schemaLocation='urn:oasis:names:tc:entity:xmlns:xml:catalog Catalog.xsd'>
    %(mappings)s
</catalog>
    """
    uri_mapping_template = """<uri name="%(source)s" uri="%(target)s"/>"""
    lines = []
    catalog_path = os.path.join(target_dir, "catalog.xml")
    for uri in doc_uris:
        target_abs = remote_uri_to_os_path(target_dir, uri)
        # Catalog entries are written relative to the catalog's location.
        target_rel = os_path_to_file_uri(os.path.relpath(target_abs, target_dir))
        download_doc(uri, target_abs)
        lines.append(uri_mapping_template % {"source": uri, "target": target_rel})
    catalog_content = catalog_template % {"mappings": "\n    ".join(lines)}
    write_doc(catalog_path, catalog_content, "w")
    logging.info('Finished downloading files and creating catalog')
def execute_collect_remote_uris_of_variation(uri):
    """Return the set of remote uris that make up the DTS of one instance.

    The instance uri itself is always included, even when loading fails.
    """
    logging.info('Collecting remote URIs of instance %s', uri)
    remote_uris = set()
    remote_uris.add(uri)
    instance, error_log = xbrl.Instance.create_from_url(uri, error_limit=500)
    if instance is not None and instance.dts is not None:
        for doc in instance.dts.documents:
            if is_remote(doc.uri):
                remote_uris.add(doc.uri)
    return remote_uris
def collect_remote_uris(testsuite, args):
    """Collect all remote uris referenced by the selected testcase variations.

    Seeds the set with the static DQC rule resource files (fetched by the
    XULE rules at runtime) and then loads every selected variation instance
    concurrently to discover the remote documents of its DTS.

    Returns a list of the distinct remote uris.
    """
    logging.info('Start collecting remote files')
    remote_uris = set()
    # Static DQC rule resources, published identically on the 'master' and
    # 'v6' branches.  Generated from data instead of 16 copy-pasted lines.
    dqc_uri_template = ('https://raw.githubusercontent.com/DataQualityCommittee/'
                        'dqc_us_rules/{branch}/dqc_us_rules/resources/{resource}')
    dqc_resources = (
        'DQC_US_0011/dqc_0011.csv',
        'DQC_US_0015/dqc_15_concepts.csv',
        'DQC_US_0015/dqc_15_dei_concepts.csv',
        'DQC_US_0015/dqc_15_srt_concepts.csv',
        'DQC_US_0015/dqc_15_usgaap_2015_concepts.csv',
        'DQC_US_0015/dqc_15_usgaap_2016_concepts.csv',
        'DQC_US_0015/dqc_15_usgaap_2017_concepts.csv',
        'DQC_US_0015/dqc_15_usgaap_2018_concepts.csv',
    )
    for branch in ('master', 'v6'):
        for resource in dqc_resources:
            remote_uris.add(dqc_uri_template.format(branch=branch, resource=resource))
    with concurrent.futures.ThreadPoolExecutor(max_workers=args.max_workers) as executor:
        futures = {}
        for testcase in testsuite['testcases']:
            # Apply the same testcase/variation filters as execute_testsuite.
            if args.testcase_numbers and testcase['number'] not in args.testcase_numbers:
                continue
            if args.exclude_testcase_numbers and testcase['number'] in args.exclude_testcase_numbers:
                continue
            for variation in testcase['variations']:
                if args.variation_ids and variation['id'] not in args.variation_ids:
                    continue
                if 'readMeFirst' not in variation['data']:
                    continue
                readMeFirst = variation['data']['readMeFirst']
                if is_remote(readMeFirst):
                    futures[executor.submit(execute_collect_remote_uris_of_variation, readMeFirst)] = readMeFirst
        for future in concurrent.futures.as_completed(futures):
            variation_key = futures[future]
            try:
                remote_uris.update(future.result())
            except Exception:
                # Narrowed from a bare 'except': a single bad instance must
                # not abort collection, but KeyboardInterrupt now propagates.
                logging.exception('[%s] Exception raised during remote uri collection:', variation_key)
    logging.info('Finished collecting remote files')
    return list(remote_uris)
def execute_testsuite(testsuite, args):
    """Run all selected testcase variations concurrently.

    Returns ({(testcase_uri, variation_id): (status, Counter)}, runtime)
    where status is 'PASS', 'FAIL' or 'EXCEPTION'.
    """
    logging.info('Start executing %s variations in %d testcases', sum(len(testcase['variations']) for testcase in testsuite['testcases']), len(testsuite['testcases']))
    start = time.time()
    testsuite_path, testsuite_index = os.path.split(file_uri_to_os_path(testsuite['uri']))
    catalog_path = os.path.join(testsuite_path, "catalog.xml")
    catalog = None
    # An optional catalog.xml next to the index (see --create-catalog)
    # redirects remote uris to their local mirror copies.
    if os.path.exists(catalog_path):
        catalog, error_log = xml.Catalog.create_from_url(catalog_path)
        if not catalog:
            raise ValidationError('\n'.join(error.text for error in error_log))
    xp = setup_xule_processor(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(args.uri)))), catalog)
    results = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=args.max_workers) as executor:
        futures = {}
        for testcase in testsuite['testcases']:
            if args.testcase_numbers and testcase['number'] not in args.testcase_numbers:
                continue
            if args.exclude_testcase_numbers and testcase['number'] in args.exclude_testcase_numbers:
                continue
            for variation in testcase['variations']:
                if args.variation_ids and variation['id'] not in args.variation_ids:
                    continue
                futures[executor.submit(execute_variation, testcase, variation, xp, catalog, args)] = (testcase['uri'], variation['id'])
        for future in concurrent.futures.as_completed(futures):
            variation_key = futures[future]
            try:
                results[variation_key] = future.result()
            except:
                # Record the failure but keep executing the other variations.
                results[variation_key] = 'EXCEPTION', collections.Counter()
                logging.exception('[%s] Exception raised during testcase execution:', variation_key[1])
    runtime = time.time() - start
    logging.info('Finished executing testcase variations in %fs', runtime)
    return results, runtime
def calc_conformance(results):
    """Compute (total, failed, conformance%) from {key: (status, counts)}.

    Any status other than 'PASS' counts as failed; an empty result set is
    reported as 100% conformant.
    """
    total = len(results)
    failed = 0
    for status, _ in results.values():
        if status != 'PASS':
            failed += 1
    if total > 0:
        conformance = (total - failed) * 100 / total
    else:
        conformance = 100
    return total, failed, conformance
def format_uri(uri, base, relative_uris):
    """Return uri relative to base when relative output is requested.

    Falls back to the absolute uri when relative_uris is false or when uri
    does not start with base.
    """
    if relative_uris and uri.startswith(base):
        # Drop the base prefix plus the '/' separator that follows it.
        return uri[len(base) + 1:]
    return uri
def write_csv_report(path, testsuite, results, runtime, relative_uris):
    """Write the testsuite results as a CSV report to path.

    NOTE(review): field values are not CSV-escaped; codes containing commas
    or quotes would corrupt the layout — confirm that cannot happen here.
    """
    total, failed, conformance = calc_conformance(results)
    with open(path, 'w') as csvfile:
        testsuite_path, testsuite_index = os.path.split(testsuite['uri'])
        csvfile.write('Date,Total,Failed,Conformance,Runtime,Testsuite,Testcase,Variation,ReadMeFirst,Status,Actual,Expected,Blocked,Warnings\n')
        csvfile.write('"{:%Y-%m-%d %H:%M:%S}",{},{},{:.2f},{:.1f},{}\n'.format(datetime.datetime.now(), total, failed, conformance, runtime, testsuite['uri']))
        for testcase in testsuite['testcases']:
            csvfile.write(',,,,,,%s\n' % testcase['number'])
            for variation in testcase['variations']:
                variation_key = (testcase['uri'], variation['id'])
                if variation_key in results:
                    instance_uri = format_uri(variation['data']['readMeFirst'], testsuite_path, relative_uris)
                    status, error_counts = results[variation_key]
                    actual = ' '.join('%dx%s' % (count, code) for code, count in sorted(error_counts.items()))
                    expected = ' '.join('%dx%s' % (error['count'], code) for code, error in sorted(variation['results']['errors'].items()))
                    blocked = variation['results']['blockedMessageCodes'].replace('|', ' ') if variation['results']['blockedMessageCodes'] else ''
                    warnings = ''
                    # A passing variation may still report error codes beyond
                    # the expected ones; surface those as a warning column.
                    if status == 'PASS' and len(variation['results']['errors']) != len(error_counts):
                        additional_errors = set(error_counts.keys()) - set(variation['results']['errors'])
                        warnings = 'Additional errors %s reported' % ' '.join(sorted(additional_errors))
                    csvfile.write(',,,,,,,{},{},{},{},{},{},{}\n'.format(variation['id'], instance_uri, status, actual, expected, blocked, warnings))
def xml_escape(str):
    """Escape the characters ``&``, ``<`` and ``"`` for XML attribute values.

    The original implementation replaced each character with itself (the
    entity references had been lost), so nothing was escaped at all.  The
    parameter keeps its original name ``str`` (shadowing the builtin) so the
    signature stays unchanged for callers.
    """
    # '&' must be escaped first, otherwise the '&' introduced by the other
    # entity references would itself be escaped a second time.
    return str.replace('&', '&amp;').replace('<', '&lt;').replace('"', '&quot;')
def write_xml_report(path, testsuite, results, runtime, relative_uris):
    """Write the testsuite results as an XML report to path.

    NOTE(review): only the variation name goes through xml_escape; the other
    attribute values (uris, error codes) are written verbatim — confirm they
    can never contain markup characters.
    """
    total, failed, conformance = calc_conformance(results)
    with open(path, 'w') as xmlfile:
        testsuite_path, testsuite_index = os.path.split(testsuite['uri'])
        testsuite_uri = testsuite['uri'] if not relative_uris else testsuite_index
        xmlfile.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        xmlfile.write('<testsuite\n\txmlns="http://www.altova.com/testsuite/results"\n')
        if relative_uris:
            # xml:base lets all nested relative uris resolve against the
            # testsuite directory.
            xmlfile.write('\txml:base="{}/"\n'.format(testsuite_path))
        xmlfile.write('\turi="{}"\n\tname="{}"\n\ttotal="{}"\n\tfailed="{}"\n\tconformance="{}"\n\truntime="{}"\n\texecution-date="{:%Y-%m-%dT%H:%M:%S}"\n\tprocessor="Altova RaptorXML+XBRL Server">\n'.format(
            testsuite_uri, testsuite['name'], total, failed, conformance, runtime, datetime.datetime.now()))
        for testcase in testsuite['testcases']:
            testcase_uri = format_uri(testcase['uri'], testsuite_path, relative_uris)
            xmlfile.write('\t<testcase\n\t\turi="{}"\n\t\tnumber="{}"\n\t\truleIdentifier="{}">\n'.format(testcase_uri, testcase['number'], testcase['ruleIdentifier']))
            for variation in testcase['variations']:
                variation_key = (testcase['uri'], variation['id'])
                if variation_key in results:
                    instance_uri = format_uri(variation['data']['readMeFirst'], testsuite_path, relative_uris)
                    xmlfile.write('\t\t<variation\n\t\t\tid="{}"\n\t\t\tname="{}"\n\t\t\tinstance="{}">\n'.format(variation['id'], xml_escape(variation['name']), instance_uri))
                    status, error_counts = results[variation_key]
                    actual = ' '.join('%dx%s' % (count, code) for code, count in sorted(error_counts.items()))
                    expected = ' '.join('%dx%s' % (error['count'], code) for code, error in sorted(variation['results']['errors'].items()))
                    blocked = variation['results']['blockedMessageCodes'].replace('|', ' ') if variation['results']['blockedMessageCodes'] else ''
                    # Emit an extra 'additional' attribute when a passing
                    # variation reported error codes beyond the expected set.
                    if status == 'PASS' and len(variation['results']['errors']) != len(error_counts):
                        additional_errors = ' '.join(sorted(set(error_counts.keys()) - set(variation['results']['errors'])))
                        xmlfile.write('\t\t\t<result\n\t\t\t\tstatus="{}"\n\t\t\t\tactual="{}"\n\t\t\t\texpected="{}"\n\t\t\t\tblocked="{}"\n\t\t\t\tadditional="{}"/>\n'.format(status,
                            actual, expected, blocked, additional_errors))
                    else:
                        xmlfile.write('\t\t\t<result\n\t\t\t\tstatus="{}"\n\t\t\t\tactual="{}"\n\t\t\t\texpected="{}"\n\t\t\t\tblocked="{}"/>\n'.format(status, actual, expected, blocked))
                    xmlfile.write('\t\t</variation>\n')
            xmlfile.write('\t</testcase>\n')
        xmlfile.write('</testsuite>\n')
def print_results(testsuite, results, runtime):
    """Print failures, warnings and the overall conformance to stdout."""
    total, failed, conformance = calc_conformance(results)
    for testcase in testsuite['testcases']:
        for variation in testcase['variations']:
            variation_key = (testcase['uri'], variation['id'])
            if variation_key in results:
                status, error_counts = results[variation_key]
                if status != 'PASS':
                    actual = ' '.join('%dx%s' % (count, code) for code, count in sorted(error_counts.items()))
                    expected = ' '.join('%dx%s' % (error['count'], code) for code, error in sorted(variation['results']['errors'].items()))
                    blocked = variation['results']['blockedMessageCodes'].replace('|', ' ') if variation['results']['blockedMessageCodes'] else ''
                    print('ERROR: Testcase %s, variation %s FAILED; actual [%s]; expected [%s]; blocked [%s]' % (testcase['number'], variation['id'], actual, expected, blocked))
                elif len(variation['results']['errors']) != len(error_counts):
                    # Passed, but with unexpected extra error codes.
                    additional_errors = set(error_counts.keys()) - set(variation['results']['errors'])
                    print('Warning: Testcase %s, variation %s had additional errors: [%s]' % (testcase['number'], variation['id'], ' '.join(sorted(additional_errors))))
    print('Conformance: %.2f%% (%d failed testcase variations out of %d)' % (conformance, failed, total))
def run_xbrl_testsuite(uri, args):
    """Top-level driver: load, optionally mirror, execute and report.

    Any exception aborts the run and is logged; nothing is re-raised.
    """
    try:
        testsuite = load_testsuite(uri)
        if args.create_catalog:
            target_dir = os.path.dirname(file_uri_to_os_path(testsuite['uri']))
            remote_uris = collect_remote_uris(testsuite, args)
            download_files_and_create_catalog(remote_uris, target_dir)
        results, runtime = execute_testsuite(testsuite, args)
        logging.info('Start generating testsuite report')
        if args.csv_file:
            write_csv_report(args.csv_file, testsuite, results, runtime, args.relative_uris)
        if args.xml_file:
            write_xml_report(args.xml_file, testsuite, results, runtime, args.relative_uris)
        if not args.csv_file and not args.xml_file:
            print_results(testsuite, results, runtime)
        logging.info('Finished generating testsuite report')
    except:
        # Deliberate catch-all boundary: log the traceback, exit cleanly.
        logging.exception('Testsuite run aborted with exception:')
def setup_logging(args):
    """Configure logging: optional file log plus a WARNING console handler."""
    if args.log_file:
        levels = {'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG}
        logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', filename=args.log_file, filemode='w', level=levels[args.log_level])
    else:
        # No file log requested: a NullHandler keeps the root logger quiet.
        logging.getLogger().addHandler(logging.NullHandler())
    # Warnings and errors are always mirrored to the console.
    console = logging.StreamHandler()
    console.setLevel(logging.WARNING)
    console.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
    logging.getLogger().addHandler(console)
def parse_args():
    """Define and parse the command line arguments of the testsuite runner."""
    parser = argparse.ArgumentParser(description='Execute the XBRL US DQC conformance testsuite using Altova RaptorXML+XBRL')
    parser.add_argument('uri', metavar='INDEX', help='main testsuite index file')
    parser.add_argument('-l', '--log', metavar='LOG_FILE', dest='log_file', help='log output file')
    parser.add_argument('--log-level', metavar='LOG_LEVEL', dest='log_level', choices=['ERROR', 'WARNING', 'INFO', 'DEBUG'], default='INFO', help='log level (ERROR|WARNING|INFO|DEBUG)')
    parser.add_argument('--csv-report', metavar='CSV_FILE', dest='csv_file', help='write testsuite results to csv')
    parser.add_argument('--xml-report', metavar='XML_FILE', dest='xml_file', help='write testsuite results to xml')
    parser.add_argument('--relative-uris', dest='relative_uris', action='store_true', help='write testcase uris relative to testsuite index file')
    parser.add_argument('-t', '--testcase', metavar='TESTCASE_NUMBER', dest='testcase_numbers', nargs='*', help='limit execution to only this testcase number')
    parser.add_argument('--exclude-testcase', metavar='EXCLUDE_TESTCASE_NUMBER', dest='exclude_testcase_numbers', nargs='*', help='exclude execution of the given testcase number')
    parser.add_argument('-v', '--variation', metavar='VARIATION_ID', dest='variation_ids', nargs='*', help='limit execution to only this variation id')
    parser.add_argument('-w', '--workers', metavar='MAX_WORKERS', type=int, dest='max_workers', default=multiprocessing.cpu_count(), help='limit number of workers')
    parser.add_argument('--create-catalog', dest='create_catalog', action='store_true', help='download all remote files and create a catalog for them')
    return parser.parse_args()
def main():
    """Script entry point: parse arguments, set up logging, run the suite."""
    # Parse command line arguments
    args = parse_args()
    # Setup logging
    setup_logging(args)
    # Run the testsuite
    run_xbrl_testsuite(args.uri, args)
if __name__ == '__main__':
    # Measure and log the wall-clock time of the whole run.
    start = time.time()
    main()
    end = time.time()
    logging.info('Finished testsuite run in %fs', end-start)
| true | true |
1c32782758509d96d470bdcdbadf1eea28c768bf | 1,972 | py | Python | axelrod/game.py | danilobellini/Axelrod | 2c9212553e06095c24adcb82a5979279cbdf45fb | [
"MIT"
] | 1 | 2017-11-29T22:37:35.000Z | 2017-11-29T22:37:35.000Z | axelrod/game.py | danilobellini/Axelrod | 2c9212553e06095c24adcb82a5979279cbdf45fb | [
"MIT"
] | 10 | 2020-09-07T19:44:57.000Z | 2020-11-25T06:00:05.000Z | axelrod/game.py | danilobellini/Axelrod | 2c9212553e06095c24adcb82a5979279cbdf45fb | [
"MIT"
] | null | null | null | from typing import Tuple, Union
from axelrod import Action
C, D = Action.C, Action.D
Score = Union[int, float]
class Game(object):
"""Container for the game matrix and scoring logic.
Attributes
----------
scores: dict
The numerical score attribute to all combinations of action pairs.
"""
def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1) -> None:
"""Create a new game object.
Parameters
----------
r: int or float
Score obtained by both players for mutual cooperation.
s: int or float
Score obtained by a player for cooperating against a defector.
t: int or float
Score obtained by a player for defecting against a cooperator.
p: int or float
Score obtained by both player for mutual defection.
"""
self.scores = {(C, C): (r, r), (D, D): (p, p), (C, D): (s, t), (D, C): (t, s)}
def RPST(self) -> Tuple[Score, Score, Score, Score]:
"""Returns game matrix values in Press and Dyson notation."""
R = self.scores[(C, C)][0]
P = self.scores[(D, D)][0]
S = self.scores[(C, D)][0]
T = self.scores[(D, C)][0]
return R, P, S, T
def score(self, pair: Tuple[Action, Action]) -> Tuple[Score, Score]:
"""Returns the appropriate score for a decision pair.
Parameters
----------
pair: tuple(Action, Action)
A pair actions for two players, for example (C, C).
Returns
-------
tuple of int or float
Scores for two player resulting from their actions.
"""
return self.scores[pair]
def __repr__(self) -> str:
return "Axelrod game: (R,P,S,T) = {}".format(self.RPST())
def __eq__(self, other):
if not isinstance(other, Game):
return False
return self.RPST() == other.RPST()
DefaultGame = Game()
| 29 | 87 | 0.553245 | from typing import Tuple, Union
from axelrod import Action
C, D = Action.C, Action.D
Score = Union[int, float]
class Game(object):
    """Container for the game matrix and scoring logic.

    Attributes
    ----------
    scores: dict
        Maps each ordered pair of actions to the pair of player payoffs.
    """
    def __init__(self, r: Score = 3, s: Score = 0, t: Score = 5, p: Score = 1) -> None:
        """Create a game from r (reward), s (sucker), t (temptation), p (punishment)."""
        self.scores = {(C, C): (r, r), (D, D): (p, p), (C, D): (s, t), (D, C): (t, s)}
    def RPST(self) -> Tuple[Score, Score, Score, Score]:
        """Return the game matrix values in Press and Dyson notation."""
        R = self.scores[(C, C)][0]
        P = self.scores[(D, D)][0]
        S = self.scores[(C, D)][0]
        T = self.scores[(D, C)][0]
        return R, P, S, T
    def score(self, pair: Tuple[Action, Action]) -> Tuple[Score, Score]:
        """Return the payoff pair for a decision pair such as (C, C)."""
        return self.scores[pair]
    def __repr__(self) -> str:
        return "Axelrod game: (R,P,S,T) = {}".format(self.RPST())
    def __eq__(self, other):
        """Games are equal when their (R, P, S, T) values match."""
        if not isinstance(other, Game):
            return False
        return self.RPST() == other.RPST()
DefaultGame = Game()
| true | true |
1c32785b49c7d09a70abf0156d9432ce96763e30 | 4,103 | py | Python | dask/tests/test_threaded.py | lr4d/dask | e405fa0c767a89ec6a413114be831a96cb388443 | [
"BSD-3-Clause"
] | null | null | null | dask/tests/test_threaded.py | lr4d/dask | e405fa0c767a89ec6a413114be831a96cb388443 | [
"BSD-3-Clause"
] | null | null | null | dask/tests/test_threaded.py | lr4d/dask | e405fa0c767a89ec6a413114be831a96cb388443 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import signal
import threading
from multiprocessing.pool import ThreadPool
from time import time, sleep
import pytest
import dask
from dask.compatibility import PY2
from dask.threaded import get
from dask.utils_test import inc, add
def test_get():
    """The threaded scheduler computes single keys and lists of keys."""
    dsk = {"x": 1, "y": 2, "z": (inc, "x"), "w": (add, "z", "y")}
    assert get(dsk, "w") == 4
    assert get(dsk, ["w", "z"]) == (4, 2)
def test_nested_get():
    """Tasks may reference other keys directly and inside nested lists."""
    dsk = {"x": 1, "y": 2, "a": (add, "x", "y"), "b": (sum, ["x", "y"])}
    assert get(dsk, ["a", "b"]) == (3, 3)
def test_get_without_computation():
    """A bare value key is returned as-is without running any task."""
    dsk = {"x": 1}
    assert get(dsk, "x") == 1
def test_broken_callback():
    """An exception raised inside a callback propagates to the caller."""
    from dask.callbacks import Callback
    def _f_ok(*args, **kwargs):
        pass
    def _f_broken(*args, **kwargs):
        raise ValueError("my_exception")
    dsk = {"x": 1}
    with Callback(start=_f_broken, finish=_f_ok):
        with Callback(start=_f_ok, finish=_f_ok):
            with pytest.raises(ValueError, match="my_exception"):
                get(dsk, "x")
def bad(x):
    """Helper task that always fails."""
    raise ValueError()
def test_exceptions_rise_to_top():
    """Exceptions raised inside tasks are re-raised by get()."""
    dsk = {"x": 1, "y": (bad, "x")}
    pytest.raises(ValueError, lambda: get(dsk, "y"))
def test_reuse_pool():
    """A user-supplied ThreadPool set via dask.config is reused across calls."""
    with ThreadPool() as pool:
        with dask.config.set(pool=pool):
            assert get({"x": (inc, 1)}, "x") == 2
            assert get({"x": (inc, 1)}, "x") == 2
@pytest.mark.skipif(PY2, reason="threading API changed")
def test_pool_kwarg():
    """get(pool=...) runs tasks on exactly the given pool's worker threads."""
    def f():
        sleep(0.01)
        return threading.get_ident()
    dsk = {("x", i): (f,) for i in range(30)}
    # Count the distinct thread ids used by the 30 tasks.
    dsk["x"] = (len, (set, [("x", i) for i in range(len(dsk))]))
    with ThreadPool(3) as pool:
        assert get(dsk, "x", pool=pool) == 3
def test_threaded_within_thread():
    """get() works when invoked from a non-main thread."""
    L = []
    def f(i):
        result = get({"x": (lambda: i,)}, "x", num_workers=2)
        L.append(result)
    before = threading.active_count()
    for i in range(20):
        t = threading.Thread(target=f, args=(1,))
        t.daemon = True
        t.start()
        t.join()
    assert L == [1]
    del L[:]
    start = time()  # wait for most threads to join
    while threading.active_count() > before + 10:
        sleep(0.01)
    assert time() < start + 5
def test_dont_spawn_too_many_threads():
    """Repeated get() calls must not leak worker threads."""
    before = threading.active_count()
    dsk = {("x", i): (lambda: i,) for i in range(10)}
    dsk["x"] = (sum, list(dsk))
    for i in range(20):
        get(dsk, "x", num_workers=4)
    after = threading.active_count()
    assert after <= before + 8
def test_thread_safety():
    """Concurrent get() calls on the same graph all succeed."""
    def f(x):
        return 1
    dsk = {"x": (sleep, 0.05), "y": (f, "x")}
    L = []
    def test_f():
        L.append(get(dsk, "y"))
    threads = []
    for i in range(20):
        t = threading.Thread(target=test_f)
        t.daemon = True
        t.start()
        threads.append(t)
    for thread in threads:
        thread.join()
    assert L == [1] * 20
@pytest.mark.xfail(
    "xdist" in sys.modules,
    reason="This test fails intermittently when using pytest-xdist (maybe)",
    strict=False,
)
def test_interrupt():
    """A KeyboardInterrupt promptly aborts a running get()."""
    # Python 2 and windows 2 & 3 both implement `queue.get` using polling,
    # which means we can set an exception to interrupt the call to `get`.
    # Python 3 on other platforms requires sending SIGINT to the main thread.
    if PY2:
        from thread import interrupt_main
    elif os.name == "nt":
        from _thread import interrupt_main
    else:
        main_thread = threading.get_ident()
        def interrupt_main():
            signal.pthread_kill(main_thread, signal.SIGINT)
    def long_task():
        sleep(5)
    dsk = {("x", i): (long_task,) for i in range(20)}
    dsk["x"] = (len, list(dsk.keys()))
    try:
        # Fire the interrupt 0.5s into the (otherwise multi-second) run.
        interrupter = threading.Timer(0.5, interrupt_main)
        interrupter.start()
        start = time()
        get(dsk, "x")
    except KeyboardInterrupt:
        pass
    except Exception:
        assert False, "Failed to interrupt"
    stop = time()
    if stop - start > 4:
        assert False, "Failed to interrupt"
| 23.58046 | 77 | 0.572995 | import os
import sys
import signal
import threading
from multiprocessing.pool import ThreadPool
from time import time, sleep
import pytest
import dask
from dask.compatibility import PY2
from dask.threaded import get
from dask.utils_test import inc, add
def test_get():
    """The threaded scheduler computes single keys and lists of keys."""
    dsk = {"x": 1, "y": 2, "z": (inc, "x"), "w": (add, "z", "y")}
    assert get(dsk, "w") == 4
    assert get(dsk, ["w", "z"]) == (4, 2)
def test_nested_get():
    """Tasks may reference other keys directly and inside nested lists."""
    dsk = {"x": 1, "y": 2, "a": (add, "x", "y"), "b": (sum, ["x", "y"])}
    assert get(dsk, ["a", "b"]) == (3, 3)
def test_get_without_computation():
    """A bare value key is returned as-is without running any task."""
    dsk = {"x": 1}
    assert get(dsk, "x") == 1
def test_broken_callback():
    """An exception raised inside a callback propagates to the caller."""
    from dask.callbacks import Callback
    def _f_ok(*args, **kwargs):
        pass
    def _f_broken(*args, **kwargs):
        raise ValueError("my_exception")
    dsk = {"x": 1}
    with Callback(start=_f_broken, finish=_f_ok):
        with Callback(start=_f_ok, finish=_f_ok):
            with pytest.raises(ValueError, match="my_exception"):
                get(dsk, "x")
def bad(x):
    """Helper task that always fails."""
    raise ValueError()
def test_exceptions_rise_to_top():
    """Exceptions raised inside tasks are re-raised by get()."""
    dsk = {"x": 1, "y": (bad, "x")}
    pytest.raises(ValueError, lambda: get(dsk, "y"))
def test_reuse_pool():
    """A user-supplied ThreadPool set via dask.config is reused across calls."""
    with ThreadPool() as pool:
        with dask.config.set(pool=pool):
            assert get({"x": (inc, 1)}, "x") == 2
            assert get({"x": (inc, 1)}, "x") == 2
@pytest.mark.skipif(PY2, reason="threading API changed")
def test_pool_kwarg():
    """get(pool=...) runs tasks on exactly the given pool's worker threads."""
    def f():
        sleep(0.01)
        return threading.get_ident()
    dsk = {("x", i): (f,) for i in range(30)}
    # Count the distinct thread ids used by the 30 tasks.
    dsk["x"] = (len, (set, [("x", i) for i in range(len(dsk))]))
    with ThreadPool(3) as pool:
        assert get(dsk, "x", pool=pool) == 3
def test_threaded_within_thread():
    """get() works when invoked from a non-main thread."""
    L = []
    def f(i):
        result = get({"x": (lambda: i,)}, "x", num_workers=2)
        L.append(result)
    before = threading.active_count()
    for i in range(20):
        t = threading.Thread(target=f, args=(1,))
        t.daemon = True
        t.start()
        t.join()
    assert L == [1]
    del L[:]
    # Wait for most worker threads to exit before asserting.
    start = time()
    while threading.active_count() > before + 10:
        sleep(0.01)
    assert time() < start + 5
def test_dont_spawn_too_many_threads():
    """Repeated get() calls must not leak worker threads."""
    before = threading.active_count()
    dsk = {("x", i): (lambda: i,) for i in range(10)}
    dsk["x"] = (sum, list(dsk))
    for i in range(20):
        get(dsk, "x", num_workers=4)
    after = threading.active_count()
    assert after <= before + 8
def test_thread_safety():
    """Concurrent get() calls on the same graph all succeed."""
    def f(x):
        return 1
    dsk = {"x": (sleep, 0.05), "y": (f, "x")}
    L = []
    def test_f():
        L.append(get(dsk, "y"))
    threads = []
    for i in range(20):
        t = threading.Thread(target=test_f)
        t.daemon = True
        t.start()
        threads.append(t)
    for thread in threads:
        thread.join()
    assert L == [1] * 20
@pytest.mark.xfail(
    "xdist" in sys.modules,
    reason="This test fails intermittently when using pytest-xdist (maybe)",
    strict=False,
)
def test_interrupt():
    """A KeyboardInterrupt promptly aborts a running get()."""
    # On Python 2 and on Windows the blocking queue.get polls, so raising in
    # the main thread suffices; elsewhere SIGINT must be sent to it.
    if PY2:
        from thread import interrupt_main
    elif os.name == "nt":
        from _thread import interrupt_main
    else:
        main_thread = threading.get_ident()
        def interrupt_main():
            signal.pthread_kill(main_thread, signal.SIGINT)
    def long_task():
        sleep(5)
    dsk = {("x", i): (long_task,) for i in range(20)}
    dsk["x"] = (len, list(dsk.keys()))
    try:
        # Fire the interrupt 0.5s into the (otherwise multi-second) run.
        interrupter = threading.Timer(0.5, interrupt_main)
        interrupter.start()
        start = time()
        get(dsk, "x")
    except KeyboardInterrupt:
        pass
    except Exception:
        assert False, "Failed to interrupt"
    stop = time()
    if stop - start > 4:
        assert False, "Failed to interrupt"
| true | true |
1c3278aeb432067752f6763a985c005fd4c759c7 | 3,918 | py | Python | src/aws_scatter_gather/s3_sqs_lambda_async_chunked/resources/work_bucket.py | cbuschka/aws-scatter-gather | abebd22aa6449369845a08d4260607745098bb28 | [
"Apache-2.0"
] | 2 | 2020-06-23T08:27:56.000Z | 2020-06-25T08:44:57.000Z | src/aws_scatter_gather/s3_sqs_lambda_async_chunked/resources/work_bucket.py | cbuschka/aws-scatter-gather | abebd22aa6449369845a08d4260607745098bb28 | [
"Apache-2.0"
] | null | null | null | src/aws_scatter_gather/s3_sqs_lambda_async_chunked/resources/work_bucket.py | cbuschka/aws-scatter-gather | abebd22aa6449369845a08d4260607745098bb28 | [
"Apache-2.0"
] | null | null | null | from aws_scatter_gather.util import json
import os
from aws_scatter_gather.util.jsontime import now
from aws_scatter_gather.util.trace import trace
SCOPE = os.environ.get("SCOPE", "")
WORK_BUCKET = "{SCOPE}s3-sqs-lambda-async-chunked-work".format(SCOPE=SCOPE)
async def write_batch_status(batch_id, record_count, chunk_size, s3_resource):
    """Write the <batch_id>/status.json document describing a new batch."""
    async with trace("Writing status for {}", batch_id):
        object_key = "{}/status.json".format(batch_id)
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        await s3_object.put(ACL='private', Body=json.dumps({
            "variant": "s3-sqs-lambda-async-chunked",
            "batchId": batch_id,
            "chunkSize": chunk_size,
            "taskCount": record_count,
            "startTime": now()
        }))
# async def delete_batch_status(batch_id):
# object_key = "{}/status.json".format(batch_id)
# async with trace("Deleting {}/{} from s3", WORK_BUCKET, object_key):
# async with aioaws.resource("s3") as s3_resource:
# s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
# await s3_object.delete()
async def exists_pending_chunk(batch_id, s3_resource):
    """Return True while at least one chunk of the batch is still pending."""
    async with trace("Checking if batch batch_id={} is complete", batch_id):
        s3_bucket = await s3_resource.Bucket(name=WORK_BUCKET)
        # MaxKeys=1: a single key is enough to prove the batch is unfinished.
        async for _ in s3_bucket.objects.filter(Prefix="{}/pending/".format(batch_id), MaxKeys=1):
            return True
        else:
            # for/else: runs only when the listing yielded no keys at all.
            return False
async def write_pending_chunk(batch_id, index, chunk, batch_writer):
    """Store one input chunk under <batch_id>/pending/<index>.json."""
    object_key = "{}/pending/{}.json".format(batch_id, index)
    async with trace("Write pending chunk {}/{} to s3", WORK_BUCKET, object_key):
        await batch_writer.put(Bucket=WORK_BUCKET, Key=object_key,
                               ACL='private',
                               Body=json.dumps(chunk))
async def write_chunk_result(batch_id, index, chunk, s3_resource):
    """Store one processed chunk under <batch_id>/done/<index>.json."""
    object_key = "{}/done/{}.json".format(batch_id, index)
    async with trace("Writing chunk result {}/{} to s3", WORK_BUCKET, object_key):
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        await s3_object.put(ACL='private', Body=json.dumps(chunk))
async def delete_pending_chunk(batch_id, index, s3_resource):
    """Remove <batch_id>/pending/<index>.json once the chunk is processed."""
    object_key = "{}/pending/{}.json".format(batch_id, index)
    async with trace("Deleting {}/{} from s3", WORK_BUCKET, object_key):
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        await s3_object.delete()
async def read_batch_status(batch_id, s3_resource):
    """Load and return the parsed <batch_id>/status.json document."""
    async with trace("Reading status for batch_id={}", batch_id):
        object_key = "{}/status.json".format(batch_id)
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        response = await s3_object.get()
        async with response["Body"] as stream:
            data = await stream.read()
            json_doc = json.loads(data)
            return json_doc
async def read_pending_chunk(batch_id, index, s3_resource):
    """Load and return the parsed chunk <batch_id>/pending/<index>.json."""
    async with trace("Reading pending chunk batch_id={}/index={}", batch_id, index):
        object_key = "{}/pending/{}.json".format(batch_id, index)
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        response = await s3_object.get()
        async with response["Body"] as stream:
            data = await stream.read()
            json_doc = json.loads(data)
            return json_doc
async def read_chunk_result(batch_id, index, s3_resource):
    """Load and return the parsed chunk result <batch_id>/done/<index>.json."""
    async with trace("Reading chunk result batch_id={}/index={}", batch_id, index):
        object_key = "{}/done/{}.json".format(batch_id, index)
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        response = await s3_object.get()
        async with response["Body"] as stream:
            data = await stream.read()
            json_doc = json.loads(data)
            return json_doc
| 41.242105 | 98 | 0.662072 | from aws_scatter_gather.util import json
import os
from aws_scatter_gather.util.jsontime import now
from aws_scatter_gather.util.trace import trace
SCOPE = os.environ.get("SCOPE", "")
WORK_BUCKET = "{SCOPE}s3-sqs-lambda-async-chunked-work".format(SCOPE=SCOPE)
async def write_batch_status(batch_id, record_count, chunk_size, s3_resource):
    """Write the initial <batch_id>/status.json document describing this batch."""
    async with trace("Writing status for {}", batch_id):
        object_key = "{}/status.json".format(batch_id)
        # Build the status document first, then upload it privately.
        status_doc = {
            "variant": "s3-sqs-lambda-async-chunked",
            "batchId": batch_id,
            "chunkSize": chunk_size,
            "taskCount": record_count,
            "startTime": now()
        }
        status_object = await s3_resource.Object(WORK_BUCKET, object_key)
        await status_object.put(ACL='private', Body=json.dumps(status_doc))
async def exists_pending_chunk(batch_id, s3_resource):
    """Return True while at least one pending chunk object remains for *batch_id*."""
    async with trace("Checking if batch batch_id={} is complete", batch_id):
        bucket = await s3_resource.Bucket(name=WORK_BUCKET)
        # MaxKeys=1 is enough: a single hit proves work is still outstanding.
        listing = bucket.objects.filter(Prefix="{}/pending/".format(batch_id), MaxKeys=1)
        async for _ in listing:
            return True
        return False
async def write_pending_chunk(batch_id, index, chunk, batch_writer):
    """Queue one chunk for processing as <batch_id>/pending/<index>.json."""
    object_key = "{}/pending/{}.json".format(batch_id, index)
    async with trace("Write pending chunk {}/{} to s3", WORK_BUCKET, object_key):
        # Serialize first, then hand the upload to the batch writer.
        payload = json.dumps(chunk)
        await batch_writer.put(Bucket=WORK_BUCKET, Key=object_key, ACL='private', Body=payload)
async def write_chunk_result(batch_id, index, chunk, s3_resource):
    """Store the processed result of chunk *index* under <batch_id>/done/<index>.json."""
    object_key = "{}/done/{}.json".format(batch_id, index)
    async with trace("Writing chunk result {}/{} to s3", WORK_BUCKET, object_key):
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        # Upload the JSON-serialized chunk as a private object.
        await s3_object.put(ACL='private', Body=json.dumps(chunk))
async def delete_pending_chunk(batch_id, index, s3_resource):
    """Remove the pending marker object for chunk *index* once it has been handled."""
    object_key = "{}/pending/{}.json".format(batch_id, index)
    async with trace("Deleting {}/{} from s3", WORK_BUCKET, object_key):
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        await s3_object.delete()
async def read_batch_status(batch_id, s3_resource):
    """Fetch and JSON-decode <batch_id>/status.json from the work bucket."""
    async with trace("Reading status for batch_id={}", batch_id):
        object_key = "{}/status.json".format(batch_id)
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        response = await s3_object.get()
        # The response body is a stream; the context manager closes it.
        async with response["Body"] as stream:
            data = await stream.read()
            json_doc = json.loads(data)
            return json_doc
async def read_pending_chunk(batch_id, index, s3_resource):
    """Fetch and JSON-decode the pending chunk <batch_id>/pending/<index>.json."""
    async with trace("Reading pending chunk batch_id={}/index={}", batch_id, index):
        object_key = "{}/pending/{}.json".format(batch_id, index)
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        response = await s3_object.get()
        # The response body is a stream; the context manager closes it.
        async with response["Body"] as stream:
            data = await stream.read()
            json_doc = json.loads(data)
            return json_doc
async def read_chunk_result(batch_id, index, s3_resource):
    """Fetch and JSON-decode the finished chunk result <batch_id>/done/<index>.json."""
    async with trace("Reading chunk result batch_id={}/index={}", batch_id, index):
        object_key = "{}/done/{}.json".format(batch_id, index)
        s3_object = await s3_resource.Object(WORK_BUCKET, object_key)
        response = await s3_object.get()
        # The response body is a stream; the context manager closes it.
        async with response["Body"] as stream:
            data = await stream.read()
            json_doc = json.loads(data)
            return json_doc
| true | true |
1c327aabc7e164668a9f2b0263e36d4c77eebdb0 | 1,597 | py | Python | octohatrack/contributors_file.py | ncoghlan/octohatrack | cb7a0e8011a312a9074a2aa63b640c32fdbaa988 | [
"BSD-3-Clause"
] | null | null | null | octohatrack/contributors_file.py | ncoghlan/octohatrack | cb7a0e8011a312a9074a2aa63b640c32fdbaa988 | [
"BSD-3-Clause"
] | null | null | null | octohatrack/contributors_file.py | ncoghlan/octohatrack | cb7a0e8011a312a9074a2aa63b640c32fdbaa988 | [
"BSD-3-Clause"
] | 1 | 2020-12-18T09:13:50.000Z | 2020-12-18T09:13:50.000Z | #############################################################
#
# WARNING
#
# This module is extremely experimental.
#
# May contain traces of:
# * using the GitHub API to pull content of a repo
# * string parsing
# * gluten
#
#############################################################
import base64
import re
import hashlib
from .helpers import *
def get_contributors_file(repo_name):
    """Fetch and parse the repository's CONTRIBUTORS file via the GitHub contents API.

    Each non-comment, non-blank line is expected to look like one of:
        Some Name <email@example.com>
        Some Name <twitter:@handle>

    Returns a list of {'name': ..., 'user_name': ...} dicts, or an empty list
    when the repository has no CONTRIBUTORS file.
    """
    progress("Collecting CONTRIBUTORS file")
    response = get_data("/repos/%s/contents/CONTRIBUTORS" % repo_name)
    if response is None:
        print("No CONTRIBUTORS file")
        return []
    if "message" in response:
        # The API returned an error document (e.g. "Not Found") instead of file
        # contents; bail out before trying to decode a payload that isn't there.
        # (The original fell through here and crashed on response["content"].)
        print("No CONTRIBUTORS file")
        return []
    results = []
    content = base64.b64decode(response["content"]).decode("utf-8", "ignore")
    for raw_line in content.splitlines():
        progress_advance()
        line = raw_line.strip()
        # Skip comments and blank lines. (The original compared with
        # `is not ""`, an identity check on a literal, and left name/user_name
        # undefined on malformed lines, raising NameError.)
        if not line or line.startswith("#"):
            continue
        if "<" in line:
            name, _, alias = line.rstrip(">").partition("<")
            if ":" in alias:
                # "<service:@handle>" style alias.
                service, _, user_name = alias.partition(":@")
                if service == "twitter":
                    user_name += " (twitter)"
            elif "@" in alias:
                # Plain "<email@host>" style alias.
                user_name = alias
            else:
                # Unrecognised alias format - keep it verbatim rather than crash.
                user_name = alias
        else:
            # No "<alias>" part at all: record the plain line for both fields.
            log.debug("Invalid contributor line type: %s. Returning plain" % line)
            name = user_name = line
        results.append({'name': name.strip(), 'user_name': user_name})
    progress_complete()
    return results
| 27.534483 | 94 | 0.478397 | true | true | |
1c327ac5ec64f945e7fd04b7eb965e6ba822b882 | 4,074 | py | Python | scripts/gen_rf_xmacro_def_undo.py | refu-lang/clib | 327aeea6ed480257c1c4638ba21f2c347f5960ae | [
"MIT",
"BSD-3-Clause"
] | 3 | 2017-02-13T22:31:32.000Z | 2021-07-11T11:59:39.000Z | scripts/gen_rf_xmacro_def_undo.py | refu-lang/clib | 327aeea6ed480257c1c4638ba21f2c347f5960ae | [
"MIT",
"BSD-3-Clause"
] | 1 | 2017-02-06T00:13:05.000Z | 2017-02-06T00:13:05.000Z | scripts/gen_rf_xmacro_def_undo.py | refu-lang/rfbase | 327aeea6ed480257c1c4638ba21f2c347f5960ae | [
"MIT",
"BSD-3-Clause"
] | null | null | null | #the maximum number of arguments for a function
# Generates include/Preprocessor/rf_xmacro_def_undo.h: one #undef for every
# macro defined by rf_xmacro_def.h, so the whole family can be undone at once.

# The maximum number of arguments for a function.
maxArgs = 36

# Needed to resolve the output path relative to this script's location.
import os.path
import sys

print("Generating rf_xmacro_def_undo.h ...")
with open(os.path.dirname(sys.argv[0]) + "/../include/Preprocessor/rf_xmacro_def_undo.h", "w") as f:
    # File header comment.
    f.write("/**\n** @author Lefteris\n** @date 13/02/2012\n**\n\
** This file contains #undef so to easily undefine all the macros defined by\n\
** rf_xmacro_def.h . It is automatically generated\n\
** by the python script gen_rf_xmacro_def_undo.py\n")
    f.write("*/")

    # Macros used to pick up the correct macro for function definition.
    f.write("\n\n\n///These macros here are used in order to pickup the correct function macro definition\n")
    f.write("#undef i_RP_CHECK2\n")
    f.write("#undef i_RP_CHECK1\n")
    f.write("#undef i_RP_MACRO_CHECK\n")
    f.write("#undef i_RP_MACRO_CHECK2\n")
    f.write("#undef i_RP_PICK_FUNC_DEF\n")

    # The i_REVERSE macros that reverse an argument list.
    f.write("\n///These macros are used to reverse a list of arguments. They are used to obtain the appropriate default arguments\n")
    for i in range(maxArgs, -1, -1):
        f.write("#undef i_REVERSE" + str(i) + "\n")

    # Macros that take the N default arguments from the default-arguments list.
    f.write("\n//!These macros are used to get the appropriate number of default arguments\n\n\n")
    for i in range(maxArgs, -1, -1):
        f.write("//! Macros to get the appropriate number of arguments for " + str(i) + "\n")
        # NOTE: the trailing spaces and the "NOCCOMA" spelling below mirror the
        # macro names emitted by the defining header and must stay as-is.
        f.write("#undef i_AFTER_FIRST" + str(i) + "\n")
        f.write("#undef i_AFTER_FIRST" + str(i) + "_NOCOMMA\n")
        f.write("#undef i_FIRST" + str(i) + "\n")
        f.write("#undef i_FIRST" + str(i) + "_NOCOMMA\n")
        f.write("#undef i_LAST" + str(i) + "\n")
        f.write("#undef i_LAST" + str(i) + "_IMP \n ")
        f.write("#undef i_LAST" + str(i) + "_NOCOMMA \n ")
        f.write("#undef i_LAST" + str(i) + "_NOCCOMA_IMP \n ")
        f.write("#undef i_RP_DEFAULT_ARGS_" + str(i) + "_LAST0\n")
        f.write("#undef i_RP_DEFAULT_ARGS_" + str(i) + "_LAST1 \n")

    # The macro that yields the appropriate default-argument macro.
    f.write("//! This macro gets the appropriate default arguments macro\n")
    f.write("#undef i_RP_GET_DEFAULT_ARG \n")
    f.write("#undef i_RP_GET_DEFAULT_ARG_IMP \n")

    # The bulk: macros used to define functions with default arguments.
    f.write("\n//! These macros are used when you want to define a function in a source file with default arguments and want to avoid lots of typing\n")
    for i in range(maxArgs, -1, -1):
        f.write("//! Function definition macros for " + str(i) + " arguments functions\n")
        # 0) the macro from which everything is called
        f.write("#undef RF_DEFINE_DEFAULT_ARG_FUNC" + str(i) + " \n")
        # 1) the general definition macro which picks up the rest
        f.write("#undef i_RP_DEFINE_FUNC" + str(i) + " \n")
        # 2) definition when the compulsory argument count equals the current one
        f.write("#undef i_RP_DEFINE_FUNC" + str(i) + "_IMP00 \n")
        # 3) definition when the compulsory argument count is less than the current one
        f.write("#undef i_RP_DEFINE_FUNC" + str(i) + "_IMP01 \n")
        # 4) definition when neither of the first two holds.
        # BUG FIX: the original emitted "#define" here, which would *define* an
        # empty macro inside a header whose sole purpose is to undefine everything.
        f.write("#undef i_RP_DEFINE_FUNC" + str(i) + "_IMP10 \n")

print("rf_xmacro_def_undo.h has been generated!")
maxArgs = 36
import os.path
import sys
print("Generating rf_xmacro_def_undo.h ...");
f = open(os.path.dirname(sys.argv[0]) + "/../include/Preprocessor/rf_xmacro_def_undo.h","w");
f.write("/**\n** @author Lefteris\n** @date 13/02/2012\n**\n\
** This file contains #undef so to easily undefine all the macros defined by\n\
** rf_xmacro_def.h . It is automatically generated\n\
** by the python script gen_rf_xmacro_def_undo.py\n");
f.write("*/");
f.write("\n\n\n///These macros here are used in order to pickup the correct function macro definition\n")
f.write("#undef i_RP_CHECK2\n")
f.write("#undef i_RP_CHECK1\n")
f.write("#undef i_RP_MACRO_CHECK\n");
f.write("#undef i_RP_MACRO_CHECK2\n");
f.write("#undef i_RP_PICK_FUNC_DEF\n");
i = maxArgs
f.write("\n///These macros are used to reverse a list of arguments. They are used to obtain the appropriate default arguments\n")
while(i >= 0):
f.write("#undef i_REVERSE"+str(i)+"\n");
i-=1;
i = maxArgs;
f.write("\n//!These macros are used to get the appropriate number of default arguments\n\n\n")
while(i>=0):
f.write("//! Macros to get the appropriate number of arguments for "+str(i)+"\n");
f.write("#undef i_AFTER_FIRST"+str(i)+"\n");
f.write("#undef i_AFTER_FIRST"+str(i)+"_NOCOMMA\n");
f.write("#undef i_FIRST"+str(i)+"\n");
f.write("#undef i_FIRST"+str(i)+"_NOCOMMA\n");
f.write("#undef i_LAST"+str(i)+"\n");
f.write("#undef i_LAST"+str(i)+"_IMP \n ");
f.write("#undef i_LAST"+str(i)+"_NOCOMMA \n ");
f.write("#undef i_LAST"+str(i)+"_NOCCOMA_IMP \n ");
f.write("#undef i_RP_DEFAULT_ARGS_"+str(i)+"_LAST0\n");
f.write("#undef i_RP_DEFAULT_ARGS_"+str(i)+"_LAST1 \n");
i-=1
f.write("//! This macro gets the appropriate default arguments macro\n");
f.write("#undef i_RP_GET_DEFAULT_ARG \n");
f.write("#undef i_RP_GET_DEFAULT_ARG_IMP \n");
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.