hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7901c3c67acc8690e6d2267cb89d926f7b9775e1 | 4,181 | py | Python | examples/policies.py | simplivity/simplivity-python | 737ee847ac95cf3a33721765e7d4efcc0a0152e1 | [
"Apache-2.0"
] | null | null | null | examples/policies.py | simplivity/simplivity-python | 737ee847ac95cf3a33721765e7d4efcc0a0152e1 | [
"Apache-2.0"
] | null | null | null | examples/policies.py | simplivity/simplivity-python | 737ee847ac95cf3a33721765e7d4efcc0a0152e1 | [
"Apache-2.0"
] | 3 | 2020-01-15T14:49:03.000Z | 2020-01-24T20:45:24.000Z | ###
# (C) Copyright [2019-2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from simplivity.ovc_client import OVC
from simplivity.exceptions import HPESimpliVityException
import pprint

# Pretty-printer used throughout to dump resource data dictionaries.
pp = pprint.PrettyPrinter(indent=4)

# OVC connection settings; replace the placeholders with real values
# before running this example.
config = {
    "ip": "<ovc_ip>",
    "credentials": {
        "username": "<username>",
        "password": "<password>"
    }
}

# Log in to the OmniStack Virtual Controller and grab the resource clients
# used below (policies plus the resources policies can be suspended on).
ovc = OVC(config)
policies = ovc.policies
hosts = ovc.hosts
clusters = ovc.omnistack_clusters
cluster_groups = ovc.cluster_groups

# --- get_all with default parameters ------------------------------------
print("\n\nget_all with default params")
all_policies = policies.get_all()
count = len(all_policies)
for policy in all_policies:
    print(f"{policy}")
    print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")

# Keep one policy around to use as a lookup target below.
policy_object = all_policies[0]

# --- get_all with a name filter -----------------------------------------
print("\n\nget_all with filters")
all_policies = policies.get_all(filters={'name': policy_object.data["name"]})
count = len(all_policies)
for policy in all_policies:
    print(f"{policy}")
    print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")

# --- get_all with pagination --------------------------------------------
print("\n\nget_all with pagination")
pagination = policies.get_all(limit=105, pagination=True, page_size=50)
end = False
while not end:
    data = pagination.data
    print("Page size:", len(data["resources"]))
    print(f"{pp.pformat(data)}")
    try:
        pagination.next_page()
    except HPESimpliVityException:
        # next_page() raises once the last page has been consumed.
        end = True

# --- single-resource lookups --------------------------------------------
print("\n\nget_by_id")
policy = policies.get_by_id(policy_object.data["id"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")

print("\n\nget_by_name")
policy = policies.get_by_name(policy_object.data["name"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")

# --- VMs protected by this policy ---------------------------------------
print("\n\nget_all VMs using this policy")
vms = policy.get_vms()
print(policy.data)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"{pp.pformat(vms)} \n")

# --- create a policy ----------------------------------------------------
print("\n\ncreate policy")
policy_name = "fixed_frequency_retention_policy"
policy = policies.create(policy_name)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")

# Backup rules for the policy; create_rules() accepts a list of rule
# dictionaries (frequency/retention plus optional scheduling fields).
multiple_rules = [
    {
        "start_time": "14:30",
        "end_time": "15:30",
        "application_consistent": False,
        "frequency": 3,
        "retention": 5
    },
    {
        "frequency": 5,
        "retention": 6
    }
]

print("\n\nadd rules to policy")
policy.create_rules(multiple_rules)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")

# create_rules() also accepts a single rule dictionary.
single_rule = {
    "frequency": 10,
    "retention": 12
}
policy.create_rules(single_rule)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")

# --- fetch individual rules by id ---------------------------------------
print("\n\nget rule")
all_rules = policy.data["rules"]
for rule in all_rules:
    rule_obj = policy.get_rule(rule.get('id'))
    print(f"{pp.pformat(rule_obj)} \n")

# --- delete the first rule ----------------------------------------------
print("\n\ndelete rule")
rule_id = policy.data["rules"][0]['id']
policy.delete_rule(rule_id)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")

# --- suspend policies at different scopes -------------------------------
print("\n\nsuspend policy on host")
host = hosts.get_all()[0]
policies.suspend(host)

print("\n\nsuspend policy on omnistack_cluster")
cluster = clusters.get_all()[0]
policies.suspend(cluster)

""" cluster_group options works only with setup having MVA, please use below code for setup with MVA
cluster_group = cluster_groups.get_all()[0]
print(f"{cluster_group}")
print(f"{pp.pformat(cluster_group.data)} \n")
policies.suspend(cluster_group)
"""
""" federation options works only with setup NOT having MVA, please use below code for setup without MVA
print("\n\nsuspend policy on federation")
policies.suspend()
"""

# --- rename and clean up ------------------------------------------------
print("\n\nrename policy")
policy.rename(f"renamed_{policy.data['name']}")
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")

print("\n\ndelete policy")
policy.delete()
| 26.974194 | 104 | 0.702464 |
from simplivity.ovc_client import OVC
from simplivity.exceptions import HPESimpliVityException
import pprint
pp = pprint.PrettyPrinter(indent=4)
config = {
"ip": "<ovc_ip>",
"credentials": {
"username": "<username>",
"password": "<password>"
}
}
ovc = OVC(config)
policies = ovc.policies
hosts = ovc.hosts
clusters = ovc.omnistack_clusters
cluster_groups = ovc.cluster_groups
print("\n\nget_all with default params")
all_policies = policies.get_all()
count = len(all_policies)
for policy in all_policies:
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")
policy_object = all_policies[0]
print("\n\nget_all with filters")
all_policies = policies.get_all(filters={'name': policy_object.data["name"]})
count = len(all_policies)
for policy in all_policies:
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"Total number of policies : {count}")
print("\n\nget_all with pagination")
pagination = policies.get_all(limit=105, pagination=True, page_size=50)
end = False
while not end:
data = pagination.data
print("Page size:", len(data["resources"]))
print(f"{pp.pformat(data)}")
try:
pagination.next_page()
except HPESimpliVityException:
end = True
print("\n\nget_by_id")
policy = policies.get_by_id(policy_object.data["id"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget_by_name")
policy = policies.get_by_name(policy_object.data["name"])
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget_all VMs using this policy")
vms = policy.get_vms()
print(policy.data)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print(f"{pp.pformat(vms)} \n")
print("\n\ncreate policy")
policy_name = "fixed_frequency_retention_policy"
policy = policies.create(policy_name)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
multiple_rules = [
{
"start_time": "14:30",
"end_time": "15:30",
"application_consistent": False,
"frequency": 3,
"retention": 5
},
{
"frequency": 5,
"retention": 6
}
]
print("\n\nadd rules to policy")
policy.create_rules(multiple_rules)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
single_rule = {
"frequency": 10,
"retention": 12
}
policy.create_rules(single_rule)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nget rule")
all_rules = policy.data["rules"]
for rule in all_rules:
rule_obj = policy.get_rule(rule.get('id'))
print(f"{pp.pformat(rule_obj)} \n")
print("\n\ndelete rule")
rule_id = policy.data["rules"][0]['id']
policy.delete_rule(rule_id)
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\nsuspend policy on host")
host = hosts.get_all()[0]
policies.suspend(host)
print("\n\nsuspend policy on omnistack_cluster")
cluster = clusters.get_all()[0]
policies.suspend(cluster)
print("\n\nrename policy")
policy.rename(f"renamed_{policy.data['name']}")
print(f"{policy}")
print(f"{pp.pformat(policy.data)} \n")
print("\n\ndelete policy")
policy.delete()
| true | true |
7901c5187b7044265efe461875a621e1c571a518 | 8,214 | py | Python | vagrant_files/generator/files/databases/druid_cl5_rf1.py | TSDBBench/Overlord | d72b6927ceaf6631f5b07f411e34bec9904158c4 | [
"Apache-2.0"
] | 6 | 2017-07-05T16:59:16.000Z | 2020-07-01T10:17:09.000Z | vagrant_files/generator/files/databases/druid_cl5_rf1.py | TSDBBench/Overlord | d72b6927ceaf6631f5b07f411e34bec9904158c4 | [
"Apache-2.0"
] | 8 | 2017-11-03T13:36:53.000Z | 2021-09-05T11:05:17.000Z | vagrant_files/generator/files/databases/druid_cl5_rf1.py | TSDBBench/Overlord | d72b6927ceaf6631f5b07f411e34bec9904158c4 | [
"Apache-2.0"
] | 6 | 2016-11-10T12:56:41.000Z | 2018-06-19T21:53:58.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
__author__ = 'Andreas Bader'
__version__ = "0.01"
# db_folders -> List of DB Folder (for space check)
# db_client -> name of ycsb client
# db_args -> special ycsb arguments for this db
# db_name -> name of this db (e.g. for workload file)
# db_desc -> more detailed name/description
# jvm_args -> special jvm_args for this db and ycsb
# prerun_once -> list of commands to run local once before ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# postrun_once -> list of commands to run local once after ycsb (%%IP%% uses first db vm) (without ycsb, sync or space diff or poweroff commands!)
# prerun -> list of commands to run before ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# postrun -> list of commands to run after ycsb (all vms or local) (without ycsb, sync or space diff or poweroff commands!)
# prerun_master -> list of commands to run before ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_master -> list of commands to run after ycsb (only on master(first=ID 0) vm or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_slaves -> list of commands to run before ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# postrun_slaves -> list of commands to run after ycsb (only on slave (all without master(=ID 0)) vms or local)) (without ycsb, sync or space diff or poweroff commands!)
# prerun_dict -> list of commands to run before ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# postrun_dict -> list of commands to run after ycsb for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# check -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (systemctl start xyz oftern returns true even if start failed somehow. Check that here!)
# check_master -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (only on master(first=ID 0) vm or local))
# check_slaves -> list of commands to run after prerun (all vms or local) for checking if everything runs correctly (all without master(=ID 0)) vms or local))
# check_dict -> list of commands to run after prerun for each db vm (key=number of vm) (without ycsb, sync or space diff or poweroff commands!) (%%SSH%% not needed)
# basic -> True/False, if True this is a basic database, so no need to ssh for space checking
# sequence -> which vm should be provisioned first? (for all postrun/prerun dicts/lists. First number is considered master db vm, rest are slaves.)
# include -> which base modules should be imported and added to the dictionary (standard functions that are reusable). Warning: infinite import loop possible!
# the following variables are possible in prerun_once, postrun_once, prerun, prerun_master, prerun_slaves, check, check_master, check_slaves, postrun, postrun_master, postrun_slaves, prerun_dict, postrun_dict, check_dict, db_args:
# %%IP%% -> IP of (actual) db vm
# %%IPgen%% -> IP of (actual) generator vm (on which this script runs)
# %%IPn%% -> IP of db vm number n (e.g. %%IP2%%)
# %%IPall%% -> give String with IP of all vms)
# %%HN%% -> Hostname of (actual) db vm
# %%HNgen%% -> Hostname of (actual) generator vm (on which this script runs)
# %%HNn%% -> Hostname of db vm number n (e.g. %%HN2%%)
# %%HNall%% -> give String with Hostname of all vms)
# %%SSH%% -> if SSH should be used (set at the beginning)
# Order of Preruns/Postruns:
# 1. prerun/postrun/check, 2. prerun_master/postrun_master/check_master, 3. preun_skaves/postrun_slaves/check_slaves, 4.prerun_dict/postrun_dict/check_dict
# General Order:
# prerun -> check -> ycsb -> postrun
def getDict():
    """Build the configuration dictionary for the druid_cl5_rf1 setup.

    The returned dictionary drives the benchmark harness: it names the YCSB
    client and its arguments, lists the on-disk folders to watch for space
    usage, and holds the per-VM provisioning commands and post-start health
    checks for a five-VM Druid cluster with replication factor 1.
    """
    # One Druid service per VM; the VM id equals the list index and matches
    # the provisioning order given in "sequence" below.
    services = ["coordinator", "broker", "historical", "middlemanager", "overlord"]

    # systemd start command for each VM's service.
    startCommands = {
        vm: ["%%SSH%%sudo -s bash -c 'systemctl start druid_{0}.service'".format(svc)]
        for vm, svc in enumerate(services)
    }
    # The overlord VM (id 4) additionally waits so the cluster can settle.
    startCommands[4].append("bash -c 'sleep 180'")

    # Health checks per VM: the repo unit must not be dead, and the VM's own
    # service unit must not have exited and must be running.
    checkCommands = {}
    for vm, svc in enumerate(services):
        checkCommands[vm] = [
            "%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
            "%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_{0}.service | grep -c \"active (exited)\")'".format(svc),
            "%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_{0}.service | grep -c \"active (running)\")-1))'".format(svc),
        ]

    # Replace "localhost" in each runtime.properties with the proper VM IP.
    sedTargets = [
        ("%%IP0%%", "_common/common.runtime.properties"),
        ("%%IP1%%", "broker/runtime.properties"),
        ("%%IP0%%", "coordinator/runtime.properties"),
        ("%%IP2%%", "historical/runtime.properties"),
        ("%%IP3%%", "middleManager/runtime.properties"),
        ("%%IP4%%", "overlord/runtime.properties"),
    ]
    prerunCommands = [
        "%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|{0}|g\" /home/vagrant/config/{1}'".format(ip, cfgFile)
        for ip, cfgFile in sedTargets
    ]

    return {
        "db_folders": ["/tmp/druid/indexCache", "/tmp/persistent/zk_druid",
                       "/tmp/persistent/task/", "/tmp/druid/localStorage",
                       "/var/lib/mysql"],
        "db_client": "druid",
        "db_args": "-p zookeeperip=%%IP0%% -p queryip=%%IP1%% -p zookeeperport=2181 -p queryport=8090 -p replicants=1",
        "db_name": "druid_cl5_rf1",
        "db_desc": "Druid (Broker,Coordinator,Historical,MiddleManager,Overlord) on 5 VMs with Replication Factor 1. Ingest via Tranquility/Finagle, Query via REST.",
        "jvm_args": "-jvm-args='-Xmx4096m'",
        "prerun_once": [],
        "postrun_once": [],
        "prerun": prerunCommands,
        "postrun": [],
        "prerun_master": [],
        "postrun_master": [],
        "prerun_slaves": [],
        "postrun_slaves": [],
        "prerun_dict": startCommands,
        "postrun_dict": {},
        "check": [],
        "check_master": [],
        "check_slaves": [],
        "check_dict": checkCommands,
        "basic": False,
        "sequence": [0, 1, 2, 3, 4],
        "include": [],
    }
__author__ = 'Andreas Bader'
__version__ = "0.01"
def getDict():
dbConfig={}
dbConfig["db_folders"]=["/tmp/druid/indexCache", "/tmp/persistent/zk_druid", "/tmp/persistent/task/", "/tmp/druid/localStorage", "/var/lib/mysql"]
dbConfig["db_client"]="druid"
dbConfig["db_args"]="-p zookeeperip=%%IP0%% -p queryip=%%IP1%% -p zookeeperport=2181 -p queryport=8090 -p replicants=1"
dbConfig["db_name"]="druid_cl5_rf1"
dbConfig["db_desc"]="Druid (Broker,Coordinator,Historical,MiddleManager,Overlord) on 5 VMs with Replication Factor 1. Ingest via Tranquility/Finagle, Query via REST."
dbConfig["jvm_args"]="-jvm-args='-Xmx4096m'"
dbConfig["prerun_once"]= []
dbConfig["postrun_once"]= []
dbConfig["prerun"]= ["%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP0%%|g\" /home/vagrant/config/_common/common.runtime.properties'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP1%%|g\" /home/vagrant/config/broker/runtime.properties'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP0%%|g\" /home/vagrant/config/coordinator/runtime.properties'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP2%%|g\" /home/vagrant/config/historical/runtime.properties'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP3%%|g\" /home/vagrant/config/middleManager/runtime.properties'",
"%%SSH%%sudo -s bash -c 'sed -i \"s|localhost|%%IP4%%|g\" /home/vagrant/config/overlord/runtime.properties'"]
dbConfig["postrun"]= []
dbConfig["prerun_master"]= []
dbConfig["postrun_master"]= []
dbConfig["prerun_slaves"]= []
dbConfig["postrun_slaves"]= []
dbConfig["prerun_dict"]= {
0 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_coordinator.service'"],
1 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_broker.service'"],
2 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_historical.service'"],
3 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_middlemanager.service'"],
4 : ["%%SSH%%sudo -s bash -c 'systemctl start druid_overlord.service'",
"bash -c 'sleep 180'"]
}
dbConfig["postrun_dict"]= {}
dbConfig["check"]= []
dbConfig["check_master"]= []
dbConfig["check_slaves"]= []
dbConfig["check_dict"]= {
0 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_coordinator.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_coordinator.service | grep -c \"active (running)\")-1))'"],
1 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_broker.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_broker.service | grep -c \"active (running)\")-1))'"],
2 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_historical.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_historical.service | grep -c \"active (running)\")-1))'"],
3 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_middlemanager.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_middlemanager.service | grep -c \"active (running)\")-1))'"],
4 : ["%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_repo.service | grep -c \"inactive (dead)\")-1))'",
"%%SSH%%sudo -s bash -c 'exit $(systemctl status druid_overlord.service | grep -c \"active (exited)\")'",
"%%SSH%%sudo -s bash -c 'exit $(($(systemctl status druid_overlord.service | grep -c \"active (running)\")-1))'"]
}
dbConfig["basic"]= False
dbConfig["sequence"]=[0,1,2,3,4]
dbConfig["include"] = []
return dbConfig | true | true |
7901c635b9ceb8544fdc9593b965c809ae23da78 | 4,227 | py | Python | pysnmp-with-texts/Nortel-MsCarrier-MscPassport-ExtensionsMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/Nortel-MsCarrier-MscPassport-ExtensionsMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/Nortel-MsCarrier-MscPassport-ExtensionsMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module Nortel-MsCarrier-MscPassport-ExtensionsMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-MsCarrier-MscPassport-ExtensionsMIB
# Produced by pysmi-0.3.4 at Wed May 1 14:29:54 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Import the base ASN.1/SMI types, constraint classes and textual
# conventions from the MIB modules already loaded into the pysnmp MIB
# builder.  Like all pysmi-generated modules, this code expects a
# `mibBuilder` instance to be supplied by the loader at execution time.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
RowPointer, = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-StandardTextualConventionsMIB", "RowPointer")
mscPassportMIBs, mscComponents = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-UsefulDefinitionsMIB", "mscPassportMIBs", "mscComponents")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, MibIdentifier, iso, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, NotificationType, Counter32, Bits, Gauge32, IpAddress, TimeTicks, Integer32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "MibIdentifier", "iso", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "NotificationType", "Counter32", "Bits", "Gauge32", "IpAddress", "TimeTicks", "Integer32", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")

# Root object identifiers of the Nortel Passport extensions MIB subtrees.
extensionsMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5))
mscExtensions = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4))

# mscExtensionIfTable augments the standard IF-MIB ifTable (rows indexed by
# ifIndex) with one enterprise-specific, read-only column.
mscExtensionIfTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1), )
if mibBuilder.loadTexts: mscExtensionIfTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscExtensionIfTable.setDescription('A table which provides enterprise extensions to the standard ifTable.')
mscExtensionIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: mscExtensionIfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscExtensionIfEntry.setDescription(' An entry containing enterprise extensions to the standard ifEntry.')
# Read-only pointer from an ifTable entry to the RowStatus variable of the
# component the entry represents.
mscIfRowPointer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1, 1, 1), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscIfRowPointer.setStatus('mandatory')
if mibBuilder.loadTexts: mscIfRowPointer.setDescription('A pointer to the RowStatus variable for the component represented by the ifTable entry.')

# Conformance group and agent-capabilities identifiers (revision CA01A).
extensionsGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1))
extensionsGroupCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1))
extensionsGroupCA01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1, 2))
extensionsGroupCA01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1, 2, 2))
extensionsCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3))
extensionsCapabilitiesCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1))
extensionsCapabilitiesCA01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1, 2))
extensionsCapabilitiesCA01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1, 2, 2))

# Publish every symbol defined above so other MIB modules can import them.
mibBuilder.exportSymbols("Nortel-MsCarrier-MscPassport-ExtensionsMIB", extensionsGroup=extensionsGroup, extensionsGroupCA01=extensionsGroupCA01, extensionsCapabilitiesCA=extensionsCapabilitiesCA, extensionsGroupCA=extensionsGroupCA, extensionsMIB=extensionsMIB, mscIfRowPointer=mscIfRowPointer, extensionsCapabilitiesCA01A=extensionsCapabilitiesCA01A, extensionsGroupCA01A=extensionsGroupCA01A, extensionsCapabilities=extensionsCapabilities, extensionsCapabilitiesCA01=extensionsCapabilitiesCA01, mscExtensions=mscExtensions, mscExtensionIfTable=mscExtensionIfTable, mscExtensionIfEntry=mscExtensionIfEntry)
| 114.243243 | 607 | 0.772652 |
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
RowPointer, = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-StandardTextualConventionsMIB", "RowPointer")
mscPassportMIBs, mscComponents = mibBuilder.importSymbols("Nortel-MsCarrier-MscPassport-UsefulDefinitionsMIB", "mscPassportMIBs", "mscComponents")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, MibIdentifier, iso, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, NotificationType, Counter32, Bits, Gauge32, IpAddress, TimeTicks, Integer32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "MibIdentifier", "iso", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "NotificationType", "Counter32", "Bits", "Gauge32", "IpAddress", "TimeTicks", "Integer32", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
extensionsMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5))
mscExtensions = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4))
mscExtensionIfTable = MibTable((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1), )
if mibBuilder.loadTexts: mscExtensionIfTable.setStatus('mandatory')
if mibBuilder.loadTexts: mscExtensionIfTable.setDescription('A table which provides enterprise extensions to the standard ifTable.')
mscExtensionIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: mscExtensionIfEntry.setStatus('mandatory')
if mibBuilder.loadTexts: mscExtensionIfEntry.setDescription(' An entry containing enterprise extensions to the standard ifEntry.')
mscIfRowPointer = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 36, 2, 1, 4, 1, 1, 1), RowPointer()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mscIfRowPointer.setStatus('mandatory')
if mibBuilder.loadTexts: mscIfRowPointer.setDescription('A pointer to the RowStatus variable for the component represented by the ifTable entry.')
extensionsGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1))
extensionsGroupCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1))
extensionsGroupCA01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1, 2))
extensionsGroupCA01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 1, 1, 2, 2))
extensionsCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3))
extensionsCapabilitiesCA = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1))
extensionsCapabilitiesCA01 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1, 2))
extensionsCapabilitiesCA01A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 36, 2, 2, 5, 3, 1, 2, 2))
mibBuilder.exportSymbols("Nortel-MsCarrier-MscPassport-ExtensionsMIB", extensionsGroup=extensionsGroup, extensionsGroupCA01=extensionsGroupCA01, extensionsCapabilitiesCA=extensionsCapabilitiesCA, extensionsGroupCA=extensionsGroupCA, extensionsMIB=extensionsMIB, mscIfRowPointer=mscIfRowPointer, extensionsCapabilitiesCA01A=extensionsCapabilitiesCA01A, extensionsGroupCA01A=extensionsGroupCA01A, extensionsCapabilities=extensionsCapabilities, extensionsCapabilitiesCA01=extensionsCapabilitiesCA01, mscExtensions=mscExtensions, mscExtensionIfTable=mscExtensionIfTable, mscExtensionIfEntry=mscExtensionIfEntry)
| true | true |
7901c67ce98b80bb45d71a8a82809845d3c6f033 | 1,401 | py | Python | src/apps/users/views/rest/client_address.py | leonardon473/my-dinner-backend | ae64fca37d102ce5bda7570be8680e03e9b142d8 | [
"MIT"
] | null | null | null | src/apps/users/views/rest/client_address.py | leonardon473/my-dinner-backend | ae64fca37d102ce5bda7570be8680e03e9b142d8 | [
"MIT"
] | null | null | null | src/apps/users/views/rest/client_address.py | leonardon473/my-dinner-backend | ae64fca37d102ce5bda7570be8680e03e9b142d8 | [
"MIT"
] | null | null | null | # -----------------------------------------------------------------------------
# Libraries
# -----------------------------------------------------------------------------
# Core libs
from typing import TYPE_CHECKING
# Third party libs
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
# Project libs
from apps.users.models import ClientAddress
from apps.users.serializers.client_address import (
ClientAddressCreateSerializer,
ClientAddressRetrieveSerializer,
)
# If type checking, __all__
if TYPE_CHECKING:
pass
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class ClientAddressCreateListView(ListCreateAPIView):
    """List all client addresses (GET) or create a new one (POST).

    Standard DRF ListCreateAPIView over the full ClientAddress queryset,
    serialized with ClientAddressCreateSerializer.
    """
    queryset = ClientAddress.objects.all()
    serializer_class = ClientAddressCreateSerializer
class ClientAddressRetrieveUpdateView(RetrieveUpdateDestroyAPIView):
    """GET / PUT / PATCH / DELETE a single ClientAddress looked up by pk."""

    serializer_class = ClientAddressRetrieveSerializer
    queryset = ClientAddress.objects.all()
| 33.357143 | 83 | 0.443969 |
from typing import TYPE_CHECKING
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
from apps.users.models import ClientAddress
from apps.users.serializers.client_address import (
ClientAddressCreateSerializer,
ClientAddressRetrieveSerializer,
)
if TYPE_CHECKING:
pass
class ClientAddressCreateListView(ListCreateAPIView):
queryset = ClientAddress.objects.all()
serializer_class = ClientAddressCreateSerializer
class ClientAddressRetrieveUpdateView(RetrieveUpdateDestroyAPIView):
queryset = ClientAddress.objects.all()
serializer_class = ClientAddressRetrieveSerializer
| true | true |
7901c90c781e5c3ca570333b040cdaa196547fc9 | 2,794 | py | Python | scalene-triangle/libs/PDB_filegetter.py | dsw7/BridgingInteractions | 4ed55bb2009a424c4e306ede04f0bf3572331fae | [
"MIT"
] | 1 | 2021-09-16T01:48:20.000Z | 2021-09-16T01:48:20.000Z | scalene-triangle/libs/PDB_filegetter.py | dsw7/BridgingInteractions | 4ed55bb2009a424c4e306ede04f0bf3572331fae | [
"MIT"
] | null | null | null | scalene-triangle/libs/PDB_filegetter.py | dsw7/BridgingInteractions | 4ed55bb2009a424c4e306ede04f0bf3572331fae | [
"MIT"
] | null | null | null | # Written by David Weber
# dsw7@sfu.ca
"""
In this short namespace I house a class that connects to PDB and downloads
file over PDB file transfer protocol.
"""
# ------------------------------------------------------------------------------
import gzip
from os import remove, getcwd, path # built in
# my pymol API built on Python2 - try both imports
try:
from urllib.request import urlretrieve, urlcleanup
except ImportError:
from urllib import urlretrieve, urlcleanup
ROOT = 'ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/{}/{}'
class PDBFile:
    """Fetch a PDB structure file from the wwPDB FTP archive and clean up after use."""

    def __init__(self, code):
        """Initialize a PDBFile object with a pdb file of interest.

        Parameters
        ----------
        code : str
            Four-character PDB accession code, e.g. '1rcy'. Case-insensitive;
            stored lowercased because the FTP archive paths are lowercase.

        Examples
        --------
        >>> pdb_file = PDBFile('1rcy')
        """
        self.code = code.lower()

    def fetch_from_PDB(self):
        """Download pdb{code}.ent.gz from the PDB FTP server and decompress it.

        Dumps a copy of the decompressed pdb{code}.ent file into the current
        working directory and removes the intermediate .gz download.

        Returns
        -------
        str
            Absolute path to the decompressed .ent file, or the sentinel
            string 'URLError' if the download failed (kept as-is for
            backward compatibility with existing callers).

        Examples
        --------
        >>> inst = PDBFile('1rcy')
        >>> path_to_file = inst.fetch_from_PDB()
        """
        # The archive shards entries by the middle two characters of the code.
        subdir = self.code[1:3]
        infile = 'pdb{}.ent.gz'.format(self.code)
        # BUGFIX: str.strip('.gz') removes the *characters* '.', 'g', 'z'
        # from both ends (it only worked here by accident because 'ent'
        # ends in 't'); slice the literal '.gz' suffix off instead.
        decompressed = infile[:-len('.gz')]
        fullpath = ROOT.format(subdir, infile)

        try:
            urlcleanup()
            urlretrieve(fullpath, infile)
        except Exception:
            return 'URLError'
        else:
            with gzip.open(infile, 'rb') as gz:
                with open(decompressed, 'wb') as out:
                    out.writelines(gz)
            remove(infile)  # discard the compressed download once expanded
            return path.join(getcwd(), decompressed)

    def clear(self):
        """Delete pdb{code}.ent from the cwd after it has been processed.

        Prints a notice (rather than raising) if the file is absent.

        Examples
        --------
        >>> inst = PDBFile('1rcy')
        >>> path_to_file = inst.fetch_from_PDB()
        >>> inst.clear()
        """
        filename = 'pdb{}.ent'.format(self.code)
        try:
            remove(path.join(getcwd(), filename))
        except FileNotFoundError:
            print('Cannot delete file. Does not exist.')
| 28.222222 | 81 | 0.494273 |
import gzip
from os import remove, getcwd, path
try:
from urllib.request import urlretrieve, urlcleanup
except ImportError:
from urllib import urlretrieve, urlcleanup
ROOT = 'ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/{}/{}'
class PDBFile:
def __init__(self, code):
self.code = code.lower()
def fetch_from_PDB(self):
subdir = self.code[1:3]
infile = 'pdb{}.ent.gz'.format(self.code)
decompressed = infile.strip('.gz')
fullpath = ROOT.format(subdir, infile)
try:
urlcleanup()
urlretrieve(fullpath, infile)
except Exception:
return 'URLError'
else:
with gzip.open(infile, 'rb') as gz:
with open(decompressed, 'wb') as out:
out.writelines(gz)
remove(infile)
return path.join(getcwd(), decompressed)
def clear(self):
filename = 'pdb{}.ent'.format(self.code)
try:
remove(path.join(getcwd(), filename))
except FileNotFoundError:
print('Cannot delete file. Does not exist.')
| true | true |
7901c91a06af59b437ee73b9b9946d6eaf847d95 | 6,664 | py | Python | examples/03_sweep_linear.py | RedVoxInc/libquantum | 5e0d741e69be0ac1b94c018a4a0de99f4630deae | [
"Apache-2.0"
] | 2 | 2021-02-24T21:54:16.000Z | 2021-03-05T23:06:54.000Z | examples/03_sweep_linear.py | RedVoxInc/libquantum | 5e0d741e69be0ac1b94c018a4a0de99f4630deae | [
"Apache-2.0"
] | null | null | null | examples/03_sweep_linear.py | RedVoxInc/libquantum | 5e0d741e69be0ac1b94c018a4a0de99f4630deae | [
"Apache-2.0"
] | null | null | null | """
libquantum example 3: 03_sweep_linear.py
Construct classic linear chirp and illustrate CWT and STFT TRFs.
"""
import os
from pathlib import Path
import numpy as np
import scipy.io.wavfile
import matplotlib.pyplot as plt
from libquantum import atoms, entropy, scales, spectra, utils, synthetics
import libquantum.plot_templates.plot_time_frequency_reps as pltq
if __name__ == "__main__":
    """
    Exercises with classic linear sweep.
    Builds a red/blue-shifted linear chirp in noise, optionally exports it to
    wav, then compares CWT and STFT time-frequency representations.
    """
    # Do you want to export a wav file? True or False
    do_save_wave = False
    # If True, saves to home directory
    home_dir: str = str(Path.home())
    # Or can specify a preferred wav file directory
    # home_dir: str = "/Users/mgarces/Documents/DATA_API_M/synthetics"
    output_wav_directory = os.path.join(home_dir, "wav")
    EVENT_NAME = "redshift_linear_sweep"
    print("Event Name: " + EVENT_NAME)
    wav_filename = EVENT_NAME
    # Fractional-octave band order N used by both TFRs below
    order_number_input = 3
    station_id_str = 'Synth'
    run_time_epoch_s = utils.datetime_now_epoch_s()
    # Chirp type: True selects the redshift (time-reversed, descending) sweep
    is_redshift = True
    # Sweep parameters: 40-400 Hz linear chirp sampled at 8 kHz with
    # head_s of lead-in/lead-out noise on each side
    sig_wf_sample_rate_hz = 8000.
    sig_frequency_hz_start = 40.
    sig_frequency_hz_end = 400.
    sig_duration_s = 13.19675
    head_s = 0.5
    # Blueshift sweep: ascending linear chirp embedded in noise (snr_bits sets SNR)
    sig_wf_blu, sig_wf_epoch_s = synthetics.chirp_linear_in_noise(snr_bits=12.,
                                                                  sample_rate_hz=sig_wf_sample_rate_hz,
                                                                  duration_s=sig_duration_s,
                                                                  frequency_start_hz=sig_frequency_hz_start,
                                                                  frequency_end_hz=sig_frequency_hz_end,
                                                                  intro_s=head_s,
                                                                  outro_s=head_s)
    # Time-reversing the blueshift sweep yields the redshift (descending) sweep
    sig_wf_red = np.flipud(sig_wf_blu)
    # Choose origin and red/blue shift
    sig_wf_epoch_s += run_time_epoch_s
    sig_wf = np.copy(sig_wf_red)
    # Antialias filter synthetic
    synthetics.antialias_halfNyquist(synth=sig_wf)
    # Export to wav directory
    if do_save_wave:
        wav_sample_rate_hz = 8000.
        export_filename = os.path.join(output_wav_directory, wav_filename + "_8kz.wav")
        # Normalize to 90% of full scale before writing the wav
        synth_wav = 0.9 * np.real(sig_wf) / np.max(np.abs((np.real(sig_wf))))
        scipy.io.wavfile.write(export_filename, int(wav_sample_rate_hz), synth_wav)
    # Frame to mic start and end and plot
    event_reference_time_epoch_s = sig_wf_epoch_s[0]
    max_time_s, min_frequency_hz = scales.from_duration(band_order_Nth=order_number_input,
                                                        sig_duration_s=sig_duration_s)
    print('\nRequest Order N=', order_number_input)
    print('Lowest frequency in hz that can support this order for this signal duration is ', min_frequency_hz)
    print('Scale with signal duration and to Nyquist, default G2 base re F1')
    # Select plot frequencies: lowest supportable band up to Nyquist
    fmin = np.ceil(min_frequency_hz)
    fmax = sig_wf_sample_rate_hz/2.
    # TFR SECTION
    # Compute complex wavelet transform (cwt) from signal duration
    if is_redshift:
        # index_shift=-1 selects the redshift atom dictionary
        mic_cwt, mic_cwt_bits, mic_cwt_time_s, mic_cwt_frequency_hz = \
            atoms.cwt_chirp_from_sig(sig_wf=sig_wf,
                                     frequency_sample_rate_hz=sig_wf_sample_rate_hz,
                                     band_order_Nth=order_number_input,
                                     dictionary_type="tone",
                                     index_shift=-1)
    else:
        mic_cwt, mic_cwt_bits, mic_cwt_time_s, mic_cwt_frequency_hz = \
            atoms.cwt_chirp_from_sig(sig_wf=sig_wf,
                                     frequency_sample_rate_hz=sig_wf_sample_rate_hz,
                                     band_order_Nth=order_number_input,
                                     dictionary_type="tone")
    mic_cwt_snr, mic_cwt_snr_bits, mic_cwt_snr_entropy = entropy.snr_mean_max(tfr_coeff_complex=mic_cwt)
    pltq.plot_wf_mesh_mesh_vert(redvox_id=station_id_str,
                                wf_panel_2_sig=sig_wf,
                                wf_panel_2_time=sig_wf_epoch_s,
                                mesh_time=mic_cwt_time_s,
                                mesh_frequency=mic_cwt_frequency_hz,
                                mesh_panel_1_trf=mic_cwt_bits,
                                mesh_panel_1_colormap_scaling="range",
                                mesh_panel_0_tfr=mic_cwt_snr_entropy,
                                wf_panel_2_units="Norm",
                                mesh_panel_1_cbar_units="bits",
                                mesh_panel_0_cbar_units="eSNR bits",
                                start_time_epoch=event_reference_time_epoch_s,
                                figure_title="CWT for " + EVENT_NAME,
                                frequency_hz_ymin=fmin,
                                frequency_hz_ymax=fmax)
    # Compute short term Fourier transform (STFT) from segmented signal duration
    mic_stft, mic_stft_bits, mic_stft_time_s, mic_stft_frequency_hz = \
        spectra.stft_from_sig(sig_wf=sig_wf,
                              frequency_sample_rate_hz=sig_wf_sample_rate_hz,
                              band_order_Nth=order_number_input)
    mic_stft_snr, mic_stft_snr_bits, mic_stft_snr_entropy = entropy.snr_mean_max(tfr_coeff_complex=mic_stft)
    # Log frequency is the default, for linear use frequency_scaling="linear",
    pltq.plot_wf_mesh_mesh_vert(frequency_scaling="log",
                                redvox_id=station_id_str,
                                wf_panel_2_sig=sig_wf,
                                wf_panel_2_time=sig_wf_epoch_s,
                                mesh_time=mic_stft_time_s,
                                mesh_frequency=mic_stft_frequency_hz,
                                mesh_panel_1_trf=mic_stft_bits,
                                mesh_panel_1_colormap_scaling="range",
                                mesh_panel_0_tfr=mic_stft_snr_entropy,
                                wf_panel_2_units="Norm",
                                mesh_panel_1_cbar_units="bits",
                                mesh_panel_0_cbar_units="eSNR bits",
                                figure_title="STFT for " + EVENT_NAME,
                                frequency_hz_ymin=fmin,
                                frequency_hz_ymax=fmax)
    plt.show()
| 44.13245 | 110 | 0.579232 |
import os
from pathlib import Path
import numpy as np
import scipy.io.wavfile
import matplotlib.pyplot as plt
from libquantum import atoms, entropy, scales, spectra, utils, synthetics
import libquantum.plot_templates.plot_time_frequency_reps as pltq
if __name__ == "__main__":
do_save_wave = False
home_dir: str = str(Path.home())
output_wav_directory = os.path.join(home_dir, "wav")
EVENT_NAME = "redshift_linear_sweep"
print("Event Name: " + EVENT_NAME)
wav_filename = EVENT_NAME
order_number_input = 3
station_id_str = 'Synth'
run_time_epoch_s = utils.datetime_now_epoch_s()
is_redshift = True
sig_wf_sample_rate_hz = 8000.
sig_frequency_hz_start = 40.
sig_frequency_hz_end = 400.
sig_duration_s = 13.19675
head_s = 0.5
sig_wf_blu, sig_wf_epoch_s = synthetics.chirp_linear_in_noise(snr_bits=12.,
sample_rate_hz=sig_wf_sample_rate_hz,
duration_s=sig_duration_s,
frequency_start_hz=sig_frequency_hz_start,
frequency_end_hz=sig_frequency_hz_end,
intro_s=head_s,
outro_s=head_s)
sig_wf_red = np.flipud(sig_wf_blu)
sig_wf_epoch_s += run_time_epoch_s
sig_wf = np.copy(sig_wf_red)
synthetics.antialias_halfNyquist(synth=sig_wf)
if do_save_wave:
wav_sample_rate_hz = 8000.
export_filename = os.path.join(output_wav_directory, wav_filename + "_8kz.wav")
synth_wav = 0.9 * np.real(sig_wf) / np.max(np.abs((np.real(sig_wf))))
scipy.io.wavfile.write(export_filename, int(wav_sample_rate_hz), synth_wav)
event_reference_time_epoch_s = sig_wf_epoch_s[0]
max_time_s, min_frequency_hz = scales.from_duration(band_order_Nth=order_number_input,
sig_duration_s=sig_duration_s)
print('\nRequest Order N=', order_number_input)
print('Lowest frequency in hz that can support this order for this signal duration is ', min_frequency_hz)
print('Scale with signal duration and to Nyquist, default G2 base re F1')
fmin = np.ceil(min_frequency_hz)
fmax = sig_wf_sample_rate_hz/2.
if is_redshift:
mic_cwt, mic_cwt_bits, mic_cwt_time_s, mic_cwt_frequency_hz = \
atoms.cwt_chirp_from_sig(sig_wf=sig_wf,
frequency_sample_rate_hz=sig_wf_sample_rate_hz,
band_order_Nth=order_number_input,
dictionary_type="tone",
index_shift=-1)
else:
mic_cwt, mic_cwt_bits, mic_cwt_time_s, mic_cwt_frequency_hz = \
atoms.cwt_chirp_from_sig(sig_wf=sig_wf,
frequency_sample_rate_hz=sig_wf_sample_rate_hz,
band_order_Nth=order_number_input,
dictionary_type="tone")
mic_cwt_snr, mic_cwt_snr_bits, mic_cwt_snr_entropy = entropy.snr_mean_max(tfr_coeff_complex=mic_cwt)
pltq.plot_wf_mesh_mesh_vert(redvox_id=station_id_str,
wf_panel_2_sig=sig_wf,
wf_panel_2_time=sig_wf_epoch_s,
mesh_time=mic_cwt_time_s,
mesh_frequency=mic_cwt_frequency_hz,
mesh_panel_1_trf=mic_cwt_bits,
mesh_panel_1_colormap_scaling="range",
mesh_panel_0_tfr=mic_cwt_snr_entropy,
wf_panel_2_units="Norm",
mesh_panel_1_cbar_units="bits",
mesh_panel_0_cbar_units="eSNR bits",
start_time_epoch=event_reference_time_epoch_s,
figure_title="CWT for " + EVENT_NAME,
frequency_hz_ymin=fmin,
frequency_hz_ymax=fmax)
mic_stft, mic_stft_bits, mic_stft_time_s, mic_stft_frequency_hz = \
spectra.stft_from_sig(sig_wf=sig_wf,
frequency_sample_rate_hz=sig_wf_sample_rate_hz,
band_order_Nth=order_number_input)
mic_stft_snr, mic_stft_snr_bits, mic_stft_snr_entropy = entropy.snr_mean_max(tfr_coeff_complex=mic_stft)
pltq.plot_wf_mesh_mesh_vert(frequency_scaling="log",
redvox_id=station_id_str,
wf_panel_2_sig=sig_wf,
wf_panel_2_time=sig_wf_epoch_s,
mesh_time=mic_stft_time_s,
mesh_frequency=mic_stft_frequency_hz,
mesh_panel_1_trf=mic_stft_bits,
mesh_panel_1_colormap_scaling="range",
mesh_panel_0_tfr=mic_stft_snr_entropy,
wf_panel_2_units="Norm",
mesh_panel_1_cbar_units="bits",
mesh_panel_0_cbar_units="eSNR bits",
figure_title="STFT for " + EVENT_NAME,
frequency_hz_ymin=fmin,
frequency_hz_ymax=fmax)
plt.show()
| true | true |
7901c99ab98015bba242cc58af0f02592b798a4d | 3,920 | py | Python | advbench/lib/meters.py | constrainedlearning/advbench | 68f9f6d77268aad45517ca84d383b996724cc976 | [
"MIT"
] | null | null | null | advbench/lib/meters.py | constrainedlearning/advbench | 68f9f6d77268aad45517ca84d383b996724cc976 | [
"MIT"
] | null | null | null | advbench/lib/meters.py | constrainedlearning/advbench | 68f9f6d77268aad45517ca84d383b996724cc976 | [
"MIT"
] | null | null | null | import time
try:
import wandb
wandb_log=True
except ImportError:
wandb_log=False
import numpy as np
from advbench.lib.plotting import plot_perturbed_wandb
from einops import rearrange
class AverageMeter:
    """Computes and stores the current value, running average, and a
    momentum-smoothed average."""

    def __init__(self, avg_mom=0.5):
        """avg_mom: weight given to the previous running average when smoothing."""
        self.avg_mom = avg_mom
        self.reset()
        self.print = True

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0  # running average of whole epoch
        self.smooth_avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record observation `val` with weight `n` (e.g. batch size).

        BUGFIX: the smoothed average is computed *before* the count is
        incremented; previously the `count == 0` first-update branch was
        unreachable (count had already been bumped), so the first smoothed
        value was biased toward the zero-initialized average.
        """
        self.val = val
        self.smooth_avg = val if self.count == 0 else self.avg * self.avg_mom + val * (1 - self.avg_mom)
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
class TimeMeter:
    """Tracks per-batch wall-clock time, split into data-loading and total batch time."""

    def __init__(self):
        self.batch_time = AverageMeter()  # total time per batch
        self.data_time = AverageMeter()   # time spent waiting on data
        self.start = time.time()

    def batch_start(self):
        """Call when a batch arrives: elapsed time so far counts as data loading."""
        elapsed = time.time() - self.start
        self.data_time.update(elapsed)

    def batch_end(self):
        """Call when a batch finishes: record the full batch time and restart the clock."""
        elapsed = time.time() - self.start
        self.batch_time.update(elapsed)
        self.start = time.time()
# Weights & Biases backed meters when the import succeeded; otherwise inert
# no-op stand-ins so calling code needs no conditional logic.
# BUGFIX: test the `wandb_log` flag set at import time — the name `wandb`
# is undefined when the import failed, so `if wandb:` raised NameError.
if wandb_log:
    class WBHistogramMeter:
        """Logs each update as a wandb histogram under `name`."""

        def __init__(self, name):
            self.print = False
            self.name = name

        def reset(self):
            pass

        def update(self, val):
            wandb.log({self.name: wandb.Histogram(val)})

    class WBDeltaMeter(WBHistogramMeter):
        """One histogram meter per perturbation dimension."""

        # NOTE: default changed from a mutable list to a tuple (never mutated).
        def __init__(self, names=(), dims=0, max_points=100):
            self.max_points = max_points
            self.print = False
            self.dims = dims
            if isinstance(names, str):
                # Expand a single base name into one name per dimension.
                names = [f"{names} {i}" for i in range(dims)]
            self.meters = [WBHistogramMeter(name) for name in names]

        def reset(self):
            pass

        def update(self, vals):
            # assumes vals is an array-like with .shape — TODO confirm caller contract
            if self.dims > 3:
                pass  # too many dimensions to log individually
            elif len(vals.shape) == 3:
                for i in range(len(self.meters)):
                    self.meters[i].update(vals[:, i, :self.max_points].flatten())
            else:
                for i in range(len(vals[0])):
                    self.meters[i].update(vals[:, i])

    class WBLinePlotMeter():
        """Logs a line plot of values over a grid via plot_perturbed_wandb."""

        def __init__(self, name):
            self.print = False
            self.name = name

        def reset(self):
            pass

        def update(self, grid, vals):
            plot_perturbed_wandb(grid, vals, name=self.name)

    class WBDualMeter(WBHistogramMeter):
        """Periodically logs dual-variable line plots at selected translation offsets."""

        # NOTE: default `locs` changed from a mutable list to a tuple (never mutated).
        def __init__(self, grid, translations, names="dual vs angle", locs=((0, 0), (-1, -1)), log_every=500):
            self.print = False
            self.locs = []
            tx, ty = translations
            for loc in locs:
                # Boolean mask selecting grid rows at this (tx, ty) offset.
                self.locs.append((grid[:, 1] == tx[loc[0]]) & (grid[:, 2] == ty[loc[1]]))
            if isinstance(names, str):
                names = [f"{names} {grid[i[0], 1].detach().cpu().item(), grid[i[0], 2].detach().cpu().item()}" for i in locs]
            self.grid = grid
            self.meters = [WBLinePlotMeter(name) for name in names]
            self.log_every = log_every
            self.counter = 0

        def reset(self):
            self.counter = 0

        def update(self, vals):
            if self.counter % self.log_every == 0:
                print("*"*10)
                print("log")
                for i in range(len(self.locs)):
                    self.meters[i].update(self.grid[self.locs[i], 0].detach().cpu().numpy(), vals[self.locs[i]].detach().cpu().numpy())
            # BUGFIX: count every call. Previously the counter was only
            # incremented inside the logging branch, so after the first log
            # `counter % log_every` could never return to 0 and nothing was
            # ever logged again.
            self.counter += 1
else:
    class WBHistogramMeter:
        """No-op fallback used when wandb is unavailable."""

        def __init__(self, name):
            self.print = False

        def reset(self):
            pass

        def update(self, val):
            pass

    class WBDeltaMeter(WBHistogramMeter):
        """No-op fallback; inherits the inert update/reset from WBHistogramMeter."""

        def __init__(self, names=(), dims=0):
            self.print = False
| 29.923664 | 135 | 0.53801 | import time
try:
import wandb
wandb_log=True
except ImportError:
wandb_log=False
import numpy as np
from advbench.lib.plotting import plot_perturbed_wandb
from einops import rearrange
class AverageMeter:
def __init__(self, avg_mom=0.5):
self.avg_mom = avg_mom
self.reset()
self.print = True
def reset(self):
self.val = 0
self.avg = 0
self.smooth_avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.smooth_avg = val if self.count == 0 else self.avg*self.avg_mom + val*(1-self.avg_mom)
self.avg = self.sum / self.count
class TimeMeter:
def __init__(self):
self.batch_time = AverageMeter()
self.data_time = AverageMeter()
self.start = time.time()
def batch_start(self):
self.data_time.update(time.time() - self.start)
def batch_end(self):
self.batch_time.update(time.time() - self.start)
self.start = time.time()
if wandb:
class WBHistogramMeter:
def __init__(self, name):
self.print = False
self.name = name
def reset(self):
pass
def update(self, val):
wandb.log({self.name: wandb.Histogram(val)})
class WBDeltaMeter(WBHistogramMeter):
def __init__(self, names = [], dims = 0, max_points = 100):
self.max_points = max_points
self.print = False
self.dims = dims
if isinstance(names, str):
names = [f"{names} {i}" for i in range(dims)]
self.meters = [WBHistogramMeter(name) for name in names]
def reset(self):
pass
def update(self, vals):
if self.dims>3:
pass
elif len(vals.shape)==3:
for i in range(len(self.meters)):
self.meters[i].update(vals[:,i,:self.max_points].flatten())
else:
for i in range(len(vals[0])):
self.meters[i].update(vals[:,i])
class WBLinePlotMeter():
def __init__(self, name):
self.print = False
self.name = name
def reset(self):
pass
def update(self, grid, vals):
plot_perturbed_wandb(grid, vals, name=self.name)
class WBDualMeter(WBHistogramMeter):
def __init__(self, grid, translations, names = "dual vs angle", locs = [(0, 0), (-1,-1)], log_every=500):
self.print = False
self.locs = []
tx, ty = translations
for loc in locs:
self.locs.append((grid[:,1]==tx[loc[0]])&(grid[:,2]==ty[loc[1]]))
if isinstance(names, str):
names = [f"{names} {grid[i[0], 1].detach().cpu().item(), grid[i[0], 2].detach().cpu().item()}" for i in locs]
self.grid = grid
self.meters = [WBLinePlotMeter(name) for name in names]
self.log_every = log_every
self.counter = 0
def reset(self):
self.counter=0
def update(self, vals):
if self.counter%self.log_every == 0:
print("*"*10)
print("log")
for i in range(len(self.locs)):
self.meters[i].update(self.grid[self.locs[i], 0].detach().cpu().numpy(), vals[self.locs[i]].detach().cpu().numpy())
self.counter+=1
else:
class WBHistogramMeter:
def __init__(self, name):
self.print = False
def reset(self):
pass
def update(self, val):
pass
class WBDeltaMeter(WBHistogramMeter):
def __init__(self,names = [], dims = 0):
self.print = False
| true | true |
7901cb2909c12cdd0fa5ec1b07c3122248d3a5f7 | 7,766 | py | Python | tests/core/test_expectation_suite_crud_methods.py | lfpll/great_expectations | f61fa7c2e6e813cd5ff84ab7403e05271cada257 | [
"Apache-2.0"
] | 1 | 2020-04-10T18:07:58.000Z | 2020-04-10T18:07:58.000Z | tests/core/test_expectation_suite_crud_methods.py | lfpll/great_expectations | f61fa7c2e6e813cd5ff84ab7403e05271cada257 | [
"Apache-2.0"
] | null | null | null | tests/core/test_expectation_suite_crud_methods.py | lfpll/great_expectations | f61fa7c2e6e813cd5ff84ab7403e05271cada257 | [
"Apache-2.0"
] | null | null | null | import json
import pytest
from great_expectations.core import ExpectationConfiguration, ExpectationSuite
from .test_expectation_suite import baseline_suite, exp1, exp2, exp3, exp4
@pytest.fixture
def empty_suite():
    """An ExpectationSuite named 'warning' that contains no expectations."""
    suite = ExpectationSuite(
        expectation_suite_name="warning",
        expectations=[],
        meta={"notes": "This is an expectation suite."},
    )
    return suite
@pytest.fixture
def exp5():
    """A not-null expectation on column 'a' with no result-format kwargs."""
    config = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs={"column": "a"},
        meta={},
    )
    return config
def test_append_expectation(empty_suite, exp1, exp2):
    """append_expectation grows the suite by one each call and allows duplicates."""
    assert len(empty_suite.expectations) == 0
    empty_suite.append_expectation(exp1)
    assert len(empty_suite.expectations) == 1
    # Adding the same expectation again *does* add duplicates.
    empty_suite.append_expectation(exp1)
    assert len(empty_suite.expectations) == 2
    empty_suite.append_expectation(exp2)
    assert len(empty_suite.expectations) == 3
    # Turn this on once we're ready to enforce strict typing.
    # with pytest.raises(TypeError):
    #     empty_suite.append_expectation("not an expectation")
    # Turn this on once we're ready to enforce strict typing.
    # with pytest.raises(TypeError):
    #     empty_suite.append_expectation(exp1.to_json_dict())
def test_find_expectation_indexes(baseline_suite, exp5):
    """Exercise the filter combinations accepted by find_expectation_indexes.

    baseline_suite starts with two value_set expectations (columns 'a' and 'b');
    exp5 (not-null on 'a') is appended partway through, so later assertions see
    three expectations at indexes 0, 1, 2.
    """
    # Passing no parameters "finds" all Expectations
    assert baseline_suite.find_expectation_indexes() == [0, 1]
    # Match on single columns
    assert baseline_suite.find_expectation_indexes(column="a") == [0]
    assert baseline_suite.find_expectation_indexes(column="b") == [1]
    # Non-existent column returns no matches
    assert baseline_suite.find_expectation_indexes(column="z") == []
    # It can return multiple expectation_type matches
    assert baseline_suite.find_expectation_indexes(
        expectation_type="expect_column_values_to_be_in_set"
    ) == [0, 1]
    # It can return multiple column matches
    baseline_suite.append_expectation(exp5)
    assert baseline_suite.find_expectation_indexes(column="a") == [0, 2]
    # It can match a single expectation_type
    assert baseline_suite.find_expectation_indexes(
        expectation_type="expect_column_values_to_not_be_null"
    ) == [2]
    # expectation_kwargs can match full kwargs
    assert baseline_suite.find_expectation_indexes(
        expectation_kwargs={
            "column": "b",
            "value_set": [-1, -2, -3],
            "result_format": "BASIC",
        }
    ) == [1]
    # expectation_kwargs can match partial kwargs
    assert baseline_suite.find_expectation_indexes(
        expectation_kwargs={"column": "a"}
    ) == [0, 2]
    # expectation_type and expectation_kwargs work in conjunction
    assert baseline_suite.find_expectation_indexes(
        expectation_type="expect_column_values_to_not_be_null",
        expectation_kwargs={"column": "a"},
    ) == [2]
    # column and expectation_kwargs work in conjunction
    assert baseline_suite.find_expectation_indexes(
        column="a", expectation_kwargs={"result_format": "BASIC"}
    ) == [0]
    # column and expectation_type work in conjunction
    assert baseline_suite.find_expectation_indexes(
        column="a", expectation_type="expect_column_values_to_not_be_null",
    ) == [2]
    assert (
        baseline_suite.find_expectation_indexes(
            column="a", expectation_type="expect_column_values_to_be_between",
        )
        == []
    )
    assert (
        baseline_suite.find_expectation_indexes(
            column="zzz", expectation_type="expect_column_values_to_be_between",
        )
        == []
    )
    # A column argument that conflicts with expectation_kwargs["column"] raises.
    with pytest.raises(ValueError):
        assert (
            baseline_suite.find_expectation_indexes(
                column="a", expectation_kwargs={"column": "b"}
            )
            == []
        )
def test_find_expectation_indexes_on_empty_suite(empty_suite):
    """Every kind of search against an empty suite returns an empty list."""
    by_type = empty_suite.find_expectation_indexes(
        expectation_type="expect_column_values_to_not_be_null"
    )
    assert by_type == []
    assert empty_suite.find_expectation_indexes(column="x") == []
    assert empty_suite.find_expectation_indexes(expectation_kwargs={}) == []
def test_find_expectations(baseline_suite, exp1, exp2):
    """find_expectations returns matching configs, honoring the discard_* flags."""
    # Note: most of the logic in this method is based on
    # find_expectation_indexes and _copy_and_clean_up_expectations_from_indexes
    # These tests do not thoroughly cover that logic.
    # Instead, they focus on the behavior of the discard_* methods
    assert (
        baseline_suite.find_expectations(
            column="a", expectation_type="expect_column_values_to_be_between",
        )
        == []
    )
    result = baseline_suite.find_expectations(
        column="a", expectation_type="expect_column_values_to_be_in_set",
    )
    assert len(result) == 1
    assert result[0] == ExpectationConfiguration(
        expectation_type="expect_column_values_to_be_in_set",
        kwargs={
            "column": "a",
            "value_set": [1, 2, 3],
            # "result_format": "BASIC"
        },
        meta={"notes": "This is an expectation."},
    )
    exp_with_all_the_params = ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs={
            "column": "a",
            "result_format": "BASIC",
            "include_config": True,
            "catch_exceptions": True,
        },
        meta={},
    )
    baseline_suite.append_expectation(exp_with_all_the_params)
    # By default, result_format/include_config/catch_exceptions kwargs are discarded.
    assert baseline_suite.find_expectations(
        column="a", expectation_type="expect_column_values_to_not_be_null",
    )[0] == ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs={"column": "a",},
        meta={},
    )
    # With all three discard flags off, the stored config comes back verbatim.
    assert (
        baseline_suite.find_expectations(
            column="a",
            expectation_type="expect_column_values_to_not_be_null",
            discard_result_format_kwargs=False,
            discard_include_config_kwargs=False,
            discard_catch_exceptions_kwargs=False,
        )[0]
        == exp_with_all_the_params
    )
    # Flags can be toggled independently: include_config is still discarded here.
    assert baseline_suite.find_expectations(
        column="a",
        expectation_type="expect_column_values_to_not_be_null",
        discard_result_format_kwargs=False,
        discard_catch_exceptions_kwargs=False,
    )[0] == ExpectationConfiguration(
        expectation_type="expect_column_values_to_not_be_null",
        kwargs={"column": "a", "result_format": "BASIC", "catch_exceptions": True,},
        meta={},
    )
def test_remove_expectation(baseline_suite):
    """remove_expectation removes exactly one match and raises on ambiguous
    or missing matches."""
    # ValueError: Multiple expectations matched arguments. No expectations removed.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation()
    # ValueError: No matching expectation found.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation(column="does_not_exist")
    # ValueError: Multiple expectations matched arguments. No expectations removed.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation(
            expectation_type="expect_column_values_to_be_in_set"
        )
    assert len(baseline_suite.expectations) == 2
    # A successful removal returns None; use identity comparison (PEP 8),
    # not `== None`.
    assert baseline_suite.remove_expectation(column="a") is None
    assert len(baseline_suite.expectations) == 1
    baseline_suite.remove_expectation(
        expectation_type="expect_column_values_to_be_in_set"
    )
    assert len(baseline_suite.expectations) == 0
    # ValueError: No matching expectation found (suite is now empty).
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation(
            expectation_type="expect_column_values_to_be_in_set"
        )
| 32.224066 | 84 | 0.683492 | import json
import pytest
from great_expectations.core import ExpectationConfiguration, ExpectationSuite
from .test_expectation_suite import baseline_suite, exp1, exp2, exp3, exp4
@pytest.fixture
def empty_suite():
return ExpectationSuite(
expectation_suite_name="warning",
expectations=[],
meta={"notes": "This is an expectation suite."},
)
@pytest.fixture
def exp5():
return ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={"column": "a",},
meta={},
)
def test_append_expectation(empty_suite, exp1, exp2):
assert len(empty_suite.expectations) == 0
empty_suite.append_expectation(exp1)
assert len(empty_suite.expectations) == 1
empty_suite.append_expectation(exp1)
assert len(empty_suite.expectations) == 2
empty_suite.append_expectation(exp2)
assert len(empty_suite.expectations) == 3
# with pytest.raises(TypeError):
# empty_suite.append_expectation("not an expectation")
# Turn this on once we're ready to enforce strict typing.
def test_find_expectation_indexes(baseline_suite, exp5):
assert baseline_suite.find_expectation_indexes() == [0, 1]
assert baseline_suite.find_expectation_indexes(column="a") == [0]
assert baseline_suite.find_expectation_indexes(column="b") == [1]
assert baseline_suite.find_expectation_indexes(column="z") == []
assert baseline_suite.find_expectation_indexes(
expectation_type="expect_column_values_to_be_in_set"
) == [0, 1]
baseline_suite.append_expectation(exp5)
assert baseline_suite.find_expectation_indexes(column="a") == [0, 2]
assert baseline_suite.find_expectation_indexes(
expectation_type="expect_column_values_to_not_be_null"
) == [2]
assert baseline_suite.find_expectation_indexes(
expectation_kwargs={
"column": "b",
"value_set": [-1, -2, -3],
"result_format": "BASIC",
}
) == [1]
assert baseline_suite.find_expectation_indexes(
expectation_kwargs={"column": "a"}
) == [0, 2]
assert baseline_suite.find_expectation_indexes(
expectation_type="expect_column_values_to_not_be_null",
expectation_kwargs={"column": "a"},
) == [2]
assert baseline_suite.find_expectation_indexes(
column="a", expectation_kwargs={"result_format": "BASIC"}
) == [0]
assert baseline_suite.find_expectation_indexes(
column="a", expectation_type="expect_column_values_to_not_be_null",
) == [2]
assert (
baseline_suite.find_expectation_indexes(
column="a", expectation_type="expect_column_values_to_be_between",
)
== []
)
assert (
baseline_suite.find_expectation_indexes(
column="zzz", expectation_type="expect_column_values_to_be_between",
)
== []
)
with pytest.raises(ValueError):
assert (
baseline_suite.find_expectation_indexes(
column="a", expectation_kwargs={"column": "b"}
)
== []
)
def test_find_expectation_indexes_on_empty_suite(empty_suite):
assert (
empty_suite.find_expectation_indexes(
expectation_type="expect_column_values_to_not_be_null"
)
== []
)
assert empty_suite.find_expectation_indexes(column="x") == []
assert empty_suite.find_expectation_indexes(expectation_kwargs={}) == []
def test_find_expectations(baseline_suite, exp1, exp2):
assert (
baseline_suite.find_expectations(
column="a", expectation_type="expect_column_values_to_be_between",
)
== []
)
result = baseline_suite.find_expectations(
column="a", expectation_type="expect_column_values_to_be_in_set",
)
assert len(result) == 1
assert result[0] == ExpectationConfiguration(
expectation_type="expect_column_values_to_be_in_set",
kwargs={
"column": "a",
"value_set": [1, 2, 3],
},
meta={"notes": "This is an expectation."},
)
exp_with_all_the_params = ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={
"column": "a",
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
},
meta={},
)
baseline_suite.append_expectation(exp_with_all_the_params)
assert baseline_suite.find_expectations(
column="a", expectation_type="expect_column_values_to_not_be_null",
)[0] == ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={"column": "a",},
meta={},
)
assert (
baseline_suite.find_expectations(
column="a",
expectation_type="expect_column_values_to_not_be_null",
discard_result_format_kwargs=False,
discard_include_config_kwargs=False,
discard_catch_exceptions_kwargs=False,
)[0]
== exp_with_all_the_params
)
assert baseline_suite.find_expectations(
column="a",
expectation_type="expect_column_values_to_not_be_null",
discard_result_format_kwargs=False,
discard_catch_exceptions_kwargs=False,
)[0] == ExpectationConfiguration(
expectation_type="expect_column_values_to_not_be_null",
kwargs={"column": "a", "result_format": "BASIC", "catch_exceptions": True,},
meta={},
)
def test_remove_expectation(baseline_suite):
    """remove_expectation: non-unique or empty matches raise; a unique match
    is removed in place and the method returns None."""
    # No criteria at all is rejected.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation()
    # A filter that matches nothing cannot remove anything.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation(column="does_not_exist")
    # Raises while both expectations are still present — presumably because
    # the type filter does not identify a unique expectation here; confirm
    # against the baseline_suite fixture.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation(
            expectation_type="expect_column_values_to_be_in_set"
        )
    assert len(baseline_suite.expectations) == 2
    # A unique column match is removed; mutators return None (PEP 8: compare
    # to None with `is`, not `==`).
    assert baseline_suite.remove_expectation(column="a") is None
    assert len(baseline_suite.expectations) == 1
    # With the column-"a" expectation gone, the type filter now succeeds.
    baseline_suite.remove_expectation(
        expectation_type="expect_column_values_to_be_in_set"
    )
    assert len(baseline_suite.expectations) == 0
    # Removing from an emptied suite raises again.
    with pytest.raises(ValueError):
        baseline_suite.remove_expectation(
            expectation_type="expect_column_values_to_be_in_set"
        )
| true | true |
7901cb2da8fb1b2c446ba68ee208268baff173a2 | 1,624 | py | Python | QCompute/QuantumPlatform/ProcedureParams.py | rickyHong/Qcompute-repl | 588b46020af209938f370032185ee8fe3e300795 | [
"Apache-2.0"
] | null | null | null | QCompute/QuantumPlatform/ProcedureParams.py | rickyHong/Qcompute-repl | 588b46020af209938f370032185ee8fe3e300795 | [
"Apache-2.0"
] | null | null | null | QCompute/QuantumPlatform/ProcedureParams.py | rickyHong/Qcompute-repl | 588b46020af209938f370032185ee8fe3e300795 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf8 -*-
# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Procedure Params
"""
class ProcedureParams:
    """Lazily populated mapping from a parameter index to its storage object."""

    def __init__(self):
        """Create an empty parameter table."""
        self.paramsDict = {}  # maps index -> ProcedureParamStorage

    def __getitem__(self, index):
        """Return the storage registered for ``index``.

        The storage object is created on first access, so indexing never
        fails for a new index.

        :param index: the parameter index
        :return: ProcedureParamStorage
        """
        existing = self.paramsDict.get(index)
        if existing is None:
            existing = ProcedureParamStorage(index)
            self.paramsDict[index] = existing
        return existing
class ProcedureParamStorage:
    """Storage for a single procedure parameter."""

    def __init__(self, index):
        """Remember which quantum register index this parameter refers to.

        :param index: the quantum register index
        """
        self.index = index
class ProcedureParams:
def __init__(self):
self.paramsDict = {}
def __getitem__(self, index):
value = self.paramsDict.get(index)
if value is not None:
return value
value = ProcedureParamStorage(index)
self.paramsDict[index] = value
return value
class ProcedureParamStorage:
def __init__(self, index):
self.index = index | true | true |
7901cc2f39e597d9485cfcc2e34018f75346c3b1 | 8,760 | py | Python | src/graph_nheads_att_gan/train.py | jiangnanboy/gcn_for_prediction_of_protein_interactions | b2a9eb06cdfe0971d0c352299db1075ec4827dd9 | [
"Apache-2.0"
] | 1 | 2022-01-04T11:42:42.000Z | 2022-01-04T11:42:42.000Z | src/graph_nheads_att_gan/train.py | jiangnanboy/gcn_for_prediction_of_protein_interactions | b2a9eb06cdfe0971d0c352299db1075ec4827dd9 | [
"Apache-2.0"
] | null | null | null | src/graph_nheads_att_gan/train.py | jiangnanboy/gcn_for_prediction_of_protein_interactions | b2a9eb06cdfe0971d0c352299db1075ec4827dd9 | [
"Apache-2.0"
] | null | null | null | import scipy.sparse as sp
import numpy as np
import torch
import time
import os
from configparser import ConfigParser
import sys
sys.path.append('/home/shiyan/project/gcn_for_prediction_of_protein_interactions/')
from src.util.load_data import load_data, sparse_to_tuple, mask_test_edges, preprocess_graph
from src.util.loss import arga_loss_function, varga_loss_function
from src.util.metrics import get_roc_score
from src.util import define_optimizer
from src.graph_nheads_att_gan.model import NHGATModelGAN
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Train():
def __init__(self):
pass
def train_model(self, config_path):
if os.path.exists(config_path) and (os.path.split(config_path)[1].split('.')[0] == 'config') and (
os.path.splitext(config_path)[1].split('.')[1] == 'cfg'):
# load config file
config = ConfigParser()
config.read(config_path)
section = config.sections()[0]
# data catalog path
data_catalog = config.get(section, "data_catalog")
# train file path
train_file_name = config.get(section, "train_file_name")
# model save/load path
model_path = config.get(section, "model_path")
# model param config
hidden_dim1 = config.getint(section, "hidden_dim1")
hidden_dim2 = config.getint(section, "hidden_dim2")
hidden_dim3 = config.getint(section, 'hidden_dim3')
num_heads = config.getint(section, 'num_heads')
dropout = config.getfloat(section, "dropout")
vae_bool = config.getboolean(section, 'vae_bool')
alpha = config.getfloat(section, 'alpha')
lr = config.getfloat(section, "lr")
lr_decay = config.getfloat(section, 'lr_decay')
weight_decay = config.getfloat(section, "weight_decay")
gamma = config.getfloat(section, "gamma")
momentum = config.getfloat(section, "momentum")
eps = config.getfloat(section, "eps")
clip = config.getfloat(section, "clip")
epochs = config.getint(section, "epochs")
optimizer_name = config.get(section, "optimizer")
# 加载相关数据
adj = load_data(os.path.join(data_catalog, train_file_name))
num_nodes = adj.shape[0]
num_edges = adj.sum()
features = sparse_to_tuple(sp.identity(num_nodes))
num_features = features[2][1]
# 去除对角线元素
# 下边的右部分为:返回adj_orig的对角元素(一维),并增加一维,抽出adj_orig的对角元素并构建只有这些对角元素的对角矩阵
adj_orig = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
adj_orig.eliminate_zeros()
adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj_orig)
adj = adj_train
# 返回D^{-0.5}SD^{-0.5}的coords, data, shape,其中S=A+I
adj_norm = preprocess_graph(adj)
adj_label = adj_train + sp.eye(adj_train.shape[0])
# adj_label = sparse_to_tuple(adj_label)
adj_label = torch.FloatTensor(adj_label.toarray()).to(DEVICE)
'''
注意,adj的每个元素非1即0。pos_weight是用于训练的邻接矩阵中负样本边(既不存在的边)和正样本边的倍数(即比值),这个数值在二分类交叉熵损失函数中用到,
如果正样本边所占的比例和负样本边所占比例失衡,比如正样本边很多,负样本边很少,那么在求loss的时候可以提供weight参数,将正样本边的weight设置小一点,负样本边的weight设置大一点,
此时能够很好的平衡两类在loss中的占比,任务效果可以得到进一步提升。参考:https://www.zhihu.com/question/383567632
负样本边的weight都为1,正样本边的weight都为pos_weight
'''
pos_weight = float(adj.shape[0] * adj.shape[0] - num_edges) / num_edges
norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
# create model
print('create model ...')
model = NHGATModelGAN(num_features, hidden_dim1=hidden_dim1, hidden_dim2=hidden_dim2, hidden_dim3=hidden_dim3, num_heads=num_heads, dropout=dropout, alpha=alpha, vae_bool=vae_bool)
# define optimizer
if optimizer_name == 'adam':
optimizer = define_optimizer.define_optimizer_adam(model, lr=lr, weight_decay=weight_decay)
elif optimizer_name == 'adamw':
optimizer = define_optimizer.define_optimizer_adamw(model, lr=lr, weight_decay=weight_decay)
elif optimizer_name == 'sgd':
optimizer = define_optimizer.define_optimizer_sgd(model, lr=lr, momentum=momentum,
weight_decay=weight_decay)
elif optimizer_name == 'adagrad':
optimizer = define_optimizer.define_optimizer_adagrad(model, lr=lr, lr_decay=lr_decay,
weight_decay=weight_decay)
elif optimizer_name == 'rmsprop':
optimizer = define_optimizer.define_optimizer_rmsprop(model, lr=lr, weight_decay=weight_decay,
momentum=momentum)
elif optimizer_name == 'adadelta':
optimizer = define_optimizer.define_optimizer_adadelta(model, lr=lr, weight_decay=weight_decay)
else:
raise NameError('No define optimization function name!')
model = model.to(DEVICE)
# 稀疏张量被表示为一对致密张量:一维张量和二维张量的索引。可以通过提供这两个张量来构造稀疏张量
adj_norm = torch.sparse.FloatTensor(torch.LongTensor(adj_norm[0].T),
torch.FloatTensor(adj_norm[1]),
torch.Size(adj_norm[2]))
features = torch.sparse.FloatTensor(torch.LongTensor(features[0].T),
torch.FloatTensor(features[1]),
torch.Size(features[2])).to_dense()
adj_norm = adj_norm.to(DEVICE)
features = features.to(DEVICE)
norm = torch.FloatTensor(np.array(norm)).to(DEVICE)
pos_weight = torch.tensor(pos_weight).to(DEVICE)
num_nodes = torch.tensor(num_nodes).to(DEVICE)
print('start training...')
best_valid_roc_score = float('-inf')
hidden_emb = None
model.train()
for epoch in range(epochs):
t = time.time()
optimizer.zero_grad()
# 解码后的邻接矩阵,判别器
recovered, dis_real, dis_fake, mu, logvar = model(features, adj_norm)
if vae_bool:
loss = varga_loss_function(preds=recovered, labels=adj_label,
mu=mu, logvar=logvar,
dis_real=dis_real, dis_fake=dis_fake,
n_nodes=num_nodes,
norm=norm, pos_weight=pos_weight)
else:
loss = arga_loss_function(preds=recovered, labels=adj_label,
dis_real=dis_real, dis_fake=dis_fake,
norm=norm, pos_weight=pos_weight)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
cur_loss = loss.item()
optimizer.step()
hidden_emb = mu.data.cpu().numpy()
# 评估验证集,val set
roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, val_edges, val_edges_false)
# 保存最好的roc score
if roc_score > best_valid_roc_score:
best_valid_roc_score = roc_score
# 不需要保存整个model,只需保存hidden_emb,因为后面的解码是用hidden_emb内积的形式作推断
np.save(model_path, hidden_emb)
print("Epoch:", '%04d' % (epoch + 1), "train_loss = ", "{:.5f}".format(cur_loss),
"val_roc_score = ", "{:.5f}".format(roc_score),
"average_precision_score = ", "{:.5f}".format(ap_score),
"time=", "{:.5f}".format(time.time() - t)
)
print("Optimization Finished!")
# 评估测试集,test set
roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, test_edges, test_edges_false)
print('test roc score: {}'.format(roc_score))
print('test ap score: {}'.format(ap_score))
else:
raise FileNotFoundError('File config.cfg not found : ' + config_path)
# Script entry point: read config.cfg from the current working directory
# and launch the full training cycle.
if __name__ == '__main__':
    config_path = os.path.join(os.getcwd(), 'config.cfg')
    train = Train()
    train.train_model(config_path)
| 47.351351 | 192 | 0.574543 | import scipy.sparse as sp
import numpy as np
import torch
import time
import os
from configparser import ConfigParser
import sys
sys.path.append('/home/shiyan/project/gcn_for_prediction_of_protein_interactions/')
from src.util.load_data import load_data, sparse_to_tuple, mask_test_edges, preprocess_graph
from src.util.loss import arga_loss_function, varga_loss_function
from src.util.metrics import get_roc_score
from src.util import define_optimizer
from src.graph_nheads_att_gan.model import NHGATModelGAN
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Train():
def __init__(self):
pass
def train_model(self, config_path):
if os.path.exists(config_path) and (os.path.split(config_path)[1].split('.')[0] == 'config') and (
os.path.splitext(config_path)[1].split('.')[1] == 'cfg'):
config = ConfigParser()
config.read(config_path)
section = config.sections()[0]
data_catalog = config.get(section, "data_catalog")
train_file_name = config.get(section, "train_file_name")
model_path = config.get(section, "model_path")
hidden_dim1 = config.getint(section, "hidden_dim1")
hidden_dim2 = config.getint(section, "hidden_dim2")
hidden_dim3 = config.getint(section, 'hidden_dim3')
num_heads = config.getint(section, 'num_heads')
dropout = config.getfloat(section, "dropout")
vae_bool = config.getboolean(section, 'vae_bool')
alpha = config.getfloat(section, 'alpha')
lr = config.getfloat(section, "lr")
lr_decay = config.getfloat(section, 'lr_decay')
weight_decay = config.getfloat(section, "weight_decay")
gamma = config.getfloat(section, "gamma")
momentum = config.getfloat(section, "momentum")
eps = config.getfloat(section, "eps")
clip = config.getfloat(section, "clip")
epochs = config.getint(section, "epochs")
optimizer_name = config.get(section, "optimizer")
adj = load_data(os.path.join(data_catalog, train_file_name))
num_nodes = adj.shape[0]
num_edges = adj.sum()
features = sparse_to_tuple(sp.identity(num_nodes))
num_features = features[2][1]
adj_orig = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
adj_orig.eliminate_zeros()
adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(adj_orig)
adj = adj_train
adj_norm = preprocess_graph(adj)
adj_label = adj_train + sp.eye(adj_train.shape[0])
adj_label = torch.FloatTensor(adj_label.toarray()).to(DEVICE)
pos_weight = float(adj.shape[0] * adj.shape[0] - num_edges) / num_edges
norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)
print('create model ...')
model = NHGATModelGAN(num_features, hidden_dim1=hidden_dim1, hidden_dim2=hidden_dim2, hidden_dim3=hidden_dim3, num_heads=num_heads, dropout=dropout, alpha=alpha, vae_bool=vae_bool)
if optimizer_name == 'adam':
optimizer = define_optimizer.define_optimizer_adam(model, lr=lr, weight_decay=weight_decay)
elif optimizer_name == 'adamw':
optimizer = define_optimizer.define_optimizer_adamw(model, lr=lr, weight_decay=weight_decay)
elif optimizer_name == 'sgd':
optimizer = define_optimizer.define_optimizer_sgd(model, lr=lr, momentum=momentum,
weight_decay=weight_decay)
elif optimizer_name == 'adagrad':
optimizer = define_optimizer.define_optimizer_adagrad(model, lr=lr, lr_decay=lr_decay,
weight_decay=weight_decay)
elif optimizer_name == 'rmsprop':
optimizer = define_optimizer.define_optimizer_rmsprop(model, lr=lr, weight_decay=weight_decay,
momentum=momentum)
elif optimizer_name == 'adadelta':
optimizer = define_optimizer.define_optimizer_adadelta(model, lr=lr, weight_decay=weight_decay)
else:
raise NameError('No define optimization function name!')
model = model.to(DEVICE)
adj_norm = torch.sparse.FloatTensor(torch.LongTensor(adj_norm[0].T),
torch.FloatTensor(adj_norm[1]),
torch.Size(adj_norm[2]))
features = torch.sparse.FloatTensor(torch.LongTensor(features[0].T),
torch.FloatTensor(features[1]),
torch.Size(features[2])).to_dense()
adj_norm = adj_norm.to(DEVICE)
features = features.to(DEVICE)
norm = torch.FloatTensor(np.array(norm)).to(DEVICE)
pos_weight = torch.tensor(pos_weight).to(DEVICE)
num_nodes = torch.tensor(num_nodes).to(DEVICE)
print('start training...')
best_valid_roc_score = float('-inf')
hidden_emb = None
model.train()
for epoch in range(epochs):
t = time.time()
optimizer.zero_grad()
recovered, dis_real, dis_fake, mu, logvar = model(features, adj_norm)
if vae_bool:
loss = varga_loss_function(preds=recovered, labels=adj_label,
mu=mu, logvar=logvar,
dis_real=dis_real, dis_fake=dis_fake,
n_nodes=num_nodes,
norm=norm, pos_weight=pos_weight)
else:
loss = arga_loss_function(preds=recovered, labels=adj_label,
dis_real=dis_real, dis_fake=dis_fake,
norm=norm, pos_weight=pos_weight)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
cur_loss = loss.item()
optimizer.step()
hidden_emb = mu.data.cpu().numpy()
roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, val_edges, val_edges_false)
if roc_score > best_valid_roc_score:
best_valid_roc_score = roc_score
np.save(model_path, hidden_emb)
print("Epoch:", '%04d' % (epoch + 1), "train_loss = ", "{:.5f}".format(cur_loss),
"val_roc_score = ", "{:.5f}".format(roc_score),
"average_precision_score = ", "{:.5f}".format(ap_score),
"time=", "{:.5f}".format(time.time() - t)
)
print("Optimization Finished!")
roc_score, ap_score = get_roc_score(hidden_emb, adj_orig, test_edges, test_edges_false)
print('test roc score: {}'.format(roc_score))
print('test ap score: {}'.format(ap_score))
else:
raise FileNotFoundError('File config.cfg not found : ' + config_path)
if __name__ == '__main__':
config_path = os.path.join(os.getcwd(), 'config.cfg')
train = Train()
train.train_model(config_path)
| true | true |
7901ccb774253d7645d7cf53b030d4938891716d | 2,038 | py | Python | grom/ui/ui_multiRename.py | hovo1990/GROM | fd7d4753c7d0d6bdab94b625d45456690c5b4ea2 | [
"CNRI-Python"
] | 8 | 2015-04-16T12:07:07.000Z | 2020-05-31T09:06:27.000Z | grom/ui/ui_multiRename.py | egtai/GROM | fd7d4753c7d0d6bdab94b625d45456690c5b4ea2 | [
"CNRI-Python"
] | null | null | null | grom/ui/ui_multiRename.py | egtai/GROM | fd7d4753c7d0d6bdab94b625d45456690c5b4ea2 | [
"CNRI-Python"
] | 2 | 2019-03-06T14:48:58.000Z | 2021-07-28T08:05:28.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_multi_rename_qt5.ui'
#
# Created: Thu Aug 14 17:13:08 2014
# by: PyQt5 UI code generator 5.2.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Multi_Rename_Dialog(object):
    """Auto-generated (pyuic5) layout class for the multi-rename dialog.

    NOTE: per the generator banner above, hand edits to this file are lost
    on regeneration — change the .ui design file instead.
    """
    def setupUi(self, Multi_Rename_Dialog):
        """Build the widget tree on the given dialog and wire its signals.

        :param Multi_Rename_Dialog: the QDialog instance being populated
        """
        Multi_Rename_Dialog.setObjectName("Multi_Rename_Dialog")
        # Compact dialog: 194x105 default/minimum, capped at 300x150.
        Multi_Rename_Dialog.resize(194, 105)
        Multi_Rename_Dialog.setMinimumSize(QtCore.QSize(194, 105))
        Multi_Rename_Dialog.setMaximumSize(QtCore.QSize(300, 150))
        self.gridLayout = QtWidgets.QGridLayout(Multi_Rename_Dialog)
        self.gridLayout.setObjectName("gridLayout")
        # Standard OK/Cancel button box at the bottom row of the grid.
        self.RenamebuttonBox = QtWidgets.QDialogButtonBox(Multi_Rename_Dialog)
        self.RenamebuttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.RenamebuttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
        self.RenamebuttonBox.setObjectName("RenamebuttonBox")
        self.gridLayout.addWidget(self.RenamebuttonBox, 2, 0, 1, 1)
        # Prompt label (row 0) and the new-name line edit (row 1).
        self.RnameLabel = QtWidgets.QLabel(Multi_Rename_Dialog)
        self.RnameLabel.setObjectName("RnameLabel")
        self.gridLayout.addWidget(self.RnameLabel, 0, 0, 1, 1)
        self.RenamelineEdit = QtWidgets.QLineEdit(Multi_Rename_Dialog)
        self.RenamelineEdit.setObjectName("RenamelineEdit")
        self.gridLayout.addWidget(self.RenamelineEdit, 1, 0, 1, 1)
        self.retranslateUi(Multi_Rename_Dialog)
        # OK/Cancel drive the dialog's accept/reject slots directly.
        self.RenamebuttonBox.accepted.connect(Multi_Rename_Dialog.accept)
        self.RenamebuttonBox.rejected.connect(Multi_Rename_Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Multi_Rename_Dialog)
    def retranslateUi(self, Multi_Rename_Dialog):
        """Apply the translatable UI strings (window title and label text)."""
        _translate = QtCore.QCoreApplication.translate
        Multi_Rename_Dialog.setWindowTitle(_translate("Multi_Rename_Dialog", "Dialog"))
        self.RnameLabel.setText(_translate("Multi_Rename_Dialog", "Rename to:"))
| 48.52381 | 114 | 0.748773 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Multi_Rename_Dialog(object):
def setupUi(self, Multi_Rename_Dialog):
Multi_Rename_Dialog.setObjectName("Multi_Rename_Dialog")
Multi_Rename_Dialog.resize(194, 105)
Multi_Rename_Dialog.setMinimumSize(QtCore.QSize(194, 105))
Multi_Rename_Dialog.setMaximumSize(QtCore.QSize(300, 150))
self.gridLayout = QtWidgets.QGridLayout(Multi_Rename_Dialog)
self.gridLayout.setObjectName("gridLayout")
self.RenamebuttonBox = QtWidgets.QDialogButtonBox(Multi_Rename_Dialog)
self.RenamebuttonBox.setOrientation(QtCore.Qt.Horizontal)
self.RenamebuttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
self.RenamebuttonBox.setObjectName("RenamebuttonBox")
self.gridLayout.addWidget(self.RenamebuttonBox, 2, 0, 1, 1)
self.RnameLabel = QtWidgets.QLabel(Multi_Rename_Dialog)
self.RnameLabel.setObjectName("RnameLabel")
self.gridLayout.addWidget(self.RnameLabel, 0, 0, 1, 1)
self.RenamelineEdit = QtWidgets.QLineEdit(Multi_Rename_Dialog)
self.RenamelineEdit.setObjectName("RenamelineEdit")
self.gridLayout.addWidget(self.RenamelineEdit, 1, 0, 1, 1)
self.retranslateUi(Multi_Rename_Dialog)
self.RenamebuttonBox.accepted.connect(Multi_Rename_Dialog.accept)
self.RenamebuttonBox.rejected.connect(Multi_Rename_Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Multi_Rename_Dialog)
def retranslateUi(self, Multi_Rename_Dialog):
_translate = QtCore.QCoreApplication.translate
Multi_Rename_Dialog.setWindowTitle(_translate("Multi_Rename_Dialog", "Dialog"))
self.RnameLabel.setText(_translate("Multi_Rename_Dialog", "Rename to:"))
| true | true |
7901cdd575f6adcb8ec86589d3fa6b4b7c51d9e7 | 5,299 | py | Python | alipay/aop/api/domain/MybankPaymentTradeFinancingOrderRefundModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/MybankPaymentTradeFinancingOrderRefundModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/MybankPaymentTradeFinancingOrderRefundModel.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankPaymentTradeFinancingOrderRefundModel(object):
    """Data model for a mybank financing-order refund request.

    Ten optional fields, each exposed as a plain read/write property backed
    by an underscore-prefixed attribute, plus dict (de)serialization helpers
    used by the OpenAPI gateway client. The field list is kept in a single
    table so the constructor and both converters cannot drift apart (the
    original generated code repeated every field name in four places).
    """

    # Single source of truth for the model's field names.
    _FIELD_NAMES = (
        "amount",
        "biz_no",
        "currency_value",
        "ext_info",
        "order_no",
        "refund_type",
        "remark",
        "request_no",
        "request_time",
        "scene_type",
    )

    def __init__(self):
        # Every field starts unset; unset (falsy) fields are omitted from
        # the serialized dict, matching the original behavior.
        for name in self._FIELD_NAMES:
            setattr(self, "_" + name, None)

    # Build one property per field — same public attribute surface as the
    # original hand-expanded property/setter blocks.
    def _field_property(name):
        attr = "_" + name
        return property(
            lambda self: getattr(self, attr),
            lambda self, value: setattr(self, attr, value),
        )

    amount = _field_property("amount")
    biz_no = _field_property("biz_no")
    currency_value = _field_property("currency_value")
    ext_info = _field_property("ext_info")
    order_no = _field_property("order_no")
    refund_type = _field_property("refund_type")
    remark = _field_property("remark")
    request_no = _field_property("request_no")
    request_time = _field_property("request_time")
    scene_type = _field_property("scene_type")
    del _field_property  # class-body helper only; not part of the API

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields into a plain dict.

        Values exposing their own ``to_alipay_dict`` are serialized
        recursively; falsy values (None, "", 0) are omitted, exactly as in
        the original generated code.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a gateway dict; return None for empty input.

        :param d: mapping of field name -> value (unknown keys are ignored)
        """
        if not d:
            return None
        o = MybankPaymentTradeFinancingOrderRefundModel()
        for name in MybankPaymentTradeFinancingOrderRefundModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| 30.107955 | 79 | 0.578411 |
import json
from alipay.aop.api.constant.ParamConstants import *
class MybankPaymentTradeFinancingOrderRefundModel(object):
def __init__(self):
self._amount = None
self._biz_no = None
self._currency_value = None
self._ext_info = None
self._order_no = None
self._refund_type = None
self._remark = None
self._request_no = None
self._request_time = None
self._scene_type = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def biz_no(self):
return self._biz_no
@biz_no.setter
def biz_no(self, value):
self._biz_no = value
@property
def currency_value(self):
return self._currency_value
@currency_value.setter
def currency_value(self, value):
self._currency_value = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def order_no(self):
return self._order_no
@order_no.setter
def order_no(self, value):
self._order_no = value
@property
def refund_type(self):
return self._refund_type
@refund_type.setter
def refund_type(self, value):
self._refund_type = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def request_no(self):
return self._request_no
@request_no.setter
def request_no(self, value):
self._request_no = value
@property
def request_time(self):
return self._request_time
@request_time.setter
def request_time(self, value):
self._request_time = value
@property
def scene_type(self):
return self._scene_type
@scene_type.setter
def scene_type(self, value):
self._scene_type = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.biz_no:
if hasattr(self.biz_no, 'to_alipay_dict'):
params['biz_no'] = self.biz_no.to_alipay_dict()
else:
params['biz_no'] = self.biz_no
if self.currency_value:
if hasattr(self.currency_value, 'to_alipay_dict'):
params['currency_value'] = self.currency_value.to_alipay_dict()
else:
params['currency_value'] = self.currency_value
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.order_no:
if hasattr(self.order_no, 'to_alipay_dict'):
params['order_no'] = self.order_no.to_alipay_dict()
else:
params['order_no'] = self.order_no
if self.refund_type:
if hasattr(self.refund_type, 'to_alipay_dict'):
params['refund_type'] = self.refund_type.to_alipay_dict()
else:
params['refund_type'] = self.refund_type
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.request_no:
if hasattr(self.request_no, 'to_alipay_dict'):
params['request_no'] = self.request_no.to_alipay_dict()
else:
params['request_no'] = self.request_no
if self.request_time:
if hasattr(self.request_time, 'to_alipay_dict'):
params['request_time'] = self.request_time.to_alipay_dict()
else:
params['request_time'] = self.request_time
if self.scene_type:
if hasattr(self.scene_type, 'to_alipay_dict'):
params['scene_type'] = self.scene_type.to_alipay_dict()
else:
params['scene_type'] = self.scene_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MybankPaymentTradeFinancingOrderRefundModel()
if 'amount' in d:
o.amount = d['amount']
if 'biz_no' in d:
o.biz_no = d['biz_no']
if 'currency_value' in d:
o.currency_value = d['currency_value']
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'order_no' in d:
o.order_no = d['order_no']
if 'refund_type' in d:
o.refund_type = d['refund_type']
if 'remark' in d:
o.remark = d['remark']
if 'request_no' in d:
o.request_no = d['request_no']
if 'request_time' in d:
o.request_time = d['request_time']
if 'scene_type' in d:
o.scene_type = d['scene_type']
return o
| true | true |
7901cdf0112f00577707949029b10e7b8a57973e | 2,614 | py | Python | SpacyHu/SpacyHu/LemmatizerMorphAnalyzer.py | Prodinal/GateSpacyWrapping | 046c415eb22ce7c2cc4aaca904410f852e993974 | [
"MIT"
] | null | null | null | SpacyHu/SpacyHu/LemmatizerMorphAnalyzer.py | Prodinal/GateSpacyWrapping | 046c415eb22ce7c2cc4aaca904410f852e993974 | [
"MIT"
] | null | null | null | SpacyHu/SpacyHu/LemmatizerMorphAnalyzer.py | Prodinal/GateSpacyWrapping | 046c415eb22ce7c2cc4aaca904410f852e993974 | [
"MIT"
] | null | null | null | import spacy
from spacy.tokens import Doc, Span, Token
import urllib
import xml.etree.ElementTree as ET
import re
from SpacyHu.BaseSpacyHuComponent import BaseSpacyHuComponent
class HuLemmaMorph(BaseSpacyHuComponent):
    """spaCy pipeline component that annotates tokens with Hungarian
    morphological analyses (``token._.morph``) and lemma candidates
    (``token._.lemma``) fetched from a remote GATE processing service.
    """

    def __init__(self,
                 nlp,
                 label='Morph',
                 url='http://hlt.bme.hu/chatbot/gate/process?run='):
        """Register the component and the custom token extensions.

        :param nlp: the spaCy pipeline this component is attached to
        :param label: the component's label
        :param url: base URL of the remote GATE endpoint
        """
        necessary_modules = ['QT', 'HFSTLemm']
        super().__init__(nlp, label, url, necessary_modules)
        Token.set_extension('morph', default='')
        Token.set_extension('lemma', default='')

    def get_word_from_annotation(self, annotation):
        """Return the surface form ('string' feature) of a Token annotation,
        or None when the feature is absent."""
        # Element.getchildren() was removed in Python 3.9; iterating the
        # element directly is the supported equivalent.
        for feature in annotation:
            if feature.find('Name').text == 'string':
                return feature.find('Value').text

    def get_token_by_idx(self, idx, doc):
        """Return the token whose character offset equals ``idx``
        (None when no token starts there)."""
        for token in doc:
            if token.idx == idx:
                return token

    def get_lemma_from_morph(self, morph):
        """Extract the set of lemma strings from an 'anas' analysis string."""
        return set(re.findall(r'(?<=lemma=).*?(?=\})', morph))

    def __call__(self, doc):
        """Send ``doc.text`` to the remote service and attach the returned
        morph/lemma annotations to the matching tokens; return the doc."""
        text = urllib.parse.quote_plus(doc.text)
        result = urllib.request.urlopen(self.url + text).read()
        annotationset = ET.fromstring(result).find('AnnotationSet')
        for annotation in annotationset:
            if annotation.get('Type') != 'Token':
                continue
            # StartNode holds the token's character offset in the text.
            word_index = int(annotation.get('StartNode'))
            for feature in annotation:
                if feature.find('Name').text == 'anas':
                    token = self.get_token_by_idx(word_index, doc)
                    anas = (feature.find('Value').text
                            if feature.find('Value').text is not None
                            else '')
                    token._.morph = set(anas.split(';'))
                    token._.lemma = self.get_lemma_from_morph(anas)
                    break
        return doc
if __name__ == "__main__":
from Tokenizer import HuTokenizer
debug_text = 'Jó, hogy ez az alma piros, mert az olyan almákat szeretem.'
# debug_text = 'megszentségteleníthetetlenségeitekért meghalnak'
remote_url = 'http://hlt.bme.hu/chatbot/gate/process?run='
nlp = spacy.blank("en")
nlp.tokenizer = HuTokenizer(nlp.vocab, url=remote_url)
morph_analyzer = HuLemmaMorph(nlp, url=remote_url)
nlp.add_pipe(morph_analyzer, last=True)
doc = nlp(debug_text)
for token in doc:
print('Token is: ' + token.text)
print(token._.lemma)
print(token._.morph)
print()
| 36.305556 | 77 | 0.602142 | import spacy
from spacy.tokens import Doc, Span, Token
import urllib
import xml.etree.ElementTree as ET
import re
from SpacyHu.BaseSpacyHuComponent import BaseSpacyHuComponent
class HuLemmaMorph(BaseSpacyHuComponent):
    """spaCy pipeline component that queries a remote GATE service and
    attaches Hungarian morphological analyses and lemmata to each token.

    Adds two custom token extension attributes:
      * ``token._.morph`` -- set of raw 'anas' analysis strings
      * ``token._.lemma`` -- set of lemma candidates parsed from them
    """

    def __init__(self,
                 nlp,
                 label='Morph',
                 url='http://hlt.bme.hu/chatbot/gate/process?run='):
        # The remote pipeline needs the tokenizer (QT) and the HFST
        # lemmatizer modules to emit the 'anas' features parsed in __call__.
        necessary_modules = ['QT', 'HFSTLemm']
        super().__init__(nlp, label, url, necessary_modules)
        # force=True: without it, instantiating this component a second
        # time raises ValueError because the extensions already exist.
        Token.set_extension('morph', default='', force=True)
        Token.set_extension('lemma', default='', force=True)

    def get_word_from_annotation(self, annotation):
        """Return the surface form ('string' feature) of a GATE annotation, or None."""
        # Iterate the element directly: Element.getchildren() was
        # deprecated in Python 3.2 and removed in 3.9.
        for feature in annotation:
            if feature.find('Name').text == 'string':
                return feature.find('Value').text

    def get_token_by_idx(self, idx, doc):
        """Return the token of *doc* starting at character offset *idx*, or None."""
        for token in doc:
            if token.idx == idx:
                return token

    def get_lemma_from_morph(self, morph):
        """Extract the set of lemmata from a raw 'anas' analysis string."""
        return set(re.findall(r'(?<=lemma=).*?(?=\})', morph))

    def __call__(self, doc):
        """Annotate every token of *doc* with morphology and lemmata; returns the same Doc."""
        # The module only does `import urllib`, which does not reliably
        # expose the submodules; import them explicitly here.
        import urllib.parse
        import urllib.request
        text = urllib.parse.quote_plus(doc.text)
        result = urllib.request.urlopen(self.url + text).read()
        annotationset = ET.fromstring(result).find('AnnotationSet')
        for annotation in annotationset:
            if annotation.get('Type') != 'Token':
                continue
            # StartNode is the character offset of the token in doc.text.
            word_index = int(annotation.get('StartNode'))
            for feature in annotation:
                if feature.find('Name').text == 'anas':
                    token = self.get_token_by_idx(word_index, doc)
                    anas = (feature.find('Value').text
                            if feature.find('Value').text is not None
                            else '')
                    token._.morph = set(anas.split(';'))
                    token._.lemma = self.get_lemma_from_morph(anas)
                    break
        return doc
if __name__ == "__main__":
from Tokenizer import HuTokenizer
debug_text = 'Jó, hogy ez az alma piros, mert az olyan almákat szeretem.'
remote_url = 'http://hlt.bme.hu/chatbot/gate/process?run='
nlp = spacy.blank("en")
nlp.tokenizer = HuTokenizer(nlp.vocab, url=remote_url)
morph_analyzer = HuLemmaMorph(nlp, url=remote_url)
nlp.add_pipe(morph_analyzer, last=True)
doc = nlp(debug_text)
for token in doc:
print('Token is: ' + token.text)
print(token._.lemma)
print(token._.morph)
print()
| true | true |
7901ce20f3cfeffffbc33eaecd41256801f8807e | 1,664 | py | Python | setup.py | lapidshay/DEMON | c7eeeb22be279c9dd9a6555bae06702f369021ac | [
"BSD-2-Clause"
] | 1 | 2018-04-09T17:01:00.000Z | 2018-04-09T17:01:00.000Z | setup.py | KDDComplexNetworkAnalysis/DEMON | c32d80198a90c4d6c2165e34cffd79315aa764e7 | [
"BSD-2-Clause"
] | null | null | null | setup.py | KDDComplexNetworkAnalysis/DEMON | c32d80198a90c4d6c2165e34cffd79315aa764e7 | [
"BSD-2-Clause"
] | null | null | null | from setuptools import setup, find_packages
__author__ = 'Giulio Rossetti'
__license__ = "BSD 2 Clause"
__email__ = "giulio.rossetti@gmail.com"

# Get the long description from the README file
# with open(path.join(here, 'README.md'), encoding='utf-8') as f:
#     long_description = f.read()

setup(name='demon',
      version='2.0.4',
      license='BSD-2-Clause',
      description='Community Discovery algorithm',
      url='https://github.com/GiulioRossetti/DEMON',
      author='Giulio Rossetti',
      author_email='giulio.rossetti@gmail.com',
      # NOTE(review): use_2to3 was removed in setuptools 58, so installation
      # fails with a modern toolchain while this flag is present. Kept only
      # because Python 2.7 support is still declared -- confirm before removing.
      use_2to3=True,
      classifiers=[
          # How mature is this project? Common values are
          #   3 - Alpha
          #   4 - Beta
          #   5 - Production/Stable
          'Development Status :: 5 - Production/Stable',
          # Indicate who your project is intended for
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Build Tools',
          # Pick your license as you wish (should match "license" above)
          'License :: OSI Approved :: BSD License',
          "Operating System :: OS Independent",
          # Specify the Python versions you support here. In particular, ensure
          # that you indicate whether you support Python 2, Python 3 or both.
          'Programming Language :: Python',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3'
      ],
      keywords=['complex-networks', 'community discovery'],
      # Bug fix: the list previously contained an empty string (''), which is
      # not a valid requirement specifier and breaks dependency resolution.
      install_requires=['networkx', 'future'],
      packages=find_packages(exclude=["*.test", "*.test.*", "test.*", "test", "demon.test", "demon.test.*"]),
      )
| 36.977778 | 109 | 0.615385 | from setuptools import setup, find_packages
__author__ = 'Giulio Rossetti'
__license__ = "BSD 2 Clause"
__email__ = "giulio.rossetti@gmail.com"

setup(name='demon',
      version='2.0.4',
      license='BSD-2-Clause',
      description='Community Discovery algorithm',
      url='https://github.com/GiulioRossetti/DEMON',
      author='Giulio Rossetti',
      author_email='giulio.rossetti@gmail.com',
      # NOTE(review): use_2to3 was removed in setuptools 58; installation
      # fails with a modern toolchain while this flag is present.
      use_2to3=True,
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Build Tools',
          'License :: OSI Approved :: BSD License',
          "Operating System :: OS Independent",
          'Programming Language :: Python',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3'
      ],
      keywords=['complex-networks', 'community discovery'],
      # Bug fix: dropped the invalid empty-string ('') requirement specifier.
      install_requires=['networkx', 'future'],
      packages=find_packages(exclude=["*.test", "*.test.*", "test.*", "test", "demon.test", "demon.test.*"]),
      )
| true | true |
7901ce538a3da1b295edd54873a1c484b4a994be | 21,581 | py | Python | test/functional/feature_pruning.py | umkoin/umkoin | ba28fce6f6b22099ba7b900619653f9e342d3cd1 | [
"MIT"
] | 6 | 2018-02-28T22:23:46.000Z | 2020-02-13T13:49:44.000Z | test/functional/feature_pruning.py | umkoin/umkoin | ba28fce6f6b22099ba7b900619653f9e342d3cd1 | [
"MIT"
] | null | null | null | test/functional/feature_pruning.py | umkoin/umkoin | ba28fce6f6b22099ba7b900619653f9e342d3cd1 | [
"MIT"
] | 6 | 2018-02-05T12:51:25.000Z | 2020-04-26T10:42:49.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the pruning code.
WARNING:
This test uses 4GB of disk space.
This test takes 30 mins or more (up to 2 hours)
"""
import os
from test_framework.blocktools import create_coinbase
from test_framework.messages import CBlock
from test_framework.script import (
CScript,
OP_NOP,
OP_RETURN,
)
from test_framework.test_framework import UmkoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# compatible with pruning based on key creation time.
TIMESTAMP_WINDOW = 2 * 60 * 60
def mine_large_blocks(node, n):
    """Mine n consecutive ~1MB blocks on *node* so block files grow quickly.

    Each block's coinbase carries a huge OP_RETURN script, which keeps the
    block consensus-valid while maximising its serialized size.
    """
    # Make a large scriptPubKey for the coinbase transaction. This is OP_RETURN
    # followed by 950k of OP_NOP. This would be non-standard in a non-coinbase
    # transaction but is consensus valid.

    # Set the nTime if this is the first time this function has been called.
    # A static variable ensures that time is monotonically increasing and is
    # therefore different for each block created => blockhash is unique.
    # Bug fix: the guard previously checked "nTimes" (typo), which is never
    # set, so nTime was reset to 0 on every call instead of only the first.
    if "nTime" not in mine_large_blocks.__dict__:
        mine_large_blocks.nTime = 0

    # Get the block parameters for the first block
    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
    best_block = node.getblock(node.getbestblockhash())
    height = int(best_block["height"]) + 1
    mine_large_blocks.nTime = max(mine_large_blocks.nTime, int(best_block["time"])) + 1
    previousblockhash = int(best_block["hash"], 16)

    for _ in range(n):
        # Build the coinbase transaction (with large scriptPubKey)
        coinbase_tx = create_coinbase(height)
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
        coinbase_tx.vout[0].scriptPubKey = big_script
        coinbase_tx.rehash()

        # Build the block
        block = CBlock()
        block.nVersion = best_block["version"]
        block.hashPrevBlock = previousblockhash
        block.nTime = mine_large_blocks.nTime
        block.nBits = int('207fffff', 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()

        # Submit to the node
        node.submitblock(block.serialize().hex())

        previousblockhash = block.sha256
        height += 1
        mine_large_blocks.nTime += 1
def calc_usage(blockdir):
    """Return the total size in MiB of the regular files directly inside *blockdir*.

    Subdirectories are ignored. Uses os.path.join consistently: the original
    concatenated the path for getsize but joined it for isfile, which only
    worked while every caller passed a trailing path separator.
    """
    return sum(os.path.getsize(os.path.join(blockdir, f))
               for f in os.listdir(blockdir)
               if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
class PruneTest(UmkoinTestFramework):
    """End-to-end pruning test: automatic pruning (-prune=550), manual pruning
    via the pruneblockchain RPC, deep-reorg behaviour of a pruned node, and
    wallet rescans on pruned nodes."""
    def set_test_params(self):
        """Configure six nodes: 0/1 miners, 2 auto-prune, 3/4 manual prune, 5 prune+wallet (initially disconnected)."""
        self.setup_clean_chain = True
        self.num_nodes = 6
        self.supports_cli = False
        # Create nodes 0 and 1 to mine.
        # Create node 2 to test pruning.
        self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
        # Create nodes 3 and 4 to test manual pruning (they will be re-started with manual pruning later)
        # Create nodes 5 to test wallet in prune mode, but do not connect
        self.extra_args = [
            self.full_node_default_args,
            self.full_node_default_args,
            ["-maxreceivebuffer=20000", "-prune=550"],
            ["-maxreceivebuffer=20000"],
            ["-maxreceivebuffer=20000"],
            ["-prune=550"],
        ]
        self.rpc_timeout = 120
    def skip_test_if_missing_module(self):
        """The wallet_test phase needs wallet support compiled in."""
        self.skip_if_no_wallet()
    def setup_network(self):
        """Connect nodes 0-4 in a mesh around node 0; node 5 stays disconnected until wallet_test."""
        self.setup_nodes()
        # Trailing '' yields a path with a trailing separator, as calc_usage expects.
        self.prunedir = os.path.join(self.nodes[2].datadir, self.chain, 'blocks', '')
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 2)
        self.connect_nodes(0, 2)
        self.connect_nodes(0, 3)
        self.connect_nodes(0, 4)
        self.sync_blocks(self.nodes[0:5])
    def setup_nodes(self):
        """Start all nodes and import coinbase keys so mined rewards are spendable."""
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()
        self.import_deterministic_coinbase_privkeys()
    def create_big_chain(self):
        """Mine 350 small blocks plus 645 large ones (>550MiB of block data)."""
        # Start by creating some coinbases we can spend later
        self.generate(self.nodes[1], 200)
        self.sync_blocks(self.nodes[0:2])
        self.generate(self.nodes[0], 150)
        # Then mine enough full blocks to create more than 550MiB of data
        mine_large_blocks(self.nodes[0], 645)
        self.sync_blocks(self.nodes[0:5])
    def test_invalid_command_line_options(self):
        """Check that invalid or conflicting -prune options refuse to start the node."""
        self.nodes[0].assert_start_raises_init_error(
            expected_msg='Error: Prune cannot be configured with a negative value.',
            extra_args=['-prune=-1'],
        )
        self.nodes[0].assert_start_raises_init_error(
            expected_msg='Error: Prune configured below the minimum of 550 MiB. Please use a higher number.',
            extra_args=['-prune=549'],
        )
        self.nodes[0].assert_start_raises_init_error(
            expected_msg='Error: Prune mode is incompatible with -txindex.',
            extra_args=['-prune=550', '-txindex'],
        )
        self.nodes[0].assert_start_raises_init_error(
            expected_msg='Error: Prune mode is incompatible with -coinstatsindex.',
            extra_args=['-prune=550', '-coinstatsindex'],
        )
    def test_height_min(self):
        """Verify nothing is pruned before the height cutoff, then that mining 25 more blocks prunes blk00000.dat."""
        assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
        self.log.info("Success")
        self.log.info(f"Though we're already using more than 550MiB, current usage: {calc_usage(self.prunedir)}")
        self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
        # Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
        mine_large_blocks(self.nodes[0], 25)
        # Wait for blk00000.dat to be pruned
        self.wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
        self.log.info("Success")
        usage = calc_usage(self.prunedir)
        self.log.info(f"Usage should be below target: {usage}")
        assert_greater_than(550, usage)
    def create_chain_with_staleblocks(self):
        """Create 12 stale 24-block forks (each reorged away by a 25-block chain) to inflate disk usage."""
        # Create stale blocks in manageable sized chunks
        self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
        for _ in range(12):
            # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
            # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
            self.disconnect_nodes(0, 1)
            self.disconnect_nodes(0, 2)
            # Mine 24 blocks in node 1
            mine_large_blocks(self.nodes[1], 24)
            # Reorg back with 25 block chain from node 0
            mine_large_blocks(self.nodes[0], 25)
            # Create connections in the order so both nodes can see the reorg at the same time
            self.connect_nodes(0, 1)
            self.connect_nodes(0, 2)
            self.sync_blocks(self.nodes[0:3])
        self.log.info(f"Usage can be over target because of high stale rate: {calc_usage(self.prunedir)}")
    def reorg_test(self):
        """Force a 288-block reorg on the pruned node 2 and check usage stays below target afterwards."""
        # Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
        # This will cause Node 2 to do a reorg requiring 288 blocks of undo data to the reorg_test chain
        height = self.nodes[1].getblockcount()
        self.log.info(f"Current block height: {height}")
        self.forkheight = height - 287
        self.forkhash = self.nodes[1].getblockhash(self.forkheight)
        self.log.info(f"Invalidating block {self.forkhash} at height {self.forkheight}")
        self.nodes[1].invalidateblock(self.forkhash)
        # We've now switched to our previously mined-24 block fork on node 1, but that's not what we want
        # So invalidate that fork as well, until we're on the same chain as node 0/2 (but at an ancestor 288 blocks ago)
        mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
        curhash = self.nodes[1].getblockhash(self.forkheight - 1)
        while curhash != mainchainhash:
            self.nodes[1].invalidateblock(curhash)
            curhash = self.nodes[1].getblockhash(self.forkheight - 1)
        assert self.nodes[1].getblockcount() == self.forkheight - 1
        self.log.info(f"New best height: {self.nodes[1].getblockcount()}")
        # Disconnect node1 and generate the new chain
        self.disconnect_nodes(0, 1)
        self.disconnect_nodes(1, 2)
        self.log.info("Generating new longer chain of 300 more blocks")
        self.generate(self.nodes[1], 300)
        self.log.info("Reconnect nodes")
        self.connect_nodes(0, 1)
        self.connect_nodes(1, 2)
        self.sync_blocks(self.nodes[0:3], timeout=120)
        self.log.info(f"Verify height on node 2: {self.nodes[2].getblockcount()}")
        self.log.info(f"Usage possibly still high because of stale blocks in block files: {calc_usage(self.prunedir)}")
        self.log.info("Mine 220 more large blocks so we have requisite history")
        mine_large_blocks(self.nodes[0], 220)
        self.sync_blocks(self.nodes[0:3], timeout=120)
        usage = calc_usage(self.prunedir)
        self.log.info(f"Usage should be below target: {usage}")
        assert_greater_than(550, usage)
    def reorg_back(self):
        """Reorg node 2 back to the pre-reorg_test main chain, forcing it to redownload pruned blocks."""
        # Verify that a block on the old main chain fork has been pruned away
        assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
        with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
            self.nodes[2].verifychain(checklevel=4, nblocks=0)
        self.log.info(f"Will need to redownload block {self.forkheight}")
        # Verify that we have enough history to reorg back to the fork point
        # Although this is more than 288 blocks, because this chain was written more recently
        # and only its other 299 small and 220 large blocks are in the block files after it,
        # it is expected to still be retained
        self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
        first_reorg_height = self.nodes[2].getblockcount()
        curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
        self.nodes[2].invalidateblock(curchainhash)
        goalbestheight = self.mainchainheight
        goalbesthash = self.mainchainhash2
        # As of 0.10 the current block download logic is not able to reorg to the original chain created in
        # create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
        # redownload its missing blocks.
        # Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
        # because it has all the block data.
        # However it must mine enough blocks to have a more work chain than the reorg_test chain in order
        # to trigger node 2's block download logic.
        # At this point node 2 is within 288 blocks of the fork point so it will preserve its ability to reorg
        if self.nodes[2].getblockcount() < self.mainchainheight:
            blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
            self.log.info(f"Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {blocks_to_mine}")
            self.nodes[0].invalidateblock(curchainhash)
            assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
            assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
            goalbesthash = self.generate(self.nodes[0], blocks_to_mine)[-1]
            goalbestheight = first_reorg_height + 1
        self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
        # Wait for Node 2 to reorg to proper height
        self.wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
        assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
        # Verify we can now have the data for a block previously pruned
        assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
    def manual_test(self, node_number, use_timestamp):
        """Exercise the pruneblockchain RPC on a manual-prune node, addressing targets either by height or by timestamp."""
        # at this point, node has 995 blocks and has not yet run in prune mode
        self.start_node(node_number)
        node = self.nodes[node_number]
        assert_equal(node.getblockcount(), 995)
        assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)
        # now re-start in manual pruning mode
        self.restart_node(node_number, extra_args=["-prune=1"])
        node = self.nodes[node_number]
        assert_equal(node.getblockcount(), 995)
        def height(index):
            """Translate a block index into the RPC target (timestamp or raw height)."""
            if use_timestamp:
                return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
            else:
                return index
        def prune(index):
            """Prune up to *index* and verify the RPC return matches pruneheight."""
            ret = node.pruneblockchain(height=height(index))
            assert_equal(ret, node.getblockchaininfo()['pruneheight'])
        def has_block(index):
            """True if block file blk{index:05}.dat still exists on disk."""
            return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", f"blk{index:05}.dat"))
        # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
        assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
        # Save block transaction count before pruning, assert value
        block1_details = node.getblock(node.getblockhash(1))
        assert_equal(block1_details["nTx"], len(block1_details["tx"]))
        # mine 6 blocks so we are at height 1001 (i.e., above PruneAfterHeight)
        self.generate(node, 6)
        assert_equal(node.getblockchaininfo()["blocks"], 1001)
        # Pruned block should still know the number of transactions
        assert_equal(node.getblockheader(node.getblockhash(1))["nTx"], block1_details["nTx"])
        # negative heights should raise an exception
        assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)
        # height=100 too low to prune first block file so this is a no-op
        prune(100)
        assert has_block(0), "blk00000.dat is missing when should still be there"
        # Does nothing
        node.pruneblockchain(height(0))
        assert has_block(0), "blk00000.dat is missing when should still be there"
        # height=500 should prune first file
        prune(500)
        assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
        assert has_block(1), "blk00001.dat is missing when should still be there"
        # height=650 should prune second file
        prune(650)
        assert not has_block(1), "blk00001.dat is still there, should be pruned by now"
        # height=1000 should not prune anything more, because tip-288 is in blk00002.dat.
        prune(1000)
        assert has_block(2), "blk00002.dat is still there, should be pruned by now"
        # advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
        self.generate(node, 288)
        prune(1000)
        assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
        assert not has_block(3), "blk00003.dat is still there, should be pruned by now"
        # stop node, start back up with auto-prune at 550 MiB, make sure still runs
        self.restart_node(node_number, extra_args=["-prune=550"])
        self.log.info("Success")
    def wallet_test(self):
        """Verify wallet rescans succeed on pruned nodes (restart of node 2, and IBD + restart of node 5)."""
        # check that the pruning node's wallet is still in good shape
        self.log.info("Stop and start pruning node to trigger wallet rescan")
        self.restart_node(2, extra_args=["-prune=550"])
        self.log.info("Success")
        # check that wallet loads successfully when restarting a pruned node after IBD.
        # this was reported to fail in #7494.
        self.log.info("Syncing node 5 to test wallet")
        self.connect_nodes(0, 5)
        nds = [self.nodes[0], self.nodes[5]]
        self.sync_blocks(nds, wait=5, timeout=300)
        self.restart_node(5, extra_args=["-prune=550"])  # restart to trigger rescan
        self.log.info("Success")
    def run_test(self):
        """Drive the full scenario: big chain, stale forks, deep reorgs, manual pruning, wallet rescans, CLI checks."""
        self.log.info("Warning! This test requires 4GB of disk space")
        self.log.info("Mining a big blockchain of 995 blocks")
        self.create_big_chain()
        # Chain diagram key:
        # *   blocks on main chain
        # +,&,$,@ blocks on other forks
        # X   invalidated block
        # N1  Node 1
        #
        # Start by mining a simple chain that all nodes have
        # N0=N1=N2 **...*(995)
        # stop manual-pruning node with 995 blocks
        self.stop_node(3)
        self.stop_node(4)
        self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
        self.test_height_min()
        # Extend this chain past the PruneAfterHeight
        # N0=N1=N2 **...*(1020)
        self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
        self.create_chain_with_staleblocks()
        # Disconnect N0
        # And mine a 24 block chain on N1 and a separate 25 block chain on N0
        # N1=N2 **...*+...+(1044)
        # N0    **...**...**(1045)
        #
        # reconnect nodes causing reorg on N1 and N2
        # N1=N2 **...*(1020) *...**(1045)
        #                   \
        #                    +...+(1044)
        #
        # repeat this process until you have 12 stale forks hanging off the
        # main chain on N1 and N2
        # N0    *************************...***************************(1320)
        #
        # N1=N2 **...*(1020) *...**(1045) *..         ..**(1295) *...**(1320)
        #                   \            \                      \
        #                    +...+(1044)  &..                    $...$(1319)
        # Save some current chain state for later use
        self.mainchainheight = self.nodes[2].getblockcount()  # 1320
        self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
        self.log.info("Check that we can survive a 288 block reorg still")
        self.reorg_test()  # (1033, )
        # Now create a 288 block reorg by mining a longer chain on N1
        # First disconnect N1
        # Then invalidate 1033 on main chain and 1032 on fork so height is 1032 on main chain
        # N1   **...*(1020) **...**(1032)X..
        #                  \
        #                   ++...+(1031)X..
        #
        # Now mine 300 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@(1332)
        #                  \            \
        #                   \            X...
        #                    \            \
        #                     ++...+(1031)X..            ..
        #
        # Reconnect nodes and mine 220 more blocks on N1
        # N1    **...*(1020) **...**(1032) @@...@@@(1552)
        #                  \            \
        #                   \            X...
        #                    \            \
        #                     ++...+(1031)X..            ..
        #
        # N2    **...*(1020) **...**(1032) @@...@@@(1552)
        #                  \            \
        #                   \            *...**(1320)
        #                    \            \
        #                     ++...++(1044)  ..
        #
        # N0    ********************(1032) @@...@@@(1552)
        #                                 \
        #                                  *...**(1320)
        self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
        self.reorg_back()
        # Verify that N2 still has block 1033 on current chain (@), but not on main chain (*)
        # Invalidate 1033 on current chain (@) on N2 and we should be able to reorg to
        # original main chain (*), but will require redownload of some blocks
        # In order to have a peer we think we can download from, must also perform this invalidation
        # on N0 and mine a new longest chain to trigger.
        # Final result:
        # N0    ********************(1032) **...****(1553)
        #                                 \
        #                                  X@...@@@(1552)
        #
        # N2    **...*(1020) **...**(1032) **...****(1553)
        #                  \            \
        #                   \            X@...@@@(1552)
        #                    \
        #                     +..
        #
        # N1 doesn't change because 1033 on main chain (*) is invalid
        self.log.info("Test manual pruning with block indices")
        self.manual_test(3, use_timestamp=False)
        self.log.info("Test manual pruning with timestamps")
        self.manual_test(4, use_timestamp=True)
        self.log.info("Test wallet re-scan")
        self.wallet_test()
        self.log.info("Test invalid pruning command line options")
        self.test_invalid_command_line_options()
        self.log.info("Done")
if __name__ == '__main__':
    # Standard test_framework entry point: parses options, runs run_test().
    PruneTest().main()
| 44.866944 | 138 | 0.613781 |
import os
from test_framework.blocktools import create_coinbase
from test_framework.messages import CBlock
from test_framework.script import (
CScript,
OP_NOP,
OP_RETURN,
)
from test_framework.test_framework import UmkoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
TIMESTAMP_WINDOW = 2 * 60 * 60
def mine_large_blocks(node, n):
    """Mine n consecutive ~1MB blocks on *node* so block files grow quickly.

    Each block's coinbase carries a huge OP_RETURN script, which keeps the
    block consensus-valid while maximising its serialized size.
    """
    # Bug fix: the guard previously checked "nTimes" (typo), which is never
    # set, so nTime was reset to 0 on every call instead of only the first.
    # The static attribute keeps time monotonically increasing across calls
    # so every created block gets a unique hash.
    if "nTime" not in mine_large_blocks.__dict__:
        mine_large_blocks.nTime = 0
    # Large consensus-valid (but non-standard) coinbase scriptPubKey.
    big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
    best_block = node.getblock(node.getbestblockhash())
    height = int(best_block["height"]) + 1
    mine_large_blocks.nTime = max(mine_large_blocks.nTime, int(best_block["time"])) + 1
    previousblockhash = int(best_block["hash"], 16)
    for _ in range(n):
        # Coinbase transaction carrying the oversized script.
        coinbase_tx = create_coinbase(height)
        coinbase_tx.vin[0].nSequence = 2 ** 32 - 1
        coinbase_tx.vout[0].scriptPubKey = big_script
        coinbase_tx.rehash()
        # Assemble, solve and submit the block.
        block = CBlock()
        block.nVersion = best_block["version"]
        block.hashPrevBlock = previousblockhash
        block.nTime = mine_large_blocks.nTime
        block.nBits = int('207fffff', 16)
        block.nNonce = 0
        block.vtx = [coinbase_tx]
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        node.submitblock(block.serialize().hex())
        previousblockhash = block.sha256
        height += 1
        mine_large_blocks.nTime += 1
def calc_usage(blockdir):
    """Return the total size in MiB of the regular files directly inside *blockdir*.

    Subdirectories are ignored. Uses os.path.join consistently: the original
    concatenated the path for getsize but joined it for isfile, which only
    worked while every caller passed a trailing path separator.
    """
    return sum(os.path.getsize(os.path.join(blockdir, f))
               for f in os.listdir(blockdir)
               if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
class PruneTest(UmkoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 6
self.supports_cli = False
self.full_node_default_args = ["-maxreceivebuffer=20000", "-checkblocks=5"]
self.extra_args = [
self.full_node_default_args,
self.full_node_default_args,
["-maxreceivebuffer=20000", "-prune=550"],
["-maxreceivebuffer=20000"],
["-maxreceivebuffer=20000"],
["-prune=550"],
]
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
self.prunedir = os.path.join(self.nodes[2].datadir, self.chain, 'blocks', '')
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.connect_nodes(0, 3)
self.connect_nodes(0, 4)
self.sync_blocks(self.nodes[0:5])
def setup_nodes(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
self.import_deterministic_coinbase_privkeys()
def create_big_chain(self):
self.generate(self.nodes[1], 200)
self.sync_blocks(self.nodes[0:2])
self.generate(self.nodes[0], 150)
mine_large_blocks(self.nodes[0], 645)
self.sync_blocks(self.nodes[0:5])
def test_invalid_command_line_options(self):
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Prune cannot be configured with a negative value.',
extra_args=['-prune=-1'],
)
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Prune configured below the minimum of 550 MiB. Please use a higher number.',
extra_args=['-prune=549'],
)
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Prune mode is incompatible with -txindex.',
extra_args=['-prune=550', '-txindex'],
)
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Prune mode is incompatible with -coinstatsindex.',
extra_args=['-prune=550', '-coinstatsindex'],
)
def test_height_min(self):
assert os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), "blk00000.dat is missing, pruning too early"
self.log.info("Success")
self.log.info(f"Though we're already using more than 550MiB, current usage: {calc_usage(self.prunedir)}")
self.log.info("Mining 25 more blocks should cause the first block file to be pruned")
# Pruning doesn't run until we're allocating another chunk, 20 full blocks past the height cutoff will ensure this
mine_large_blocks(self.nodes[0], 25)
# Wait for blk00000.dat to be pruned
self.wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
self.log.info("Success")
usage = calc_usage(self.prunedir)
self.log.info(f"Usage should be below target: {usage}")
assert_greater_than(550, usage)
def create_chain_with_staleblocks(self):
# Create stale blocks in manageable sized chunks
self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
for _ in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
self.disconnect_nodes(0, 1)
self.disconnect_nodes(0, 2)
# Mine 24 blocks in node 1
mine_large_blocks(self.nodes[1], 24)
# Reorg back with 25 block chain from node 0
mine_large_blocks(self.nodes[0], 25)
# Create connections in the order so both nodes can see the reorg at the same time
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
self.sync_blocks(self.nodes[0:3])
self.log.info(f"Usage can be over target because of high stale rate: {calc_usage(self.prunedir)}")
def reorg_test(self):
# Node 1 will mine a 300 block chain starting 287 blocks back from Node 0 and Node 2's tip
height = self.nodes[1].getblockcount()
self.log.info(f"Current block height: {height}")
self.forkheight = height - 287
self.forkhash = self.nodes[1].getblockhash(self.forkheight)
self.log.info(f"Invalidating block {self.forkhash} at height {self.forkheight}")
self.nodes[1].invalidateblock(self.forkhash)
mainchainhash = self.nodes[0].getblockhash(self.forkheight - 1)
curhash = self.nodes[1].getblockhash(self.forkheight - 1)
while curhash != mainchainhash:
self.nodes[1].invalidateblock(curhash)
curhash = self.nodes[1].getblockhash(self.forkheight - 1)
assert self.nodes[1].getblockcount() == self.forkheight - 1
self.log.info(f"New best height: {self.nodes[1].getblockcount()}")
# Disconnect node1 and generate the new chain
self.disconnect_nodes(0, 1)
self.disconnect_nodes(1, 2)
self.log.info("Generating new longer chain of 300 more blocks")
self.generate(self.nodes[1], 300)
self.log.info("Reconnect nodes")
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.sync_blocks(self.nodes[0:3], timeout=120)
self.log.info(f"Verify height on node 2: {self.nodes[2].getblockcount()}")
self.log.info(f"Usage possibly still high because of stale blocks in block files: {calc_usage(self.prunedir)}")
self.log.info("Mine 220 more large blocks so we have requisite history")
mine_large_blocks(self.nodes[0], 220)
self.sync_blocks(self.nodes[0:3], timeout=120)
usage = calc_usage(self.prunedir)
self.log.info(f"Usage should be below target: {usage}")
assert_greater_than(550, usage)
def reorg_back(self):
# Verify that a block on the old main chain fork has been pruned away
assert_raises_rpc_error(-1, "Block not available (pruned data)", self.nodes[2].getblock, self.forkhash)
with self.nodes[2].assert_debug_log(expected_msgs=['block verification stopping at height', '(pruning, no data)']):
self.nodes[2].verifychain(checklevel=4, nblocks=0)
self.log.info(f"Will need to redownload block {self.forkheight}")
# Verify that we have enough history to reorg back to the fork point
# Although this is more than 288 blocks, because this chain was written more recently
# and only its other 299 small and 220 large blocks are in the block files after it,
# it is expected to still be retained
self.nodes[2].getblock(self.nodes[2].getblockhash(self.forkheight))
first_reorg_height = self.nodes[2].getblockcount()
curchainhash = self.nodes[2].getblockhash(self.mainchainheight)
self.nodes[2].invalidateblock(curchainhash)
goalbestheight = self.mainchainheight
goalbesthash = self.mainchainhash2
# As of 0.10 the current block download logic is not able to reorg to the original chain created in
# create_chain_with_stale_blocks because it doesn't know of any peer that's on that chain from which to
# redownload its missing blocks.
# Invalidate the reorg_test chain in node 0 as well, it can successfully switch to the original chain
# because it has all the block data.
# However it must mine enough blocks to have a more work chain than the reorg_test chain in order
# to trigger node 2's block download logic.
if self.nodes[2].getblockcount() < self.mainchainheight:
blocks_to_mine = first_reorg_height + 1 - self.mainchainheight
self.log.info(f"Rewind node 0 to prev main chain to mine longer chain to trigger redownload. Blocks needed: {blocks_to_mine}")
self.nodes[0].invalidateblock(curchainhash)
assert_equal(self.nodes[0].getblockcount(), self.mainchainheight)
assert_equal(self.nodes[0].getbestblockhash(), self.mainchainhash2)
goalbesthash = self.generate(self.nodes[0], blocks_to_mine)[-1]
goalbestheight = first_reorg_height + 1
self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
self.wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
assert_equal(self.nodes[2].getbestblockhash(), goalbesthash)
assert_equal(self.nodes[2].getblock(self.forkhash)["height"], self.forkheight)
def manual_test(self, node_number, use_timestamp):
# Exercise the pruneblockchain RPC on a manually pruned node (-prune=1).
# When use_timestamp is True, prune targets are expressed as block
# timestamps (+ TIMESTAMP_WINDOW) rather than raw block heights.
self.start_node(node_number)
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
# The RPC must be rejected while the node is not running in prune mode.
assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)
# Restart in manual pruning mode.
self.restart_node(node_number, extra_args=["-prune=1"])
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
# Translate a block index into the argument pruneblockchain expects.
def height(index):
if use_timestamp:
return node.getblockheader(node.getblockhash(index))["time"] + TIMESTAMP_WINDOW
else:
return index
# Prune up to `index` and check the RPC's return matches the node state.
def prune(index):
ret = node.pruneblockchain(height=height(index))
assert_equal(ret, node.getblockchaininfo()['pruneheight'])
# True while blkNNNNN.dat still exists in the node's blocks directory.
def has_block(index):
return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", f"blk{index:05}.dat"))
# Chain tip (995) is still below the minimum prunable height.
assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# Save block transaction count before pruning; nTx must match the tx list.
block1_details = node.getblock(node.getblockhash(1))
assert_equal(block1_details["nTx"], len(block1_details["tx"]))
# Mine 6 blocks so the tip (1001) exceeds the minimum prunable height.
self.generate(node, 6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
# A pruned block header should still report its transaction count.
assert_equal(node.getblockheader(node.getblockhash(1))["nTx"], block1_details["nTx"])
# Negative heights must be rejected.
assert_raises_rpc_error(-8, "Negative", node.pruneblockchain, -10)
# height=100 is too low to prune the first block file, so this is a no-op.
prune(100)
assert has_block(0), "blk00000.dat is missing when should still be there"
# Pruning to height 0 does nothing.
node.pruneblockchain(height(0))
assert has_block(0), "blk00000.dat is missing when should still be there"
# height=500 should prune the first block file.
prune(500)
assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
assert has_block(1), "blk00001.dat is missing when should still be there"
# height=650 should prune the second block file.
prune(650)
assert not has_block(1), "blk00001.dat is still there, should be pruned by now"
# height=1000 cannot prune further while the 288-block window reaches back
# into blk00002.dat.
prune(1000)
assert has_block(2), "blk00002.dat is still there, should be pruned by now"
# Advance the tip so blk00002.dat/blk00003.dat fall out of the window.
self.generate(node, 288)
prune(1000)
assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
assert not has_block(3), "blk00003.dat is still there, should be pruned by now"
# Restart with automatic pruning and make sure the node still runs.
self.restart_node(node_number, extra_args=["-prune=550"])
self.log.info("Success")
def wallet_test(self):
# Regression test: a pruned node must restart and rescan its wallet cleanly.
self.log.info("Stop and start pruning node to trigger wallet rescan")
self.restart_node(2, extra_args=["-prune=550"])
self.log.info("Success")
# check that wallet loads successfully when restarting a pruned node after IBD.
# this was reported to fail in #7494.
self.log.info("Syncing node 5 to test wallet")
self.connect_nodes(0, 5)
nds = [self.nodes[0], self.nodes[5]]
self.sync_blocks(nds, wait=5, timeout=300)
self.restart_node(5, extra_args=["-prune=550"]) # restart to trigger rescan
self.log.info("Success")
def run_test(self):
# End-to-end pruning scenario: build a 995-block chain shared by all nodes,
# stress stale-block and deep-reorg behaviour on the auto-pruning nodes,
# then exercise manual pruning (by height and by timestamp), wallet rescan
# on pruned nodes, and invalid -prune command-line options.
self.log.info("Warning! This test requires 4GB of disk space")
self.log.info("Mining a big blockchain of 995 blocks")
self.create_big_chain()
# Chain diagram key:
# * blocks on main chain
# +,&,$,@ blocks on other forks
# X invalidated block
# N1 Node 1
#
# Start by mining a simple chain that all nodes have
# N0=N1=N2 **...*(995)
# stop manual-pruning node with 995 blocks
self.stop_node(3)
self.stop_node(4)
self.log.info("Check that we haven't started pruning yet because we're below PruneAfterHeight")
self.test_height_min()
# Extend this chain past the PruneAfterHeight
# N0=N1=N2 **...*(1020)
self.log.info("Check that we'll exceed disk space target if we have a very high stale block rate")
self.create_chain_with_staleblocks()
# Remember the main-chain tip so reorg_back() can rewind to it later.
self.mainchainheight = self.nodes[2].getblockcount()
self.mainchainhash2 = self.nodes[2].getblockhash(self.mainchainheight)
self.log.info("Check that we can survive a 288 block reorg still")
self.reorg_test()
self.log.info("Test that we can rerequest a block we previously pruned if needed for a reorg")
self.reorg_back()
self.log.info("Test manual pruning with block indices")
self.manual_test(3, use_timestamp=False)
self.log.info("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
self.log.info("Test wallet re-scan")
self.wallet_test()
self.log.info("Test invalid pruning command line options")
self.test_invalid_command_line_options()
self.log.info("Done")
# Entry point: run the pruning functional test when executed directly.
if __name__ == '__main__':
PruneTest().main()
| true | true |
7901cf733b21a13444709d33afd7941197320ca2 | 11,162 | py | Python | ucr/core/architecture/head/rec_srn_head.py | DocYard-ai/UCR | 7618aa336f56e71d9fd8cdc2d591e3d138e3dc68 | [
"Apache-2.0"
] | 10 | 2021-04-06T15:57:20.000Z | 2021-11-14T23:00:13.000Z | ucr/core/architecture/head/rec_srn_head.py | felixdittrich92/UCR | 7618aa336f56e71d9fd8cdc2d591e3d138e3dc68 | [
"Apache-2.0"
] | 5 | 2021-04-22T10:28:17.000Z | 2022-03-02T07:47:53.000Z | ucr/core/architecture/head/rec_srn_head.py | felixdittrich92/UCR | 7618aa336f56e71d9fd8cdc2d591e3d138e3dc68 | [
"Apache-2.0"
] | 2 | 2021-04-30T10:27:51.000Z | 2021-08-02T15:12:10.000Z | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Modifications copyright (c) 2021 DocYard Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import torch
from torch import nn
from torch.nn import functional as F
from .self_attention import WrapEncoder, WrapEncoderForFeature
gradient_clip = 10
from functools import partial
class PVAM(nn.Module):
    """Parallel Visual Attention Module of SRN.

    Runs a transformer encoder over the flattened backbone feature map, then
    uses one learned positional query per output character slot to attend
    over the encoded sequence, yielding one aligned visual feature vector
    per character.
    """

    def __init__(
        self,
        in_channels,
        char_num,
        max_text_length,
        num_heads,
        num_encoder_tus,
        hidden_dims,
    ):
        super(PVAM, self).__init__()
        self.char_num = char_num            # character-set size
        self.max_length = max_text_length   # number of decoded character slots
        self.num_heads = num_heads
        self.num_encoder_TUs = num_encoder_tus
        self.hidden_dims = hidden_dims
        # Transformer encoder over the (h*w)-long conv feature sequence.
        t = 256  # max feature-sequence length supported by position encoding
        self.wrap_encoder_for_feature = WrapEncoderForFeature(
            src_vocab_size=1,
            max_length=t,
            n_layer=self.num_encoder_TUs,
            n_head=self.num_heads,
            d_key=int(self.hidden_dims / self.num_heads),
            d_value=int(self.hidden_dims / self.num_heads),
            d_model=self.hidden_dims,
            d_inner_hid=self.hidden_dims,
            prepostprocess_dropout=0.1,
            attention_dropout=0.1,
            relu_dropout=0.1,
            preprocess_cmd="n",
            postprocess_cmd="da",
            weight_sharing=True,
        )
        # PVAM attention layers. The two Flatten modules are unused by
        # forward() but kept so the module structure stays unchanged.
        self.flatten0 = nn.Flatten(start_dim=0, end_dim=1)
        self.fc0 = nn.Linear(
            in_features=in_channels,
            out_features=in_channels,
        )
        # One learnable positional embedding per output character slot.
        self.emb = nn.Embedding(
            num_embeddings=self.max_length, embedding_dim=in_channels
        )
        self.flatten1 = nn.Flatten(start_dim=0, end_dim=2)
        self.fc1 = nn.Linear(
            in_features=in_channels, out_features=1, bias=False
        )

    def forward(self, inputs, encoder_word_pos, gsrm_word_pos):
        """inputs: (b, c, h, w) backbone feature map; the *_word_pos tensors
        carry position indices for the encoder and for the character slots.
        Returns (b, max_length, c) per-character visual features."""
        b, c, h, w = inputs.shape
        conv_features = torch.reshape(inputs, shape=(-1, c, h * w))
        conv_features = conv_features.permute(0, 2, 1)  # (b, h*w, c)
        # transformer encoder
        b, t, c = conv_features.shape
        enc_inputs = [conv_features, encoder_word_pos, None]
        word_features = self.wrap_encoder_for_feature(enc_inputs)
        # Additive attention between character-slot queries and the sequence.
        b, t, c = word_features.shape
        word_features = self.fc0(word_features)
        word_features_ = torch.reshape(word_features, (-1, 1, t, c))
        word_features_ = torch.tile(word_features_, (1, self.max_length, 1, 1))
        word_pos_feature = self.emb(gsrm_word_pos)
        word_pos_feature_ = torch.reshape(
            word_pos_feature, (-1, self.max_length, 1, c)
        )
        word_pos_feature_ = torch.tile(word_pos_feature_, (1, 1, t, 1))
        y = word_pos_feature_ + word_features_
        # FIX: F.tanh is deprecated in torch.nn.functional; torch.tanh is the
        # supported equivalent and computes the same values.
        y = torch.tanh(y)
        attention_weight = self.fc1(y)
        attention_weight = torch.reshape(
            attention_weight, shape=(-1, self.max_length, t)
        )
        attention_weight = F.softmax(attention_weight, dim=-1)
        pvam_features = torch.bmm(
            attention_weight, word_features
        )  # [b, max_length, c]
        return pvam_features
class GSRM(nn.Module):
    """Global Semantic Reasoning Module of SRN.

    Decodes per-position visual features into character logits, then reasons
    over the decoded character sequence with two transformer streams (a
    forward one on the right-shifted sequence and a backward one whose output
    is left-shifted). Their sum is projected back onto the character
    vocabulary through the shared embedding matrix.
    """

    def __init__(
        self,
        in_channels,
        char_num,
        max_text_length,
        num_heads,
        num_encoder_tus,
        num_decoder_tus,
        hidden_dims,
    ):
        super(GSRM, self).__init__()
        self.char_num = char_num
        self.max_length = max_text_length
        self.num_heads = num_heads
        self.num_encoder_TUs = num_encoder_tus
        self.num_decoder_TUs = num_decoder_tus
        self.hidden_dims = hidden_dims
        # Visual feature -> character logits.
        self.fc0 = nn.Linear(
            in_features=in_channels, out_features=self.char_num
        )
        self.wrap_encoder0 = WrapEncoder(
            src_vocab_size=self.char_num + 1,
            max_length=self.max_length,
            n_layer=self.num_decoder_TUs,
            n_head=self.num_heads,
            d_key=int(self.hidden_dims / self.num_heads),
            d_value=int(self.hidden_dims / self.num_heads),
            d_model=self.hidden_dims,
            d_inner_hid=self.hidden_dims,
            prepostprocess_dropout=0.1,
            attention_dropout=0.1,
            relu_dropout=0.1,
            preprocess_cmd="n",
            postprocess_cmd="da",
            weight_sharing=True,
        )
        self.wrap_encoder1 = WrapEncoder(
            src_vocab_size=self.char_num + 1,
            max_length=self.max_length,
            n_layer=self.num_decoder_TUs,
            n_head=self.num_heads,
            d_key=int(self.hidden_dims / self.num_heads),
            d_value=int(self.hidden_dims / self.num_heads),
            d_model=self.hidden_dims,
            d_inner_hid=self.hidden_dims,
            prepostprocess_dropout=0.1,
            attention_dropout=0.1,
            relu_dropout=0.1,
            preprocess_cmd="n",
            postprocess_cmd="da",
            weight_sharing=True,
        )
        # Project features onto vocabulary logits by multiplying with the
        # transposed shared character embedding. A partial of a static method
        # replaces the earlier (buggy) lambda and keeps the module picklable.
        self.mul = partial(
            self.f, self.wrap_encoder0.prepare_decoder.emb0.weight
        )

    @staticmethod
    def f(w, x):
        """Multiply features x by the transposed embedding weight w."""
        return torch.matmul(x, w.transpose(-2, -1))

    def forward(
        self, inputs, gsrm_word_pos, gsrm_slf_attn_bias1, gsrm_slf_attn_bias2
    ):
        # ===== GSRM Visual-to-semantic embedding block =====
        b, t, c = inputs.shape
        pvam_features = torch.reshape(inputs, (-1, c))
        word_out = self.fc0(pvam_features)
        # FIX: pass the softmax dim explicitly (implicit-dim softmax is
        # deprecated); dim=1 is what the implicit rule selects for this 2-D
        # tensor, so the result is unchanged. The softmax does not affect the
        # argmax but is kept for parity with the original computation.
        word_ids = torch.argmax(F.softmax(word_out, dim=1), dim=1)
        word_ids = torch.reshape(word_ids, shape=(-1, t, 1))

        # ===== GSRM Semantic reasoning block =====
        # Achieved with bi-transformers: gsrm_feature1 is the forward stream,
        # gsrm_feature2 the backward one.
        pad_idx = self.char_num  # padding token id (one past the last char)
        # Forward stream input: right-shift the ids, prepending the pad token.
        word1 = word_ids.float()
        pad1 = nn.ConstantPad1d((1, 0), value=1.0 * pad_idx)
        word1 = pad1(word1.permute(0, 2, 1)).permute(0, 2, 1)
        word1 = word1.long()
        word1 = word1[:, :-1, :]
        word2 = word_ids
        enc_inputs_1 = [word1, gsrm_word_pos, gsrm_slf_attn_bias1]
        enc_inputs_2 = [word2, gsrm_word_pos, gsrm_slf_attn_bias2]
        gsrm_feature1 = self.wrap_encoder0(enc_inputs_1)
        gsrm_feature2 = self.wrap_encoder1(enc_inputs_2)
        # Backward stream output: left-shift, zero-padding the tail position.
        pad = nn.ConstantPad1d((0, 1), value=0.0)
        gsrm_feature2 = pad(gsrm_feature2.permute(0, 2, 1)).permute(0, 2, 1)
        gsrm_feature2 = gsrm_feature2[
            :,
            1:,
        ]
        gsrm_features = gsrm_feature1 + gsrm_feature2
        # Vocabulary logits via the shared (transposed) embedding matrix.
        gsrm_out = self.mul(gsrm_features)
        b, t, c = gsrm_out.shape
        gsrm_out = torch.reshape(gsrm_out, (-1, c))
        return gsrm_features, word_out, gsrm_out
class VSFD(nn.Module):
    """Visual-Semantic Fusion Decoder of SRN.

    Learns a sigmoid gate from the concatenated visual (PVAM) and semantic
    (GSRM) features and blends the two streams with it, then projects the
    blend onto character logits.
    """

    def __init__(self, in_channels=512, pvam_ch=512, char_num=38):
        super(VSFD, self).__init__()
        self.char_num = char_num
        self.fc0 = nn.Linear(in_features=in_channels * 2, out_features=pvam_ch)
        self.fc1 = nn.Linear(in_features=pvam_ch, out_features=self.char_num)

    def forward(self, pvam_feature, gsrm_feature):
        _, seq_len, vis_ch = pvam_feature.shape
        sem_ch = gsrm_feature.shape[2]
        # Gate computed from the concatenated visual + semantic features.
        fused = torch.cat([pvam_feature, gsrm_feature], dim=2)
        fused_flat = torch.reshape(fused, shape=(-1, vis_ch + sem_ch))
        gate = torch.sigmoid(self.fc0(fused_flat))
        gate = torch.reshape(gate, shape=(-1, seq_len, vis_ch))
        # Convex combination of the two feature streams, then classify.
        blended = gate * pvam_feature + (1.0 - gate) * gsrm_feature
        blended_flat = torch.reshape(blended, shape=(-1, vis_ch))
        return self.fc1(blended_flat)
class SRNHead(nn.Module):
    """SRN recognition head: PVAM -> GSRM -> VSFD.

    Produces an ordered dict of the fused prediction plus the intermediate
    outputs needed by the SRN loss. In eval mode the prediction is
    softmax-normalised and the top-1 character ids are decoded.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        max_text_length,
        num_heads,
        num_encoder_TUs,
        num_decoder_TUs,
        hidden_dims,
        **kwargs
    ):
        super(SRNHead, self).__init__()
        self.char_num = out_channels
        self.max_length = max_text_length
        self.num_heads = num_heads
        self.num_encoder_TUs = num_encoder_TUs
        self.num_decoder_TUs = num_decoder_TUs
        self.hidden_dims = hidden_dims

        self.pvam = PVAM(
            in_channels=in_channels,
            char_num=self.char_num,
            max_text_length=self.max_length,
            num_heads=self.num_heads,
            num_encoder_tus=self.num_encoder_TUs,
            hidden_dims=self.hidden_dims,
        )
        self.gsrm = GSRM(
            in_channels=in_channels,
            char_num=self.char_num,
            max_text_length=self.max_length,
            num_heads=self.num_heads,
            num_encoder_tus=self.num_encoder_TUs,
            num_decoder_tus=self.num_decoder_TUs,
            hidden_dims=self.hidden_dims,
        )
        self.vsfd = VSFD(in_channels=in_channels, char_num=self.char_num)
        # Tie the character embeddings of GSRM's two transformer streams.
        self.gsrm.wrap_encoder1.prepare_decoder.emb0 = (
            self.gsrm.wrap_encoder0.prepare_decoder.emb0
        )

    def forward(self, inputs, others):
        # others: [encoder positions, character-slot positions,
        #          forward-stream attention bias, backward-stream bias]
        enc_word_pos = others[0]
        slot_word_pos = others[1]
        attn_bias_fwd = others[2]
        attn_bias_bwd = others[3]

        pvam_feature = self.pvam(inputs, enc_word_pos, slot_word_pos)
        gsrm_feature, word_out, gsrm_out = self.gsrm(
            pvam_feature,
            slot_word_pos,
            attn_bias_fwd,
            attn_bias_bwd,
        )
        final_out = self.vsfd(pvam_feature, gsrm_feature)
        if not self.training:
            final_out = F.softmax(final_out, dim=1)
        _, decoded_out = torch.topk(final_out, k=1)

        return OrderedDict(
            [
                ("predict", final_out),
                ("pvam_feature", pvam_feature),
                ("decoded_out", decoded_out),
                ("word_out", word_out),
                ("gsrm_out", gsrm_out),
            ]
        )
| 33.620482 | 201 | 0.613241 |
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import torch
from torch import nn
from torch.nn import functional as F
from .self_attention import WrapEncoder, WrapEncoderForFeature
gradient_clip = 10
from functools import partial
class PVAM(nn.Module):
def __init__(
self,
in_channels,
char_num,
max_text_length,
num_heads,
num_encoder_tus,
hidden_dims,
):
super(PVAM, self).__init__()
self.char_num = char_num
self.max_length = max_text_length
self.num_heads = num_heads
self.num_encoder_TUs = num_encoder_tus
self.hidden_dims = hidden_dims
t = 256
self.wrap_encoder_for_feature = WrapEncoderForFeature(
src_vocab_size=1,
max_length=t,
n_layer=self.num_encoder_TUs,
n_head=self.num_heads,
d_key=int(self.hidden_dims / self.num_heads),
d_value=int(self.hidden_dims / self.num_heads),
d_model=self.hidden_dims,
d_inner_hid=self.hidden_dims,
prepostprocess_dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1,
preprocess_cmd="n",
postprocess_cmd="da",
weight_sharing=True,
)
self.flatten0 = nn.Flatten(start_dim=0, end_dim=1)
self.fc0 = nn.Linear(
in_features=in_channels,
out_features=in_channels,
)
self.emb = nn.Embedding(
num_embeddings=self.max_length, embedding_dim=in_channels
)
self.flatten1 = nn.Flatten(start_dim=0, end_dim=2)
self.fc1 = nn.Linear(
in_features=in_channels, out_features=1, bias=False
)
def forward(self, inputs, encoder_word_pos, gsrm_word_pos):
b, c, h, w = inputs.shape
conv_features = torch.reshape(inputs, shape=(-1, c, h * w))
conv_features = conv_features.permute(0, 2, 1)
b, t, c = conv_features.shape
enc_inputs = [conv_features, encoder_word_pos, None]
word_features = self.wrap_encoder_for_feature(enc_inputs)
b, t, c = word_features.shape
word_features = self.fc0(word_features)
word_features_ = torch.reshape(word_features, (-1, 1, t, c))
word_features_ = torch.tile(word_features_, (1, self.max_length, 1, 1))
word_pos_feature = self.emb(gsrm_word_pos)
word_pos_feature_ = torch.reshape(
word_pos_feature, (-1, self.max_length, 1, c)
)
word_pos_feature_ = torch.tile(word_pos_feature_, (1, 1, t, 1))
y = word_pos_feature_ + word_features_
y = F.tanh(y)
attention_weight = self.fc1(y)
attention_weight = torch.reshape(
attention_weight, shape=(-1, self.max_length, t)
)
attention_weight = F.softmax(attention_weight, dim=-1)
pvam_features = torch.bmm(
attention_weight, word_features
)
return pvam_features
class GSRM(nn.Module):
def __init__(
self,
in_channels,
char_num,
max_text_length,
num_heads,
num_encoder_tus,
num_decoder_tus,
hidden_dims,
):
super(GSRM, self).__init__()
self.char_num = char_num
self.max_length = max_text_length
self.num_heads = num_heads
self.num_encoder_TUs = num_encoder_tus
self.num_decoder_TUs = num_decoder_tus
self.hidden_dims = hidden_dims
self.fc0 = nn.Linear(
in_features=in_channels, out_features=self.char_num
)
self.wrap_encoder0 = WrapEncoder(
src_vocab_size=self.char_num + 1,
max_length=self.max_length,
n_layer=self.num_decoder_TUs,
n_head=self.num_heads,
d_key=int(self.hidden_dims / self.num_heads),
d_value=int(self.hidden_dims / self.num_heads),
d_model=self.hidden_dims,
d_inner_hid=self.hidden_dims,
prepostprocess_dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1,
preprocess_cmd="n",
postprocess_cmd="da",
weight_sharing=True,
)
self.wrap_encoder1 = WrapEncoder(
src_vocab_size=self.char_num + 1,
max_length=self.max_length,
n_layer=self.num_decoder_TUs,
n_head=self.num_heads,
d_key=int(self.hidden_dims / self.num_heads),
d_value=int(self.hidden_dims / self.num_heads),
d_model=self.hidden_dims,
d_inner_hid=self.hidden_dims,
prepostprocess_dropout=0.1,
attention_dropout=0.1,
relu_dropout=0.1,
preprocess_cmd="n",
postprocess_cmd="da",
weight_sharing=True,
)
mb0.weight
)
@staticmethod
def f(w, x):
return torch.matmul(x, w.transpose(-2, -1))
def forward(
self, inputs, gsrm_word_pos, gsrm_slf_attn_bias1, gsrm_slf_attn_bias2
):
b, t, c = inputs.shape
pvam_features = torch.reshape(inputs, (-1, c))
word_out = self.fc0(pvam_features)
word_ids = torch.argmax(F.softmax(word_out), dim=1)
word_ids = torch.reshape(word_ids, shape=(-1, t, 1))
pad_idx = self.char_num
word1 = word_ids.float()
pad1 = nn.ConstantPad1d((1, 0), value=1.0 * pad_idx)
word1 = pad1(word1.permute(0, 2, 1)).permute(0, 2, 1)
word1 = word1.long()
word1 = word1[:, :-1, :]
word2 = word_ids
enc_inputs_1 = [word1, gsrm_word_pos, gsrm_slf_attn_bias1]
enc_inputs_2 = [word2, gsrm_word_pos, gsrm_slf_attn_bias2]
gsrm_feature1 = self.wrap_encoder0(enc_inputs_1)
gsrm_feature2 = self.wrap_encoder1(enc_inputs_2)
pad = nn.ConstantPad1d((0, 1), value=0.0)
gsrm_feature2 = pad(gsrm_feature2.permute(0, 2, 1)).permute(0, 2, 1)
gsrm_feature2 = gsrm_feature2[
:,
1:,
]
gsrm_features = gsrm_feature1 + gsrm_feature2
gsrm_out = self.mul(gsrm_features)
b, t, c = gsrm_out.shape
gsrm_out = torch.reshape(gsrm_out, (-1, c))
return gsrm_features, word_out, gsrm_out
class VSFD(nn.Module):
def __init__(self, in_channels=512, pvam_ch=512, char_num=38):
super(VSFD, self).__init__()
self.char_num = char_num
self.fc0 = nn.Linear(in_features=in_channels * 2, out_features=pvam_ch)
self.fc1 = nn.Linear(in_features=pvam_ch, out_features=self.char_num)
def forward(self, pvam_feature, gsrm_feature):
b, t, c1 = pvam_feature.shape
b, t, c2 = gsrm_feature.shape
combine_feature_ = torch.cat([pvam_feature, gsrm_feature], dim=2)
img_comb_feature_ = torch.reshape(
combine_feature_, shape=(-1, c1 + c2)
)
img_comb_feature_map = self.fc0(img_comb_feature_)
img_comb_feature_map = torch.sigmoid(img_comb_feature_map)
img_comb_feature_map = torch.reshape(
img_comb_feature_map, shape=(-1, t, c1)
)
combine_feature = (
img_comb_feature_map * pvam_feature
+ (1.0 - img_comb_feature_map) * gsrm_feature
)
img_comb_feature = torch.reshape(combine_feature, shape=(-1, c1))
out = self.fc1(img_comb_feature)
return out
class SRNHead(nn.Module):
def __init__(
self,
in_channels,
out_channels,
max_text_length,
num_heads,
num_encoder_TUs,
num_decoder_TUs,
hidden_dims,
**kwargs
):
super(SRNHead, self).__init__()
self.char_num = out_channels
self.max_length = max_text_length
self.num_heads = num_heads
self.num_encoder_TUs = num_encoder_TUs
self.num_decoder_TUs = num_decoder_TUs
self.hidden_dims = hidden_dims
self.pvam = PVAM(
in_channels=in_channels,
char_num=self.char_num,
max_text_length=self.max_length,
num_heads=self.num_heads,
num_encoder_tus=self.num_encoder_TUs,
hidden_dims=self.hidden_dims,
)
self.gsrm = GSRM(
in_channels=in_channels,
char_num=self.char_num,
max_text_length=self.max_length,
num_heads=self.num_heads,
num_encoder_tus=self.num_encoder_TUs,
num_decoder_tus=self.num_decoder_TUs,
hidden_dims=self.hidden_dims,
)
self.vsfd = VSFD(in_channels=in_channels, char_num=self.char_num)
self.gsrm.wrap_encoder1.prepare_decoder.emb0 = (
self.gsrm.wrap_encoder0.prepare_decoder.emb0
)
def forward(self, inputs, others):
encoder_word_pos = others[0]
gsrm_word_pos = others[1]
gsrm_slf_attn_bias1 = others[2]
gsrm_slf_attn_bias2 = others[3]
pvam_feature = self.pvam(inputs, encoder_word_pos, gsrm_word_pos)
gsrm_feature, word_out, gsrm_out = self.gsrm(
pvam_feature,
gsrm_word_pos,
gsrm_slf_attn_bias1,
gsrm_slf_attn_bias2,
)
final_out = self.vsfd(pvam_feature, gsrm_feature)
if not self.training:
final_out = F.softmax(final_out, dim=1)
_, decoded_out = torch.topk(final_out, k=1)
predicts = OrderedDict(
[
("predict", final_out),
("pvam_feature", pvam_feature),
("decoded_out", decoded_out),
("word_out", word_out),
("gsrm_out", gsrm_out),
]
)
return predicts
| true | true |
7901d10fa0fae95fc20ad66e101aa64dab9b3697 | 5,466 | py | Python | .config/polybar/weather/weather.py | NearHuscarl/dotfiles | c984e9d53f7a32dc98bdaf8a78788466e3b396ab | [
"BSD-3-Clause"
] | 4 | 2018-08-10T19:52:16.000Z | 2020-09-10T12:01:27.000Z | .config/polybar/weather/weather.py | NearHuscarl/dotfiles | c984e9d53f7a32dc98bdaf8a78788466e3b396ab | [
"BSD-3-Clause"
] | 3 | 2021-02-08T20:24:45.000Z | 2021-04-30T20:38:17.000Z | .config/polybar/weather/weather.py | NearHuscarl/dotfiles | c984e9d53f7a32dc98bdaf8a78788466e3b396ab | [
"BSD-3-Clause"
] | 2 | 2018-03-25T09:01:35.000Z | 2020-04-26T10:12:42.000Z | #!/bin/env python
""" Module to display weather info on polybar """
# -*- coding: utf-8 -*-
import argparse
import datetime
import logging
import os
import time
import requests
import importlib
# pylint: disable=redefined-builtin
from requests import ConnectionError
from requests.exceptions import HTTPError, Timeout
from util import color_polybar, color_bash as cb
class MyInternetIsShitty(Exception):
    """Raised when the weather request fails or returns an unusable body."""
def get_args():
    """Build the command-line parser for this script and parse sys.argv."""
    parser = argparse.ArgumentParser(description='Show current weather on polybar')
    parser.add_argument('log', nargs='?', help='Logging for debugging or not')
    parser.add_argument(
        '-u', '--unit',
        nargs='?',
        default='metric',
        help='unit: metric or imperial. Default: metric',
    )
    return parser.parse_args()
def set_up_logging():
    """Configure debug logging and silence the noisy requests/urllib3 loggers."""
    # FIX: `import importlib` alone does not guarantee the `importlib.util`
    # submodule is loaded, so importlib.util.find_spec could raise
    # AttributeError; import the submodule explicitly before using it.
    import importlib.util
    if importlib.util.find_spec('requests'):
        # Shut up the request module logger
        logging.getLogger("requests").setLevel(logging.WARNING)
        logging.getLogger("urllib3").setLevel(logging.WARNING)
    # NOTE(review): in the original (indentation-mangled) source it is unclear
    # whether basicConfig was inside the `if`; configuring it unconditionally
    # matches the function's stated purpose.
    logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.DEBUG)
def get_day_or_night():
    """Classify the current local hour: 'night' for 18:00-05:59, else 'day'."""
    hour = datetime.datetime.now().hour
    return 'night' if hour >= 18 or hour <= 5 else 'day'
def get_weather_icon(weather_id):
    """Map an OpenWeatherMap condition id to a weather-font icon.

    The icon for a clear sky (id 800) depends on whether it is currently
    day (06:00-17:59) or night. Unrecognised ids yield None.
    """
    hour = datetime.datetime.now().hour
    is_day = not (hour >= 18 or hour <= 5)  # same rule as get_day_or_night()
    cond = {
        'thunderstorm': 200 <= weather_id <= 232,
        'rain': 300 <= weather_id <= 531,
        'snow': 600 <= weather_id <= 622,
        'atmosphere': 701 <= weather_id <= 781,
        'squall': weather_id == 771,
        'tornado': weather_id == 781 or weather_id == 900,
        'clear_day': weather_id == 800 and is_day,
        'clear_night': weather_id == 800 and not is_day,
        'tropical storm': weather_id == 901,
        'hurricane': weather_id == 902,
        'cold': weather_id == 903,
        'hot': weather_id == 904,
        'windy': weather_id == 905,
        'cloudy': 801 <= weather_id <= 804,
        'hail': weather_id == 906,
    }
    # Branch order matters: squall (771) and tornado (781) also fall inside
    # the atmosphere range and are matched by the earlier branch.
    if cond['thunderstorm']:
        return ''
    if cond['rain']:
        return ''
    if cond['snow'] or cond['cold']:
        return ''
    if cond['atmosphere'] or cond['windy']:
        return ''
    if (cond['squall'] or cond['tornado']
            or cond['tropical storm'] or cond['hurricane']):
        return ''
    if cond['clear_day'] or cond['hot']:
        return ''
    if cond['clear_night']:
        return ''
    if cond['cloudy']:
        return ''
    if cond['hail']:
        return ''
    return None
def get_thermo_icon(temp_value, temp_unit):
    """Pick a thermometer icon for the temperature, bucketing in Celsius.

    temp_unit 'F' means temp_value is Fahrenheit and is converted first;
    any other unit is treated as Celsius.
    """
    if temp_unit == 'F':
        # BUG FIX: the original called convert_temp_unit(temp_unit, 'C'),
        # passing the *unit string* as the value, which raised a TypeError.
        # Convert the Fahrenheit value to Celsius instead (same formula as
        # convert_temp_unit(value, 'C')).
        temp_value = round((temp_value - 32) / 1.8)
    if temp_value <= -15:
        return ''
    elif -15 < temp_value <= 0:
        return ''
    elif 0 < temp_value <= 15:
        return ''
    elif 15 < temp_value <= 30:
        return ''
    elif temp_value > 30:
        return ''
def convert_temp_unit(temp_value, temp_unit):
    """Convert temp_value into temp_unit ('C' or 'F'); None for other units."""
    if temp_unit == 'C':
        return round((temp_value - 32) / 1.8)
    if temp_unit == 'F':
        return round(temp_value * 1.8 + 32)
    return None
def get_api_key():
    """Read the OpenWeatherMap API key stored next to this script."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    key_path = os.path.join(script_dir, 'weather_api.txt')
    with open(key_path, 'r') as handle:
        return handle.read().replace('\n', '')
def get_city_id():
    """Pick an OpenWeatherMap city id from the author's weekly schedule.

    From Friday 17:00 through Sunday 16:59 the location is Tan An;
    the rest of the week it is Hai Duong.
    """
    region_code = {
        'TPHCM': 1580578,
        'TPHCM2': 1566083,
        'Hai Duong': 1581326,
        'Tan An': 1567069
    }
    now = datetime.datetime.now()
    hour = now.hour
    weekday = now.strftime('%a')
    in_weekend_window = (
        (weekday == 'Fri' and hour >= 17)
        or weekday == 'Sat'
        or (weekday == 'Sun' and hour < 17)
    )
    return region_code['Tan An'] if in_weekend_window else region_code['Hai Duong']
def update_weather(city_id, units, api_key):
""" Update weather by using openweather api.

Fetch the current weather for city_id and print one polybar-formatted line
(weather icon, description, thermometer icon, temperature) to stdout.
On any network or JSON failure an error icon is printed and
MyInternetIsShitty is raised so the caller can retry sooner.
"""
url = 'http://api.openweathermap.org/data/2.5/weather?id={}&appid={}&units={}'
# NOTE(review): 'K' looks odd here — OpenWeatherMap's 'imperial' units
# return Fahrenheit, not Kelvin; confirm the intended suffix.
temp_unit = 'C' if units == 'metric' else 'K'
error_icon = color_polybar('', 'red')
try:
req = requests.get(url.format(city_id, api_key, units))
try:
description = req.json()['weather'][0]['description'].capitalize()
except ValueError:
# Response body was not valid JSON.
print(error_icon, flush=True)
raise MyInternetIsShitty
temp_value = round(req.json()['main']['temp'])
temp = str(temp_value) + '°' + temp_unit
# NOTE(review): get_thermo_icon compares its unit argument against 'F',
# but `units` here is 'metric'/'imperial' — verify the expected value.
thermo_icon = color_polybar(get_thermo_icon(temp_value, units), 'main')
weather_id = req.json()['weather'][0]['id']
weather_icon = color_polybar(get_weather_icon(weather_id), 'main')
print('{} {} {} {}'.format(weather_icon, description, thermo_icon, temp), flush=True)
except (HTTPError, Timeout, ConnectionError):
print(error_icon, flush=True)
raise MyInternetIsShitty
def main():
""" main function.

Parse arguments, then loop forever: refresh the weather line, retrying
after 3 s on failure and roughly every 12 minutes (700 s) on success.
"""
arg = get_args()
if arg.log == 'debug':
set_up_logging()
units = arg.unit
api_key = get_api_key()
city_id = get_city_id()
while True:
try:
update_weather(city_id, units, api_key)
except MyInternetIsShitty:
logging.info(cb('update failed: ', 'red'))
time.sleep(3)
else:
logging.info(cb('update success', 'green'))
time.sleep(700)
# Script entry point: run the polybar weather refresh loop.
if __name__ == '__main__':
main()
# vim: nofoldenable
| 27.059406 | 94 | 0.662642 |
import argparse
import datetime
import logging
import os
import time
import requests
import importlib
from requests import ConnectionError
from requests.exceptions import HTTPError, Timeout
from util import color_polybar, color_bash as cb
class MyInternetIsShitty(Exception):
pass
def get_args():
parser = argparse.ArgumentParser(description='Show current weather on polybar')
parser.add_argument('log', nargs='?', help='Logging for debugging or not')
parser.add_argument('-u', '--unit', default='metric', nargs='?',
help='unit: metric or imperial. Default: metric')
return parser.parse_args()
def set_up_logging():
if importlib.util.find_spec('requests'):
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.DEBUG)
def get_day_or_night():
hour = int(datetime.datetime.now().strftime('%H'))
if hour >= 18 or hour <= 5:
return 'night'
return 'day'
def get_weather_icon(weather_id):
day_night_status = get_day_or_night()
weather = {
'thunderstorm': 200 <= weather_id <= 232,
'rain': 300 <= weather_id <= 531,
'snow': 600 <= weather_id <= 622,
'atmosphere': 701 <= weather_id <= 781,
'squall': weather_id == 771,
'tornado': weather_id == 781 or weather_id == 900,
'clear_day': weather_id == 800 and day_night_status == 'day',
'clear_night': weather_id == 800 and day_night_status == 'night',
'tropical storm': weather_id == 901,
'hurricane': weather_id == 902,
'cold': weather_id == 903,
'hot': weather_id == 904,
'windy': weather_id == 905,
'cloudy': 801 <= weather_id <= 804,
'hail': weather_id == 906
}
if weather['thunderstorm']:
return ''
elif weather['rain']:
return ''
elif weather['snow'] or weather['cold']:
return ''
elif weather['atmosphere'] or weather['windy']:
return ''
elif (weather['squall'] or
weather['tornado'] or
weather['tropical storm'] or
weather['hurricane']):
return ''
elif weather['clear_day'] or weather['hot']:
return ''
elif weather['clear_night']:
return ''
elif weather['cloudy']:
return ''
elif weather['hail']:
return ''
def get_thermo_icon(temp_value, temp_unit):
if temp_unit == 'F':
temp_value = convert_temp_unit(temp_unit, 'C')
if temp_value <= -15:
return ''
elif -15 < temp_value <= 0:
return ''
elif 0 < temp_value <= 15:
return ''
elif 15 < temp_value <= 30:
return ''
elif temp_value > 30:
return ''
def convert_temp_unit(temp_value, temp_unit):
if temp_unit == 'C':
return round((temp_value - 32) / 1.8)
elif temp_unit == 'F':
return round(temp_value * 1.8 + 32)
def get_api_key():
paren_dir = os.path.dirname(os.path.realpath(__file__))
api_path = os.path.join(paren_dir, 'weather_api.txt')
with open(api_path, 'r') as file:
api_key = file.read().replace('\n', '')
return api_key
def get_city_id():
region_code = {
'TPHCM': 1580578,
'TPHCM2': 1566083,
'Hai Duong': 1581326,
'Tan An': 1567069
}
hour = int(datetime.datetime.now().strftime('%H'))
weekday = datetime.datetime.now().strftime('%a')
if (hour >= 17 and weekday == 'Fri') or weekday == 'Sat' or (hour < 17 and weekday == 'Sun'):
return region_code['Tan An']
return region_code['Hai Duong']
def update_weather(city_id, units, api_key):
url = 'http://api.openweathermap.org/data/2.5/weather?id={}&appid={}&units={}'
temp_unit = 'C' if units == 'metric' else 'K'
error_icon = color_polybar('', 'red')
try:
req = requests.get(url.format(city_id, api_key, units))
try:
description = req.json()['weather'][0]['description'].capitalize()
except ValueError:
print(error_icon, flush=True)
raise MyInternetIsShitty
temp_value = round(req.json()['main']['temp'])
temp = str(temp_value) + '°' + temp_unit
thermo_icon = color_polybar(get_thermo_icon(temp_value, units), 'main')
weather_id = req.json()['weather'][0]['id']
weather_icon = color_polybar(get_weather_icon(weather_id), 'main')
print('{} {} {} {}'.format(weather_icon, description, thermo_icon, temp), flush=True)
except (HTTPError, Timeout, ConnectionError):
print(error_icon, flush=True)
raise MyInternetIsShitty
def main():
arg = get_args()
if arg.log == 'debug':
set_up_logging()
units = arg.unit
api_key = get_api_key()
city_id = get_city_id()
while True:
try:
update_weather(city_id, units, api_key)
except MyInternetIsShitty:
logging.info(cb('update failed: ', 'red'))
time.sleep(3)
else:
logging.info(cb('update success', 'green'))
time.sleep(700)
if __name__ == '__main__':
main()
| true | true |
7901d205c7f7e4d05a3a0cb476882753468eb403 | 14,065 | py | Python | UI_util.py | JiazeWang/lung_nodule_integ_viewer | fc996644d96c8f7fac631345539ce3ca01af7013 | [
"MIT"
] | null | null | null | UI_util.py | JiazeWang/lung_nodule_integ_viewer | fc996644d96c8f7fac631345539ce3ca01af7013 | [
"MIT"
] | null | null | null | UI_util.py | JiazeWang/lung_nodule_integ_viewer | fc996644d96c8f7fac631345539ce3ca01af7013 | [
"MIT"
] | null | null | null | import SimpleITK as sitk
import numpy as np
import torch
import math
import time
import sys
import cv2
from scipy.ndimage.interpolation import zoom
from torch.autograd import Variable
sys.path.append('../lung_nodule_detector')
from training.layers import nms
def load_itk_image(filename):
    """Load a .mhd/.raw CT volume with SimpleITK.

    Returns (image, origin, spacing, isflip): the voxel array plus origin and
    spacing in (z, y, x) order, and isflip=True when the header's
    TransformMatrix is not the identity (volume stored flipped).
    """
    # Read the text header directly to inspect the TransformMatrix field.
    with open(filename) as header:
        header_lines = header.readlines()
    matrix_line = [ln for ln in header_lines if ln.startswith('TransformMatrix')][0]
    transform = np.array(matrix_line.split(' = ')[1].split(' ')).astype('float')
    transform = np.round(transform)
    if np.any(transform != np.array([1, 0, 0, 0, 1, 0, 0, 0, 1])):
        isflip = True
    else:
        isflip = False

    itk_image = sitk.ReadImage(filename)
    numpy_image = sitk.GetArrayFromImage(itk_image)
    # SimpleITK reports (x, y, z); reverse to the (z, y, x) convention.
    numpy_origin = np.array(list(reversed(itk_image.GetOrigin())))
    numpy_spacing = np.array(list(reversed(itk_image.GetSpacing())))

    return numpy_image, numpy_origin, numpy_spacing, isflip
def lumTrans(img):
    """Map HU values in the lung window [-1200, 600] linearly onto uint8 [0, 255]."""
    hu_min, hu_max = -1200., 600.
    scaled = (img - hu_min) / (hu_max - hu_min)
    scaled = np.clip(scaled, 0, 1)  # saturate values outside the window
    return (scaled * 255).astype('uint8')
def resample(imgs, spacing, new_spacing, progressBar, order=2):
    """Resample ``imgs`` from ``spacing`` to ``new_spacing`` (units/voxel).

    Supports 3D volumes (z, y, x) and 4D multi-channel volumes
    (z, y, x, channel); 4D input is resampled channel by channel.

    Args:
        imgs: 3D or 4D numpy array.
        spacing: current voxel spacing, one value per spatial axis.
        new_spacing: desired voxel spacing.
        progressBar: Qt-style widget with ``setValue``; set to 40 after each
            3D volume finishes.
        order: spline interpolation order for ``scipy.ndimage.zoom``.

    Returns:
        ``(resampled, true_spacing)`` where ``true_spacing`` is the spacing
        actually achieved after rounding the target shape to whole voxels.

    Raises:
        ValueError: if ``imgs`` is neither 3D nor 4D.
    """
    print(len(imgs.shape))
    if len(imgs.shape) == 3:
        # Round the target shape to whole voxels, then recompute the spacing
        # that the rounded shape really corresponds to.
        new_shape = np.round(imgs.shape * spacing / new_spacing)
        true_spacing = spacing * imgs.shape / new_shape
        resize_factor = new_shape / imgs.shape
        imgs = zoom(imgs, resize_factor, mode='nearest', order=order)
        progressBar.setValue(40)
        return imgs, true_spacing
    elif len(imgs.shape) == 4:
        n = imgs.shape[-1]
        newimg = []
        for i in range(n):
            channel = imgs[:, :, :, i]
            # Bug fix: the recursive call previously omitted ``progressBar``
            # (and ``order``), so every 4D call raised TypeError.
            newslice, true_spacing = resample(channel, spacing, new_spacing,
                                              progressBar, order=order)
            newimg.append(newslice)
        newimg = np.transpose(np.array(newimg), [1, 2, 3, 0])
        return newimg, true_spacing
    else:
        raise ValueError('wrong shape')
def resample_v1(imgs, spacing, new_spacing, order=2):
    """Resample ``imgs`` to ``new_spacing`` (variant without a progress bar).

    Args:
        imgs: 3D (z, y, x) or 4D (z, y, x, channel) numpy array.
        spacing: current voxel spacing per spatial axis.
        new_spacing: desired voxel spacing.
        order: spline interpolation order for ``scipy.ndimage.zoom``.

    Returns:
        ``(resampled, true_spacing)``; ``true_spacing`` reflects rounding the
        target shape to whole voxels.

    Raises:
        ValueError: for inputs that are neither 3D nor 4D.
    """
    print(len(imgs.shape))
    if len(imgs.shape) == 3:
        new_shape = np.round(imgs.shape * spacing / new_spacing)
        true_spacing = spacing * imgs.shape / new_shape
        resize_factor = new_shape / imgs.shape
        imgs = zoom(imgs, resize_factor, mode='nearest', order=order)
        return imgs, true_spacing
    elif len(imgs.shape) == 4:
        n = imgs.shape[-1]
        newimg = []
        for i in range(n):
            channel = imgs[:, :, :, i]
            # Bug fix: this used to call ``resample`` (which requires a
            # progressBar argument) and raised TypeError on any 4D input;
            # recurse into the progress-bar-free variant instead.
            newslice, true_spacing = resample_v1(channel, spacing, new_spacing,
                                                 order=order)
            newimg.append(newslice)
        newimg = np.transpose(np.array(newimg), [1, 2, 3, 0])
        return newimg, true_spacing
    else:
        raise ValueError('wrong shape')
def split_data(data, stride, split_comber):
    """Pad a volume to a multiple of ``stride`` and split it into patches.

    Also builds a normalized coordinate grid in [-0.5, 0.5] for each output
    cell and splits it with the same layout, so the network receives matching
    (data, coord) patch pairs.

    Args:
        data: (1, z, y, x) numpy array of uint8 intensities.
        stride: network output stride (one coord cell per ``stride`` voxels).
        split_comber: project splitter with ``split``/``side_len``/
            ``max_stride``/``margin``.

    Returns:
        ``(data, coord, nzhw)`` — normalized patch tensor, coordinate patch
        tensor, and the patch-grid layout needed to recombine outputs.
    """
    print(data.shape[1:])
    nz, nh, nw = data.shape[1:]
    # Pad each spatial axis up to the next multiple of ``stride``.
    pz = int(np.ceil(float(nz) / stride)) * stride
    ph = int(np.ceil(float(nh) / stride)) * stride
    pw = int(np.ceil(float(nw) / stride)) * stride
    data = np.pad(data, [[0, 0], [0, pz - nz], [0, ph - nh], [0, pw - nw]], 'constant', constant_values=0)
    # Bug fix: ``shape / stride`` is a float in Python 3 and modern numpy
    # rejects a non-integer ``num`` for linspace; the padded shape is an
    # exact multiple of ``stride``, so floor division is lossless.
    xx, yy, zz = np.meshgrid(np.linspace(-0.5, 0.5, data.shape[1] // stride),
                             np.linspace(-0.5, 0.5, data.shape[2] // stride),
                             np.linspace(-0.5, 0.5, data.shape[3] // stride), indexing='ij')
    coord = np.concatenate([xx[np.newaxis, ...], yy[np.newaxis, ...], zz[np.newaxis, :]], 0).astype('float32')
    data, nzhw = split_comber.split(data)
    # NOTE(review): the splitter parameters below are also float divisions;
    # split_comber is assumed to accept floats here — confirm if it indexes.
    coord2, nzhw2 = split_comber.split(coord,
                                       side_len=split_comber.side_len / stride,
                                       max_stride=split_comber.max_stride / stride,
                                       margin=split_comber.margin / stride)
    assert np.all(nzhw == nzhw2)
    # Normalize uint8 HU-windowed intensities to roughly [-1, 1).
    data = (data.astype(np.float32) - 128) / 128
    return torch.from_numpy(data), torch.from_numpy(coord2), np.array(nzhw)
def convert_prob(pbb):
    """Convert raw detector candidates in place: squash each row's logit
    (column 0) through the logistic sigmoid so it becomes a probability.

    Columns 1-3 (z, y, x) and column 4 (diameter) are left untouched; the
    position/radius re-assignments in the original were no-ops left over
    from a world-coordinate conversion and have been removed.

    Args:
        pbb: iterable of mutable candidate rows ``[logit, z, y, x, diameter]``.

    Returns:
        The same ``pbb`` object, mutated in place.
    """
    for label in pbb:
        label[0] = sigmoid(label[0])
    return pbb
def sigmoid(x):
    """Numerically stable logistic sigmoid.

    The naive ``1 / (1 + exp(-x))`` raises OverflowError for large negative
    ``x`` (``math.exp`` overflows near ``|x| > 709``); branching on the sign
    keeps the exponent non-positive so the exponential can never overflow.

    Args:
        x: scalar logit.

    Returns:
        float in (0, 1).
    """
    if x >= 0:
        return 1.0 / (1.0 + math.exp(-x))
    # For x < 0, exp(x) <= 1, so this form is safe and equivalent.
    z = math.exp(x)
    return z / (1.0 + z)
def predict_nodule(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb, progressBar):
    """Run the detector over patch batches, recombine the outputs, and
    post-process them into a de-duplicated candidate list.

    Args:
        net: detection network on GPU, called as ``net(data, coord)``.
        data: patch tensor produced by ``split_data``.
        coord: matching normalized-coordinate patch tensor.
        nzhw: patch-grid layout used to re-assemble the full volume.
        lbb: ground-truth nodule labels; printed and returned unchanged.
        n_per_run: number of patches per forward pass.
        split_comber: object that recombines per-patch outputs.
        get_pbb: function mapping the combined output map to candidate rows.
        progressBar: Qt-style widget; advanced from 10 toward 90 while running.

    Returns:
        ``(lbb, world_pbb)`` where ``world_pbb`` rows are
        ``[probability, z, y, x, diameter]`` sorted by descending score.
    """
    net.eval()
    total_label = 0
    total_candi = 0
    # Batch boundaries: [0, n_per_run, 2*n_per_run, ..., len(data)].
    splitlist = list(range(0, len(data) + 1, n_per_run))
    if splitlist[-1] != len(data):
        splitlist.append(len(data))
    outputlist = []
    for i in range(len(splitlist) - 1):
        with torch.no_grad():
            inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda()
            inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda()
            output = net(inputdata, inputcoord)
            outputlist.append(output.data.cpu().numpy())
        progressBar.setValue(10 + (80/len(splitlist) * (i+1)))
    output = np.concatenate(outputlist, 0)
    output = split_comber.combine(output, nzhw=nzhw)
    # Operating point taken from a FROC sweep:
    # fps 1.215909091, sens 0.933333333, thres 0.371853054
    thresh = 0.371853054
    pbb, mask = get_pbb(output, thresh, ismask=True)
    # Sort candidates by descending confidence (column 0 holds the logit).
    pbb = pbb[pbb[:, 0].argsort()[::-1]]
    pbb_cand_list = []
    # Greedy de-duplication: drop any candidate whose center lies within
    # 3 voxels of an already-kept (higher-scoring) candidate.
    for cand in pbb:
        is_overlap = False
        for appended in pbb_cand_list:
            minimum_dist = 3
            dist = math.sqrt(
                math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow(
                    appended[3] - cand[3], 2))
            if (dist < minimum_dist):
                is_overlap = True
                break;
        if not is_overlap:
            pbb_cand_list.append(cand)
    pbb_cand_list = np.array(pbb_cand_list)
    # Standard non-maximum suppression at 0.3 overlap.
    pbb_cand_list_nms = nms(pbb_cand_list, 0.3)
    # Convert raw logits to probabilities (positions stay in voxel space).
    world_pbb = convert_prob(pbb_cand_list_nms)
    print("label", len(lbb))
    print("z_pos y_pos x_pos size")
    for i in range(len(lbb)):
        for j in range(len(lbb[i])):
            print(round(lbb[i][j], 2), end='\t')
        print()
    print("candidate", len(world_pbb))
    print("prob z_pos y_pos x_pos size")
    for i in range(len(world_pbb)):
        for j in range(len(world_pbb[i])):
            print(round(world_pbb[i][j], 2), end='\t')
        print()
    total_label += len(lbb)
    total_candi += len(world_pbb)
    return lbb, world_pbb
def predict_nodule_v1(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb):
    """Same pipeline as :func:`predict_nodule` but without progress-bar
    updates: batch the patches through ``net``, recombine, threshold,
    de-duplicate (3-voxel greedy pass + NMS), and convert logits to
    probabilities.

    Returns:
        ``(lbb, world_pbb)`` — ground-truth labels unchanged, and candidate
        rows ``[probability, z, y, x, diameter]`` sorted by score.
    """
    net.eval()
    total_label = 0
    total_candi = 0
    # Batch boundaries: [0, n_per_run, 2*n_per_run, ..., len(data)].
    splitlist = list(range(0, len(data) + 1, n_per_run))
    if splitlist[-1] != len(data):
        splitlist.append(len(data))
    outputlist = []
    for i in range(len(splitlist) - 1):
        with torch.no_grad():
            inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda()
            inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda()
            output = net(inputdata, inputcoord)
            outputlist.append(output.data.cpu().numpy())
    output = np.concatenate(outputlist, 0)
    output = split_comber.combine(output, nzhw=nzhw)
    # Operating point taken from a FROC sweep:
    # fps 1.215909091, sens 0.933333333, thres 0.371853054
    thresh = 0.371853054
    pbb, mask = get_pbb(output, thresh, ismask=True)
    # Sort candidates by descending confidence (column 0 holds the logit).
    pbb = pbb[pbb[:, 0].argsort()[::-1]]
    pbb_cand_list = []
    # Greedy de-duplication: drop any candidate whose center lies within
    # 3 voxels of an already-kept (higher-scoring) candidate.
    for cand in pbb:
        is_overlap = False
        for appended in pbb_cand_list:
            minimum_dist = 3
            dist = math.sqrt(
                math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow(
                    appended[3] - cand[3], 2))
            if (dist < minimum_dist):
                is_overlap = True
                break;
        if not is_overlap:
            pbb_cand_list.append(cand)
    pbb_cand_list = np.array(pbb_cand_list)
    # Standard non-maximum suppression at 0.3 overlap.
    pbb_cand_list_nms = nms(pbb_cand_list, 0.3)
    # Convert raw logits to probabilities (positions stay in voxel space).
    world_pbb = convert_prob(pbb_cand_list_nms)
    print("label", len(lbb))
    print("z_pos y_pos x_pos size")
    for i in range(len(lbb)):
        for j in range(len(lbb[i])):
            print(round(lbb[i][j], 2), end='\t')
        print()
    print("candidate", len(world_pbb))
    print("prob z_pos y_pos x_pos size")
    for i in range(len(world_pbb)):
        for j in range(len(world_pbb[i])):
            print(round(world_pbb[i][j], 2), end='\t')
        print()
    total_label += len(lbb)
    total_candi += len(world_pbb)
    return lbb, world_pbb
def draw_nodule_rect(lbb, world_pbb, img_arr):
    """Draw ground-truth boxes (green) and candidate boxes (red, labeled
    ``c<i>``) onto every slice a nodule spans, in place.

    Args:
        lbb: ground-truth rows ``[z, y, x, diameter]`` in voxel coordinates.
        world_pbb: candidate rows ``[prob, z, y, x, diameter]``.
        img_arr: slice stack (slices, height, width, channels); modified
            in place via OpenCV drawing calls.
    """
    for i in range(len(lbb)):
        label = lbb[i]
        # Enlarge the box 30% beyond the nodule radius so it frames it.
        r = (label[3] / 2) * 1.3
        # cv2 points are (x, y) while the array is indexed (z, y, x).
        # NOTE(review): the clamps use shape[1] for x and shape[2] for y;
        # for non-square slices confirm these bounds are not swapped.
        top_left = (max(int(math.ceil(label[2] - r)), 0),
                    max(int(math.ceil(label[1] - r)), 0))
        bottom_right = (min(int(math.ceil(label[2] + r)), np.shape(img_arr)[1]),
                        min(int(math.ceil(label[1] + r)), np.shape(img_arr)[2]))
        z_range = [max(int(math.ceil(label[0] - r)), 0),
                   min(int(math.ceil(label[0] + r)), np.shape(img_arr)[0])]
        for j in range(z_range[0], z_range[1]):
            cv2.rectangle(img_arr[j], top_left, bottom_right, (0, 255, 0), 1)
    for i in range(len(world_pbb)):
        candidate = world_pbb[i]
        r = (candidate[4] / 2) * 1.3
        top_left = (max(int(math.ceil(candidate[3] - r)), 0),
                    max(int(math.ceil(candidate[2] - r)), 0))
        # Text anchor sits one pixel up-left of the box corner.
        text_top_left = (max(int(math.ceil(candidate[3] - r)) - 1, 0),
                         max(int(math.ceil(candidate[2] - r)) - 1, 0))
        bottom_right = (min(int(math.ceil(candidate[3] + r)), np.shape(img_arr)[1]),
                        min(int(math.ceil(candidate[2] + r)), np.shape(img_arr)[2]))
        z_range = [max(int(math.ceil(candidate[1] - r)), 0),
                   min(int(math.ceil(candidate[1] + r)), np.shape(img_arr)[0])]
        font = cv2.FONT_HERSHEY_SIMPLEX
        for j in range(z_range[0], z_range[1]):
            cv2.rectangle(img_arr[j], top_left, bottom_right, (255, 0, 0), 1)
            cv2.putText(img_arr[j], "c" + str(i), text_top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)
def crop_all(target, img_arr, crop_size=48):
    """Cut a ``crop_size``-cubed window out of a (C, Z, Y, X) volume,
    centered on ``target``'s first three coordinates, zero-padding wherever
    the window sticks out of the volume.

    Returns the crop and a copy of ``target`` shifted into crop-local
    coordinates.
    """
    target = np.copy(target)
    half = int(crop_size / 2)
    starts = [int(round(target[axis])) - half for axis in range(3)]
    # Per-axis zero padding for the parts of the window outside the volume;
    # the channel axis is never padded.
    pad = [[0, 0]] + [
        [max(0, -s), max(0, s + crop_size - img_arr.shape[axis + 1])]
        for axis, s in enumerate(starts)
    ]
    z0, y0, x0 = (max(s, 0) for s in starts)
    z1 = min(starts[0] + crop_size, img_arr.shape[1])
    y1 = min(starts[1] + crop_size, img_arr.shape[2])
    x1 = min(starts[2] + crop_size, img_arr.shape[3])
    crop = np.pad(img_arr[:, z0:z1, y0:y1, x0:x1], pad, 'constant',
                  constant_values=0)
    # Express the target center relative to the crop's origin.
    for axis in range(3):
        target[axis] = target[axis] - starts[axis]
    return crop, target
def crop_nodule_arr_2ch(target, img_arr, crop_size = 48):
    """Build the 2-channel network input for one nodule.

    Channel 0 is a tight crop around the nodule (nodule diameter plus a
    margin), zero-padded back up to ``crop_size``; channel 1 is the full
    ``crop_size`` context crop from :func:`crop_all`.  Intensities are
    normalized from uint8 to roughly [-1, 1).

    Args:
        target: ``[z, y, x, diameter]`` in volume voxel coordinates.
        img_arr: (1, Z, Y, X) volume.
        crop_size: side length of the cubic crops.

    Returns:
        ``(tensor, array)`` — the same (2, crop, crop, crop) data as a torch
        tensor and as the underlying numpy array.
    """
    img_size = [crop_size, crop_size, crop_size]
    # Context crop; ``target`` comes back in crop-local coordinates.
    crop_img, target = crop_all(target, img_arr, crop_size)
    imgs = np.squeeze(crop_img, axis=0)
    z = int(target[0])
    y = int(target[1])
    x = int(target[2])
    print (z, y, x)
    # Tight window: nodule diameter plus a margin of at least 7 voxels
    # (or 40% of the diameter, whichever is larger).
    nodule_size = int(target[3])
    margin = max(7, nodule_size * 0.4)
    radius = int((nodule_size + margin) / 2)
    # Per-side padding amounts for where the tight window leaves the crop.
    s_z_pad = 0
    e_z_pad = 0
    s_y_pad = 0
    e_y_pad = 0
    s_x_pad = 0
    e_x_pad = 0
    s_z = max(0, z - radius)
    if (s_z == 0):
        s_z_pad = -(z - radius)
    e_z = min(np.shape(imgs)[0], z + radius)
    if (e_z == np.shape(imgs)[0]):
        e_z_pad = (z + radius) - np.shape(imgs)[0]
    s_y = max(0, y - radius)
    if (s_y == 0):
        s_y_pad = -(y - radius)
    e_y = min(np.shape(imgs)[1], y + radius)
    if (e_y == np.shape(imgs)[1]):
        e_y_pad = (y + radius) - np.shape(imgs)[1]
    s_x = max(0, x - radius)
    if (s_x == 0):
        s_x_pad = -(x - radius)
    e_x = min(np.shape(imgs)[2], x + radius)
    if (e_x == np.shape(imgs)[2]):
        e_x_pad = (x + radius) - np.shape(imgs)[2]
    # Cut the tight window and restore any clipped border with zeros so the
    # nodule stays centered.
    nodule_img = imgs[s_z:e_z, s_y:e_y, s_x:e_x]
    nodule_img = np.pad(nodule_img, [[s_z_pad, e_z_pad], [s_y_pad, e_y_pad], [s_x_pad, e_x_pad]], 'constant',
                        constant_values=0)
    # Pad the tight window symmetrically back up to ``crop_size``; an odd
    # remainder goes to a randomly chosen side (train-time jitter).
    imgpad_size = [img_size[0] - np.shape(nodule_img)[0],
                   img_size[1] - np.shape(nodule_img)[1],
                   img_size[2] - np.shape(nodule_img)[2]]
    imgpad = []
    imgpad_left = [int(imgpad_size[0] / 2),
                   int(imgpad_size[1] / 2),
                   int(imgpad_size[2] / 2)]
    imgpad_right = [int(imgpad_size[0] / 2),
                    int(imgpad_size[1] / 2),
                    int(imgpad_size[2] / 2)]
    for i in range(3):
        if (imgpad_size[i] % 2 != 0):
            rand = np.random.randint(2)
            if rand == 0:
                imgpad.append([imgpad_left[i], imgpad_right[i] + 1])
            else:
                imgpad.append([imgpad_left[i] + 1, imgpad_right[i]])
        else:
            imgpad.append([imgpad_left[i], imgpad_right[i]])
    padding_crop = np.pad(nodule_img, imgpad, 'constant', constant_values=0)
    padding_crop = np.expand_dims(padding_crop, axis=0)
    # Stack: channel 0 = padded tight crop, channel 1 = context crop.
    crop = np.concatenate((padding_crop, crop_img))
    # Normalize uint8 intensities to roughly [-1, 1).
    crop = (crop.astype(np.float32) - 128) / 128
    return torch.from_numpy(crop), crop
def predict_attribute(attribute_net, crop_img):
    """Run the attribute classification network on a cropped nodule patch.

    Args:
        attribute_net: torch module already moved to the GPU by the caller.
        crop_img: input tensor (2-channel nodule crop).

    Returns:
        The network's raw output tensor.
    """
    attribute_net.eval()
    with torch.no_grad():
        # Bug fix: ``cuda(async=True)`` is a SyntaxError on Python >= 3.7
        # (``async`` became a reserved keyword); the replacement keyword
        # argument is ``non_blocking`` (torch >= 0.4).
        crop_img = Variable(crop_img.cuda(non_blocking=True))
        output = attribute_net(crop_img)
    return output
| 34.55774 | 135 | 0.572129 | import SimpleITK as sitk
import numpy as np
import torch
import math
import time
import sys
import cv2
from scipy.ndimage.interpolation import zoom
from torch.autograd import Variable
sys.path.append('../lung_nodule_detector')
from training.layers import nms
def load_itk_image(filename):
with open(filename) as f:
contents = f.readlines()
line = [k for k in contents if k.startswith('TransformMatrix')][0]
transformM = np.array(line.split(' = ')[1].split(' ')).astype('float')
transformM = np.round(transformM)
if np.any(transformM != np.array([1, 0, 0, 0, 1, 0, 0, 0, 1])):
isflip = True
else:
isflip = False
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing, isflip
def lumTrans(img):
lungwin = np.array([-1200.,600.])
newimg = (img-lungwin[0])/(lungwin[1]-lungwin[0])
newimg[newimg<0]=0
newimg[newimg>1]=1
newimg = (newimg*255).astype('uint8')
return newimg
def resample(imgs, spacing, new_spacing, progressBar, order=2):
print (len(imgs.shape))
if len(imgs.shape)==3:
new_shape = np.round(imgs.shape * spacing / new_spacing)
true_spacing = spacing * imgs.shape / new_shape
resize_factor = new_shape / imgs.shape
imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)
progressBar.setValue(40)
return imgs, true_spacing
elif len(imgs.shape)==4:
n = imgs.shape[-1]
newimg = []
for i in range(n):
slice = imgs[:,:,:,i]
newslice,true_spacing = resample(slice,spacing,new_spacing)
newimg.append(newslice)
newimg=np.transpose(np.array(newimg),[1,2,3,0])
return newimg,true_spacing
else:
raise ValueError('wrong shape')
def resample_v1(imgs, spacing, new_spacing, order=2):
print (len(imgs.shape))
if len(imgs.shape)==3:
new_shape = np.round(imgs.shape * spacing / new_spacing)
true_spacing = spacing * imgs.shape / new_shape
resize_factor = new_shape / imgs.shape
imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)
return imgs, true_spacing
elif len(imgs.shape)==4:
n = imgs.shape[-1]
newimg = []
for i in range(n):
slice = imgs[:,:,:,i]
newslice,true_spacing = resample(slice,spacing,new_spacing)
newimg.append(newslice)
newimg=np.transpose(np.array(newimg),[1,2,3,0])
return newimg,true_spacing
else:
raise ValueError('wrong shape')
def split_data(data, stride, split_comber):
print (data.shape[1:])
nz, nh, nw = data.shape[1:]
pz = int(np.ceil(float(nz) / stride)) * stride
ph = int(np.ceil(float(nh) / stride)) * stride
pw = int(np.ceil(float(nw) / stride)) * stride
data = np.pad(data, [[0, 0], [0, pz - nz], [0, ph - nh], [0, pw - nw]], 'constant', constant_values=0)
xx, yy, zz = np.meshgrid(np.linspace(-0.5, 0.5, data.shape[1] / stride),
np.linspace(-0.5, 0.5, data.shape[2] / stride),
np.linspace(-0.5, 0.5, data.shape[3] / stride), indexing='ij')
coord = np.concatenate([xx[np.newaxis, ...], yy[np.newaxis, ...], zz[np.newaxis, :]], 0).astype('float32')
data, nzhw = split_comber.split(data)
coord2, nzhw2 = split_comber.split(coord,
side_len=split_comber.side_len / stride,
max_stride=split_comber.max_stride / stride,
margin=split_comber.margin / stride)
assert np.all(nzhw == nzhw2)
data = (data.astype(np.float32) - 128) / 128
return torch.from_numpy(data), torch.from_numpy(coord2), np.array(nzhw)
def convert_prob(pbb):
for label in pbb:
pos_ori = label[1:4]
radious_ori = label[4]
label[1:4] = pos_ori
label[4] = radious_ori
label[0] = sigmoid(label[0])
return pbb
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def predict_nodule(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb, progressBar):
net.eval()
total_label = 0
total_candi = 0
splitlist = list(range(0, len(data) + 1, n_per_run))
if splitlist[-1] != len(data):
splitlist.append(len(data))
outputlist = []
for i in range(len(splitlist) - 1):
with torch.no_grad():
inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda()
inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda()
output = net(inputdata, inputcoord)
outputlist.append(output.data.cpu().numpy())
progressBar.setValue(10 + (80/len(splitlist) * (i+1)))
output = np.concatenate(outputlist, 0)
output = split_comber.combine(output, nzhw=nzhw)
thresh = 0.371853054
pbb, mask = get_pbb(output, thresh, ismask=True)
pbb = pbb[pbb[:, 0].argsort()[::-1]]
pbb_cand_list = []
for cand in pbb:
is_overlap = False
for appended in pbb_cand_list:
minimum_dist = 3
dist = math.sqrt(
math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow(
appended[3] - cand[3], 2))
if (dist < minimum_dist):
is_overlap = True
break;
if not is_overlap:
pbb_cand_list.append(cand)
pbb_cand_list = np.array(pbb_cand_list)
pbb_cand_list_nms = nms(pbb_cand_list, 0.3)
world_pbb = convert_prob(pbb_cand_list_nms)
print("label", len(lbb))
print("z_pos y_pos x_pos size")
for i in range(len(lbb)):
for j in range(len(lbb[i])):
print(round(lbb[i][j], 2), end='\t')
print()
print("candidate", len(world_pbb))
print("prob z_pos y_pos x_pos size")
for i in range(len(world_pbb)):
for j in range(len(world_pbb[i])):
print(round(world_pbb[i][j], 2), end='\t')
print()
total_label += len(lbb)
total_candi += len(world_pbb)
return lbb, world_pbb
def predict_nodule_v1(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb):
net.eval()
total_label = 0
total_candi = 0
splitlist = list(range(0, len(data) + 1, n_per_run))
if splitlist[-1] != len(data):
splitlist.append(len(data))
outputlist = []
for i in range(len(splitlist) - 1):
with torch.no_grad():
inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda()
inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda()
output = net(inputdata, inputcoord)
outputlist.append(output.data.cpu().numpy())
output = np.concatenate(outputlist, 0)
output = split_comber.combine(output, nzhw=nzhw)
thresh = 0.371853054
pbb, mask = get_pbb(output, thresh, ismask=True)
pbb = pbb[pbb[:, 0].argsort()[::-1]]
pbb_cand_list = []
for cand in pbb:
is_overlap = False
for appended in pbb_cand_list:
minimum_dist = 3
dist = math.sqrt(
math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow(
appended[3] - cand[3], 2))
if (dist < minimum_dist):
is_overlap = True
break;
if not is_overlap:
pbb_cand_list.append(cand)
pbb_cand_list = np.array(pbb_cand_list)
pbb_cand_list_nms = nms(pbb_cand_list, 0.3)
world_pbb = convert_prob(pbb_cand_list_nms)
print("label", len(lbb))
print("z_pos y_pos x_pos size")
for i in range(len(lbb)):
for j in range(len(lbb[i])):
print(round(lbb[i][j], 2), end='\t')
print()
print("candidate", len(world_pbb))
print("prob z_pos y_pos x_pos size")
for i in range(len(world_pbb)):
for j in range(len(world_pbb[i])):
print(round(world_pbb[i][j], 2), end='\t')
print()
total_label += len(lbb)
total_candi += len(world_pbb)
return lbb, world_pbb
def draw_nodule_rect(lbb, world_pbb, img_arr):
for i in range(len(lbb)):
label = lbb[i]
r = (label[3] / 2) * 1.3
top_left = (max(int(math.ceil(label[2] - r)), 0),
max(int(math.ceil(label[1] - r)), 0))
bottom_right = (min(int(math.ceil(label[2] + r)), np.shape(img_arr)[1]),
min(int(math.ceil(label[1] + r)), np.shape(img_arr)[2]))
z_range = [max(int(math.ceil(label[0] - r)), 0),
min(int(math.ceil(label[0] + r)), np.shape(img_arr)[0])]
for j in range(z_range[0], z_range[1]):
cv2.rectangle(img_arr[j], top_left, bottom_right, (0, 255, 0), 1)
for i in range(len(world_pbb)):
candidate = world_pbb[i]
r = (candidate[4] / 2) * 1.3
top_left = (max(int(math.ceil(candidate[3] - r)), 0),
max(int(math.ceil(candidate[2] - r)), 0))
text_top_left = (max(int(math.ceil(candidate[3] - r)) - 1, 0),
max(int(math.ceil(candidate[2] - r)) - 1, 0))
bottom_right = (min(int(math.ceil(candidate[3] + r)), np.shape(img_arr)[1]),
min(int(math.ceil(candidate[2] + r)), np.shape(img_arr)[2]))
z_range = [max(int(math.ceil(candidate[1] - r)), 0),
min(int(math.ceil(candidate[1] + r)), np.shape(img_arr)[0])]
font = cv2.FONT_HERSHEY_SIMPLEX
for j in range(z_range[0], z_range[1]):
cv2.rectangle(img_arr[j], top_left, bottom_right, (255, 0, 0), 1)
cv2.putText(img_arr[j], "c" + str(i), text_top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)
def crop_all(target, img_arr, crop_size = 48):
target = np.copy(target)
start = []
for i in range(3):
start.append(int(round(target[i])) - int(crop_size / 2))
pad = []
pad.append([0, 0])
for i in range(3):
leftpad = max(0, -start[i])
rightpad = max(0, start[i] + crop_size - img_arr.shape[i + 1])
pad.append([leftpad, rightpad])
crop = img_arr[:,
max(start[0], 0):min(start[0] + crop_size, img_arr.shape[1]),
max(start[1], 0):min(start[1] + crop_size, img_arr.shape[2]),
max(start[2], 0):min(start[2] + crop_size, img_arr.shape[3])]
crop = np.pad(crop, pad, 'constant', constant_values=0)
for i in range(3):
target[i] = target[i] - start[i]
return crop, target
def crop_nodule_arr_2ch(target, img_arr, crop_size = 48):
img_size = [crop_size, crop_size, crop_size]
crop_img, target = crop_all(target, img_arr, crop_size)
imgs = np.squeeze(crop_img, axis=0)
z = int(target[0])
y = int(target[1])
x = int(target[2])
print (z, y, x)
nodule_size = int(target[3])
margin = max(7, nodule_size * 0.4)
radius = int((nodule_size + margin) / 2)
s_z_pad = 0
e_z_pad = 0
s_y_pad = 0
e_y_pad = 0
s_x_pad = 0
e_x_pad = 0
s_z = max(0, z - radius)
if (s_z == 0):
s_z_pad = -(z - radius)
e_z = min(np.shape(imgs)[0], z + radius)
if (e_z == np.shape(imgs)[0]):
e_z_pad = (z + radius) - np.shape(imgs)[0]
s_y = max(0, y - radius)
if (s_y == 0):
s_y_pad = -(y - radius)
e_y = min(np.shape(imgs)[1], y + radius)
if (e_y == np.shape(imgs)[1]):
e_y_pad = (y + radius) - np.shape(imgs)[1]
s_x = max(0, x - radius)
if (s_x == 0):
s_x_pad = -(x - radius)
e_x = min(np.shape(imgs)[2], x + radius)
if (e_x == np.shape(imgs)[2]):
e_x_pad = (x + radius) - np.shape(imgs)[2]
nodule_img = imgs[s_z:e_z, s_y:e_y, s_x:e_x]
nodule_img = np.pad(nodule_img, [[s_z_pad, e_z_pad], [s_y_pad, e_y_pad], [s_x_pad, e_x_pad]], 'constant',
constant_values=0)
imgpad_size = [img_size[0] - np.shape(nodule_img)[0],
img_size[1] - np.shape(nodule_img)[1],
img_size[2] - np.shape(nodule_img)[2]]
imgpad = []
imgpad_left = [int(imgpad_size[0] / 2),
int(imgpad_size[1] / 2),
int(imgpad_size[2] / 2)]
imgpad_right = [int(imgpad_size[0] / 2),
int(imgpad_size[1] / 2),
int(imgpad_size[2] / 2)]
for i in range(3):
if (imgpad_size[i] % 2 != 0):
rand = np.random.randint(2)
if rand == 0:
imgpad.append([imgpad_left[i], imgpad_right[i] + 1])
else:
imgpad.append([imgpad_left[i] + 1, imgpad_right[i]])
else:
imgpad.append([imgpad_left[i], imgpad_right[i]])
padding_crop = np.pad(nodule_img, imgpad, 'constant', constant_values=0)
padding_crop = np.expand_dims(padding_crop, axis=0)
crop = np.concatenate((padding_crop, crop_img))
crop = (crop.astype(np.float32) - 128) / 128
return torch.from_numpy(crop), crop
def predict_attribute(attribute_net, crop_img):
    """Run the attribute classification network on a cropped nodule patch.

    Returns the network's raw output tensor.
    """
    attribute_net.eval()
    with torch.no_grad():
        # Bug fix: ``cuda(async=True)`` is a SyntaxError on Python >= 3.7
        # (``async`` is a reserved keyword); use ``non_blocking`` instead.
        crop_img = Variable(crop_img.cuda(non_blocking=True))
        output = attribute_net(crop_img)
    return output
| false | true |
7901d2c50ce44e216f493b22e5238ee4b6c3f3d1 | 2,222 | py | Python | utils.py | glee1228/segment_temporal_context_aggregation | e5778f848f1cfd89bd1f77beb5e1b38a66a2f13d | [
"Apache-2.0"
] | 1 | 2022-03-15T18:17:45.000Z | 2022-03-15T18:17:45.000Z | utils.py | glee1228/segment_temporal_context_aggregation | e5778f848f1cfd89bd1f77beb5e1b38a66a2f13d | [
"Apache-2.0"
] | null | null | null | utils.py | glee1228/segment_temporal_context_aggregation | e5778f848f1cfd89bd1f77beb5e1b38a66a2f13d | [
"Apache-2.0"
] | null | null | null | import io
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
def resize_axis(tensor, axis, new_size, fill_value=0, random_sampling=False):
    """Force ``tensor.shape[axis]`` to ``new_size``.

    Shorter axes are padded at the end with ``fill_value``.  Longer axes are
    clipped starting at a random offset, or — when ``random_sampling`` is set
    and an offset was drawn — replaced by ``new_size`` indices sampled with
    replacement.

    Args:
        tensor: array-like; converted to a float ``torch.Tensor``.
        axis: dimension to resize.
        new_size: desired length of ``axis``.
        fill_value: value used for any padded entries.
        random_sampling: sample indices instead of taking a contiguous slice.

    Returns:
        The resized tensor.
    """
    tensor = torch.Tensor(tensor)
    dims = list(tensor.shape)
    current = dims[axis]
    # Shape of the trailing fill block (length 0 when no padding is needed).
    fill_dims = dims[:]
    fill_dims[axis] = max(0, new_size - current)
    # Random clip offset; stays 0 whenever the axis already fits.
    offset = np.random.randint(current - new_size) if current > new_size else 0
    keep = min(current, new_size)
    if offset > 0 and random_sampling:
        # Draw ``new_size`` positions with replacement instead of slicing.
        picks = torch.randint(current, (new_size,))
        kept = torch.index_select(tensor, dim=axis, index=picks)
    else:
        kept = torch.narrow(tensor, dim=axis, start=offset, length=keep)
    filler = torch.full(fill_dims, float(fill_value))
    return torch.cat([kept, filler], dim=axis)
class CircleLoss(torch.nn.Module):
    """Circle-style loss implemented as cross-entropy over margin-rescaled
    logits.

    Each logit is weighted by an adaptive factor and shifted by a margin
    before the standard cross-entropy is applied.
    """
    def __init__(self, m=0.25, gamma=256):
        # m: relaxation margin; gamma: scale factor on the rescaled logits.
        super(CircleLoss, self).__init__()
        self.m = m
        self.gamma = gamma
        self.loss = torch.nn.CrossEntropyLoss()
    def forward(self, logits, labels):
        # NOTE(review): ``alpha[labels]`` / ``delta[labels]`` use ``labels``
        # to index into the logit matrix; this assumes ``labels`` selects the
        # positive entries (index tensor / boolean mask layout) — confirm
        # against the callers.
        alpha = torch.clamp_min(logits + self.m, min=0).detach()  # an: negative-pair weight
        alpha[labels] = torch.clamp_min(-logits[labels] + 1 + self.m, min=0).detach()  # ap: positive-pair weight
        delta = torch.ones_like(logits, device=logits.device, dtype=logits.dtype) * self.m  # delta_n
        delta[labels] = 1 - self.m  # delta_p
        return self.loss(alpha * (logits - delta) * self.gamma, labels)
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
def resize_axis(tensor, axis, new_size, fill_value=0, random_sampling=False):
tensor = torch.Tensor(tensor)
shape = list(tensor.shape)
pad_shape = shape[:]
pad_shape[axis] = max(0, new_size - shape[axis])
start = 0 if shape[axis] <= new_size else np.random.randint(
shape[axis] - new_size)
old_length = shape[axis]
shape[axis] = min(shape[axis], new_size)
resized = torch.cat([
torch.index_select(tensor, dim=axis, index=torch.randint(old_length, (new_size,))
) if start > 0 and random_sampling else torch.narrow(tensor, dim=axis, start=start, length=shape[axis]),
torch.Tensor(*pad_shape).fill_(fill_value)
], dim=axis)
return resized
class CircleLoss(torch.nn.Module):
def __init__(self, m=0.25, gamma=256):
super(CircleLoss, self).__init__()
self.m = m
self.gamma = gamma
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, logits, labels):
alpha = torch.clamp_min(logits + self.m, min=0).detach()
alpha[labels] = torch.clamp_min(-logits[labels] + 1 + self.m, min=0).detach()
delta = torch.ones_like(logits, device=logits.device, dtype=logits.dtype) * self.m
delta[labels] = 1 - self.m
return self.loss(alpha * (logits - delta) * self.gamma, labels) | true | true |
7901d3136560dfc43f1dec71eefcb769f082b0a4 | 2,373 | py | Python | trim/montecarlo/options/source.py | drix00/pytrim-montecarlo | 415d4dc5bcbbc6d4206bbfe3fed6c4978a65b926 | [
"Apache-2.0"
] | null | null | null | trim/montecarlo/options/source.py | drix00/pytrim-montecarlo | 415d4dc5bcbbc6d4206bbfe3fed6c4978a65b926 | [
"Apache-2.0"
] | 16 | 2019-09-05T21:25:48.000Z | 2019-09-08T22:05:20.000Z | trim/montecarlo/options/source.py | drix00/pytrim-montecarlo | 415d4dc5bcbbc6d4206bbfe3fed6c4978a65b926 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. py:currentmodule:: trim.montecarlo.source
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>
"""
# Copyright 2019 Hendrix Demers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Standard library modules.
# Third party modules.
# Local modules.
# Project modules.
from trim.montecarlo.math import Point
# Globals and constants variables.
GROUP_SOURCE = "source"
GROUP_POSITIONS = "position (nm)"
GROUP_DIRECTION = "direction"
ATTRIBUTE_KINETIC_ENERGY = "kinetic energy (keV)"
ATTRIBUTE_MASS = "mass (amu)"
ATTRIBUTE_ATOMIC_NUMBER = "atomic number"
class Source:
    """Ion source description (position, direction, energy, species) with
    group-based serialization via :meth:`write` / :meth:`read`.

    ``parent`` in both methods is assumed to be an h5py-like object exposing
    ``require_group`` and ``attrs`` — confirm against callers.
    """
    def __init__(self):
        # Default to Ar at 6 keV
        self.position_nm = Point(0.0, 0.0, 0.0)
        self.direction = Point(0.0, 0.0, -1.0)  # pointing straight down (-z)
        self.kinetic_energy_keV = 6.0
        self.mass_amu = 39.962
        self.atomic_number = 18
    def write(self, parent):
        """Serialize this source under ``parent``."""
        group = parent.require_group(GROUP_SOURCE)
        position_group = group.require_group(GROUP_POSITIONS)
        self.position_nm.write(position_group)
        direction_group = group.require_group(GROUP_DIRECTION)
        self.direction.write(direction_group)
        group.attrs[ATTRIBUTE_KINETIC_ENERGY] = self.kinetic_energy_keV
        group.attrs[ATTRIBUTE_MASS] = self.mass_amu
        group.attrs[ATTRIBUTE_ATOMIC_NUMBER] = self.atomic_number
    def read(self, parent):
        """Populate this source from data previously written under ``parent``."""
        group = parent.require_group(GROUP_SOURCE)
        position_group = group.require_group(GROUP_POSITIONS)
        self.position_nm.read(position_group)
        direction_group = group.require_group(GROUP_DIRECTION)
        self.direction.read(direction_group)
        self.kinetic_energy_keV = group.attrs[ATTRIBUTE_KINETIC_ENERGY]
        self.mass_amu = group.attrs[ATTRIBUTE_MASS]
        self.atomic_number = group.attrs[ATTRIBUTE_ATOMIC_NUMBER]
| 30.423077 | 74 | 0.719343 |
from trim.montecarlo.math import Point
GROUP_SOURCE = "source"
GROUP_POSITIONS = "position (nm)"
GROUP_DIRECTION = "direction"
ATTRIBUTE_KINETIC_ENERGY = "kinetic energy (keV)"
ATTRIBUTE_MASS = "mass (amu)"
ATTRIBUTE_ATOMIC_NUMBER = "atomic number"
class Source:
def __init__(self):
self.position_nm = Point(0.0, 0.0, 0.0)
self.direction = Point(0.0, 0.0, -1.0)
self.kinetic_energy_keV = 6.0
self.mass_amu = 39.962
self.atomic_number = 18
def write(self, parent):
group = parent.require_group(GROUP_SOURCE)
position_group = group.require_group(GROUP_POSITIONS)
self.position_nm.write(position_group)
direction_group = group.require_group(GROUP_DIRECTION)
self.direction.write(direction_group)
group.attrs[ATTRIBUTE_KINETIC_ENERGY] = self.kinetic_energy_keV
group.attrs[ATTRIBUTE_MASS] = self.mass_amu
group.attrs[ATTRIBUTE_ATOMIC_NUMBER] = self.atomic_number
def read(self, parent):
group = parent.require_group(GROUP_SOURCE)
position_group = group.require_group(GROUP_POSITIONS)
self.position_nm.read(position_group)
direction_group = group.require_group(GROUP_DIRECTION)
self.direction.read(direction_group)
self.kinetic_energy_keV = group.attrs[ATTRIBUTE_KINETIC_ENERGY]
self.mass_amu = group.attrs[ATTRIBUTE_MASS]
self.atomic_number = group.attrs[ATTRIBUTE_ATOMIC_NUMBER]
| true | true |
7901d3289ac21da3c40fb0353d39ac702827912e | 4,270 | py | Python | datadog_checks_dev/datadog_checks/dev/tooling/create.py | zaquaz/integrations-core | a7d41ac9b573ae52f13f265d11dc5050885aa62b | [
"BSD-3-Clause"
] | null | null | null | datadog_checks_dev/datadog_checks/dev/tooling/create.py | zaquaz/integrations-core | a7d41ac9b573ae52f13f265d11dc5050885aa62b | [
"BSD-3-Clause"
] | null | null | null | datadog_checks_dev/datadog_checks/dev/tooling/create.py | zaquaz/integrations-core | a7d41ac9b573ae52f13f265d11dc5050885aa62b | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import re
from datetime import datetime
from uuid import uuid4
from .utils import normalize_package_name
from ..utils import (
create_file,
dir_exists,
ensure_parent_dir_exists,
path_join,
read_file,
read_file_binary,
write_file,
write_file_binary
)
TEMPLATES_DIR = path_join(os.path.dirname(os.path.abspath(__file__)), 'templates')
BINARY_EXTENSIONS = ('.png', )
SIMPLE_NAME = r'^\w+$'
def get_valid_templates():
    """Return the names of all available integration templates, sorted."""
    return sorted(os.listdir(TEMPLATES_DIR))
def construct_template_fields(integration_name, repo_choice, **kwargs):
    """Build the placeholder -> value mapping used to render check templates.

    Args:
        integration_name: display name of the new integration.
        repo_choice: ``'core'`` for Datadog-maintained checks (BSD license
            header, Datadog contact info); anything else gets community
            defaults.
        **kwargs: extra fields merged over the defaults.

    Returns:
        dict of template fields (author, emails, class/check names, guid,
        license header, install blurb, support type, tox base dependency).
    """
    normalized_integration_name = normalize_package_name(integration_name)
    # Only capitalize simple one-word names; names with spaces or
    # punctuation are kept verbatim.
    check_name_cap = (
        integration_name.capitalize()
        if re.match(SIMPLE_NAME, integration_name)
        else integration_name
    )
    if repo_choice == 'core':
        author = 'Datadog'
        email = 'help@datadoghq.com'
        email_packages = 'packages@datadoghq.com'
        install_info = (
            'The {check_name_cap} check is included in the [Datadog Agent][2] package, so you do not\n'
            'need to install anything else on your server.'.format(check_name_cap=check_name_cap)
        )
        license_header = (
            '# (C) Datadog, Inc. {year}\n'
            '# All rights reserved\n'
            '# Licensed under a 3-clause BSD style license (see LICENSE)\n'
            .format(year=str(datetime.now().year))
        )
        support_type = 'core'
        tox_base_dep = '../datadog_checks_base[deps]'
    else:
        author = 'U.N. Owen'
        email = email_packages = 'friend@datadog.community'
        install_info = (
            'The {} check is not included in the [Datadog Agent][2] package, so you will\n'
            'need to install it yourself.'.format(check_name_cap)
        )
        license_header = ''
        support_type = 'contrib'
        tox_base_dep = 'datadog-checks-base[deps]'
    config = {
        'author': author,
        # e.g. 'my_check' -> 'MyCheckCheck'
        'check_class': '{}Check'.format(
            ''.join(part.capitalize() for part in normalized_integration_name.split('_'))
        ),
        'check_name': normalized_integration_name,
        'check_name_cap': check_name_cap,
        'email': email,
        'email_packages': email_packages,
        'guid': uuid4(),
        'license_header': license_header,
        'install_info': install_info,
        'repo_choice': repo_choice,
        'support_type': support_type,
        'tox_base_dep': tox_base_dep,
    }
    # Caller-supplied fields win over the defaults above.
    config.update(kwargs)
    return config
def create_template_files(template_name, new_root, config, read=False):
    """Collect File objects for every file under the named template.

    Each template-relative path is re-rooted at *new_root*, with any
    ``{placeholders}`` in path components expanded from *config*.
    Returns an empty list when the template directory does not exist.
    """
    template_root = path_join(TEMPLATES_DIR, template_name)
    collected = []
    if not dir_exists(template_root):
        return collected
    for current_dir, _, filenames in os.walk(template_root):
        for filename in filenames:
            source = path_join(current_dir, filename)
            relative = source.replace(template_root, '')
            destination = '{}{}'.format(new_root, relative.format(**config))
            collected.append(File(destination, source, config, read=read))
    return collected
class File(object):
    """A single output file produced from a template.

    Binary templates (see BINARY_EXTENSIONS) are copied byte-for-byte;
    text templates are rendered with str.format using *config*.
    """
    def __init__(self, file_path, template_path, config, read=False):
        self.file_path = file_path
        self.template_path = template_path
        self.config = config
        # Pick binary vs text I/O helpers based on the template's extension.
        self.binary = template_path.endswith(BINARY_EXTENSIONS)
        self._read = read_file_binary if self.binary else read_file
        self._write = write_file_binary if self.binary else write_file
        # None means "not read yet"; write() then just creates an empty file.
        self.contents = None
        if read:
            self.read()
    def read(self):
        """Load the template from disk, rendering placeholders for text files."""
        contents = self._read(self.template_path)
        if self.binary:
            self.contents = contents
        else:
            self.contents = contents.format(**self.config)
    def write(self):
        """Write the (possibly rendered) contents to ``self.file_path``."""
        if self.contents is None:
            create_file(self.file_path)
        else:
            ensure_parent_dir_exists(self.file_path)
            self._write(self.file_path, self.contents)
| 30.719424 | 103 | 0.622951 |
import os
import re
from datetime import datetime
from uuid import uuid4
from .utils import normalize_package_name
from ..utils import (
create_file,
dir_exists,
ensure_parent_dir_exists,
path_join,
read_file,
read_file_binary,
write_file,
write_file_binary
)
# Directory holding the bundled integration templates (sibling of this module).
TEMPLATES_DIR = path_join(os.path.dirname(os.path.abspath(__file__)), 'templates')
# Files with these extensions are copied verbatim rather than rendered with
# str.format (see File.read further down in this module).
BINARY_EXTENSIONS = ('.png', )
# Integration names matching this pattern are safe to str.capitalize().
SIMPLE_NAME = r'^\w+$'
def get_valid_templates():
    """Return the names of the available templates, sorted alphabetically."""
    return sorted(os.listdir(TEMPLATES_DIR))
def construct_template_fields(integration_name, repo_choice, **kwargs):
    """Build the substitution mapping used to render integration templates.

    Computes author/email/license/install defaults for either the ``core``
    or the community (``contrib``) repository; any extra keyword arguments
    override the computed defaults.  Returns a plain dict.
    """
    package_name = normalize_package_name(integration_name)
    # Only "simple" names (word characters only) are safe to capitalize;
    # anything with punctuation is kept verbatim.
    if re.match(SIMPLE_NAME, integration_name):
        check_name_cap = integration_name.capitalize()
    else:
        check_name_cap = integration_name
    if repo_choice == 'core':
        author = 'Datadog'
        email = 'help@datadoghq.com'
        email_packages = 'packages@datadoghq.com'
        install_info = (
            'The {check_name_cap} check is included in the [Datadog Agent][2] package, so you do not\n'
            'need to install anything else on your server.'.format(check_name_cap=check_name_cap)
        )
        license_header = (
            '# (C) Datadog, Inc. {year}\n'
            '# All rights reserved\n'
            '# Licensed under a 3-clause BSD style license (see LICENSE)\n'
            .format(year=str(datetime.now().year))
        )
        support_type = 'core'
        tox_base_dep = '../datadog_checks_base[deps]'
    else:
        author = 'U.N. Owen'
        email = email_packages = 'friend@datadog.community'
        install_info = (
            'The {} check is not included in the [Datadog Agent][2] package, so you will\n'
            'need to install it yourself.'.format(check_name_cap)
        )
        license_header = ''
        support_type = 'contrib'
        tox_base_dep = 'datadog-checks-base[deps]'
    # e.g. "my_check" -> "MyCheckCheck" class name.
    class_name = '{}Check'.format(
        ''.join(part.capitalize() for part in package_name.split('_'))
    )
    fields = {
        'author': author,
        'check_class': class_name,
        'check_name': package_name,
        'check_name_cap': check_name_cap,
        'email': email,
        'email_packages': email_packages,
        'guid': uuid4(),
        'license_header': license_header,
        'install_info': install_info,
        'repo_choice': repo_choice,
        'support_type': support_type,
        'tox_base_dep': tox_base_dep,
    }
    fields.update(kwargs)
    return fields
def create_template_files(template_name, new_root, config, read=False):
    """Collect File objects for every file under the named template.

    Each template-relative path is re-rooted at *new_root*, with any
    ``{placeholders}`` in path components expanded from *config*.
    Returns an empty list when the template directory does not exist.
    """
    template_root = path_join(TEMPLATES_DIR, template_name)
    collected = []
    if not dir_exists(template_root):
        return collected
    for current_dir, _, filenames in os.walk(template_root):
        for filename in filenames:
            source = path_join(current_dir, filename)
            relative = source.replace(template_root, '')
            destination = '{}{}'.format(new_root, relative.format(**config))
            collected.append(File(destination, source, config, read=read))
    return collected
class File(object):
    """A single output file produced from a template.

    Binary templates (see BINARY_EXTENSIONS) are copied byte-for-byte;
    text templates are rendered with str.format using *config*.
    """
    def __init__(self, file_path, template_path, config, read=False):
        self.file_path = file_path
        self.template_path = template_path
        self.config = config
        # Pick binary vs text I/O helpers based on the template's extension.
        self.binary = template_path.endswith(BINARY_EXTENSIONS)
        self._read = read_file_binary if self.binary else read_file
        self._write = write_file_binary if self.binary else write_file
        # None means "not read yet"; write() then just creates an empty file.
        self.contents = None
        if read:
            self.read()
    def read(self):
        """Load the template from disk, rendering placeholders for text files."""
        contents = self._read(self.template_path)
        if self.binary:
            self.contents = contents
        else:
            self.contents = contents.format(**self.config)
    def write(self):
        """Write the (possibly rendered) contents to ``self.file_path``."""
        if self.contents is None:
            create_file(self.file_path)
        else:
            ensure_parent_dir_exists(self.file_path)
            self._write(self.file_path, self.contents)
| true | true |
7901d4229ad529c05bf757e7cb3dd7ca1b424f4f | 11,995 | py | Python | web_reamer.py | febinrev/web_reamer | 4e584b2e63a34cc29518f42b1c799b979e70e51e | [
"MIT"
] | 1 | 2020-09-03T06:48:32.000Z | 2020-09-03T06:48:32.000Z | web_reamer.py | febinrev/web_reamer | 4e584b2e63a34cc29518f42b1c799b979e70e51e | [
"MIT"
] | null | null | null | web_reamer.py | febinrev/web_reamer | 4e584b2e63a34cc29518f42b1c799b979e70e51e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import os
import re
import argparse
import requests
from bs4 import BeautifulSoup as bs
version=1.1
print("""\033[1;36m
╦ ╦╔═╗╔╗ ╦═╗╔═╗╔═╗╔╦╗╔═╗╦═╗
║║║║╣ ╠╩╗ ╠╦╝║╣ ╠═╣║║║║╣ ╠╦╝
╚╩╝╚═╝╚═╝────╩╚═╚═╝╩ ╩╩ ╩╚═╝╩╚═
🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥
--> Coded by FEBIN 🛡️🌐
\033[1;39m""")
def febrev_fuzz(url):
    """Probe *url* for common admin-panel paths listed in admin-panel.txt.

    Paths answering 200 or 302 are printed.  Connection problems and
    timeouts are reported to the user instead of raising.
    """
    os.system("clear")
    # Read the candidate paths once; the context manager closes the file
    # deterministically (the old code leaked the handle).
    with open("admin-panel.txt", "r+") as feblist:
        adminpages = feblist.read().split()
    print(f"""
  [\033[1;37m+\033[1;39m] STARTED CRAWLING TO FIND ADMIN PANEL OF URL : \033[1;34m{url}
	""")
    try:
        if not (url.startswith("https://") or url.startswith("http://")):
            print("Error : INVALID URL ! URL must start with 'http://' or 'https://'")
            exit()
        # Normalise to a trailing slash once; the two duplicated branches of
        # the old code differed only by a stray space in the banner.
        if not url.endswith("/"):
            url = f"{url}/"
        server = requests.get(url).headers.get('Server')
        print(f"\033[1;37mSERVER Type >> {server}")
        print("\n<----------------------------------------------------------------------------------->")
        print(" ")
        for page in adminpages:
            # BUG FIX: the old code requested f"{url}/{page}" (a double slash,
            # since url already ends with "/") but printed f"{url}{page}".
            # Request and report the same, correctly joined, URL.
            reqresp = requests.get(f"{url}{page}", timeout=10)
            if reqresp.status_code == 200:
                print(f"\033[1;39m FOUND ==> {url}{page} \033[1;34m")
            elif reqresp.status_code == 302:
                # BUG FIX: this print was missing its f-prefix and emitted the
                # literal text "{url}{adminpages[i]}" instead of the values.
                print(f"\033[1;39m FOUND 302 ==> {url}{page} \033[1;34m")
    except requests.exceptions.ConnectionError:
        print("[\033[1;31m-\033[1;39m] Connection to the Server Failed, May be invalid URL or bad Internet connection. Check Your Internet connection,URL and try again\n  ")
    except requests.exceptions.ReadTimeout:
        print("\033[1;31m [\033[1;31m-\033[1;39m] Error : EXECUTION STOPPED DUE TO !TIMED OUT! ERROR, YOUR INTERNET MAY BE DISCONNECTED!!!....EXITTED")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def sub_brute(domain, sublist):
    """Brute-force subdomains of *domain* using the wordlist file *sublist*."""
    if not os.path.isfile(sublist):
        print(f"[\033[1;31m-\033[1;39m] Wordlist {sublist} not found!!")
        exit()
    print(f"[\033[1;37m+\033[1;39m] Subdomain wordlist {sublist} loaded -> OK")
    print("")
    with open(sublist, "r+") as handle:
        candidates = handle.read().split("\n")
    for host in candidates:
        try:
            response = requests.get(f"http://{host}.{domain}")
            print(f"\033[1;39m{host}.{domain} --> \033[1;37m{response.status_code}")
        except requests.exceptions.ConnectionError:
            # Host does not resolve / refuses the connection: skip silently.
            pass
        except UnicodeError:
            # Labels that cannot be IDNA-encoded: skip silently.
            pass
    print("")
    print("[\033[1;37m+\033[1;39m] Finshed!")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def wordlistgen(url, filepath):
    """Build a wordlist from the visible text of *url* and append it to *filepath*.

    Falls back to ``$HOME/fr-wordlist.txt`` when *filepath* cannot be created.
    Fixes over the previous version: stop-word removal no longer aborts early
    (the old loops ran count+1 times, and the resulting ValueError skipped
    the remaining stop words); de-duplication no longer mutates the list
    while iterating over it; the output file is opened once, not per word.
    """
    import requests
    from bs4 import BeautifulSoup
    print("")
    try:
        webpage = requests.get(url)
        pagedata = webpage.text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[-] ERROR CONNECTING THE SERVER...")
        exit()
    # Only human-visible words are interesting: drop script/style bodies.
    for tag in soup(["script", "style"]):
        tag.extract()
    words = soup.get_text().strip().split()
    # Common English filler words carry no value in a targeted wordlist.
    stopwords = {'is', 'was', 'are', 'for', 'the', 'of', 'to'}
    unique_words = sorted(set(words) - stopwords)
    try:
        out = open(filepath, "a+")
    except FileNotFoundError:
        # Missing parent directory: fall back to the user's home directory.
        homedir = os.environ.get('HOME')
        out = open(f"{homedir}/fr-wordlist.txt", "a+")
    with out:
        for word in unique_words:
            out.write("\n" + word)
    if os.path.isfile(filepath):
        print("")
        print(f"\033[1;39m[\033[1;37m+\033[1;39m]Wordlist {filepath} successfully witten")
    else:
        print("\033[1;31m[-]Sorry:Path not Found!! The Path You Specified Doesn't Exist")
        print("So Saved the wordlist as fr-wordlist.txt in the HOME Directory of the current User.....")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def word_analyze(url):
    """Print a word-frequency table (with a bar graph) for the page at *url*.

    Rewritten around collections.Counter: the old implementation removed
    list items while iterating over the list (which skipped words), and its
    stop-word removal loops ran one iteration too many, so the resulting
    ValueError aborted removal of the remaining stop words.
    """
    import requests
    from collections import Counter
    from bs4 import BeautifulSoup
    print("")
    try:
        webpage = requests.get(url)
        pagedata = webpage.text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    # Only human-visible words are interesting: drop script/style bodies.
    for tag in soup(["script", "style"]):
        tag.extract()
    words = soup.get_text().strip().split()
    stopwords = {'is', 'was', 'are', 'for', 'the', 'of', 'to'}
    counts = Counter(word for word in words if word not in stopwords)
    print("\033[1;32m-"*74)
    print("\033[1;32m|        Words         |   count/frequency    |        Graph         | ")
    print("\033[1;32m-"*74)
    for word in sorted(counts):
        count = counts[word]
        print(f"\033[1;34m| {word + ' ' * (22 - len(word)) + '| '}{str(count) +' ' * (22 - len(str(count)))}| \033[1;32m{'█' * count} " )
    print("\033[1;33m-"*74)
def endpoint_harvest(url):
    """Collect and print every href/src endpoint referenced by the page at *url*."""
    print(f"[\033[1;37m+\033[1;39m] Collecting Endpoints / Links from the webpage {url}")
    from bs4 import BeautifulSoup
    print("")
    try:
        webpage = requests.get(url)
        pagedata = webpage.text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    def tidy(raw, prefix):
        # Strip the attribute prefix and stray markup characters.
        for junk in (prefix, "'", ">", '"'):
            raw = raw.replace(junk, "")
        return raw.replace("</", " ")
    for match in re.compile('(?:href=")(.*?)"').findall(pagedata):
        print(tidy(match, "href="))
    for match in re.compile('(?:src=")(.*?)"').findall(pagedata):
        print(tidy(match, "src="))
    print("")
    print("[\033[1;37m+\033[1;39m] Finished!")
def param(url):
    """Print the name attribute of every <input> element on the page at *url*."""
    from bs4 import BeautifulSoup
    print("")
    try:
        webpage = requests.get(url)
        soup = BeautifulSoup(webpage.text, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    input_fields = soup.find_all("input")
    print("[\033[1;37m+\033[1;39m] Extracting Parameters from the WebPage!\n")
    # Renamed from "param" to avoid shadowing the enclosing function's name.
    for field in input_fields:
        print(field.get("name"))
    print("[\033[1;37m+\033[1;39m] Finished!")
# Mode flags: exactly one combination is expected per run (dispatched below).
parser = argparse.ArgumentParser(description='Parse the domain, wordlist etc..')
parser.add_argument('-link',dest='link', action='store_true',help='Extract Endpoints from url!')
parser.add_argument('-admin',dest='admin', action='store_true',help='Find Admin Panel of the given URL !')
parser.add_argument('-sub',dest='sub', action='store_true',help='Subdomain brute force of the given domain !')
parser.add_argument('-param',dest='param', action='store_true',help='Find hidden parameters from the given URL !')
parser.add_argument('-wordlist',dest='wordlist', action='store_true',help='Create targeted wordlist from the given URL !')
parser.add_argument('-analyze',dest='analyze', action='store_true',help='Analyze words and their frequencies from the given URL !')
# Value options shared by the modes above.
parser.add_argument('-u',"--url",dest='url', action='store',help='The URL of the webpage!')
parser.add_argument('-d',"--domain",dest='domain', action='store',help='The domain name for sub domain brute-force!')
# NOTE(review): the help text below looks copy-pasted from -link; -w actually
# supplies the wordlist file for -sub.  Left unchanged (runtime string).
parser.add_argument('-w',"--wordlist",dest='list', action='store',help='Extract Endpoints from url!')
parser.add_argument('-o',"--outfile",dest='outfile', action='store',help='Output file to save the generated wordlist!!')
parser.add_argument('-v',"--version",dest='version', action='store_true',help='Version / Update Check !')
args=parser.parse_args()
# Top-level dispatch: each mode flag routes to its handler after validating
# the URL scheme.  KeyboardInterrupt anywhere exits cleanly.
try:
    if args.link and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            endpoint_harvest(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.admin and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            febrev_fuzz(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.sub and args.domain and args.list:
        # -sub expects a bare domain name, not a URL.
        if args.domain.startswith("http://") or args.domain.startswith("https://"):
            print("[\033[1;31m-\033[1;39m] Expected Domain name not URL!")
            exit()
        else:
            sub_brute(args.domain,args.list)
    elif args.wordlist and args.url and args.outfile:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            wordlistgen(args.url,args.outfile)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.analyze and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            word_analyze(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.param and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            param(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.version:
        print(f"CURRENT VERSION : {version}")
        try:
            # The remote "version" file holds the latest released version number.
            verq=requests.get("http://raw.githubusercontent.com/febinrev/web_reamer/master/version")
            ver=float(verq.text.split()[0])
            if ver > version:
                print(f"[\033[1;37m+\033[1;39m] New Version {ver} of WEB_REAMER is available : https://github.com/febinrev/web_reamer.git")
            else:
                print("[\033[1;37m+\033[1;39m] WEB_REAMER is up-to-date!")
        except requests.exceptions.ConnectionError:
            print("[\033[1;31m-\033[1;39m] Error Connecting github !")
    else:
        # No recognised flag combination: show usage.
        print("""\033[1;33m
Usage:
\033[1;32m1. Endpoint / Link Extraction:
\033[1;39m ./web_reamer.py -link -u http://sample.com/ \033[1;32m
2. Admin Panel fuzzing:
\033[1;39m ./web_reamer.py -admin -u http://sample.com/ \033[1;32m
3. Subdomain Brute Force:
\033[1;39m ./web_reamer.py -sub -d sample.com -w subdomains.txt \033[1;32m
4. Find hidden parameters from webpage:
\033[1;39m ./web_reamer.py -param -u http://sample.com/ \033[1;32m
5. Create Targetted Wordlist from webpage:
\033[1;39m ./web_reamer.py -wordlist -u http://sample.com/ -o outfile_wordlist.txt \033[1;32m
6. Analyze Word frequencies from the WebPage :
\033[1;39m ./web_reamer.py -analyze -u http://sample.com/ \033[1;32m
7. Help :
\033[1;39m ./web_reamer.py -h \033[1;32m
\033[1;39m ./web_reamer.py --help \033[1;32m
8. Version / Update Check :
\033[1;39m ./web_reamer.py -v \033[1;32m
\033[1;39m ./web_reamer.py --version \033[1;32m
""")
except KeyboardInterrupt:
    print("\n\033[1;39m[\033[1;31m-\033[1;39m] User Interruption! Exit!")
    exit()
| 31.816976 | 166 | 0.656857 |
import sys
import os
import re
import argparse
import requests
from bs4 import BeautifulSoup as bs
version=1.1
print("""\033[1;36m
╦ ╦╔═╗╔╗ ╦═╗╔═╗╔═╗╔╦╗╔═╗╦═╗
║║║║╣ ╠╩╗ ╠╦╝║╣ ╠═╣║║║║╣ ╠╦╝
╚╩╝╚═╝╚═╝────╩╚═╚═╝╩ ╩╩ ╩╚═╝╩╚═
🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥
--> Coded by FEBIN 🛡️🌐
\033[1;39m""")
def febrev_fuzz(url):
    """Probe *url* for common admin-panel paths listed in admin-panel.txt.

    Paths answering 200 or 302 are printed.  Connection problems and
    timeouts are reported to the user instead of raising.
    """
    os.system("clear")
    # Read the candidate paths once; the context manager closes the file
    # deterministically (the old code leaked the handle).
    with open("admin-panel.txt", "r+") as feblist:
        adminpages = feblist.read().split()
    print(f"""
  [\033[1;37m+\033[1;39m] STARTED CRAWLING TO FIND ADMIN PANEL OF URL : \033[1;34m{url}
	""")
    try:
        if not (url.startswith("https://") or url.startswith("http://")):
            print("Error : INVALID URL ! URL must start with 'http://' or 'https://'")
            exit()
        # Normalise to a trailing slash once; the two duplicated branches of
        # the old code differed only by a stray space in the banner.
        if not url.endswith("/"):
            url = f"{url}/"
        server = requests.get(url).headers.get('Server')
        print(f"\033[1;37mSERVER Type >> {server}")
        print("\n<----------------------------------------------------------------------------------->")
        print(" ")
        for page in adminpages:
            # BUG FIX: the old code requested f"{url}/{page}" (a double slash,
            # since url already ends with "/") but printed f"{url}{page}".
            # Request and report the same, correctly joined, URL.
            reqresp = requests.get(f"{url}{page}", timeout=10)
            if reqresp.status_code == 200:
                print(f"\033[1;39m FOUND ==> {url}{page} \033[1;34m")
            elif reqresp.status_code == 302:
                # BUG FIX: this print was missing its f-prefix and emitted the
                # literal text "{url}{adminpages[i]}" instead of the values.
                print(f"\033[1;39m FOUND 302 ==> {url}{page} \033[1;34m")
    except requests.exceptions.ConnectionError:
        print("[\033[1;31m-\033[1;39m] Connection to the Server Failed, May be invalid URL or bad Internet connection. Check Your Internet connection,URL and try again\n  ")
    except requests.exceptions.ReadTimeout:
        print("\033[1;31m [\033[1;31m-\033[1;39m] Error : EXECUTION STOPPED DUE TO !TIMED OUT! ERROR, YOUR INTERNET MAY BE DISCONNECTED!!!....EXITTED")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def sub_brute(domain, sublist):
    """Brute-force subdomains of *domain* using the wordlist file *sublist*."""
    if not os.path.isfile(sublist):
        print(f"[\033[1;31m-\033[1;39m] Wordlist {sublist} not found!!")
        exit()
    print(f"[\033[1;37m+\033[1;39m] Subdomain wordlist {sublist} loaded -> OK")
    print("")
    with open(sublist, "r+") as handle:
        candidates = handle.read().split("\n")
    for host in candidates:
        try:
            response = requests.get(f"http://{host}.{domain}")
            print(f"\033[1;39m{host}.{domain} --> \033[1;37m{response.status_code}")
        except requests.exceptions.ConnectionError:
            # Host does not resolve / refuses the connection: skip silently.
            pass
        except UnicodeError:
            # Labels that cannot be IDNA-encoded: skip silently.
            pass
    print("")
    print("[\033[1;37m+\033[1;39m] Finshed!")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def wordlistgen(url, filepath):
    """Build a wordlist from the visible text of *url* and append it to *filepath*.

    Falls back to ``$HOME/fr-wordlist.txt`` when *filepath* cannot be created.
    Fixes over the previous version: stop-word removal no longer aborts early
    (the old loops ran count+1 times, and the resulting ValueError skipped
    the remaining stop words); de-duplication no longer mutates the list
    while iterating over it; the output file is opened once, not per word.
    """
    import requests
    from bs4 import BeautifulSoup
    print("")
    try:
        webpage = requests.get(url)
        pagedata = webpage.text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[-] ERROR CONNECTING THE SERVER...")
        exit()
    # Only human-visible words are interesting: drop script/style bodies.
    for tag in soup(["script", "style"]):
        tag.extract()
    words = soup.get_text().strip().split()
    # Common English filler words carry no value in a targeted wordlist.
    stopwords = {'is', 'was', 'are', 'for', 'the', 'of', 'to'}
    unique_words = sorted(set(words) - stopwords)
    try:
        out = open(filepath, "a+")
    except FileNotFoundError:
        # Missing parent directory: fall back to the user's home directory.
        homedir = os.environ.get('HOME')
        out = open(f"{homedir}/fr-wordlist.txt", "a+")
    with out:
        for word in unique_words:
            out.write("\n" + word)
    if os.path.isfile(filepath):
        print("")
        print(f"\033[1;39m[\033[1;37m+\033[1;39m]Wordlist {filepath} successfully witten")
    else:
        print("\033[1;31m[-]Sorry:Path not Found!! The Path You Specified Doesn't Exist")
        print("So Saved the wordlist as fr-wordlist.txt in the HOME Directory of the current User.....")
    print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def word_analyze(url):
    """Print a word-frequency table (with a bar graph) for the page at *url*.

    Rewritten around collections.Counter: the old implementation removed
    list items while iterating over the list (which skipped words), and its
    stop-word removal loops ran one iteration too many, so the resulting
    ValueError aborted removal of the remaining stop words.
    """
    import requests
    from collections import Counter
    from bs4 import BeautifulSoup
    print("")
    try:
        webpage = requests.get(url)
        pagedata = webpage.text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    # Only human-visible words are interesting: drop script/style bodies.
    for tag in soup(["script", "style"]):
        tag.extract()
    words = soup.get_text().strip().split()
    stopwords = {'is', 'was', 'are', 'for', 'the', 'of', 'to'}
    counts = Counter(word for word in words if word not in stopwords)
    print("\033[1;32m-"*74)
    print("\033[1;32m|        Words         |   count/frequency    |        Graph         | ")
    print("\033[1;32m-"*74)
    for word in sorted(counts):
        count = counts[word]
        print(f"\033[1;34m| {word + ' ' * (22 - len(word)) + '| '}{str(count) +' ' * (22 - len(str(count)))}| \033[1;32m{'█' * count} " )
    print("\033[1;33m-"*74)
def endpoint_harvest(url):
    """Collect and print every href/src endpoint referenced by the page at *url*."""
    print(f"[\033[1;37m+\033[1;39m] Collecting Endpoints / Links from the webpage {url}")
    from bs4 import BeautifulSoup
    print("")
    try:
        webpage = requests.get(url)
        pagedata = webpage.text
        soup = BeautifulSoup(pagedata, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    def tidy(raw, prefix):
        # Strip the attribute prefix and stray markup characters.
        for junk in (prefix, "'", ">", '"'):
            raw = raw.replace(junk, "")
        return raw.replace("</", " ")
    for match in re.compile('(?:href=")(.*?)"').findall(pagedata):
        print(tidy(match, "href="))
    for match in re.compile('(?:src=")(.*?)"').findall(pagedata):
        print(tidy(match, "src="))
    print("")
    print("[\033[1;37m+\033[1;39m] Finished!")
def param(url):
    """Print the name attribute of every <input> element on the page at *url*."""
    from bs4 import BeautifulSoup
    print("")
    try:
        webpage = requests.get(url)
        soup = BeautifulSoup(webpage.text, "html.parser")
    except requests.exceptions.ConnectionError:
        print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
        exit()
    input_fields = soup.find_all("input")
    print("[\033[1;37m+\033[1;39m] Extracting Parameters from the WebPage!\n")
    # Renamed from "param" to avoid shadowing the enclosing function's name.
    for field in input_fields:
        print(field.get("name"))
    print("[\033[1;37m+\033[1;39m] Finished!")
# Mode flags: exactly one combination is expected per run (dispatched below).
parser = argparse.ArgumentParser(description='Parse the domain, wordlist etc..')
parser.add_argument('-link',dest='link', action='store_true',help='Extract Endpoints from url!')
parser.add_argument('-admin',dest='admin', action='store_true',help='Find Admin Panel of the given URL !')
parser.add_argument('-sub',dest='sub', action='store_true',help='Subdomain brute force of the given domain !')
parser.add_argument('-param',dest='param', action='store_true',help='Find hidden parameters from the given URL !')
parser.add_argument('-wordlist',dest='wordlist', action='store_true',help='Create targeted wordlist from the given URL !')
parser.add_argument('-analyze',dest='analyze', action='store_true',help='Analyze words and their frequencies from the given URL !')
# Value options shared by the modes above.
parser.add_argument('-u',"--url",dest='url', action='store',help='The URL of the webpage!')
parser.add_argument('-d',"--domain",dest='domain', action='store',help='The domain name for sub domain brute-force!')
# NOTE(review): the help text below looks copy-pasted from -link; -w actually
# supplies the wordlist file for -sub.  Left unchanged (runtime string).
parser.add_argument('-w',"--wordlist",dest='list', action='store',help='Extract Endpoints from url!')
parser.add_argument('-o',"--outfile",dest='outfile', action='store',help='Output file to save the generated wordlist!!')
parser.add_argument('-v',"--version",dest='version', action='store_true',help='Version / Update Check !')
args=parser.parse_args()
# Top-level dispatch: each mode flag routes to its handler after validating
# the URL scheme.  KeyboardInterrupt anywhere exits cleanly.
try:
    if args.link and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            endpoint_harvest(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.admin and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            febrev_fuzz(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.sub and args.domain and args.list:
        # -sub expects a bare domain name, not a URL.
        if args.domain.startswith("http://") or args.domain.startswith("https://"):
            print("[\033[1;31m-\033[1;39m] Expected Domain name not URL!")
            exit()
        else:
            sub_brute(args.domain,args.list)
    elif args.wordlist and args.url and args.outfile:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            wordlistgen(args.url,args.outfile)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.analyze and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            word_analyze(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.param and args.url:
        if args.url.startswith("http://") or args.url.startswith("https://"):
            param(args.url)
        else:
            print("[\033[1;31m-\033[1;39m] Invalid URL !")
            exit()
    elif args.version:
        print(f"CURRENT VERSION : {version}")
        try:
            # The remote "version" file holds the latest released version number.
            verq=requests.get("http://raw.githubusercontent.com/febinrev/web_reamer/master/version")
            ver=float(verq.text.split()[0])
            if ver > version:
                print(f"[\033[1;37m+\033[1;39m] New Version {ver} of WEB_REAMER is available : https://github.com/febinrev/web_reamer.git")
            else:
                print("[\033[1;37m+\033[1;39m] WEB_REAMER is up-to-date!")
        except requests.exceptions.ConnectionError:
            print("[\033[1;31m-\033[1;39m] Error Connecting github !")
    else:
        # No recognised flag combination: show usage.
        print("""\033[1;33m
Usage:
\033[1;32m1. Endpoint / Link Extraction:
\033[1;39m ./web_reamer.py -link -u http://sample.com/ \033[1;32m
2. Admin Panel fuzzing:
\033[1;39m ./web_reamer.py -admin -u http://sample.com/ \033[1;32m
3. Subdomain Brute Force:
\033[1;39m ./web_reamer.py -sub -d sample.com -w subdomains.txt \033[1;32m
4. Find hidden parameters from webpage:
\033[1;39m ./web_reamer.py -param -u http://sample.com/ \033[1;32m
5. Create Targetted Wordlist from webpage:
\033[1;39m ./web_reamer.py -wordlist -u http://sample.com/ -o outfile_wordlist.txt \033[1;32m
6. Analyze Word frequencies from the WebPage :
\033[1;39m ./web_reamer.py -analyze -u http://sample.com/ \033[1;32m
7. Help :
\033[1;39m ./web_reamer.py -h \033[1;32m
\033[1;39m ./web_reamer.py --help \033[1;32m
8. Version / Update Check :
\033[1;39m ./web_reamer.py -v \033[1;32m
\033[1;39m ./web_reamer.py --version \033[1;32m
""")
except KeyboardInterrupt:
    print("\n\033[1;39m[\033[1;31m-\033[1;39m] User Interruption! Exit!")
    exit()
| true | true |
7901d4e84e0f30b763cbb5693063eee5af659b6b | 1,255 | py | Python | vitalstyles/cli.py | appressoas/vitalstyles | e91de56c2f6402bcd8703d71abfcd505ac1a5901 | [
"BSD-3-Clause"
] | null | null | null | vitalstyles/cli.py | appressoas/vitalstyles | e91de56c2f6402bcd8703d71abfcd505ac1a5901 | [
"BSD-3-Clause"
] | null | null | null | vitalstyles/cli.py | appressoas/vitalstyles | e91de56c2f6402bcd8703d71abfcd505ac1a5901 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import logging
from pprint import pformat
from . import guide
from . import settings
log = logging.getLogger(__name__)
def cli(settingsobject=None):
    """Entry point: parse arguments, configure logging, render the style guide.

    :param settingsobject: Optional pre-built ``settings.Settings`` instance.
        When given, the ``--settingsfile`` option is not exposed and the
        provided object is used directly.
    """
    parser = argparse.ArgumentParser(description='Create a CSS/LESS/SASS style guide.')
    if not settingsobject:
        parser.add_argument('-f', '--settingsfile',
            dest='settingsfile', default='vitalstyles.json',
            help='Path to settings file. Defaults to "vitalstyles.json".')
    parser.add_argument('-l', '--loglevel',
        dest='loglevel', default='INFO',
        choices=['DEBUG', 'INFO', 'ERROR'], help='Loglevel.')
    args = parser.parse_args()
    loglevel = getattr(logging, args.loglevel)
    logging.basicConfig(
        format='[%(name)s] %(levelname)s: %(message)s',
        level=loglevel
    )
    if loglevel > logging.DEBUG:
        # The markdown library is chatty at DEBUG; quiet it unless debugging.
        markdownlogger = logging.getLogger('MARKDOWN')
        markdownlogger.setLevel(logging.WARNING)
    if not settingsobject:
        settingsobject = settings.Settings(args.settingsfile)
    # CONSISTENCY FIX: use the module-level logger defined above instead of
    # logging on the root logger (the old code called logging.debug).
    log.debug('Creating vitalstyles styleguide with the following settings:\n%s',
              pformat(settingsobject.settings))
    guide.Guide(settingsobject).render()
if __name__ == '__main__':
    cli()
| 29.880952 | 87 | 0.67012 | import argparse
import logging
from pprint import pformat
from . import guide
from . import settings
log = logging.getLogger(__name__)
def cli(settingsobject=None):
    """Entry point: parse arguments, configure logging, render the style guide.

    :param settingsobject: Optional pre-built ``settings.Settings`` instance.
        When given, the ``--settingsfile`` option is not exposed and the
        provided object is used directly.
    """
    parser = argparse.ArgumentParser(description='Create a CSS/LESS/SASS style guide.')
    if not settingsobject:
        parser.add_argument('-f', '--settingsfile',
            dest='settingsfile', default='vitalstyles.json',
            help='Path to settings file. Defaults to "vitalstyles.json".')
    parser.add_argument('-l', '--loglevel',
        dest='loglevel', default='INFO',
        choices=['DEBUG', 'INFO', 'ERROR'], help='Loglevel.')
    args = parser.parse_args()
    loglevel = getattr(logging, args.loglevel)
    logging.basicConfig(
        format='[%(name)s] %(levelname)s: %(message)s',
        level=loglevel
    )
    if loglevel > logging.DEBUG:
        # The markdown library is chatty at DEBUG; quiet it unless debugging.
        markdownlogger = logging.getLogger('MARKDOWN')
        markdownlogger.setLevel(logging.WARNING)
    if not settingsobject:
        settingsobject = settings.Settings(args.settingsfile)
    # CONSISTENCY FIX: use the module-level logger defined above instead of
    # logging on the root logger (the old code called logging.debug).
    log.debug('Creating vitalstyles styleguide with the following settings:\n%s',
              pformat(settingsobject.settings))
    guide.Guide(settingsobject).render()
if __name__ == '__main__':
    cli()
| true | true |
7901d4f95411ca6514dce907d8c06b920e4dc8c7 | 3,672 | py | Python | infoblox_netmri/api/remote/models/spm_history_end_host_history_grid_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/spm_history_end_host_history_grid_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | infoblox_netmri/api/remote/models/spm_history_end_host_history_grid_remote.py | IngmarVG-IB/infoblox-netmri | b0c725fd64aee1890d83917d911b89236207e564 | [
"Apache-2.0"
] | null | null | null | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class SpmHistoryEndHostHistoryGridRemote(RemoteModel):
"""
This table lists the end host history within the user specified period of time for a given end host.
| ``id:`` The internal NetMRI identifier of the grid entry.
| ``attribute type:`` number
| ``FirstSeen:`` The timestamp of when NetMRI first discovered this end host.
| ``attribute type:`` datetime
| ``LastSeen:`` The timestamp of when NetMRI last polled data from this end host.
| ``attribute type:`` datetime
| ``HostIPNumeric:`` The numerical value of the end host IP address.
| ``attribute type:`` number
| ``HostIPAddress:`` The management IP address of the end host, in dotted (or colon-delimited for IPv6) format.
| ``attribute type:`` string
| ``HostName:`` The NetMRI name of the end host; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
| ``attribute type:`` string
| ``DeviceID:`` The NetMRI internal identifier for the switch.
| ``attribute type:`` number
| ``DeviceType:`` The NetMRI-determined device type of the switch.
| ``attribute type:`` string
| ``DeviceName:`` The NetMRI name of the switch; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
| ``attribute type:`` string
| ``InterfaceID:`` The internal NetMRI identifier for the interface on the switch configured with this address.
| ``attribute type:`` number
| ``ifIndex:`` The SNMP interface index of the interface on the switch configured with this address.
| ``attribute type:`` string
| ``Interface:`` The interface on the switch configured with this address.
| ``attribute type:`` string
| ``ifMAC:`` The interface Media Access Controller (MAC) address of this interface.
| ``attribute type:`` string
| ``ifOperStatus:`` The operational status (up/down) of this interface.
| ``attribute type:`` string
| ``VlanIndex:`` The numerical VLAN number (VLAN ID).
| ``attribute type:`` number
| ``VlanName:`` The name of the VLAN on the root bridge.
| ``attribute type:`` string
| ``VlanID:`` The internal NetMRI identifier of the VLAN.
| ``attribute type:`` number
| ``VTPDomain:`` Management domain name if VLAN is VTP managed.
| ``attribute type:`` string
| ``Network:`` The name of the Network View associated.
| ``attribute type:`` string
| ``VirtualNetworkID:`` Internal identifier for the network view.
| ``attribute type:`` number
| ``HostMAC:`` The MAC Address of the end host.
| ``attribute type:`` string
"""
properties = ("id",
"FirstSeen",
"LastSeen",
"HostIPNumeric",
"HostIPAddress",
"HostName",
"DeviceID",
"DeviceType",
"DeviceName",
"InterfaceID",
"ifIndex",
"Interface",
"ifMAC",
"ifOperStatus",
"VlanIndex",
"VlanName",
"VlanID",
"VTPDomain",
"Network",
"VirtualNetworkID",
"HostMAC",
)
| 30.857143 | 157 | 0.566176 | from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class SpmHistoryEndHostHistoryGridRemote(RemoteModel):
properties = ("id",
"FirstSeen",
"LastSeen",
"HostIPNumeric",
"HostIPAddress",
"HostName",
"DeviceID",
"DeviceType",
"DeviceName",
"InterfaceID",
"ifIndex",
"Interface",
"ifMAC",
"ifOperStatus",
"VlanIndex",
"VlanName",
"VlanID",
"VTPDomain",
"Network",
"VirtualNetworkID",
"HostMAC",
)
| true | true |
7901d50f7fedfeb739023373fe99074d27094808 | 1,584 | py | Python | app/integrations/IntegrationDiscordDriver.py | josephmancuso/gbaleague-masonite2 | b3dd5ec3f20c07eaabcc3129b0c50379a946a82b | [
"MIT"
] | null | null | null | app/integrations/IntegrationDiscordDriver.py | josephmancuso/gbaleague-masonite2 | b3dd5ec3f20c07eaabcc3129b0c50379a946a82b | [
"MIT"
] | 3 | 2018-07-25T17:36:43.000Z | 2020-01-06T18:52:51.000Z | app/integrations/IntegrationDiscordDriver.py | josephmancuso/gbaleague-masonite2 | b3dd5ec3f20c07eaabcc3129b0c50379a946a82b | [
"MIT"
] | null | null | null | import requests
import os
class IntegrationDiscordDriver:
_scope = ''
_state = ''
def scopes(self, scopes):
pass
def send(self, request, state='', scopes=('identify',)):
self._scope = scopes
self._state = state
return request.redirect('https://discordapp.com/api/oauth2/authorize?response_type=code&client_id={}&scope={}&state={}&redirect_uri={}'.format(
os.getenv('DISCORD_CLIENT'),
' '.join(self._scope),
self._state,
os.getenv('DISCORD_REDIRECT'),
))
def user(self, request):
data = {
'client_id': os.getenv('DISCORD_CLIENT'),
'client_secret': os.getenv('DISCORD_SECRET'),
'grant_type': 'authorization_code',
'code': request.input('code'),
'redirect_uri': os.getenv('DISCORD_REDIRECT')
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
return requests.post('https://discordapp.com/api/oauth2/token', data, headers).json()
def refresh(self, refresh_token):
data = {
'client_id': os.getenv('DISCORD_CLIENT'),
'client_secret': os.getenv('DISCORD_SECRET'),
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
'redirect_uri': os.getenv('DISCORD_REDIRECT')
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
return requests.post('https://discordapp.com/api/oauth2/token', data, headers).json()
| 31.058824 | 151 | 0.576389 | import requests
import os
class IntegrationDiscordDriver:
_scope = ''
_state = ''
def scopes(self, scopes):
pass
def send(self, request, state='', scopes=('identify',)):
self._scope = scopes
self._state = state
return request.redirect('https://discordapp.com/api/oauth2/authorize?response_type=code&client_id={}&scope={}&state={}&redirect_uri={}'.format(
os.getenv('DISCORD_CLIENT'),
' '.join(self._scope),
self._state,
os.getenv('DISCORD_REDIRECT'),
))
def user(self, request):
data = {
'client_id': os.getenv('DISCORD_CLIENT'),
'client_secret': os.getenv('DISCORD_SECRET'),
'grant_type': 'authorization_code',
'code': request.input('code'),
'redirect_uri': os.getenv('DISCORD_REDIRECT')
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
return requests.post('https://discordapp.com/api/oauth2/token', data, headers).json()
def refresh(self, refresh_token):
data = {
'client_id': os.getenv('DISCORD_CLIENT'),
'client_secret': os.getenv('DISCORD_SECRET'),
'grant_type': 'refresh_token',
'refresh_token': refresh_token,
'redirect_uri': os.getenv('DISCORD_REDIRECT')
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
return requests.post('https://discordapp.com/api/oauth2/token', data, headers).json()
| true | true |
7901d53d2e08763fe745142794119729afd1c8b7 | 15,783 | py | Python | wiselib2/Noise.py | WISE-Project/wiselib2 | 9daf7b3b72e81d154fe094c05000406ee203c3de | [
"MIT"
] | null | null | null | wiselib2/Noise.py | WISE-Project/wiselib2 | 9daf7b3b72e81d154fe094c05000406ee203c3de | [
"MIT"
] | null | null | null | wiselib2/Noise.py | WISE-Project/wiselib2 | 9daf7b3b72e81d154fe094c05000406ee203c3de | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 07 14:08:31 2016
@author: Mic
"""
from __future__ import division
from wiselib2.must import *
import numpy as np
import wiselib2.Rayman as rm
Gauss1d = lambda x ,y : None
from scipy import interpolate as interpolate
from matplotlib import pyplot as plt
class PsdFuns:
'''
Ensemble of possible Psd Functions.
Each element is a callable Psd.
Most used are
PsdFuns.PowerLaw(x,a,b)
PsdFuns.Interp(x, xData, yData)
'''
@staticmethod
def Flat(x, *args):
N = len(x)
return np.zeros([1,N]) +1
@staticmethod
def PowerLaw(x,a,b):
return a*x**b
@staticmethod
def Gaussian(x,sigma, x0=0):
return np.exp(-0.5 * (x-x0)**2/sigma**2)
@staticmethod
def Interp(x, xData, yData):
f = interpolate.interp1d(xData, yData)
return f(x)
def PsdFun2Noise_1d(N,dx, PsdFun, PsdArgs):
'''
Generates a noise pattern based an the Power spectral density returned
by PsdFun
'''
x = np.arange(0,N//2+1, dx)
yHalf = PsdFun(x, *PsdArgs)
y = Psd2NoisePattern_1d(yHalf, Semiaxis = True )
return x,y
#============================================================================
# FUN: PsdArray2Noise_1d_v2
#============================================================================
def PsdArray2Noise_1d_v2(f_in, Psd_in, L_mm,N):
'''
Returns meters
'''
from scipy import interpolate
log=np.log
fft = np.fft.fft
fftshift = np.fft.fftshift
ff = f_in
yy = Psd_in
L = L_mm
N = int(N)
N2 = int(N//2)
L =300 # (mm)
L_um = L*1e3
L_nm = L*1e6
fMin = 1/L_um
##vecchia riga
##fSpline = (np.array(range(N2))+1)/L_um # um^-1
fSpline = np.arange(N2)/N2 * (max(ff) - min(ff)) + min(ff)
fun = interpolate.splrep(log(ff), log(yy), s=2)
yPsd_log = interpolate.splev(log(fSpline), fun)
ySpline = np.exp(yPsd_log)
yPsd = ySpline
# tolgo
yPsd[fSpline<ff[0]] = 200
n = len(yPsd)
plt.plot(fSpline, yPsd,'-')
plt.plot(ff, yy,'x')
plt.legend(['ySpline','Data'])
ax = plt.axes()
#ax.set_yscale('log')
#ax.set_xscale('log')
#% controllo RMS integrando la yPsd
import scipy.integrate as integrate
RMS = np.sqrt(integrate.trapz(yPsd, fSpline/1000))
#% Modo Manfredda style
#yPsdNorm = np.sqrt(yPsd/L_um/1000)
#yPsdNorm_reverse = yPsdNorm[::-1]
yPsd_reverse = yPsd[::-1]
ell= 1/(fSpline[1] - fSpline[0])
if N%2 == 0:
yPsd2 = np.hstack((yPsd_reverse ,0,yPsd[0:-1]))
else:
yPsd2 = np.hstack((yPsd_reverse ,0,yPsd))
##yPsd2Norm = np.sqrt(yPsd2/ell/1000/2)
yPsd2Norm = np.sqrt(yPsd2/ell/1000)
n_ = len(yPsd2)
print('len(yPsd2) = %0.2d' % len(yPsd2Norm))
phi = 2*np.pi * np.random.rand(n_)
r = np.exp(1j*phi)
yPsd2Norm_ = fftshift(yPsd2Norm)
#yPsd2Norm_[len(yPsd2Norm_)//2] = 0
yRaf = np.fft.fft(r*yPsd2Norm_)
yRaf = np.real(yRaf)
print('Rms = %0.2e nm' % np.std(yRaf))
plt.plot(yPsd2Norm_)
print('max yPsd_ = %d nm' % max(yPsd2))
print('max yPsd2Norm = %0.4f nm' % max(yPsd2Norm))
print('Rms yRaf2 = %0.2e nm' % np.std(yRaf))
return yRaf * 1e-9
#============================================================================
# FUN: Psd2Noise
#============================================================================
def PsdArray2Noise_1d(PsdArray, N, Semiaxis = True, Real = True):
'''
Generates a noise pattern whose Power Spectral density is given by Psd.
Parameters
---------------------
Psd : 1d array
Contains the numeric Psd (treated as evenly spaced array)
Semiaxis :
0 : does nothing
1 : halvens Pds, then replicates the halven part for left frequencies,
producing an output as long as Psd
2 : replicates all Pds for lef frequencies as well, producing an output
twice as long as Psd
Real : boolean
If True, the real part of the output is returned (default)
Returns:
---------------------
An array of the same length of Psd
'''
if Semiaxis == True:
yHalf = PsdArray
PsdArrayNew = np.hstack((yHalf[-1:0:-1], yHalf))
idelta = len(PsdArrayNew) - N
if idelta == 1:# piu lungo
PsdArrayNew = PsdArrayNew[0:-1] # uguale
elif idelta == 0:
pass
else:
print('Error! len(PsdArrayNew) - len(PsdArray) = %0d' % idelta)
y = np.fft.fftshift(PsdArrayNew)
r = 2*np.pi * np.random.rand(len(PsdArrayNew))
f = np.fft.ifft(y * np.exp(1j*r))
if Real:
return np.real(f)
else:
return f
Psd2Noise_1d = PsdArray2Noise_1d
#============================================================================
# FUN: NoNoise_1d
#============================================================================
def NoNoise_1d(N, *args):
return np.zeros([1,N])
#============================================================================
# FUN: GaussianNoise_1d
#============================================================================
def GaussianNoise_1d(N,dx, Sigma):
'''
PSD(f) = np.exp(-0.5^f/Sigma^2)
'''
x = np.linspace( - N//2 *dx, N//2-1 * dx,N)
y = np.exp(-0.5*x**2/Sigma**2)
return Psd2NoisePattern_1d(y)
#============================================================================
# FUN: PowerLawNoise_1d
#============================================================================
def PowerLawNoise_1d(N, dx, a, b):
'''
PSD(x) = a*x^b
'''
x = np.arange(0,N//2+1, dx)
yHalf = a * x**b
# y = np.hstack((yHalf[-1:0:-1], 0, yHalf[1:-1]))
return Psd2NoisePattern_1d(y, Semiaxis = True)
#============================================================================
# FUN: CustomNoise_1d
#============================================================================
def CustomNoise_1d(N, dx, xPsd, yPsd):
xPsd_, yPsd_ = rm.FastResample1d(xPsd, yPsd,N)
return Psd2NoisePattern_1d(yPsd_, Semiaxis = True)
#============================================================================
# CLASS: NoiseGenerator
#============================================================================
class PsdGenerator:
NoNoise = staticmethod(NoNoise_1d)
Gauss = staticmethod(GaussianNoise_1d)
PowerLaw = staticmethod(PowerLawNoise_1d)
NumericArray = staticmethod(CustomNoise_1d)
#============================================================================
# FUN: FitPowerLaw
#============================================================================
def FitPowerLaw(x,y):
'''
Fits the input data in the form
y = a*x^b
returns a,b
'''
import scipy.optimize as optimize
fFit = lambda p, x: p[0] * x ** p[1]
fErr = lambda p, x, y: (y - fFit(p, x))
p0 = [max(y), -1.0]
out = optimize.leastsq(fErr, p0, args=(x, y), full_output=1)
pOut = out[0]
b = pOut[1]
a = pOut[0]
# indexErr = np.np.sqrt( covar[0][0] )
# ampErr = np.np.sqrt( covar[1][1] ) * amp
return a,b
#==============================================================================
# CLASS: RoughnessMaker
#==============================================================================
class RoughnessMaker(object):
class Options():
FIT_NUMERIC_DATA_WITH_POWER_LAW = True
AUTO_ZERO_MEAN_FOR_NUMERIC_DATA = True
AUTO_FILL_NUMERIC_DATA_WITH_ZERO = True
AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE = True
def __init__(self):
self.PsdType = PsdFuns.PowerLaw
self.PsdParams = np.array([1,1])
self._IsNumericPsdInFreq = None
self.CutoffLowHigh = [None, None]
self.ProfileScaling = 1
return None
@property
def PsdType(self):
return self._PsdType
@PsdType.setter
def PsdType(self, Val):
'''
Note: each time that the Property value is set, self.CutoffLowHigh is
reset, is specified by options
'''
self. _PsdType = Val
if self.Options.AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE == True:
self.PsdCutoffLowHigh = [None, None]
#======================================================================
# FUN: PdfEval
#======================================================================
def PsdEval(self, N, df, CutoffLowHigh = [None, None]):
'''
Evals the PSD in the range [0 - N*df]
It's good custom to have PSD[0] = 0, so that the noise pattern is
zero-mean.
Parameters:
----------------------
N : int
#of samples
df : float
spacing of spatial frequencies (df=1/TotalLength)
CutoffLowHigh : [LowCutoff, HighCutoff]
if >0, then Psd(f<Cutoff) is set to 0.
if None, then LowCutoff = min()
Returns : fAll, yPsdAll
----------------------
fAll : 1d array
contains the spatial frequencies
yPsd : 1d array
contains the Psd
'''
'''
The Pdf is evaluated only within LowCutoff and HoghCutoff
If the Pdf is PsdFuns.Interp, then LowCutoff and HighCutoff are
automatically set to min and max values of the experimental data
'''
StrMessage = ''
def GetInRange(fAll, LowCutoff, HighCutoff):
_tmpa = fAll >= LowCutoff
_tmpb = fAll <= HighCutoff
fMid_Pos = np.all([_tmpa, _tmpb],0)
fMid = fAll[fMid_Pos]
return fMid_Pos, fMid
LowCutoff, HighCutoff = CutoffLowHigh
fMin = 0
fMax = (N-1)*df
fAll = np.linspace(0, fMax, N)
yPsdAll = fAll* 0 # init
LowCutoff = 0 if LowCutoff is None else LowCutoff
HighCutoff = N*df if HighCutoff is None else HighCutoff
# Numeric PSD
# Note: by default returned yPsd is always 0 outside the input data range
if self.PsdType == PsdFuns.Interp:
# Use Auto-Fit + PowerLaw
if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True:
xFreq,y = self.NumericPsdGetXY()
p = FitPowerLaw(1/xFreq,y)
_PsdParams = p[0], -p[1]
LowCutoff = np.amin(self._PsdNumericX)
HighCutoff = np.amin(self._PsdNumericX)
fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
yPsd = PsdFuns.PowerLaw(fMid, *_PsdParams )
# Use Interpolation
else:
# check Cutoff
LowVal = np.amin(self._PsdNumericX)
HighVal = np.amax(self._PsdNumericX)
LowCutoff = LowVal if LowCutoff <= LowVal else LowCutoff
HighCutoff = HighVal if HighCutoff >= HighVal else HighCutoff
# Get the list of good frequency values (fMid) and their positions
# (fMid_Pos)
fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
##yPsd = self.PsdType(fMid, *self.PsdParams)
## non funziona, rimpiazzo a mano
yPsd = PsdFuns.Interp(fMid, self._PsdNumericX, self._PsdNumericY)
# Analytical Psd
else:
fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
yPsd = self.PsdType(fMid, *self.PsdParams)
# copying array subset
yPsdAll[fMid_Pos] = yPsd
return fAll, yPsdAll
#======================================================================
# FUN: _FitNumericPsdWithPowerLaw
#======================================================================
# in disusos
def _FitNumericPsdWithPowerLaw(self):
x,y = self.NumericPsdGetXY()
if self._IsNumericPsdInFreq == True:
p = FitPowerLaw(1/x,y)
self.PsdParams = p[0], -p[1]
else:
p = FitPowerLaw(x,y)
self.PsdParams = p[0], p[1]
#======================================================================
# FUN: MakeProfile
#======================================================================
def MakeProfile(self, L,N):
'''
Evaluates the psd according to .PsdType, .PsdParams and .Options directives
Returns an evenly-spaced array.
If PsdType = NumericArray, linear interpolation is performed.
:PARAM: N: # of samples
:PARAM: dx: grid spacing (spatial frequency)
returns:
1d arr
'''
if self.PsdType == PsdFuns.Interp:
# chiama codice ad hoc
L_mm = L*1e3
yRoughness = PsdArray2Noise_1d_v2(self._PsdNumericX, self._PsdNumericY, L_mm, N)
else:
print('Irreversible error. The code was not completed to handle this instance')
return yRoughness * self.ProfileScaling
# f, yPsd = self.PsdEval(N//2 + 1,df)
# Special case
# if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True:
# self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY()))
# yPsd = PsdFuns.PowerLaw(x, *self.PsdParams)
# else: # general calse
# yPsd = self.PsdType(x, *self.PsdParams)
# yRoughness = Psd2Noise_1d(yPsd, N, Semiaxis = True)
# x = np.linspace(0, N*dx,N)
# # Special case
# if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True:
# self.PsdParams = list(FitPowerLaw(*self.NumericPsdGetXY()))
# y = PowerLawNoise_1d(N, dx, *self.PsdParams)
# else: # general calse
# y = self.PsdType(N,dx, *self.PsdParams)
# return y
Generate = MakeProfile
#======================================================================
# FUN: NumericPsdSetXY
#======================================================================
def NumericPsdSetXY(self,x,y):
self._PsdNumericX = x
self._PsdNumericY = y
#======================================================================
# FUN: NumericPsdGetXY
#======================================================================
def NumericPsdGetXY(self):
try:
return self._PsdNumericX, self._PsdNumericY
except:
print('Error in RoughnessMaker.NumericPsdGetXY. Maybe the data file was not properly loaded')
#======================================================================
# FUN: NumericPsdLoadXY
#======================================================================
def NumericPsdLoadXY(self, FilePath, xScaling = 1, yScaling = 1 , xIsSpatialFreq = True):
''' @TODO: specificare formati e tipi di file
Parameters
----------------------------
xIsSpatialFreq : bool
true If the first column (Read_x_values) contains spatial
frequencies. False if it contains lenghts. Default = True
xScaling, yScaling: floats
Read_x_values => Read_x_values * xScaling
Read_y_values => Read_y_values * yScaling
Sometimes, properly setting the x and y scaling values may be confusing (although just matter of high-school considerations). On this purpose, the property .RoughnessMaker.ProfileScaling property can be used also..ProfileScaling is the scale factor that acts on the output of MakeProfile() function only.
remarks
--------
pippo
'''
try:
self._IsNumericPsdInFreq = xIsSpatialFreq
s = np.loadtxt(FilePath)
x = s[:,0]
y = s[:,1]
x = x * xScaling
y = y * yScaling
# inversion of x-axis if not spatial frequencies
if xIsSpatialFreq == False:
f = 1/x
else:
f = x
# array sorting
i = np.argsort(f)
f = f[i]
y = y[i]
# I set the Cutoff value of the class according to available data
self.PsdCutoffLowHigh = [np.amin, np.amax(f)]
# I set class operating variables
self.PsdType = PsdFuns.Interp
self.PsdParams = [f,y]
# Auto-set
# fill 0-value (DC Component)
# if self.Options.AUTO_FILL_NUMERIC_DATA_WITH_ZERO == True:
# if np.amin(x >0):
# x = np.insert(x,0,0)
# y = np.insert(y,0,0) # 0 in psd => 0-mean value in the noise pattern
# sync other class values
self.NumericPsdSetXY(f, y)
except:
pass
def Generate(self, N = None, dx = None, CutoffLowHigh = [None, None]):
'''
Parameters
N: # of output samples
dx: step of the x axis
Note: generates an evenly spaced array
'''
L = dx * N
df = 1/L
fPsd, yPsd = self.PsdEval(N//2 +1 , df = df,
CutoffLowHigh = CutoffLowHigh )
h = Psd2Noise_1d(yPsd, Semiaxis = True)
return h
#======================================================================
# FUN: NumericPsdCheck
#======================================================================
def NumericPsdCheck(self, N, L):
df = 1/L
# Stored data
ff,yy = self.NumericPsdGetXY()
# Evaluated data
fPsd, yPsd = self.PsdEval(N, df)
plt.plot(fPsd, np.log10(yPsd),'x')
plt.plot(ff, np.log10(yy),'.r')
plt.legend(['Evaluated data', 'Stored data'])
plt.suptitle('Usage of stored data (PSD)')
fMax = df*(N//2)
fMin = df
StrMsg = ''
_max = np.max(ff)
_min = np.min(ff)
print('fMax query = %0.1e m^-1' % fMax )
print('fMax data= %0.1e m^-1 = %0.2e um^-1' % (_max, (_max * 1e6) ))
print('fMin query= %0.1e m^-1' % fMin )
print('fMin data= %0.1e m^-1 = %0.2e um^-1' % (_min, (_min * 1e6) ))
return StrMsg | 28.234347 | 310 | 0.563138 |
from __future__ import division
from wiselib2.must import *
import numpy as np
import wiselib2.Rayman as rm
Gauss1d = lambda x ,y : None
from scipy import interpolate as interpolate
from matplotlib import pyplot as plt
class PsdFuns:
@staticmethod
def Flat(x, *args):
N = len(x)
return np.zeros([1,N]) +1
@staticmethod
def PowerLaw(x,a,b):
return a*x**b
@staticmethod
def Gaussian(x,sigma, x0=0):
return np.exp(-0.5 * (x-x0)**2/sigma**2)
@staticmethod
def Interp(x, xData, yData):
f = interpolate.interp1d(xData, yData)
return f(x)
def PsdFun2Noise_1d(N,dx, PsdFun, PsdArgs):
x = np.arange(0,N//2+1, dx)
yHalf = PsdFun(x, *PsdArgs)
y = Psd2NoisePattern_1d(yHalf, Semiaxis = True )
return x,y
def PsdArray2Noise_1d_v2(f_in, Psd_in, L_mm,N):
from scipy import interpolate
log=np.log
fft = np.fft.fft
fftshift = np.fft.fftshift
ff = f_in
yy = Psd_in
L = L_mm
N = int(N)
N2 = int(N//2)
L =300
L_um = L*1e3
L_nm = L*1e6
fMin = 1/L_um
n = interpolate.splrep(log(ff), log(yy), s=2)
yPsd_log = interpolate.splev(log(fSpline), fun)
ySpline = np.exp(yPsd_log)
yPsd = ySpline
yPsd[fSpline<ff[0]] = 200
n = len(yPsd)
plt.plot(fSpline, yPsd,'-')
plt.plot(ff, yy,'x')
plt.legend(['ySpline','Data'])
ax = plt.axes()
import scipy.integrate as integrate
RMS = np.sqrt(integrate.trapz(yPsd, fSpline/1000))
yPsd_reverse = yPsd[::-1]
ell= 1/(fSpline[1] - fSpline[0])
if N%2 == 0:
yPsd2 = np.hstack((yPsd_reverse ,0,yPsd[0:-1]))
else:
yPsd2 = np.hstack((yPsd_reverse ,0,yPsd))
n_ = len(yPsd2)
print('len(yPsd2) = %0.2d' % len(yPsd2Norm))
phi = 2*np.pi * np.random.rand(n_)
r = np.exp(1j*phi)
yPsd2Norm_ = fftshift(yPsd2Norm)
yRaf = np.fft.fft(r*yPsd2Norm_)
yRaf = np.real(yRaf)
print('Rms = %0.2e nm' % np.std(yRaf))
plt.plot(yPsd2Norm_)
print('max yPsd_ = %d nm' % max(yPsd2))
print('max yPsd2Norm = %0.4f nm' % max(yPsd2Norm))
print('Rms yRaf2 = %0.2e nm' % np.std(yRaf))
return yRaf * 1e-9
def PsdArray2Noise_1d(PsdArray, N, Semiaxis = True, Real = True):
if Semiaxis == True:
yHalf = PsdArray
PsdArrayNew = np.hstack((yHalf[-1:0:-1], yHalf))
idelta = len(PsdArrayNew) - N
if idelta == 1:
PsdArrayNew = PsdArrayNew[0:-1]
elif idelta == 0:
pass
else:
print('Error! len(PsdArrayNew) - len(PsdArray) = %0d' % idelta)
y = np.fft.fftshift(PsdArrayNew)
r = 2*np.pi * np.random.rand(len(PsdArrayNew))
f = np.fft.ifft(y * np.exp(1j*r))
if Real:
return np.real(f)
else:
return f
Psd2Noise_1d = PsdArray2Noise_1d
def NoNoise_1d(N, *args):
return np.zeros([1,N])
def GaussianNoise_1d(N,dx, Sigma):
x = np.linspace( - N//2 *dx, N//2-1 * dx,N)
y = np.exp(-0.5*x**2/Sigma**2)
return Psd2NoisePattern_1d(y)
def PowerLawNoise_1d(N, dx, a, b):
x = np.arange(0,N//2+1, dx)
yHalf = a * x**b
return Psd2NoisePattern_1d(y, Semiaxis = True)
def CustomNoise_1d(N, dx, xPsd, yPsd):
xPsd_, yPsd_ = rm.FastResample1d(xPsd, yPsd,N)
return Psd2NoisePattern_1d(yPsd_, Semiaxis = True)
class PsdGenerator:
NoNoise = staticmethod(NoNoise_1d)
Gauss = staticmethod(GaussianNoise_1d)
PowerLaw = staticmethod(PowerLawNoise_1d)
NumericArray = staticmethod(CustomNoise_1d)
def FitPowerLaw(x,y):
import scipy.optimize as optimize
fFit = lambda p, x: p[0] * x ** p[1]
fErr = lambda p, x, y: (y - fFit(p, x))
p0 = [max(y), -1.0]
out = optimize.leastsq(fErr, p0, args=(x, y), full_output=1)
pOut = out[0]
b = pOut[1]
a = pOut[0]
return a,b
class RoughnessMaker(object):
class Options():
FIT_NUMERIC_DATA_WITH_POWER_LAW = True
AUTO_ZERO_MEAN_FOR_NUMERIC_DATA = True
AUTO_FILL_NUMERIC_DATA_WITH_ZERO = True
AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE = True
def __init__(self):
self.PsdType = PsdFuns.PowerLaw
self.PsdParams = np.array([1,1])
self._IsNumericPsdInFreq = None
self.CutoffLowHigh = [None, None]
self.ProfileScaling = 1
return None
@property
def PsdType(self):
return self._PsdType
@PsdType.setter
def PsdType(self, Val):
self. _PsdType = Val
if self.Options.AUTO_RESET_CUTOFF_ON_PSDTYPE_CHANGE == True:
self.PsdCutoffLowHigh = [None, None]
def PsdEval(self, N, df, CutoffLowHigh = [None, None]):
StrMessage = ''
def GetInRange(fAll, LowCutoff, HighCutoff):
_tmpa = fAll >= LowCutoff
_tmpb = fAll <= HighCutoff
fMid_Pos = np.all([_tmpa, _tmpb],0)
fMid = fAll[fMid_Pos]
return fMid_Pos, fMid
LowCutoff, HighCutoff = CutoffLowHigh
fMin = 0
fMax = (N-1)*df
fAll = np.linspace(0, fMax, N)
yPsdAll = fAll* 0
LowCutoff = 0 if LowCutoff is None else LowCutoff
HighCutoff = N*df if HighCutoff is None else HighCutoff
if self.PsdType == PsdFuns.Interp:
if self.Options.FIT_NUMERIC_DATA_WITH_POWER_LAW == True:
xFreq,y = self.NumericPsdGetXY()
p = FitPowerLaw(1/xFreq,y)
_PsdParams = p[0], -p[1]
LowCutoff = np.amin(self._PsdNumericX)
HighCutoff = np.amin(self._PsdNumericX)
fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
yPsd = PsdFuns.PowerLaw(fMid, *_PsdParams )
else:
LowVal = np.amin(self._PsdNumericX)
HighVal = np.amax(self._PsdNumericX)
LowCutoff = LowVal if LowCutoff <= LowVal else LowCutoff
HighCutoff = HighVal if HighCutoff >= HighVal else HighCutoff
fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
)
else:
fMid_Pos, fMid = GetInRange(fAll, LowCutoff, HighCutoff)
yPsd = self.PsdType(fMid, *self.PsdParams)
yPsdAll[fMid_Pos] = yPsd
return fAll, yPsdAll
def _FitNumericPsdWithPowerLaw(self):
x,y = self.NumericPsdGetXY()
if self._IsNumericPsdInFreq == True:
p = FitPowerLaw(1/x,y)
self.PsdParams = p[0], -p[1]
else:
p = FitPowerLaw(x,y)
self.PsdParams = p[0], p[1]
def MakeProfile(self, L,N):
if self.PsdType == PsdFuns.Interp:
L_mm = L*1e3
yRoughness = PsdArray2Noise_1d_v2(self._PsdNumericX, self._PsdNumericY, L_mm, N)
else:
print('Irreversible error. The code was not completed to handle this instance')
return yRoughness * self.ProfileScaling
def NumericPsdSetXY(self,x,y):
self._PsdNumericX = x
self._PsdNumericY = y
def NumericPsdGetXY(self):
try:
return self._PsdNumericX, self._PsdNumericY
except:
print('Error in RoughnessMaker.NumericPsdGetXY. Maybe the data file was not properly loaded')
def NumericPsdLoadXY(self, FilePath, xScaling = 1, yScaling = 1 , xIsSpatialFreq = True):
try:
self._IsNumericPsdInFreq = xIsSpatialFreq
s = np.loadtxt(FilePath)
x = s[:,0]
y = s[:,1]
x = x * xScaling
y = y * yScaling
if xIsSpatialFreq == False:
f = 1/x
else:
f = x
i = np.argsort(f)
f = f[i]
y = y[i]
self.PsdCutoffLowHigh = [np.amin, np.amax(f)]
self.PsdType = PsdFuns.Interp
self.PsdParams = [f,y]
pass
def Generate(self, N = None, dx = None, CutoffLowHigh = [None, None]):
L = dx * N
df = 1/L
fPsd, yPsd = self.PsdEval(N//2 +1 , df = df,
CutoffLowHigh = CutoffLowHigh )
h = Psd2Noise_1d(yPsd, Semiaxis = True)
return h
def NumericPsdCheck(self, N, L):
df = 1/L
ff,yy = self.NumericPsdGetXY()
fPsd, yPsd = self.PsdEval(N, df)
plt.plot(fPsd, np.log10(yPsd),'x')
plt.plot(ff, np.log10(yy),'.r')
plt.legend(['Evaluated data', 'Stored data'])
plt.suptitle('Usage of stored data (PSD)')
fMax = df*(N//2)
fMin = df
StrMsg = ''
_max = np.max(ff)
_min = np.min(ff)
print('fMax query = %0.1e m^-1' % fMax )
print('fMax data= %0.1e m^-1 = %0.2e um^-1' % (_max, (_max * 1e6) ))
print('fMin query= %0.1e m^-1' % fMin )
print('fMin data= %0.1e m^-1 = %0.2e um^-1' % (_min, (_min * 1e6) ))
return StrMsg | true | true |
7901d5795f2fc04bc8fb23c5e83916a13c572858 | 1,477 | py | Python | src/lib/interface.py | hgiesel/anki_text_wrapper | 6cc24d0785b6ea65816c1b633acaf00077cb6a7a | [
"MIT"
] | null | null | null | src/lib/interface.py | hgiesel/anki_text_wrapper | 6cc24d0785b6ea65816c1b633acaf00077cb6a7a | [
"MIT"
] | null | null | null | src/lib/interface.py | hgiesel/anki_text_wrapper | 6cc24d0785b6ea65816c1b633acaf00077cb6a7a | [
"MIT"
] | null | null | null | from typing import List, Literal, Union, Callable, Tuple
from dataclasses import dataclass, replace
from .config_types import (
TWInterface,
TWSettingStorage, TWSettingBool, TWSetting,
WrapType, Fields, AnkiModel, LabelText, WhichField, Tags, Falsifiable,
)
ScriptKeys = Literal[
'enabled',
'name',
'version',
'description',
'conditions',
'code',
]
def __list_to_tw_bool(prototype, vals: List[ScriptKeys]):
return replace(
prototype,
**dict([(key, True) for key in vals])
)
def make_interface(
# name for the type of the interface
tag: str,
prototype: WrapType,
getter: Callable[[str, TWSettingStorage], TWSetting],
# result is used for storing,
setter: Callable[[str, TWSetting], Union[bool, TWSetting]],
wrapper: Callable[[str, TWSettingStorage, AnkiModel, Fields, WhichField, slice, Tags], Tuple[Fields, Tags]],
label: Falsifiable(Callable[[str, TWSettingStorage], LabelText]),
reset: Falsifiable(Callable[[str, TWSettingStorage], TWSetting]),
deletable: Falsifiable(Callable[[str, TWSettingStorage], bool]),
# list of values that are readonly,,
readonly: TWSettingBool,
# list of values or stored in `storage` field,
store: TWSettingBool,
) -> TWInterface:
return TWInterface(
tag,
prototype,
getter,
setter,
wrapper,
label,
reset,
deletable,
readonly,
store,
)
| 26.854545 | 112 | 0.654705 | from typing import List, Literal, Union, Callable, Tuple
from dataclasses import dataclass, replace
from .config_types import (
TWInterface,
TWSettingStorage, TWSettingBool, TWSetting,
WrapType, Fields, AnkiModel, LabelText, WhichField, Tags, Falsifiable,
)
ScriptKeys = Literal[
'enabled',
'name',
'version',
'description',
'conditions',
'code',
]
def __list_to_tw_bool(prototype, vals: List[ScriptKeys]):
return replace(
prototype,
**dict([(key, True) for key in vals])
)
def make_interface(
tag: str,
prototype: WrapType,
getter: Callable[[str, TWSettingStorage], TWSetting],
setter: Callable[[str, TWSetting], Union[bool, TWSetting]],
wrapper: Callable[[str, TWSettingStorage, AnkiModel, Fields, WhichField, slice, Tags], Tuple[Fields, Tags]],
label: Falsifiable(Callable[[str, TWSettingStorage], LabelText]),
reset: Falsifiable(Callable[[str, TWSettingStorage], TWSetting]),
deletable: Falsifiable(Callable[[str, TWSettingStorage], bool]),
readonly: TWSettingBool,
store: TWSettingBool,
) -> TWInterface:
return TWInterface(
tag,
prototype,
getter,
setter,
wrapper,
label,
reset,
deletable,
readonly,
store,
)
| true | true |
7901d627b1a76729123800d05a8c8708c88f68cb | 1,234 | py | Python | var/spack/repos/builtin/packages/prank/package.py | robertsawko/spack | 135cf4835f5b646c4aaa0e2eb5552c80fc3a5ce8 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-09-15T23:55:48.000Z | 2019-09-15T23:55:48.000Z | var/spack/repos/builtin/packages/prank/package.py | vlkale/spack | 011b6e684d3978c956e5ea60d2982ccc0bac5d6d | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/prank/package.py | vlkale/spack | 011b6e684d3978c956e5ea60d2982ccc0bac5d6d | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2017-01-21T17:19:32.000Z | 2017-01-21T17:19:32.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Prank(Package):
    """A powerful multiple sequence alignment browser."""

    homepage = "http://wasabiapp.org/software/prank/"
    url      = "http://wasabiapp.org/download/prank/prank.source.170427.tgz"

    version('170427', sha256='623eb5e9b5cb0be1f49c3bf715e5fabceb1059b21168437264bdcd5c587a8859')

    depends_on('mafft')
    depends_on('exonerate')
    depends_on('bpp-suite')  # for bppancestor

    conflicts('%gcc@7.2.0', when='@:150803')

    def install(self, spec, prefix):
        with working_dir('src'):
            # Point the hand-written Makefile at Spack's compiler wrappers.
            for tool, wrapper in (('gcc', spack_cc), ('g++', spack_cxx)):
                filter_file(tool, '{0}'.format(wrapper), 'Makefile', string=True)
            if spec.target.family != 'x86_64':
                # Strip flags the Makefile presumably hard-codes and which
                # other targets' toolchains may reject.
                for unwanted in ('-m64', '-pipe'):
                    filter_file(unwanted, '', 'Makefile', string=True)
            make()
        # No install target upstream; copy the binary into place by hand.
        mkdirp(prefix.bin)
        install('prank', prefix.bin)
| 34.277778 | 96 | 0.613452 |
from spack import *
class Prank(Package):
homepage = "http://wasabiapp.org/software/prank/"
url = "http://wasabiapp.org/download/prank/prank.source.170427.tgz"
version('170427', sha256='623eb5e9b5cb0be1f49c3bf715e5fabceb1059b21168437264bdcd5c587a8859')
depends_on('mafft')
depends_on('exonerate')
depends_on('bpp-suite')
conflicts('%gcc@7.2.0', when='@:150803')
def install(self, spec, prefix):
with working_dir('src'):
filter_file('gcc', '{0}'.format(spack_cc),
'Makefile', string=True)
filter_file('g++', '{0}'.format(spack_cxx),
'Makefile', string=True)
if not spec.target.family == 'x86_64':
filter_file('-m64', '', 'Makefile', string=True)
filter_file('-pipe', '', 'Makefile', string=True)
make()
mkdirp(prefix.bin)
install('prank', prefix.bin)
| true | true |
7901d632bbec3cafeb8c4ffbe2ea51c8f4d51367 | 9,809 | py | Python | Packs/Intezer/Integrations/IntezerV2/IntezerV2.py | ryantoddtq/content | 50027658da7189e37e9514fc03057d1c1bc3209f | [
"MIT"
] | 2 | 2020-07-27T10:35:41.000Z | 2020-12-14T15:44:18.000Z | Packs/Intezer/Integrations/IntezerV2/IntezerV2.py | Axonius/content | e058add82b7422338015cf14591512b9aad4d3e9 | [
"MIT"
] | 22 | 2022-03-23T10:39:16.000Z | 2022-03-31T11:31:37.000Z | Packs/Intezer/Integrations/IntezerV2/IntezerV2.py | adambaumeister/content | c6808d0b13d00edc4cd6268793c2ae0c2e39aed6 | [
"MIT"
] | null | null | null | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
# Integration configuration is read once, at import time.
API_KEY = demisto.getParam('APIKey')
SERVER_URL = 'https://analyze.intezer.com/api'
API_VERSION = '/v2-0'
BASE_URL = SERVER_URL + API_VERSION
IS_AVAILABLE_URL = 'is-available'
ERROR_PREFIX = 'Error from Intezer:'
# Status codes handle_response() callers accept when submitting/polling.
ACCEPTABLE_HTTP_CODES = {200, 201, 202}
USE_SSL = not demisto.params().get('insecure', False)
# HTTP status -> user-facing error text.  NOTE(review): "massage" is a typo
# for "message"; the name is kept because other blocks in this file use it.
http_status_to_error_massage = {
    400: '400 Bad Request - Wrong or invalid parameters',
    401: '401 Unauthorized - Wrong or invalid api key',
    403: '403 Forbidden - The account is not allowed to preform this task',
    404: '404 Not Found - Analysis was not found',
    410: '410 Gone - Analysis no longer exists in the service',
    500: '500 Internal Server Error - Internal error',
    503: '503 Service Unavailable'
}
# Intezer verdict string -> DBot reputation score (Demisto convention:
# 3 = malicious, 2 = suspicious, 1 = good; unlisted verdicts map to 0).
dbot_score_by_verdict = {
    'malicious': 3,
    'suspicious': 2,
    'trusted': 1,
    'neutral': 1,
    'no_threats': 1
}
''' HELPER FUNCTIONS '''
def handle_response(response, acceptable_http_status_codes):
    """Validate *response* and return its decoded JSON body.

    A status code outside *acceptable_http_status_codes*, or a body that is
    not valid JSON, is routed to ``return_error`` (which aborts the command).
    """
    status = response.status_code
    if status not in acceptable_http_status_codes:
        message = http_status_to_error_massage.get(status, "Failed to perform request")
        return_error(f'{ERROR_PREFIX} {message}')

    try:
        decoded = response.json()
    except json.decoder.JSONDecodeError:
        # Unlikely: the status code should have signalled the problem already.
        return_error(f'Response returned with no data. This might be an issue with Intezer.\nPlease try again later\n'
                     f'Response content:\n{response.content}')
    else:
        return decoded
def get_session():
    """Swap the configured API key for a bearer token and return an authorized session."""
    token_reply = requests.post(BASE_URL + '/get-access-token', json={'api_key': API_KEY}, verify=USE_SSL)
    token_reply = handle_response(token_reply, {200})
    authorized = requests.session()
    authorized.headers['Authorization'] = f'Bearer {token_reply["result"]}'
    return authorized
''' COMMANDS '''
def check_is_available():
    """Return 'ok' when the Intezer service reports itself available, else None."""
    response = SESSION.get(f'{SERVER_URL}/{IS_AVAILABLE_URL}', verify=USE_SSL)
    if response.json()['is_available']:
        return 'ok'
    return None
def analyze_by_hash_command():
    """Submit the ``file_hash`` command argument for analysis and report the outcome."""
    target_hash = demisto.getArg('file_hash')
    handle_analyze_by_hash_response(make_analyze_by_hash_request(target_hash), target_hash)
def get_latest_result_command():
    """Fetch and display the most recent Intezer report for the ``file_hash`` argument."""
    target_hash = demisto.getArg('file_hash')
    handle_get_latest_result_response(make_get_latest_report_request(target_hash), target_hash)
def make_analyze_by_hash_request(file_hash):
    """POST *file_hash* to the analyze-by-hash endpoint; return the raw response."""
    return SESSION.post(BASE_URL + '/analyze-by-hash', json={'hash': file_hash}, verify=USE_SSL)
def make_get_latest_report_request(file_hash):
    """GET the stored file report for *file_hash*; return the raw response."""
    report_url = '{}/files/{}'.format(BASE_URL, file_hash)
    return SESSION.get(report_url, verify=USE_SSL)
def handle_analyze_by_hash_response(response, file_hash):
    """Turn an analyze-by-hash response into war-room output / context.

    404 -> neutral DBot score (hash unknown to Intezer); 400 -> user error;
    anything else is treated as a successfully created analysis.
    """
    status = response.status_code
    if status == 404:
        score_entry = {
            'Vendor': 'Intezer',
            'Type': 'hash',
            'Indicator': file_hash,
            'Score': 0
        }
        unknown_note = f'Hash {file_hash} does not exist on Intezer genome database'
        return_outputs(unknown_note, {'DBotScore': score_entry})
        return
    if status == 400:
        return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
                     'SHA-1 and MD5 hash formats.\n')

    handle_analyze_response(response)
def handle_get_latest_result_response(response, file_hash):
    """Turn a latest-report response into war-room output / context.

    404 -> neutral DBot score (hash unknown to Intezer); 400 -> user error;
    otherwise the report body is rendered via the file-analysis enricher.
    """
    status = response.status_code
    if status == 404:
        score_entry = {
            'Vendor': 'Intezer',
            'Type': 'hash',
            'Indicator': file_hash,
            'Score': 0
        }
        unknown_note = f'Hash {file_hash} does not exist on Intezer genome database'
        return_outputs(unknown_note, {'DBotScore': score_entry})
        return
    if status == 400:
        return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
                     'SHA-1 and MD5 hash formats.\n')

    enrich_dbot_and_display_file_analysis_results(response.json()['result'])
def analyze_by_uploaded_file_command():
    """Upload the war-room file identified by ``file_entry_id`` for analysis."""
    upload_response = make_analyze_by_file_request(demisto.getArg('file_entry_id'))
    handle_analyze_response(upload_response)
def make_analyze_by_file_request(file_id):
    """POST the file behind war-room entry *file_id* to the analyze endpoint."""
    file_info = demisto.getFilePath(file_id)
    with open(file_info['path'], 'rb') as upload_stream:
        payload = {'file': (file_info['name'], upload_stream)}
        return SESSION.post(BASE_URL + '/analyze', files=payload, verify=USE_SSL)
def handle_analyze_response(response):
    """Record a newly created analysis in context and acknowledge it in the war room."""
    body = handle_response(response, ACCEPTABLE_HTTP_CODES)
    # The analysis id is the last path segment of the returned result_url.
    analysis_id = body['result_url'].rsplit('/', 1)[-1]
    context = {
        'Intezer.Analysis(obj.ID === val.ID)': {'ID': analysis_id, 'Status': 'Created', 'type': 'File'},
    }
    return_outputs('Analysis created successfully: {}'.format(analysis_id), context, body)
def check_analysis_status_and_get_results_command():
    """Poll one or more analyses and publish every one that has finished."""
    args = demisto.args()
    analysis_type = args.get('analysis_type', 'File')
    analysis_ids = argToList(args.get('analysis_id'))
    indicator_name = args.get('indicator_name')
    for analysis_id in analysis_ids:
        outcome = handle_analysis_result(make_analysis_status_request(analysis_id, analysis_type))
        if not outcome:
            # Still in progress: handle_analysis_result already reported it.
            continue
        if analysis_type == 'Endpoint':
            enrich_dbot_and_display_endpoint_analysis_results(outcome, indicator_name)
        elif analysis_type == 'File':
            enrich_dbot_and_display_file_analysis_results(outcome)
def make_analysis_status_request(analysis_id, analysis_type):
    """GET the status/result resource for one analysis ('Endpoint' or file)."""
    endpoint = 'endpoint-analyses/' if analysis_type == 'Endpoint' else 'analyses/'
    return SESSION.get(f'{BASE_URL}/{endpoint}{analysis_id}', verify=USE_SSL)
def handle_analysis_result(response):
    """Return the finished analysis payload, or None while it is still running.

    handle_response only lets 200/201/202 through; a non-200 code therefore
    means the analysis is not done yet, so an InProgress context entry is
    written instead of returning data.
    """
    body = handle_response(response, ACCEPTABLE_HTTP_CODES)
    if response.status_code == 200:
        return body['result']

    pending_id = body['result_url'].rsplit('/', 1)[-1]
    return_outputs('Analysis is still in progress',
                   {'Intezer.Analysis(val.ID === obj.ID)': {'ID': pending_id,
                                                            'Status': 'InProgress'}})
    return None
def enrich_dbot_and_display_file_analysis_results(result):
    """Publish a finished file analysis: DBot score, File context and markdown.

    ``result`` is the ``result`` object of Intezer's analysis response; keys
    read here: verdict, sha256, analysis_id, sub_verdict, analysis_url and
    (optionally) family_name.
    """
    verdict = result.get('verdict')
    sha256 = result.get('sha256')
    analysis_id = result.get('analysis_id')
    dbot = {
        'Vendor': 'Intezer',
        'Type': 'hash',
        'Indicator': sha256,
        # Verdicts missing from the map fall back to 0 (= unknown reputation).
        'Score': dbot_score_by_verdict.get(verdict, 0)
    }
    # NOTE(review): `file` shadows the builtin; kept as-is to avoid touching behavior.
    file = {'SHA256': sha256, 'Metadata': result, 'ExistsInIntezer': True}
    if verdict == 'malicious':
        file['Malicious'] = {'Vendor': 'Intezer'}
    # Markdown summary shown in the war room.
    presentable_result = '## Intezer File analysis result\n'
    presentable_result += f' SHA256: {sha256}\n'
    presentable_result += f' Verdict: **{verdict}** ({result["sub_verdict"]})\n'
    if 'family_name' in result:
        presentable_result += f'Family: **{result["family_name"]}**\n'
    presentable_result += f'[Analysis Link]({result["analysis_url"]})\n'
    demisto.results({
        'Type': entryTypes['note'],
        'EntryContext': {
            outputPaths['dbotscore']: dbot,
            outputPaths['file']: file,
            # Marks the tracked analysis as finished in the integration context.
            'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}},
        'HumanReadable': presentable_result,
        'ContentsFormat': formats['json'],
        'Contents': result
    })
def enrich_dbot_and_display_endpoint_analysis_results(result, indicator_name=None):
    """Publish a finished endpoint analysis: DBot score, Endpoint context, markdown.

    ``indicator_name`` overrides the hostname used as the DBot indicator;
    it falls back to the computer name reported by Intezer.
    """
    verdict = result['verdict']
    computer_name = result['computer_name']
    analysis_id = result['analysis_id']
    dbot = {
        'Vendor': 'Intezer',
        'Type': 'hostname',
        'Indicator': indicator_name if indicator_name else computer_name,
        # Verdicts missing from the map fall back to 0 (= unknown reputation).
        'Score': dbot_score_by_verdict.get(verdict, 0)
    }
    endpoint = {'Metadata': result}
    # Markdown summary shown in the war room.
    presentable_result = '## Intezer Endpoint analysis result\n'
    presentable_result += f'Host Name: {computer_name}\n'
    presentable_result += f' Verdict: **{verdict}**\n'
    if result.get('families') is not None:
        presentable_result += f'Families: **{result["families"]}**\n'
    presentable_result += f' Scan Time: {result["scan_start_time"]}\n'
    presentable_result += f'[Analysis Link]({result["analysis_url"]})\n'
    ec = {
        'DBotScore': dbot,
        'Endpoint': endpoint,
        # Marks the tracked analysis as finished in the integration context.
        'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
    }
    return_outputs(presentable_result, ec, result)
''' EXECUTION CODE '''
# NOTE(review): the session is created at import time, so authentication
# happens (and may abort via return_error) before any command is dispatched.
try:
    SESSION = get_session()
except Exception as e:
    return_error(str(e))
def main():
    """Dispatch the invoked Demisto command; failures surface via return_error."""
    command_handlers = {
        'test-module': lambda: demisto.results(check_is_available()),
        'intezer-analyze-by-hash': analyze_by_hash_command,
        'intezer-analyze-by-file': analyze_by_uploaded_file_command,
        'intezer-get-latest-report': get_latest_result_command,
        'intezer-get-analysis-result': check_analysis_status_and_get_results_command,
    }
    try:
        handle_proxy()
        handler = command_handlers.get(demisto.command())
        if handler is not None:
            handler()
    except Exception as e:
        return_error(str(e))
# python2 uses __builtin__ python3 uses builtins
# (Demisto executes integration code via exec, so __name__ is the builtins
# module name rather than "__main__" — presumably why both spellings are checked.)
if __name__ == "__builtin__" or __name__ == "builtins":
    main()
| 34.059028 | 118 | 0.672036 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import requests
requests.packages.urllib3.disable_warnings()
API_KEY = demisto.getParam('APIKey')
SERVER_URL = 'https://analyze.intezer.com/api'
API_VERSION = '/v2-0'
BASE_URL = SERVER_URL + API_VERSION
IS_AVAILABLE_URL = 'is-available'
ERROR_PREFIX = 'Error from Intezer:'
ACCEPTABLE_HTTP_CODES = {200, 201, 202}
USE_SSL = not demisto.params().get('insecure', False)
http_status_to_error_massage = {
400: '400 Bad Request - Wrong or invalid parameters',
401: '401 Unauthorized - Wrong or invalid api key',
403: '403 Forbidden - The account is not allowed to preform this task',
404: '404 Not Found - Analysis was not found',
410: '410 Gone - Analysis no longer exists in the service',
500: '500 Internal Server Error - Internal error',
503: '503 Service Unavailable'
}
dbot_score_by_verdict = {
'malicious': 3,
'suspicious': 2,
'trusted': 1,
'neutral': 1,
'no_threats': 1
}
def handle_response(response, acceptable_http_status_codes):
if response.status_code not in acceptable_http_status_codes:
error_msg = http_status_to_error_massage.get(response.status_code, "Failed to perform request")
return_error(f'{ERROR_PREFIX} {error_msg}')
try:
return response.json()
except json.decoder.JSONDecodeError:
return_error(f'Response returned with no data. This might be an issue with Intezer.\nPlease try again later\n'
f'Response content:\n{response.content}')
def get_session():
response = requests.post(BASE_URL + '/get-access-token', json={'api_key': API_KEY}, verify=USE_SSL)
response = handle_response(response, {200})
session = requests.session()
session.headers['Authorization'] = f'Bearer {response["result"]}'
return session
def check_is_available():
url = f'{SERVER_URL}/{IS_AVAILABLE_URL}'
result = SESSION.get(url, verify=USE_SSL)
return 'ok' if result.json()['is_available'] else None
def analyze_by_hash_command():
file_hash = demisto.getArg('file_hash')
response = make_analyze_by_hash_request(file_hash)
handle_analyze_by_hash_response(response, file_hash)
def get_latest_result_command():
file_hash = demisto.getArg('file_hash')
response = make_get_latest_report_request(file_hash)
handle_get_latest_result_response(response, file_hash)
def make_analyze_by_hash_request(file_hash):
data = {'hash': file_hash}
return SESSION.post(BASE_URL + '/analyze-by-hash', json=data, verify=USE_SSL)
def make_get_latest_report_request(file_hash):
return SESSION.get(f'{BASE_URL}/files/{file_hash}', verify=USE_SSL)
def handle_analyze_by_hash_response(response, file_hash):
if response.status_code == 404:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
hr = f'Hash {file_hash} does not exist on Intezer genome database'
ec = {'DBotScore': dbot}
return_outputs(hr, ec)
return
elif response.status_code == 400:
return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
'SHA-1 and MD5 hash formats.\n')
handle_analyze_response(response)
def handle_get_latest_result_response(response, file_hash):
if response.status_code == 404:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
hr = f'Hash {file_hash} does not exist on Intezer genome database'
ec = {'DBotScore': dbot}
return_outputs(hr, ec)
return
elif response.status_code == 400:
return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
'SHA-1 and MD5 hash formats.\n')
analysis_result = response.json()
enrich_dbot_and_display_file_analysis_results(analysis_result['result'])
def analyze_by_uploaded_file_command():
response = make_analyze_by_file_request(demisto.getArg('file_entry_id'))
handle_analyze_response(response)
def make_analyze_by_file_request(file_id):
file_data = demisto.getFilePath(file_id)
with open(file_data['path'], 'rb') as file_to_upload:
files = {'file': (file_data['name'], file_to_upload)}
return SESSION.post(BASE_URL + '/analyze', files=files, verify=USE_SSL)
def handle_analyze_response(response):
response = handle_response(response, ACCEPTABLE_HTTP_CODES)
result_url = response['result_url']
analysis_id = result_url.rsplit('/', 1)[-1]
context_json = {'Intezer.Analysis(obj.ID === val.ID)': {'ID': analysis_id, 'Status': 'Created', 'type': 'File'}}
return_outputs('Analysis created successfully: {}'.format(analysis_id), context_json, response)
def check_analysis_status_and_get_results_command():
analysis_type = demisto.args().get('analysis_type', 'File')
analysis_ids = argToList(demisto.args().get('analysis_id'))
indicator_name = demisto.args().get('indicator_name')
for analysis_id in analysis_ids:
response = make_analysis_status_request(analysis_id, analysis_type)
analysis_result = handle_analysis_result(response)
if analysis_result and analysis_type == 'Endpoint':
enrich_dbot_and_display_endpoint_analysis_results(analysis_result, indicator_name)
elif analysis_result and analysis_type == 'File':
enrich_dbot_and_display_file_analysis_results(analysis_result)
def make_analysis_status_request(analysis_id, analysis_type):
analysis_endpoint = 'endpoint-analyses/' if analysis_type == 'Endpoint' else 'analyses/'
result_url = f'{BASE_URL}/{analysis_endpoint}{analysis_id}'
return SESSION.get(result_url, verify=USE_SSL)
def handle_analysis_result(response):
json_response = handle_response(response, ACCEPTABLE_HTTP_CODES)
if response.status_code != 200:
result_url = json_response['result_url']
analysis_id = result_url.rsplit('/', 1)[-1]
context_json = {'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id,
'Status': 'InProgress'}}
return_outputs('Analysis is still in progress', context_json)
return
return json_response['result']
def enrich_dbot_and_display_file_analysis_results(result):
verdict = result.get('verdict')
sha256 = result.get('sha256')
analysis_id = result.get('analysis_id')
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': sha256,
'Score': dbot_score_by_verdict.get(verdict, 0)
}
file = {'SHA256': sha256, 'Metadata': result, 'ExistsInIntezer': True}
if verdict == 'malicious':
file['Malicious'] = {'Vendor': 'Intezer'}
presentable_result = '## Intezer File analysis result\n'
presentable_result += f' SHA256: {sha256}\n'
presentable_result += f' Verdict: **{verdict}** ({result["sub_verdict"]})\n'
if 'family_name' in result:
presentable_result += f'Family: **{result["family_name"]}**\n'
presentable_result += f'[Analysis Link]({result["analysis_url"]})\n'
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {
outputPaths['dbotscore']: dbot,
outputPaths['file']: file,
'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}},
'HumanReadable': presentable_result,
'ContentsFormat': formats['json'],
'Contents': result
})
def enrich_dbot_and_display_endpoint_analysis_results(result, indicator_name=None):
verdict = result['verdict']
computer_name = result['computer_name']
analysis_id = result['analysis_id']
dbot = {
'Vendor': 'Intezer',
'Type': 'hostname',
'Indicator': indicator_name if indicator_name else computer_name,
'Score': dbot_score_by_verdict.get(verdict, 0)
}
endpoint = {'Metadata': result}
presentable_result = '## Intezer Endpoint analysis result\n'
presentable_result += f'Host Name: {computer_name}\n'
presentable_result += f' Verdict: **{verdict}**\n'
if result.get('families') is not None:
presentable_result += f'Families: **{result["families"]}**\n'
presentable_result += f' Scan Time: {result["scan_start_time"]}\n'
presentable_result += f'[Analysis Link]({result["analysis_url"]})\n'
ec = {
'DBotScore': dbot,
'Endpoint': endpoint,
'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
}
return_outputs(presentable_result, ec, result)
try:
SESSION = get_session()
except Exception as e:
return_error(str(e))
def main():
try:
handle_proxy()
if demisto.command() == 'test-module':
demisto.results(check_is_available())
elif demisto.command() == 'intezer-analyze-by-hash':
analyze_by_hash_command()
elif demisto.command() == 'intezer-analyze-by-file':
analyze_by_uploaded_file_command()
elif demisto.command() == 'intezer-get-latest-report':
get_latest_result_command()
elif demisto.command() == 'intezer-get-analysis-result':
check_analysis_status_and_get_results_command()
except Exception as e:
return_error(str(e))
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| true | true |
7901d6e75c1f1c7e0b513ee5acacf722fd8de2fb | 368 | py | Python | tests/test_hangman.py | julia-shenshina/hangman | ecb6101932710e0d66c40f78c46a31fa8b257a63 | [
"MIT"
] | null | null | null | tests/test_hangman.py | julia-shenshina/hangman | ecb6101932710e0d66c40f78c46a31fa8b257a63 | [
"MIT"
] | null | null | null | tests/test_hangman.py | julia-shenshina/hangman | ecb6101932710e0d66c40f78c46a31fa8b257a63 | [
"MIT"
] | null | null | null | def test_positive_guess(patched_hangman):
decision = patched_hangman.guess("e")
assert decision is True
def test_negative_guess(patched_hangman):
    """A letter absent from the fixture's word yields a False verdict."""
    assert patched_hangman.guess("r") is False
def test_none_guess(patched_hangman):
    """Repeating an already-guessed letter yields None."""
    patched_hangman.guess("e")
    repeat_verdict = patched_hangman.guess("e")
    assert repeat_verdict is None
| 24.533333 | 41 | 0.75 | def test_positive_guess(patched_hangman):
decision = patched_hangman.guess("e")
assert decision is True
def test_negative_guess(patched_hangman):
decision = patched_hangman.guess("r")
assert decision is False
def test_none_guess(patched_hangman):
patched_hangman.guess("e")
decision = patched_hangman.guess("e")
assert decision is None
| true | true |
7901d80b9d016c3aae85d03ac9eafc1f59895465 | 890 | py | Python | posts/forms.py | BastaAditya/Quiver | a6d29ec67341bd9cdb8a193ce1efcfd699aa4a96 | [
"MIT"
] | 1 | 2020-07-22T18:51:16.000Z | 2020-07-22T18:51:16.000Z | posts/forms.py | BastaAditya/Quiver | a6d29ec67341bd9cdb8a193ce1efcfd699aa4a96 | [
"MIT"
] | 18 | 2020-02-13T23:04:20.000Z | 2021-03-31T19:25:43.000Z | posts/forms.py | BastaAditya/Quiver | a6d29ec67341bd9cdb8a193ce1efcfd699aa4a96 | [
"MIT"
] | 5 | 2020-06-11T17:28:20.000Z | 2021-12-20T18:36:04.000Z | from django.forms import ModelForm
from .models import Post, Comment
from loginsignup.utils import getBeaverInstance
class PostForm(ModelForm):
    """Form for creating a Post; likes, timestamp and creator are server-side."""

    class Meta:
        model = Post
        exclude = ["likes", "posted_on", "post_creator"]

    def checkPost(self, request):
        """Validate and persist the post, stamping the requesting beaver as
        creator.  Returns True on success, False when validation fails."""
        if not self.is_valid():
            return False
        new_post = self.save(commit=False)
        new_post.post_creator = getBeaverInstance(request)
        new_post.save()
        return True
class CommentForm(ModelForm):
    """Form for adding a Comment; only the comment text is user-supplied."""

    class Meta:
        model = Comment
        fields = ["comment"]

    def checkComment(self, request, post):
        """Validate and persist the comment against *post*, stamping the
        requesting beaver as creator.  Returns True on success."""
        if not self.is_valid():
            return False
        new_comment = self.save(commit=False)
        new_comment.comment_creator = getBeaverInstance(request)
        new_comment.post = post
        new_comment.save()
        return True
| 26.176471 | 64 | 0.602247 | from django.forms import ModelForm
from .models import Post, Comment
from loginsignup.utils import getBeaverInstance
class PostForm(ModelForm):
class Meta:
model = Post
exclude = ["likes", "posted_on", "post_creator"]
def checkPost(self, request):
if self.is_valid():
post = self.save(commit=False)
beaver = getBeaverInstance(request)
post.post_creator = beaver
post.save()
return True
return False
class CommentForm(ModelForm):
class Meta:
model = Comment
fields = ["comment"]
def checkComment(self, request, post):
if self.is_valid():
comment = self.save(commit=False)
comment.comment_creator = getBeaverInstance(request)
comment.post = post
comment.save()
return True
return False
| true | true |
7901d96685e00b053a75a199f71593efd3026547 | 856 | py | Python | python_know/normal/demo9_7.py | xuguoliang1995/leetCodePython | 9e4a96efd21506e8b0443a52be16c1280643b48c | [
"Apache-2.0"
] | null | null | null | python_know/normal/demo9_7.py | xuguoliang1995/leetCodePython | 9e4a96efd21506e8b0443a52be16c1280643b48c | [
"Apache-2.0"
] | null | null | null | python_know/normal/demo9_7.py | xuguoliang1995/leetCodePython | 9e4a96efd21506e8b0443a52be16c1280643b48c | [
"Apache-2.0"
] | null | null | null | # 右侧加法和原处加法: __radd__和__iadd__
"""
__add__ alone does not make an instance usable on the right-hand side of +;
write __radd__ for that.  Python calls __radd__ only when the right operand
of + is a class instance and the left operand is not; in every other case
the left operand's __add__ handles the expression.
"""
class Commuter:
    # Demonstrates right-side addition: 'instance + x' uses __add__,
    # 'x + instance' falls back to __radd__.
    def __init__(self, val):
        self.val = val
    def __add__(self, other):
        # Without the isinstance test, adding two instances (where __add__
        # triggers __radd__) would leave a Commuter whose val is another Commuter.
        if isinstance(other, Commuter): other = other.val
        print("add")
        return self.val + other
    def __radd__(self, other):
        print("radd")
        # Note: operand order is the mirror image of __add__.
        return other + self.val
# In-place addition: write __iadd__, or __add__, which is used when the former is absent.
class Number:
    def __init__(self, val):
        self.val = val
    def __add__(self, other):
        # '+=' falls back here because no __iadd__ is defined; a new Number is built.
        return Number(self.val + other)
x = Commuter(89)
y = Commuter(99)
print(x + 1)  # left operand is the instance -> __add__: prints "add", then 90
print(x + y)  # x.__add__(y) unwraps other to 99: prints "add", then 188
X = Number(5)
X += 1  # no __iadd__, so X = X.__add__(1) -> a fresh Number(6)
X += 1  # -> Number(7); the final print shows 7
print(X.val) | 19.454545 | 57 | 0.653037 |
class Commuter:
def __init__(self, val):
self.val = val
def __add__(self, other):
if isinstance(other, Commuter): other = other.val
print("add")
return self.val + other
def __radd__(self, other):
print("radd")
return other + self.val
class Number:
def __init__(self, val):
self.val = val
def __add__(self, other):
return Number(self.val + other)
x = Commuter(89)
y = Commuter(99)
print(x + 1)
print(x + y)
X = Number(5)
X += 1
X += 1
print(X.val) | true | true |
7901da3a7dd2558688492c5904687578e9214782 | 802 | py | Python | tests/plugins/task/test_nulltask.py | codito/pomito | aa936982737e5ffe8ff808197d0896ee6e5239a8 | [
"MIT"
] | 1 | 2019-09-10T16:06:50.000Z | 2019-09-10T16:06:50.000Z | tests/plugins/task/test_nulltask.py | codito/pomito | aa936982737e5ffe8ff808197d0896ee6e5239a8 | [
"MIT"
] | 3 | 2017-09-11T14:11:42.000Z | 2017-09-14T02:14:22.000Z | tests/plugins/task/test_nulltask.py | codito/pomito | aa936982737e5ffe8ff808197d0896ee6e5239a8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for NullTask plugin."""
import unittest
from pomito.plugins.task import nulltask, TaskPlugin
class NullTaskTests(unittest.TestCase):
    """Tests for NullTask."""

    def setUp(self):
        # A NullTask needs no real plugin manager, hence None.
        self.task = nulltask.NullTask(None)

    def test_nulltask_is_a_task_plugin(self):
        # unittest assertion methods (unlike bare `assert` statements) are not
        # stripped when the suite runs under `python -O`, and they produce
        # descriptive failure messages.
        self.assertTrue(issubclass(nulltask.NullTask, TaskPlugin))

    def test_nulltask_initialize_should_not_throw(self):
        # Passes as long as no exception escapes.
        self.task.initialize()

    def test_nulltask_get_tasks_returns_empty_list(self):
        self.assertEqual(len(self.task.get_tasks()), 0)

    def test_nulltask_get_tasks_by_filter_returns_empty_list(self):
        self.assertEqual(len(self.task.get_tasks_by_filter("")), 0)

    def test_nulltask_get_task_by_id_returns_none(self):
        self.assertIsNone(self.task.get_task_by_id(1))
| 27.655172 | 67 | 0.724439 |
import unittest
from pomito.plugins.task import nulltask, TaskPlugin
class NullTaskTests(unittest.TestCase):
def setUp(self):
self.task = nulltask.NullTask(None)
def test_nulltask_is_a_task_plugin(self):
assert issubclass(nulltask.NullTask, TaskPlugin)
def test_nulltask_initialize_should_not_throw(self):
self.task.initialize()
def test_nulltask_get_tasks_returns_empty_list(self):
assert len(self.task.get_tasks()) == 0
def test_nulltask_get_tasks_by_filter_returns_empty_list(self):
assert len(self.task.get_tasks_by_filter("")) == 0
def test_nulltask_get_task_by_id_returns_none(self):
assert self.task.get_task_by_id(1) is None
| true | true |
7901db5ea6b000538c04931f432b84b72e194629 | 2,423 | py | Python | scripts/translations/base_string_script.py | scanshop/FirebaseUI-Android | bb90590a83dfe100052a77335fbda3b508c9df99 | [
"Apache-2.0"
] | 4,975 | 2015-07-20T20:04:54.000Z | 2022-03-31T05:18:32.000Z | scripts/translations/base_string_script.py | scanshop/FirebaseUI-Android | bb90590a83dfe100052a77335fbda3b508c9df99 | [
"Apache-2.0"
] | 1,807 | 2015-08-20T04:09:59.000Z | 2022-03-30T23:39:08.000Z | scripts/translations/base_string_script.py | scanshop/FirebaseUI-Android | bb90590a83dfe100052a77335fbda3b508c9df99 | [
"Apache-2.0"
] | 2,392 | 2015-09-02T07:04:53.000Z | 2022-03-31T05:18:34.000Z | # coding=UTF-8
import os
import re
import sys
class BaseStringScript:
    """Line-oriented scanner for Android-style string resource files.

    Every <string> and <plurals> element is collected and fed through
    ProcessTag(); the (possibly rewritten) file is then written back in place.
    Subclasses override ProcessTag() to transform individual tags.
    """

    # Parser states
    STATE_SEARCHING = 'STATE_SEARCHING'
    STATE_IN_STR = 'STATE_IN_STR'
    STATE_IN_PLUR = 'STATE_IN_PLUR'

    # Tag types
    TYPE_STR = 'TYPE_STR'
    TYPE_PLUR = 'TYPE_PLUR'

    # Tag start/end markers
    START_STR = '<string'
    END_STR = '</string'
    START_PLUR = '<plurals'
    END_PLUR = '</plurals'

    def ProcessTag(self, line, type):
        """
        Process a single string tag.
        :param line: an array of lines making a single string tag.
        :param type: the tag type, such as TYPE_STR or TYPE_PLUR
        :return: an array of lines representing the processed tag.
        """
        # Default implementation is a pass-through.
        return line

    def ProcessFile(self, file_name):
        """
        Process and write a file of string resources.
        :param file_name: path to the file to process.
        :return: None.
        """
        with open(file_name, 'r') as source:
            raw_lines = source.read().split('\n')

        output_lines = []
        tag_lines = []
        state = self.STATE_SEARCHING

        for raw_line in raw_lines:
            # Outside any tag: either open a tag or copy the line through.
            if state == self.STATE_SEARCHING:
                if self.START_STR in raw_line:
                    state = self.STATE_IN_STR
                elif self.START_PLUR in raw_line:
                    state = self.STATE_IN_PLUR
                else:
                    output_lines.append(raw_line)

            # Inside a tag (possibly entered this very iteration): accumulate
            # the line and detect the closing marker.
            finished_type = None
            if state == self.STATE_IN_STR:
                tag_lines.append(raw_line)
                if self.END_STR in raw_line:
                    finished_type = self.TYPE_STR
            elif state == self.STATE_IN_PLUR:
                tag_lines.append(raw_line)
                if self.END_PLUR in raw_line:
                    finished_type = self.TYPE_PLUR

            # A complete tag: transform it and resume searching.
            if finished_type:
                output_lines += self.ProcessTag(tag_lines, finished_type)
                state = self.STATE_SEARCHING
                tag_lines = []

        self.WriteFile(file_name, '\n'.join(output_lines))

    def WriteFile(self, file_name, file_contents):
        """
        Overwrite the contents of a file.
        :param file_name: path to the file to write.
        :param file_contents: string containing new file contents.
        :return: None
        """
        with open(file_name, 'w') as target:
            target.write(file_contents)
| 24.72449 | 66 | 0.626083 |
import os
import re
import sys
class BaseStringScript:
STATE_SEARCHING='STATE_SEARCHING'
STATE_IN_STR='STATE_IN_STR'
STATE_IN_PLUR='STATE_IN_PLUR'
TYPE_STR='TYPE_STR'
TYPE_PLUR='TYPE_PLUR'
START_STR = '<string'
END_STR = '</string'
START_PLUR='<plurals'
END_PLUR = '</plurals'
def ProcessTag(self, line, type):
return line
def ProcessFile(self, file_name):
lines = []
state = self.STATE_SEARCHING
curr_tag = []
pending_process_type = None
with open(file_name, 'r') as myfile:
data = myfile.read()
for line in data.split('\n'):
if state == self.STATE_SEARCHING:
if self.START_STR in line:
state = self.STATE_IN_STR
elif self.START_PLUR in line:
state = self.STATE_IN_PLUR
else:
lines.append(line)
if state == self.STATE_IN_STR:
curr_tag.append(line)
if self.END_STR in line:
pending_process_type = self.TYPE_STR
if state == self.STATE_IN_PLUR:
curr_tag.append(line)
if self.END_PLUR in line:
pending_process_type = self.TYPE_PLUR
if pending_process_type:
lines += self.ProcessTag(curr_tag, pending_process_type)
pending_process_type = None
state = self.STATE_SEARCHING
curr_tag = []
self.WriteFile(file_name, '\n'.join(lines))
def WriteFile(self, file_name, file_contents):
with open(file_name, 'w') as myfile:
myfile.write(file_contents)
| true | true |
7901dce134864cb1364aac3ebffdeef195dbae26 | 1,855 | py | Python | opencv/pycv_tutorial/color_space.py | OYukiya/PyIntroduction | 4e799cacb858823ee7fcae9e38e2279c88daa7a7 | [
"MIT"
] | 31 | 2017-07-12T08:21:30.000Z | 2021-05-20T04:07:32.000Z | opencv/pycv_tutorial/color_space.py | szkny/PyIntroduction | 433142b25de36552867b209649b17113ca2e11c6 | [
"MIT"
] | null | null | null | opencv/pycv_tutorial/color_space.py | szkny/PyIntroduction | 433142b25de36552867b209649b17113ca2e11c6 | [
"MIT"
] | 10 | 2017-01-24T23:34:35.000Z | 2021-04-25T11:55:31.000Z |
# -*- coding: utf-8 -*-
## @package pycv_tutorial.color_space
#
# 画像処理: 色空間の変換
# @author tody
# @date 2016/06/27
import cv2
import matplotlib.pyplot as plt
# Display an image in RGB
def showImageRGB(image_file):
    """Load *image_file* with OpenCV (BGR channel order), convert to RGB and show it."""
    image_bgr = cv2.imread(image_file)
    image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
    plt.title('RGB')
    plt.imshow(image_rgb)
    plt.axis('off')
    plt.show()
# Display an image in grayscale
def showImageGray(image_file):
    """Load *image_file* as grayscale (imread flag 0) and show it."""
    image_gray = cv2.imread(image_file, 0)
    plt.title('Gray')
    plt.gray()
    plt.imshow(image_gray)
    plt.axis('off')
    plt.show()
# Display the HSV channels
def showImageHSV(image_file):
    """Split *image_file* into Hue/Saturation/Value and show the three
    channels side by side as grayscale subplots."""
    image_bgr = cv2.imread(image_file)
    image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
    # Channel index in the HSV array matches the subplot position.
    for index, channel_title in enumerate(('Hue', 'Saturation', 'Value')):
        plt.subplot(1, 3, index + 1)
        plt.title(channel_title)
        plt.gray()
        plt.imshow(image_hsv[:, :, index])
        plt.axis('off')
    plt.show()
# Display the L*a*b* channels
def showImageLab(image_file):
    """Split *image_file* into L/a/b and show the three channels side by side
    as grayscale subplots."""
    image_bgr = cv2.imread(image_file)
    image_Lab = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2LAB)
    # Channel index in the Lab array matches the subplot position.
    for index, channel_title in enumerate(('L', 'a', 'b')):
        plt.subplot(1, 3, index + 1)
        plt.title(channel_title)
        plt.gray()
        plt.imshow(image_Lab[:, :, index])
        plt.axis('off')
    plt.show()
if __name__ == '__main__':
    # Demo: run each color-space visualisation on the sample image in turn.
    image_file = "images/peppers.png"
    showImageRGB(image_file)
    showImageGray(image_file)
    showImageHSV(image_file)
showImageLab(image_file) | 19.123711 | 58 | 0.60593 |
yplot as plt
def showImageRGB(image_file):
image_bgr = cv2.imread(image_file)
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
plt.title('RGB')
plt.imshow(image_rgb)
plt.axis('off')
plt.show()
def showImageGray(image_file):
image_gray = cv2.imread(image_file, 0)
plt.title('Gray')
plt.gray()
plt.imshow(image_gray)
plt.axis('off')
plt.show()
def showImageHSV(image_file):
image_bgr = cv2.imread(image_file)
image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)
H = image_hsv[:, :, 0]
S = image_hsv[:, :, 1]
V = image_hsv[:, :, 2]
plt.subplot(1, 3, 1)
plt.title('Hue')
plt.gray()
plt.imshow(H)
plt.axis('off')
plt.subplot(1, 3, 2)
plt.title('Saturation')
plt.gray()
plt.imshow(S)
plt.axis('off')
plt.subplot(1, 3, 3)
plt.title('Value')
plt.gray()
plt.imshow(V)
plt.axis('off')
plt.show()
def showImageLab(image_file):
image_bgr = cv2.imread(image_file)
image_Lab = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2LAB)
L = image_Lab[:, :, 0]
a = image_Lab[:, :, 1]
b = image_Lab[:, :, 2]
plt.subplot(1, 3, 1)
plt.title('L')
plt.gray()
plt.imshow(L)
plt.axis('off')
plt.subplot(1, 3, 2)
plt.title('a')
plt.gray()
plt.imshow(a)
plt.axis('off')
plt.subplot(1, 3, 3)
plt.title('b')
plt.gray()
plt.imshow(b)
plt.axis('off')
plt.show()
if __name__ == '__main__':
image_file = "images/peppers.png"
showImageRGB(image_file)
showImageGray(image_file)
showImageHSV(image_file)
showImageLab(image_file) | true | true |
7901dd2dffe1d105536f36bbc2799fcafdabae8e | 3,493 | py | Python | scripts/sample_quests.py | JohnnySun8/TextWorld | 9a54e9d642f7605a0f3ebba3285cdd04047975e2 | [
"MIT"
] | 307 | 2019-05-07T01:51:55.000Z | 2022-03-31T19:35:47.000Z | scripts/sample_quests.py | JohnnySun8/TextWorld | 9a54e9d642f7605a0f3ebba3285cdd04047975e2 | [
"MIT"
] | 84 | 2019-05-08T14:24:36.000Z | 2022-03-31T14:35:16.000Z | scripts/sample_quests.py | JohnnySun8/TextWorld | 9a54e9d642f7605a0f3ebba3285cdd04047975e2 | [
"MIT"
] | 70 | 2019-05-21T21:36:56.000Z | 2022-02-28T12:04:27.000Z | #!/usr/bin/env python
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import os
import argparse
from os.path import join as pjoin
import numpy as np
import networkx as nx
from textworld.render import visualize
from textworld.generator import Game
from textworld.generator.inform7 import Inform7Game
from textworld.generator.chaining import ChainingOptions
from textworld.generator.chaining import sample_quest
from textworld.utils import save_graph_to_svg
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("game",
help="Use initial state of the provided game.")
parser.add_argument("--output", default="./",
help="Output folder where to sample the images. Default: %(default)s")
parser.add_argument("--quest-length", type=int, default=5,
help="Minimum nb. of actions required to complete the quest. Default: %(default)s")
parser.add_argument("--quest-breadth", type=int, default=1,
help="Control how non-linear a quest can be.")
parser.add_argument("--nb-quests", type=int, default=10,
help="Number of quests to sample. Default: %(default)s")
parser.add_argument("--seed", type=int,
help="Seed for random generator. Default: always different.")
parser.add_argument("-v", "--verbose", action="store_true",
help="Print more information.")
return parser.parse_args()
def build_tree_from_chains(chains, inform7):
G = nx.DiGraph()
root = "root"
labels = {}
for chain in chains:
commands = [root] + inform7.gen_commands_from_actions(chain.actions)
G.add_nodes_from(commands)
G.add_edges_from(zip(commands[:-1], commands[1:]))
labels.update(dict(zip(commands, commands)))
return G, labels
def print_chains(chains, inform7):
for i, chain in enumerate(chains):
commands = inform7.gen_commands_from_actions(chain.actions)
print("{:2d}. {}".format(i + 1, " > ".join(commands)))
def main():
args = parse_args()
# Load game for which to sample quests for.
game = Game.load(args.game.replace(".ulx", ".json"))
options = ChainingOptions()
options.backward = False
options.max_depth = args.quest_length
options.max_breadth = args.quest_breadth
options.rules_per_depth = {}
options.create_variables = False
options.rng = np.random.RandomState(args.seed)
# Sample quests.
chains = []
for i in range(args.nb_quests):
chain = sample_quest(game.world.state, options)
chains.append(chain)
inform7 = Inform7Game(game)
print_chains(chains, inform7)
# Convert chains to networkx graph/tree
filename_world = pjoin(args.output, "sample_world.png")
filename_tree = pjoin(args.output, "sample_tree.svg")
filename_graph = pjoin(args.output, "sample_graph.svg")
G, labels = build_tree_from_chains(chains, inform7)
if len(G) > 0:
image = visualize(game)
image.save(filename_world)
tree = nx.bfs_tree(G, "root")
save_graph_to_svg(tree, labels, filename_tree)
save_graph_to_svg(G, labels, filename_graph)
else:
try:
os.remove(filename_world)
os.remove(filename_tree)
os.remove(filename_graph)
except OSError:
pass
if __name__ == "__main__":
main()
| 32.95283 | 107 | 0.658746 |
import os
import argparse
from os.path import join as pjoin
import numpy as np
import networkx as nx
from textworld.render import visualize
from textworld.generator import Game
from textworld.generator.inform7 import Inform7Game
from textworld.generator.chaining import ChainingOptions
from textworld.generator.chaining import sample_quest
from textworld.utils import save_graph_to_svg
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("game",
help="Use initial state of the provided game.")
parser.add_argument("--output", default="./",
help="Output folder where to sample the images. Default: %(default)s")
parser.add_argument("--quest-length", type=int, default=5,
help="Minimum nb. of actions required to complete the quest. Default: %(default)s")
parser.add_argument("--quest-breadth", type=int, default=1,
help="Control how non-linear a quest can be.")
parser.add_argument("--nb-quests", type=int, default=10,
help="Number of quests to sample. Default: %(default)s")
parser.add_argument("--seed", type=int,
help="Seed for random generator. Default: always different.")
parser.add_argument("-v", "--verbose", action="store_true",
help="Print more information.")
return parser.parse_args()
def build_tree_from_chains(chains, inform7):
G = nx.DiGraph()
root = "root"
labels = {}
for chain in chains:
commands = [root] + inform7.gen_commands_from_actions(chain.actions)
G.add_nodes_from(commands)
G.add_edges_from(zip(commands[:-1], commands[1:]))
labels.update(dict(zip(commands, commands)))
return G, labels
def print_chains(chains, inform7):
for i, chain in enumerate(chains):
commands = inform7.gen_commands_from_actions(chain.actions)
print("{:2d}. {}".format(i + 1, " > ".join(commands)))
def main():
args = parse_args()
game = Game.load(args.game.replace(".ulx", ".json"))
options = ChainingOptions()
options.backward = False
options.max_depth = args.quest_length
options.max_breadth = args.quest_breadth
options.rules_per_depth = {}
options.create_variables = False
options.rng = np.random.RandomState(args.seed)
chains = []
for i in range(args.nb_quests):
chain = sample_quest(game.world.state, options)
chains.append(chain)
inform7 = Inform7Game(game)
print_chains(chains, inform7)
filename_world = pjoin(args.output, "sample_world.png")
filename_tree = pjoin(args.output, "sample_tree.svg")
filename_graph = pjoin(args.output, "sample_graph.svg")
G, labels = build_tree_from_chains(chains, inform7)
if len(G) > 0:
image = visualize(game)
image.save(filename_world)
tree = nx.bfs_tree(G, "root")
save_graph_to_svg(tree, labels, filename_tree)
save_graph_to_svg(G, labels, filename_graph)
else:
try:
os.remove(filename_world)
os.remove(filename_tree)
os.remove(filename_graph)
except OSError:
pass
if __name__ == "__main__":
main()
| true | true |
7901dd5186e46232ff6fef91c7950bde9a4ca0c1 | 2,374 | py | Python | torch_cluster/fps.py | zuru/pytorch_cluster | 442e8d9c8cec0c7621966dc45f9f7dd151209044 | [
"MIT"
] | 522 | 2018-01-13T13:20:39.000Z | 2022-03-30T08:59:13.000Z | torch_cluster/fps.py | zuru/pytorch_cluster | 442e8d9c8cec0c7621966dc45f9f7dd151209044 | [
"MIT"
] | 116 | 2018-04-09T11:57:10.000Z | 2022-03-28T07:56:26.000Z | torch_cluster/fps.py | zuru/pytorch_cluster | 442e8d9c8cec0c7621966dc45f9f7dd151209044 | [
"MIT"
] | 104 | 2018-02-11T13:57:09.000Z | 2022-03-22T11:24:15.000Z | from typing import Optional
import torch
from torch import Tensor
@torch.jit._overload # noqa
def fps(src, batch=None, ratio=None, random_start=True): # noqa
# type: (Tensor, Optional[Tensor], Optional[float], bool) -> Tensor
pass # pragma: no cover
@torch.jit._overload # noqa
def fps(src, batch=None, ratio=None, random_start=True): # noqa
# type: (Tensor, Optional[Tensor], Optional[Tensor], bool) -> Tensor
pass # pragma: no cover
def fps(src: torch.Tensor, batch=None, ratio=None, random_start=True): # noqa
r""""A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature
Learning on Point Sets in a Metric Space"
<https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the
most distant point with regard to the rest points.
Args:
src (Tensor): Point feature matrix
:math:`\mathbf{X} \in \mathbb{R}^{N \times F}`.
batch (LongTensor, optional): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example. (default: :obj:`None`)
ratio (float or Tensor, optional): Sampling ratio.
(default: :obj:`0.5`)
random_start (bool, optional): If set to :obj:`False`, use the first
node in :math:`\mathbf{X}` as starting node. (default: obj:`True`)
:rtype: :class:`LongTensor`
.. code-block:: python
import torch
from torch_cluster import fps
src = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
batch = torch.tensor([0, 0, 0, 0])
index = fps(src, batch, ratio=0.5)
"""
r: Optional[Tensor] = None
if ratio is None:
r = torch.tensor(0.5, dtype=src.dtype, device=src.device)
elif isinstance(ratio, float):
r = torch.tensor(ratio, dtype=src.dtype, device=src.device)
else:
r = ratio
assert r is not None
if batch is not None:
assert src.size(0) == batch.numel()
batch_size = int(batch.max()) + 1
deg = src.new_zeros(batch_size, dtype=torch.long)
deg.scatter_add_(0, batch, torch.ones_like(batch))
ptr = deg.new_zeros(batch_size + 1)
torch.cumsum(deg, 0, out=ptr[1:])
else:
ptr = torch.tensor([0, src.size(0)], device=src.device)
return torch.ops.torch_cluster.fps(src, ptr, r, random_start)
| 33.43662 | 78 | 0.617102 | from typing import Optional
import torch
from torch import Tensor
@torch.jit._overload
def fps(src, batch=None, ratio=None, random_start=True):
pass
@torch.jit._overload
def fps(src, batch=None, ratio=None, random_start=True):
pass
def fps(src: torch.Tensor, batch=None, ratio=None, random_start=True):
r: Optional[Tensor] = None
if ratio is None:
r = torch.tensor(0.5, dtype=src.dtype, device=src.device)
elif isinstance(ratio, float):
r = torch.tensor(ratio, dtype=src.dtype, device=src.device)
else:
r = ratio
assert r is not None
if batch is not None:
assert src.size(0) == batch.numel()
batch_size = int(batch.max()) + 1
deg = src.new_zeros(batch_size, dtype=torch.long)
deg.scatter_add_(0, batch, torch.ones_like(batch))
ptr = deg.new_zeros(batch_size + 1)
torch.cumsum(deg, 0, out=ptr[1:])
else:
ptr = torch.tensor([0, src.size(0)], device=src.device)
return torch.ops.torch_cluster.fps(src, ptr, r, random_start)
| true | true |
7901dde09c5e17fb0a0f128f94e07d7d1cde44cf | 2,899 | py | Python | vizier/api/client/cli/command.py | VizierDB/web-api-async | e99f43df3df80ad5647f57d805c339257336ac73 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-10-21T03:01:39.000Z | 2020-06-05T01:43:00.000Z | vizier/api/client/cli/command.py | VizierDB/web-api-async | e99f43df3df80ad5647f57d805c339257336ac73 | [
"ECL-2.0",
"Apache-2.0"
] | 56 | 2019-07-12T21:16:03.000Z | 2020-11-06T23:29:22.000Z | vizier/api/client/cli/command.py | VizierDB/web-api-async | e99f43df3df80ad5647f57d805c339257336ac73 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-02-07T19:56:55.000Z | 2020-08-07T11:17:51.000Z | # Copyright (C) 2017-2019 New York University,
# University at Buffalo,
# Illinois Institute of Technology.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for interpreter commands. Each command has to implement two
methods:
- eval(list(string)): Given a list of tokens check whether the tokens reference
the given command. If yes, evaluate the command and return True. Otherwise,
return False.
- help: Print a simple help statement
"""
from abc import abstractmethod
from typing import List
class Command(object):
"""Abstract class for interpreter commands."""
@abstractmethod
def eval(self, tokens: List[str]) -> bool:
"""If the given tokens sequence matches the given command execute it
and return True. Otherwise, return False.
Parameters
----------
tokens: list(string)
List of tokens in the command line
Returns
-------
bool
"""
raise NotImplementedError()
@abstractmethod
def help(self) -> None:
"""Print a simple help statement for the command."""
raise NotImplementedError()
def output(self, rows):
"""Output the given rows in tabular format. Each rows is a list of
string values. All rows are expected to have the sam elength. The first
row is the table header.
Parameters
----------
rows: list(string)
List of rows in the table
"""
# Determine the longest value for each column.
columns = [0] * len(rows[0])
for row in rows:
for col in range(len(columns)):
count = len(row[col])
if count > columns[col]:
columns[col] = count
# Create format string
format = None
divider = list()
for col_len in columns:
f = '%-' + str(col_len) + 's'
if format is None:
format = f
else:
format += ' | ' + f
if len(divider) in [0, len(columns) - 1]:
i = 1
else:
i = 2
divider.append('-' * (col_len + i))
# Print fomrated rows
print(format % tuple(rows[0]))
print('|'.join(divider))
for row in rows[1:]:
print(format % tuple(row))
| 32.943182 | 79 | 0.588134 |
from abc import abstractmethod
from typing import List
class Command(object):
@abstractmethod
def eval(self, tokens: List[str]) -> bool:
raise NotImplementedError()
@abstractmethod
def help(self) -> None:
raise NotImplementedError()
def output(self, rows):
columns = [0] * len(rows[0])
for row in rows:
for col in range(len(columns)):
count = len(row[col])
if count > columns[col]:
columns[col] = count
format = None
divider = list()
for col_len in columns:
f = '%-' + str(col_len) + 's'
if format is None:
format = f
else:
format += ' | ' + f
if len(divider) in [0, len(columns) - 1]:
i = 1
else:
i = 2
divider.append('-' * (col_len + i))
print(format % tuple(rows[0]))
print('|'.join(divider))
for row in rows[1:]:
print(format % tuple(row))
| true | true |
7901e0e13cad3880c17746b9aae87b01553ab090 | 33,981 | py | Python | mmedit/models/inpaintors/vic/common.py | f74066357/Image_Inpainting | 1c89cdadcf420633d29136c8bdcbd280f2546769 | [
"Apache-2.0"
] | null | null | null | mmedit/models/inpaintors/vic/common.py | f74066357/Image_Inpainting | 1c89cdadcf420633d29136c8bdcbd280f2546769 | [
"Apache-2.0"
] | null | null | null | mmedit/models/inpaintors/vic/common.py | f74066357/Image_Inpainting | 1c89cdadcf420633d29136c8bdcbd280f2546769 | [
"Apache-2.0"
] | 2 | 2021-09-07T05:21:18.000Z | 2021-09-17T22:34:54.000Z | """
BasicSR/codes/dataops/common.py (8-Nov-20)
https://github.com/victorca25/BasicSR/blob/dev2/codes/dataops/common.py
"""
import os
import math
import pickle
import random
import numpy as np
import torch
import cv2
import logging
import copy
from torchvision.utils import make_grid
#from dataops.colors import *
from .colors import *
#from dataops.debug import tmp_vis, describe_numpy, describe_tensor
####################
# Files & IO
####################
###################### get image path list ######################
# Recognized image file extensions (lower- and upper-case variants).
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.dng', '.DNG', '.webp', '.npy', '.NPY']


def is_image_file(filename):
    """Return True if `filename` ends with a known image extension."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def _get_paths_from_images(path):
    '''Recursively collect the sorted image file paths under `path`.'''
    assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
    images = [os.path.join(root, name)
              for root, _, files in sorted(os.walk(path))
              for name in sorted(files) if is_image_file(name)]
    assert images, '{:s} has no valid image file'.format(path)
    return images
def _get_paths_from_lmdb(dataroot):
    '''get image path list from lmdb

    Opens the lmdb environment read-only and returns (env, paths) where
    paths are the sorted image keys. The key list is cached on disk
    ('_keys_cache.p') so the database only has to be scanned once.
    '''
    import lmdb
    env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
    keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
    logger = logging.getLogger('base')
    if os.path.isfile(keys_cache_file):
        logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
        # context manager so the cache file handle is not leaked
        with open(keys_cache_file, 'rb') as f:
            keys = pickle.load(f)
    else:
        with env.begin(write=False) as txn:
            logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
            keys = [key.decode('ascii') for key, _ in txn.cursor()]
        # write the cache outside the read transaction; close the file properly
        with open(keys_cache_file, 'wb') as f:
            pickle.dump(keys, f)
    # '.meta' entries hold image metadata (shape), not image data
    paths = sorted([key for key in keys if not key.endswith('.meta')])
    return env, paths
def get_image_paths(data_type, dataroot):
    '''Resolve dataset paths; supports 'lmdb' and 'img' data types.

    Returns (env, paths): env is the lmdb environment (None for plain
    image folders) and paths is the sorted list of image keys/paths.
    When dataroot is None, returns (None, None).
    '''
    if dataroot is None:
        return None, None
    if data_type == 'lmdb':
        return _get_paths_from_lmdb(dataroot)
    if data_type == 'img':
        return None, sorted(_get_paths_from_images(dataroot))
    raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
###################### read images ######################
def _read_lmdb_img(env, path):
    '''Decode one HWC uint8 image stored under key `path` in lmdb `env`.'''
    with env.begin(write=False) as txn:
        raw = txn.get(path.encode('ascii'))
        meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')
    # the '.meta' entry stores the image shape as "H,W,C"
    height, width, channels = (int(v) for v in meta.split(','))
    flat = np.frombuffer(raw, dtype=np.uint8)
    return flat.reshape(height, width, channels)
def read_img(env, path, out_nc=3, fix_channels=True):
    '''
    Reads image using cv2 (rawpy if dng, numpy if npy) or from lmdb by default.

    Arguments:
        env: lmdb environment, or None to read from the filesystem
        path: image path (or lmdb key when env is given)
        out_nc: Desired number of channels
        fix_channels: changes the images to the desired number of channels

    Returns:
        Numpy uint8, HWC, BGR, [0,255] by default

    Raises:
        ValueError: if the image could not be read/decoded.
    '''
    if env is None:  # read from the filesystem
        ext = path[-3:].lower()
        # NOTE: these branches must be mutually exclusive (elif); the
        # previous two-separate-ifs version let the cv2.imread else-branch
        # clobber the rawpy result for .dng files.
        if ext == 'dng':  # camera RAW image
            import rawpy
            with rawpy.imread(path) as raw:
                img = raw.postprocess()
        elif ext == 'npy':  # serialized numpy array
            with open(path, 'rb') as f:
                img = np.load(f)
        else:  # any format cv2 can decode
            img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    else:
        img = _read_lmdb_img(env, path)

    # cv2.imread returns None on failure; fail loudly instead of letting
    # None propagate into downstream processing. (A `not img` check would
    # be ambiguous for numpy arrays, hence the explicit `is None`.)
    if img is None:
        raise ValueError("Failed to read image: {}".format(path))

    if fix_channels:
        img = fix_img_channels(img, out_nc)

    return img
def fix_img_channels(img, out_nc):
    '''
    Adjust the channel count of `img` to the expected `out_nc`.
    '''
    # A bare HW grayscale image is first replicated into three channels.
    if img.ndim == 2:
        img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
    channels = img.shape[2]
    if out_nc == 3 and channels == 4:
        # drop alpha properly (masks out fully transparent pixels)
        img = bgra2rgb(img)
    elif channels > out_nc:
        # too many channels: keep only the first out_nc
        img = img[:, :, :out_nc]
    elif channels == 3 and out_nc == 4:
        # alpha expected: append a solid (fully opaque) alpha plane
        alpha = np.full(img.shape[:-1], 255, dtype=np.uint8)
        img = np.dstack((img, alpha))
    return img
####################
# image processing
# process on numpy image
####################
def bgra2rgb(img):
    '''
    Drop the alpha channel of a BGRA image.

    cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue removing the alpha
    channel: it can leave wrong colors in transparent regions, which can
    harm training. Masking each color channel with the alpha plane first
    avoids that. Images without an alpha channel pass through unchanged.
    '''
    if img.shape[2] != 4:
        return img
    blue, green, red, alpha = cv2.split(img.astype(np.uint8))
    masked = [cv2.bitwise_and(c, c, mask=alpha) for c in (blue, green, red)]
    return cv2.merge(masked)
def channel_convert(in_c, tar_type, img_list):
    '''Convert a list of images among BGR, gray, y and CIELAB color types.

    Note: OpenCV uses inverted (BGR) channel order; if the images were
    loaded with something other than OpenCV, check the channel order
    before relying on these conversions. Unrecognized (in_c, tar_type)
    combinations return the list unchanged.
    '''
    if in_c == 3 and tar_type == 'gray':  # BGR -> single-channel gray
        return [np.expand_dims(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), axis=2)
                for img in img_list]
    if in_c == 3 and tar_type == 'RGB-LAB':  # BGR -> CIELAB
        return [cv2.cvtColor(img, cv2.COLOR_BGR2LAB) for img in img_list]
    if in_c == 3 and tar_type == 'LAB-RGB':  # CIELAB -> BGR
        return [cv2.cvtColor(img, cv2.COLOR_LAB2BGR) for img in img_list]
    if in_c == 3 and tar_type == 'y':  # BGR -> luma (y) channel only
        return [np.expand_dims(bgr2ycbcr(img, only_y=True), axis=2)
                for img in img_list]
    if in_c == 1 and tar_type == 'RGB':  # gray/y -> 3-channel BGR
        return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
    return img_list
def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    src_dtype = img.dtype
    data = img.astype(np.float32)
    # work internally in the [0, 255] range
    if src_dtype != np.uint8:
        data = data * 255.
    if only_y:
        # luma only: weighted sum of R, G, B
        out = np.dot(data, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        ycc_matrix = [[65.481, -37.797, 112.0],
                      [128.553, -74.203, -93.786],
                      [24.966, 112.0, -18.214]]
        out = np.matmul(data, ycc_matrix) / 255.0 + [16, 128, 128]
    # restore the caller's dtype/range convention
    if src_dtype == np.uint8:
        out = out.round()
    else:
        out = out / 255.
    return out.astype(src_dtype)
def bgr2ycbcr(img, only_y=True, separate=False):
    '''bgr version of matlab rgb2ycbcr
    Python opencv library (cv2) cv2.COLOR_BGR2YCrCb has
    different parameters from the MATLAB color conversion.

    only_y: only return Y channel
    separate: if true, returns the channels as separate images
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    src_dtype = img.dtype
    data = img.astype(np.float32)
    # work internally in the [0, 255] range
    if src_dtype != np.uint8:
        data = data * 255.
    if only_y:
        # luma only: weighted sum of B, G, R
        out = np.dot(data, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        ycc_matrix = [[24.966, 112.0, -18.214],
                      [128.553, -74.203, -93.786],
                      [65.481, -37.797, 112.0]]
        out = np.matmul(data, ycc_matrix) / 255.0 + [16, 128, 128]
    # restore the caller's dtype/range convention
    if src_dtype == np.uint8:
        out = out.round()
    else:
        out = out / 255.
    out = out.astype(src_dtype)
    if separate:
        # return Y, Cb, Cr as individual single-channel images
        return out[:, :, 0], out[:, :, 1], out[:, :, 2]
    return out
'''
def ycbcr2rgb_(img, only_y=True):
"""same as matlab ycbcr2rgb
(Note: this implementation is the original from BasicSR, but
appears to be for ycrcb, like cv2)
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
# original (for ycrcb):
rlt = np.matmul(img_ , [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
#alternative conversion:
# xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
# img_[:, :, [1, 2]] -= 128
# rlt = img_.dot(xform.T)
np.putmask(rlt, rlt > 255, 255)
np.putmask(rlt, rlt < 0, 0)
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
def ycbcr2rgb(img, only_y=True):
    '''
    bgr version of matlab ycbcr2rgb
    Python opencv library (cv2) cv2.COLOR_YCrCb2BGR has
    different parameters from the MATLAB color conversion.

    NOTE: `only_y` is accepted for API symmetry with the forward
    conversions but is not used here.
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    src_dtype = img.dtype
    data = img.astype(np.float32)
    # work internally in the [0, 255] range
    if src_dtype != np.uint8:
        data = data * 255.
    # invert the forward (bgr -> ycbcr) transform
    forward = np.array([[24.966, 128.553, 65.481],
                        [112, -74.203, -37.797],
                        [-18.214, -93.786, 112.0]])
    inverse = np.linalg.inv(forward.T) * 255
    offset = np.array([[[16, 128, 128]]])
    out = np.clip(np.dot(data - offset, inverse), 0, 255)
    # restore the caller's dtype/range convention
    if src_dtype == np.uint8:
        out = out.round()
    else:
        out = out / 255.
    return out.astype(src_dtype)
'''
#TODO: TMP RGB version, to check (PIL)
def rgb2ycbcr(img_rgb):
## the range of img_rgb should be (0, 1)
img_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] + 16 / 255.0
img_cb = -0.148 * img_rgb[:, :, 0] - 0.291 * img_rgb[:, :, 1] + 0.439 * img_rgb[:, :, 2] + 128 / 255.0
img_cr = 0.439 * img_rgb[:, :, 0] - 0.368 * img_rgb[:, :, 1] - 0.071 * img_rgb[:, :, 2] + 128 / 255.0
return img_y, img_cb, img_cr
#TODO: TMP RGB version, to check (PIL)
def ycbcr2rgb(img_ycbcr):
## the range of img_ycbcr should be (0, 1)
img_r = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 1.596 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_g = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) - 0.392 * (img_ycbcr[:, :, 1] - 128 / 255.0) - 0.813 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_b = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 2.017 * (img_ycbcr[:, :, 1] - 128 / 255.0)
img_r = img_r[:, :, np.newaxis]
img_g = img_g[:, :, np.newaxis]
img_b = img_b[:, :, np.newaxis]
img_rgb = np.concatenate((img_r, img_g, img_b), 2)
return img_rgb
'''
def modcrop(img_in, scale):
    '''Crop a numpy image (HWC or HW) so both spatial dimensions are
    exact multiples of `scale`. The input array is not modified.'''
    img = np.copy(img_in)
    if img.ndim not in (2, 3):
        raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
    height, width = img.shape[0], img.shape[1]
    new_h = height - height % scale
    new_w = width - width % scale
    # slicing the first two axes leaves any channel axis untouched
    return img[:new_h, :new_w]
#TODO: this should probably be elsewhere (augmentations.py)
def augment(img_list, hflip=True, rot=True):
    '''Apply one random combination of horizontal flip, vertical flip and
    90-degree rotation consistently to every image in `img_list`.

    The flags only enable the corresponding augmentation; whether each
    one is actually applied is decided here with probability 0.5.
    '''
    # draw the random decisions once so all images are transformed alike;
    # short-circuiting keeps the RNG call pattern tied to the flags
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5

    def _transform(image):
        if do_hflip:
            image = np.flip(image, axis=1)
        if do_vflip:
            image = np.flip(image, axis=0)
        if do_rot90:
            image = np.rot90(image, 1)  # 90 degrees counter-clockwise
        return image

    return [_transform(image) for image in img_list]
####################
# Normalization functions
####################
#TODO: Could also automatically detect the possible range with min and max, like in def ssim()
def denorm(x, min_max=(-1.0, 1.0)):
    '''
    Denormalize from the [-1, 1] range back to [0, 1].
    Formula: xi' = (xi - mu) / sigma; e.g. "out = (x + 1.0) / 2.0" maps
    (-1, 1) to (0, 1). For use with a proper activation in the Generator
    output (i.e. tanh).
    '''
    lo, hi = min_max
    rescaled = (x - lo) / (hi - lo)
    if isinstance(x, torch.Tensor):
        return rescaled.clamp(0, 1)
    if isinstance(x, np.ndarray):
        return np.clip(rescaled, 0, 1)
    raise TypeError("Got unexpected object type, expected torch.Tensor or \
            np.ndarray")
def norm(x):
    '''Normalize (z-norm) from the [0, 1] range to [-1, 1].'''
    shifted = (x - 0.5) * 2.0
    if isinstance(x, torch.Tensor):
        return shifted.clamp(-1, 1)
    if isinstance(x, np.ndarray):
        return np.clip(shifted, -1, 1)
    raise TypeError("Got unexpected object type, expected torch.Tensor or \
            np.ndarray")
####################
# np and tensor conversions
####################
#2tensor
def np2tensor(img, bgr2rgb=True, data_range=1., normalize=False, change_range=True, add_batch=True):
    """
    Converts a numpy image array into a Tensor array.

    Parameters:
        img (numpy array): the input image numpy array (HWC)
        bgr2rgb (bool): swap BGR(A) channel order to RGB(A)
        data_range (float): upper bound of the output range (1. -> [0, 1])
        normalize (bool): additionally z-normalize into the [-1, 1] range
        change_range (bool): rescale values from the dtype's native range
            into [0, data_range]
        add_batch (bool): choose if new tensor needs batch dimension added

    Returns:
        torch.Tensor in CHW (or BCHW if add_batch) layout, float32.
    """
    if not isinstance(img, np.ndarray):  # images expected to be uint8 -> 255
        raise TypeError("Got unexpected object type, expected np.ndarray")
    if change_range:
        if np.issubdtype(img.dtype, np.integer):
            # integer images span [0, iinfo.max] (e.g. uint8 -> /255)
            img = img * data_range / np.iinfo(img.dtype).max
        else:
            # float images are assumed to already be in [0, 1]; dividing
            # by finfo.max (~3.4e38 for float32) would zero the image
            # out, so only rescale by data_range.
            img = img * data_range
    # HWC to CHW, numpy to tensor
    img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float()
    if bgr2rgb:
        # BGR(A) to RGB(A) swap, only needed if the image was loaded with
        # OpenCV and has color channels
        if img.shape[0] == 3:  # RGB
            img = bgr_to_rgb(img)
        elif img.shape[0] == 4:  # RGBA
            img = bgra_to_rgba(img)
    if add_batch:
        # add fake batch dimension = 1 (squeeze() would remove it again)
        img.unsqueeze_(0)
    if normalize:
        img = norm(img)
    return img
#2np
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255,
              denormalize=False, change_range=True, imtype=np.uint8):
    """
    Converts a Tensor array into a numpy image array.

    Parameters:
        img (tensor): the input image tensor array
            4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
        rgb2bgr (bool): swap RGB(A) channel order to BGR(A) (OpenCV order)
        remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
        data_range: target value range of the output (255 by default)
        denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
        change_range (bool): scale and clip the values into [0, data_range]
        imtype (type): the desired type of the converted numpy array (np.uint8
            default)
    Output:
        img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    """
    if not isinstance(img, torch.Tensor):
        raise TypeError("Got unexpected object type, expected torch.Tensor")
    n_dim = img.dim()
    img = img.float().cpu()

    if n_dim in (4, 3):
        if n_dim == 4 and remove_batch:
            if img.shape[0] > 1:
                # keep only the first image of the batch
                img = img[0, ...]
            else:
                # remove the fake batch dimension
                img = img.squeeze()
                # squeeze() also drops the channel axis of grayscale
                # images; restore it so the tensor stays CHW
                if len(img.shape) < 3:
                    img = img.unsqueeze(dim=0)
        else:
            # tile the whole batch (BCHW) into one grid image (C B*H B*W)
            n_img = len(img)
            img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)

        if img.shape[0] == 3 and rgb2bgr:  # RGB -> BGR
            img_np = rgb_to_bgr(img).numpy()
        elif img.shape[0] == 4 and rgb2bgr:  # RGBA -> BGRA
            img_np = rgba_to_bgra(img).numpy()
        else:
            img_np = img.numpy()
        img_np = np.transpose(img_np, (1, 2, 0))  # CHW -> HWC
    elif n_dim == 2:
        img_np = img.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))

    if denormalize:
        img_np = denorm(img_np)  # [-1, 1] -> [0, 1] if needed
    if change_range:
        # clip into [0, data_range]; unlike matlab, numpy.uint8() will NOT
        # round by default, so round before casting
        img_np = np.clip(data_range * img_np, 0, data_range).round()
    return img_np.astype(imtype)
####################
# Prepare Images
####################
# https://github.com/sunreef/BlindSR/blob/master/src/image_utils.py
def patchify_tensor(features, patch_size, overlap=10):
    """Split a batch of images (B, C, H, W) into overlapping square patches.

    Patches are laid out on a grid with stride ``patch_size - overlap``; the
    last row/column of patches is shifted inward so every patch lies fully
    inside the image.  Returns a tensor of shape
    (B * n_patches, C, patch_size, patch_size).
    """
    n_batch, _, full_h, full_w = features.size()
    stride = patch_size - overlap

    # Number of grid steps along each axis; one extra, inward-shifted patch
    # is added when the stride grid does not cover the full extent.
    rows = full_h // stride
    cols = full_w // stride
    if rows * stride < full_h:
        rows += 1
    if cols * stride < full_w:
        cols += 1

    tiles = []
    for b in range(n_batch):
        for r in range(rows):
            for c in range(cols):
                top = min(r * stride, full_h - patch_size)
                left = min(c * stride, full_w - patch_size)
                tiles.append(features[b:b + 1, :,
                                      top:top + patch_size,
                                      left:left + patch_size])
    return torch.cat(tiles, 0)
def recompose_tensor(patches, full_height, full_width, overlap=10):
    """Reassemble overlapping patches (from ``patchify_tensor``) into full images.

    Overlapping regions are blended with a linear ramp of width ``overlap`` so
    seams between neighbouring patches are smoothed; the accumulated blending
    weights are divided out at the end, so a patchify -> recompose round trip
    recovers the original image exactly.

    Parameters
    ----------
    patches : Tensor of shape (B * n_patches, C, patch_size, patch_size)
    full_height, full_width : int
        Spatial size of the reconstructed image.
    overlap : int
        Overlap that was used when the patches were extracted.

    Returns
    -------
    Tensor of shape (B, C, full_height, full_width)

    Raises
    ------
    ValueError
        If the number of patches is not a multiple of the patch-grid size
        implied by ``full_height``/``full_width``/``overlap``.
    """
    batch_size, channels, patch_size, _ = patches.size()
    effective_patch_size = patch_size - overlap
    n_patches_height = full_height // effective_patch_size
    n_patches_width = full_width // effective_patch_size
    if n_patches_height * effective_patch_size < full_height:
        n_patches_height += 1
    if n_patches_width * effective_patch_size < full_width:
        n_patches_width += 1

    n_patches = n_patches_height * n_patches_width
    if batch_size % n_patches != 0:
        # Previously this condition was only printed and execution continued,
        # producing a zero- or wrongly-sized result; fail loudly instead.
        raise ValueError(
            "The number of patches provided to the recompose function does "
            "not match the number of patches in each image.")
    final_batch_size = batch_size // n_patches

    # 1D profile ramping 0.1 -> 1.0 over the overlap and flat in the middle;
    # the outer product of two profiles gives the 2D per-patch weights.
    blending_in = torch.linspace(0.1, 1.0, overlap)
    blending_out = torch.linspace(1.0, 0.1, overlap)
    middle_part = torch.ones(patch_size - 2 * overlap)
    blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)

    horizontal_blending = blending_profile[None].repeat(patch_size, 1)
    vertical_blending = blending_profile[:, None].repeat(1, patch_size)
    blending_patch = horizontal_blending * vertical_blending

    # Total blending weight received by every output pixel (the normalizer).
    blending_image = torch.zeros(1, channels, full_height, full_width)
    for h in range(n_patches_height):
        for w in range(n_patches_width):
            patch_start_height = min(h * effective_patch_size, full_height - patch_size)
            patch_start_width = min(w * effective_patch_size, full_width - patch_size)
            blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]

    recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
    if patches.is_cuda:
        blending_patch = blending_patch.cuda()
        blending_image = blending_image.cuda()
        recomposed_tensor = recomposed_tensor.cuda()

    # Accumulate weighted patches in the same grid order patchify used.
    patch_index = 0
    for b in range(final_batch_size):
        for h in range(n_patches_height):
            for w in range(n_patches_width):
                patch_start_height = min(h * effective_patch_size, full_height - patch_size)
                patch_start_width = min(w * effective_patch_size, full_width - patch_size)
                recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
                patch_index += 1
    recomposed_tensor /= blending_image
    return recomposed_tensor
#TODO: imresize could be an independent file (imresize.py)
####################
# Matlab imresize
####################
# These next functions are all interpolation methods. x is the distance from the left pixel center
def cubic(x):
    """Keys bicubic interpolation kernel (a = -0.5), as used by MATLAB 'cubic'."""
    a = torch.abs(x)
    a2 = a * a
    a3 = a2 * a
    near = (a <= 1).type_as(a)                # |x| <= 1 segment
    far = ((a > 1) * (a <= 2)).type_as(a)     # 1 < |x| <= 2 segment
    return (1.5 * a3 - 2.5 * a2 + 1) * near + \
           (-0.5 * a3 + 2.5 * a2 - 4 * a + 2) * far
def box(x):
    """Box (nearest-neighbour) kernel: 1 on [-0.5, 0.5), 0 elsewhere."""
    inside = (x >= -0.5) & (x < 0.5)
    return inside * 1.0
def linear(x):
    """Triangle (bilinear) kernel: 1 - |x| on [-1, 1], 0 elsewhere."""
    rising = (x >= -1) & (x < 0)
    falling = (x >= 0) & (x <= 1)
    return (x + 1) * rising + (1 - x) * falling
def lanczos2(x):
    """Lanczos kernel with support 2: sinc(x) * sinc(x / 2) for |x| < 2, else 0.

    A small epsilon in numerator and denominator avoids 0/0 at x == 0.
    """
    eps = torch.finfo(torch.float32).eps
    numerator = torch.sin(math.pi * x) * torch.sin(math.pi * x / 2) + eps
    denominator = (math.pi ** 2 * x ** 2 / 2) + eps
    return (numerator / denominator) * (torch.abs(x) < 2)
def lanczos3(x):
    """Lanczos kernel with support 3: sinc(x) * sinc(x / 3) for |x| < 3, else 0.

    A small epsilon in numerator and denominator avoids 0/0 at x == 0.
    """
    eps = torch.finfo(torch.float32).eps
    numerator = torch.sin(math.pi * x) * torch.sin(math.pi * x / 3) + eps
    denominator = (math.pi ** 2 * x ** 2 / 3) + eps
    return (numerator / denominator) * (torch.abs(x) < 3)
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
    """Compute per-output-pixel input indices and kernel weights for a
    separable MATLAB-style resize (used by ``imresize`` / ``imresize_np``).

    Returns ``(weights, indices, sym_len_s, sym_len_e)`` where ``weights`` and
    ``indices`` both have shape (out_length, P): row k holds the weights and
    (padded) input indices contributing to output pixel k.  ``sym_len_s`` /
    ``sym_len_e`` are how many pixels of symmetric (reflect) padding the
    caller must add at the start / end of the input so every index is valid.
    """
    if (scale < 1) and (antialiasing):
        # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
        kernel_width = kernel_width / scale
    # Output-space coordinates
    x = torch.linspace(1, out_length, out_length)
    # Input-space coordinates. Calculate the inverse mapping such that 0.5
    # in output space maps to 0.5 in input space, and 0.5+scale in output
    # space maps to 1.5 in input space.
    u = x / scale + 0.5 * (1 - 1 / scale)
    # What is the left-most pixel that can be involved in the computation?
    left = torch.floor(u - kernel_width / 2)
    # What is the maximum number of pixels that can be involved in the
    # computation?  Note: it's OK to use an extra pixel here; if the
    # corresponding weights are all zero, it will be eliminated at the end
    # of this function.
    P = math.ceil(kernel_width) + 2
    # The indices of the input pixels involved in computing the k-th output
    # pixel are in row k of the indices matrix.
    indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
        1, P).expand(out_length, P)
    # The weights used to compute the k-th output pixel are in row k of the
    # weights matrix.
    distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
    # apply kernel (antialiased variant rescales both argument and value)
    if (scale < 1) and (antialiasing):
        weights = scale * kernel(distance_to_center * scale)
    else:
        weights = kernel(distance_to_center)
    # Normalize the weights matrix so that each row sums to 1.
    weights_sum = torch.sum(weights, 1).view(out_length, 1)
    weights = weights / weights_sum.expand(out_length, P)
    # If a column in weights is all zero, get rid of it. only consider the first and last column.
    weights_zero_tmp = torch.sum((weights == 0), 0)
    if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 1, P - 2)
        weights = weights.narrow(1, 1, P - 2)
    if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
        indices = indices.narrow(1, 0, P - 2)
        weights = weights.narrow(1, 0, P - 2)
    weights = weights.contiguous()
    indices = indices.contiguous()
    # Indices can run past either end of the input; compute the symmetric
    # padding needed and shift indices into the padded coordinate system.
    sym_len_s = -indices.min() + 1
    sym_len_e = indices.max() - in_length
    indices = indices + sym_len_s - 1
    return weights, indices, int(sym_len_s), int(sym_len_e)
def imresize(img, scale, antialiasing=True, interpolation=None):
    """MATLAB-style imresize for a CHW tensor in [0, 1] (any channel count).

    Parameters
    ----------
    img : torch.Tensor, shape (C, H, W), values in [0, 1]
    scale : float
        Same scale factor applied to H and W.
    antialiasing : bool
        Widen the kernel when downscaling (matches MATLAB behaviour).
    interpolation : str or None
        One of "cubic", "lanczos2", "lanczos3", "box", "linear";
        ``None`` selects "cubic".

    Returns
    -------
    torch.Tensor, shape (C, ceil(H*scale), ceil(W*scale)), not rounded/clipped.

    Raises
    ------
    ValueError for an unknown ``interpolation`` name.
    """
    in_C, in_H, in_W = img.size()
    out_H, out_W = math.ceil(in_H * scale), math.ceil(in_W * scale)

    # Choose interpolation method; each method has a matching kernel support.
    kernels = {
        "cubic": (cubic, 4.0),
        "lanczos2": (lanczos2, 4.0),
        "lanczos3": (lanczos3, 6.0),
        "box": (box, 1.0),
        "linear": (linear, 2.0),
        None: (cubic, 4.0),  # default interpolation method is cubic
    }
    if interpolation not in kernels:
        # was an opaque "cannot unpack NoneType" TypeError before
        raise ValueError('Unsupported interpolation method: {}'.format(interpolation))
    kernel, kernel_width = kernels[interpolation]

    # Separable resize: weights/indices for H first, then W.
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)

    # -- H dimension: symmetric (reflect) padding, then weighted sums ------
    img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
    img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)

    sym_patch = img[:, :sym_len_Hs, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch.index_select(1, inv_idx))

    sym_patch = img[:, -sym_len_He:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch.index_select(1, inv_idx))

    out_1 = torch.FloatTensor(in_C, out_H, in_W)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        # generalized to any number of channels (original assumed exactly 3)
        for c in range(in_C):
            out_1[c, i, :] = img_aug[c, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])

    # -- W dimension: symmetric padding, then weighted sums ----------------
    out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
    out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :, :sym_len_Ws]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch.index_select(2, inv_idx))

    sym_patch = out_1[:, :, -sym_len_We:]
    inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
    out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch.index_select(2, inv_idx))

    out_2 = torch.FloatTensor(in_C, out_H, out_W)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for c in range(in_C):
            out_2[c, :, i] = out_1_aug[c, :, idx:idx + kernel_width].mv(weights_W[i])
    return out_2
def imresize_np(img, scale, antialiasing=True, interpolation=None):
    """MATLAB-style imresize for a numpy HWC image (any channel count).

    Inputs with values above 1 (e.g. uint8 [0, 255]) are normalized by their
    dtype max, resized, then clipped and converted back to the original dtype
    and range.  Float inputs already in [0, 1] are returned in [0, 1].

    Parameters
    ----------
    img : np.ndarray, shape (H, W, C)
    scale : float, same scale factor for H and W
    antialiasing : bool, widen the kernel when downscaling (matches MATLAB)
    interpolation : str or None, one of "cubic", "lanczos2", "lanczos3",
        "box", "linear"; ``None`` selects "cubic"

    Returns
    -------
    np.ndarray, shape (ceil(H*scale), ceil(W*scale), C), not rounded.
    """
    change_range = False
    if img.max() > 1:
        img_type = img.dtype
        if np.issubdtype(img_type, np.integer):
            info = np.iinfo
        elif np.issubdtype(img_type, np.floating):
            info = np.finfo
        else:
            # previously this fell through and crashed later with NameError
            raise TypeError('Unsupported dtype for inputs with range > 1: {}'.format(img_type))
        img = img / info(img_type).max
        change_range = True
    img = torch.from_numpy(img)

    in_H, in_W, in_C = img.size()
    out_H, out_W = math.ceil(in_H * scale), math.ceil(in_W * scale)

    # Choose interpolation method; each method has a matching kernel support.
    kernels = {
        "cubic": (cubic, 4.0),
        "lanczos2": (lanczos2, 4.0),
        "lanczos3": (lanczos3, 6.0),
        "box": (box, 1.0),
        "linear": (linear, 2.0),
        None: (cubic, 4.0),  # default interpolation method is cubic
    }
    if interpolation not in kernels:
        raise ValueError('Unsupported interpolation method: {}'.format(interpolation))
    kernel, kernel_width = kernels[interpolation]

    # Separable resize: weights/indices for H first, then W.
    weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
        in_H, out_H, scale, kernel, kernel_width, antialiasing)
    weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
        in_W, out_W, scale, kernel, kernel_width, antialiasing)

    # -- H dimension: symmetric (reflect) padding, then weighted sums ------
    img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
    img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)

    sym_patch = img[:sym_len_Hs, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch.index_select(0, inv_idx))

    sym_patch = img[-sym_len_He:, :, :]
    inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
    img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch.index_select(0, inv_idx))

    out_1 = torch.FloatTensor(out_H, in_W, in_C)
    kernel_width = weights_H.size(1)
    for i in range(out_H):
        idx = int(indices_H[i][0])
        # generalized to any channel count (original assumed exactly 3)
        for c in range(in_C):
            out_1[i, :, c] = img_aug[idx:idx + kernel_width, :, c].transpose(0, 1).mv(weights_H[i])

    # -- W dimension: symmetric padding, then weighted sums ----------------
    out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
    out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)

    sym_patch = out_1[:, :sym_len_Ws, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch.index_select(1, inv_idx))

    sym_patch = out_1[:, -sym_len_We:, :]
    inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
    out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch.index_select(1, inv_idx))

    out_2 = torch.FloatTensor(out_H, out_W, in_C)
    kernel_width = weights_W.size(1)
    for i in range(out_W):
        idx = int(indices_W[i][0])
        for c in range(in_C):
            out_2[:, i, c] = out_1_aug[:, idx:idx + kernel_width, c].mv(weights_W[i])

    out_2 = out_2.numpy().clip(0, 1)
    if change_range:
        out_2 = out_2 * info(img_type).max  # e.g. 255 for uint8
        out_2 = out_2.astype(img_type)
    return out_2
if __name__ == '__main__':
    # Quick smoke test / benchmark of imresize on a sample image.
    import time
    import torchvision.utils

    raw = cv2.imread('test.png')
    # BGR uint8 -> RGB float CHW in [0, 1]
    tensor_img = torch.from_numpy(
        np.transpose(raw[:, :, [2, 1, 0]] * 1.0 / 255, (2, 0, 1))).float()

    scale = 1 / 4
    elapsed = 0
    for _ in range(10):
        tic = time.time()
        rlt = imresize(tensor_img, scale, antialiasing=True)
        elapsed += time.time() - tic
    print('average time: {}'.format(elapsed / 10))

    torchvision.utils.save_image(
        (rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False)
| 38.010067 | 184 | 0.613872 |
import os
import math
import pickle
import random
import numpy as np
import torch
import cv2
import logging
import copy
from torchvision.utils import make_grid
from .colors import *
NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return env, paths
g.shape[2] == 3 and out_nc == 4:
img = np.dstack((img, np.full(img.shape[:-1], 255, dtype=np.uint8)))
return img
d tar_type == 'gray':
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
elif in_c == 3 and tar_type == 'RGB-LAB':
return [cv2.cvtColor(img, cv2.COLOR_BGR2LAB) for img in img_list]
elif in_c == 3 and tar_type == 'LAB-RGB':
return [cv2.cvtColor(img, cv2.COLOR_LAB2BGR) for img in img_list]
elif in_c == 3 and tar_type == 'y':
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB':
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
def rgb2ycbcr(img, only_y=True):
    """Convert an RGB image (HWC) to YCbCr (ITU-R BT.601, studio swing).

    Accepts uint8 [0, 255] or float [0, 1] input and returns the same dtype
    and range.  With ``only_y=True`` only the luma (Y) channel is returned.
    """
    src_dtype = img.dtype
    data = img.astype(np.float32)
    if src_dtype != np.uint8:
        data *= 255.  # work in [0, 255] regardless of input range

    if only_y:
        converted = np.dot(data, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        matrix = [[65.481, -37.797, 112.0],
                  [128.553, -74.203, -93.786],
                  [24.966, 112.0, -18.214]]
        converted = np.matmul(data, matrix) / 255.0 + [16, 128, 128]

    if src_dtype == np.uint8:
        converted = converted.round()
    else:
        converted /= 255.  # back to [0, 1] for float inputs
    return converted.astype(src_dtype)
def bgr2ycbcr(img, only_y=True, separate=False):
    """Convert a BGR image (HWC, OpenCV channel order) to YCbCr (ITU-R BT.601).

    Same contract as ``rgb2ycbcr`` but with the input channel order flipped.
    With ``separate=True`` the Y, Cb and Cr planes are returned as three
    separate arrays (requires ``only_y=False`` for a 3-channel result).
    """
    src_dtype = img.dtype
    data = img.astype(np.float32)
    if src_dtype != np.uint8:
        data *= 255.  # work in [0, 255] regardless of input range

    if only_y:
        converted = np.dot(data, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        matrix = [[24.966, 112.0, -18.214],
                  [128.553, -74.203, -93.786],
                  [65.481, -37.797, 112.0]]
        converted = np.matmul(data, matrix) / 255.0 + [16, 128, 128]

    if src_dtype == np.uint8:
        converted = converted.round()
    else:
        converted /= 255.  # back to [0, 1] for float inputs
    converted = converted.astype(src_dtype)
    if separate:
        # Y, Cb, Cr as individual planes
        return converted[:, :, 0], converted[:, :, 1], converted[:, :, 2]
    return converted
def ycbcr2rgb(img, only_y=True):
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]])
mat = np.linalg.inv(mat.T) * 255
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def modcrop(img_in, scale):
    """Crop the bottom/right edge of an image so H and W are multiples of scale.

    Works on 2D (H, W) and 3D (H, W, C) arrays; the input is not modified.
    Raises ValueError for any other dimensionality.
    """
    img = np.copy(img_in)
    if img.ndim == 2:
        h, w = img.shape
        return img[:h - h % scale, :w - w % scale]
    if img.ndim == 3:
        h, w, _ = img.shape
        return img[:h - h % scale, :w - w % scale, :]
    raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
def augment(img_list, hflip=True, rot=True):
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
def _augment(img):
if hflip: img = np.flip(img, axis=1)
if vflip: img = np.flip(img, axis=0)
if rot90: img = np.rot90(img, 1) mg) for img in img_list]
.5) * 2.0
if isinstance(x, torch.Tensor):
return out.clamp(-1, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, -1, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
.max
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float()
if bgr2rgb:
if img.shape[0] == 3:
img = bgr_to_rgb(img)
elif img.shape[0] == 4:
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0)
if normalize:
img = norm(img)
return img
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255,
denormalize=False, change_range=True, imtype=np.uint8):
if not isinstance(img, torch.Tensor):
raise TypeError("Got unexpected object type, expected torch.Tensor")
n_dim = img.dim()
img = img.float().cpu()
if n_dim == 4 or n_dim == 3:
if n_dim == 4 and remove_batch:
if img.shape[0] > 1:
img = img[0,...]
else:
img = img.squeeze()
if len(img.shape) < 3:
img = img.unsqueeze(dim=0)
else:
n_img = len(img)
img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)
if img.shape[0] == 3 and rgb2bgr:
img_np = rgb_to_bgr(img).numpy()
elif img.shape[0] == 4 and rgb2bgr:
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) n_dim == 2:
img_np = img.numpy()
else:
raise TypeError(
'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
img_np = np.clip(data_range*img_np,0,data_range).round()
return img_np.astype(imtype)
_width * effective_patch_size < width:
n_patches_width += 1
patches = []
for b in range(batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, height - patch_size)
patch_start_width = min(w * effective_patch_size, width - patch_size)
patches.append(features[b:b+1, :,
patch_start_height: patch_start_height + patch_size,
patch_start_width: patch_start_width + patch_size])
return torch.cat(patches, 0)
def recompose_tensor(patches, full_height, full_width, overlap=10):
batch_size, channels, patch_size, _ = patches.size()
effective_patch_size = patch_size - overlap
n_patches_height = (full_height // effective_patch_size)
n_patches_width = (full_width // effective_patch_size)
if n_patches_height * effective_patch_size < full_height:
n_patches_height += 1
if n_patches_width * effective_patch_size < full_width:
n_patches_width += 1
n_patches = n_patches_height * n_patches_width
if batch_size % n_patches != 0:
print("Error: The number of patches provided to the recompose function does not match the number of patches in each image.")
final_batch_size = batch_size // n_patches
blending_in = torch.linspace(0.1, 1.0, overlap)
blending_out = torch.linspace(1.0, 0.1, overlap)
middle_part = torch.ones(patch_size - 2 * overlap)
blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)
horizontal_blending = blending_profile[None].repeat(patch_size, 1)
vertical_blending = blending_profile[:, None].repeat(1, patch_size)
blending_patch = horizontal_blending * vertical_blending
blending_image = torch.zeros(1, channels, full_height, full_width)
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]
recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
if patches.is_cuda:
blending_patch = blending_patch.cuda()
blending_image = blending_image.cuda()
recomposed_tensor = recomposed_tensor.cuda()
patch_index = 0
for b in range(final_batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
patch_index += 1
recomposed_tensor /= blending_image
return recomposed_tensor
x) & (x <= 1))
def lanczos2(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/2) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 2) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 2))
def lanczos3(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/3) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 3) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 3))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
kernel_width = kernel_width / scale
x = torch.linspace(1, out_length, out_length)
u = x / scale + 0.5 * (1 - 1 / scale)
left = torch.floor(u - kernel_width / 2)
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply kernel
if (scale < 1) and (antialiasing):
weights = scale * kernel(distance_to_center * scale)
else:
weights = kernel(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
def imresize(img, scale, antialiasing=True, interpolation=None):
# The scale should be the same for H and W
# input: img: CHW RGB [0,1]
# output: CHW RGB [0,1] w/o round
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[0, i, :] = img_aug[0, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[1, i, :] = img_aug[1, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[2, i, :] = img_aug[2, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[0, :, i] = out_1_aug[0, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[1, :, i] = out_1_aug[1, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[2, :, i] = out_1_aug[2, :, idx:idx + kernel_width].mv(weights_W[i])
return out_2
def imresize_np(img, scale, antialiasing=True, interpolation=None):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC BGR [0,1]
# output: HWC BGR [0,1] w/o round
change_range = False
if img.max() > 1:
img_type = img.dtype
if np.issubdtype(img_type, np.integer):
info = np.iinfo
elif np.issubdtype(img_type, np.floating):
info = np.finfo
img = img/info(img_type).max
change_range = True
img = torch.from_numpy(img)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i])
out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i])
out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i])
out_2 = out_2.numpy().clip(0,1)
if change_range:
out_2 = out_2*info(img_type).max #uint8 = 255
out_2 = out_2.astype(img_type)
return out_2
if __name__ == '__main__':
# test imresize function
# read images
img = cv2.imread('test.png')
img = img * 1.0 / 255
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# imresize
scale = 1 / 4
import time
total_time = 0
for i in range(10):
start_time = time.time()
rlt = imresize(img, scale, antialiasing=True)
use_time = time.time() - start_time
total_time += use_time
print('average time: {}'.format(total_time / 10))
import torchvision.utils
torchvision.utils.save_image(
(rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False)
| true | true |
7901e150a7ff46090793627452f856bd146bba06 | 21,927 | py | Python | pandas/core/construction.py | gabriellm1/pandas | 020040b3b92516b445ddd8daba3b9818340e82d4 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T17:32:26.000Z | 2020-10-29T17:32:26.000Z | pandas/core/construction.py | gabriellm1/pandas | 020040b3b92516b445ddd8daba3b9818340e82d4 | [
"BSD-3-Clause"
] | null | null | null | pandas/core/construction.py | gabriellm1/pandas | 020040b3b92516b445ddd8daba3b9818340e82d4 | [
"BSD-3-Clause"
] | 1 | 2022-03-08T15:07:11.000Z | 2022-03-08T15:07:11.000Z | """
Constructor functions intended to be shared by pd.array, Series.__init__,
and Index.__new__.
These should not depend on core.internals.
"""
from __future__ import annotations
from collections import abc
from typing import TYPE_CHECKING, Any, Optional, Sequence, Union, cast
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj
from pandas.core.dtypes.base import ExtensionDtype, registry
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_cast_to_integer_array,
maybe_castable,
maybe_convert_platform,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_datetime64_ns_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndexClass,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
import pandas.core.common as com
if TYPE_CHECKING:
from pandas import ExtensionArray, Index, Series
def array(
    data: Union[Sequence[object], AnyArrayLike],
    dtype: Optional[Dtype] = None,
    copy: bool = True,
) -> ExtensionArray:
    """
    Create an ExtensionArray from a 1-dimensional sequence of scalars.

    Parameters
    ----------
    data : Sequence of objects or array-like
        Must represent 1-dimensional data; for Series/Index the underlying
        array is extracted first.
    dtype : str, np.dtype, or ExtensionDtype, optional
        Target dtype. When omitted, pandas infers a dedicated extension
        dtype (period, interval, datetime, timedelta, nullable string /
        integer / floating / boolean) and otherwise falls back to a
        PandasArray using NumPy's normal type inference.
    copy : bool, default True
        Whether to copy `data`, even when a copy is not strictly required.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is a scalar (i.e. not 1-dimensional).

    See Also
    --------
    numpy.array : Construct a NumPy array.
    Series.array : Extract the array stored within a Series.
    """
    from pandas.core.arrays import (
        BooleanArray,
        DatetimeArray,
        FloatingArray,
        IntegerArray,
        IntervalArray,
        PandasArray,
        StringArray,
        TimedeltaArray,
        period_array,
    )
    if lib.is_scalar(data):
        raise ValueError(f"Cannot pass scalar '{data}' to 'pandas.array'.")
    if dtype is None and isinstance(
        data, (ABCSeries, ABCIndexClass, ABCExtensionArray)
    ):
        dtype = data.dtype
    data = extract_array(data, extract_numpy=True)
    # registry.find returns None for dtype strings it does not know.
    if isinstance(dtype, str):
        dtype = registry.find(dtype) or dtype
    if is_extension_array_dtype(dtype):
        ea_cls = cast(ExtensionDtype, dtype).construct_array_type()
        return ea_cls._from_sequence(data, dtype=dtype, copy=copy)
    if dtype is None:
        inferred = lib.infer_dtype(data, skipna=True)
        if inferred == "period":
            try:
                return period_array(data, copy=copy)
            except IncompatibleFrequency:
                # Mixed frequencies: fall through to a plain ndarray
                # rather than raising.
                pass
        elif inferred == "interval":
            try:
                return IntervalArray(data, copy=copy)
            except ValueError:
                # Mixed `closed` values: fall through to a plain ndarray
                # rather than raising.
                pass
        elif inferred.startswith("datetime"):
            # Covers both datetime and datetime64 scalars.
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixed timezones: fall back to PandasArray below.
                pass
        elif inferred.startswith("timedelta"):
            # Covers both timedelta and timedelta64 scalars.
            return TimedeltaArray._from_sequence(data, copy=copy)
        elif inferred == "string":
            return StringArray._from_sequence(data, copy=copy)
        elif inferred == "integer":
            return IntegerArray._from_sequence(data, copy=copy)
        elif inferred in ("floating", "mixed-integer-float"):
            return FloatingArray._from_sequence(data, copy=copy)
        elif inferred == "boolean":
            return BooleanArray._from_sequence(data, copy=copy)
    # datetime64[ns] / timedelta64[ns] ndarrays are wrapped in the
    # dedicated arrays, for symmetry with timezone-aware data which
    # NumPy does not natively support.
    if is_datetime64_ns_dtype(dtype):
        return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
    elif is_timedelta64_ns_dtype(dtype):
        return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
    return PandasArray._from_sequence(data, dtype=dtype, copy=copy)
def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike:
    """
    Unbox the ndarray or ExtensionArray stored inside a Series or Index.

    Any other object (lists, ndarrays, DataFrames, ...) is returned
    unchanged.

    Parameters
    ----------
    obj : object
        For Series / Index, the underlying ExtensionArray is unboxed.
    extract_numpy : bool, default False
        Also unwrap a PandasArray down to its backing ndarray.

    Returns
    -------
    arr : object
    """
    unboxed = obj
    if isinstance(unboxed, (ABCIndexClass, ABCSeries)):
        unboxed = unboxed.array
    if extract_numpy and isinstance(unboxed, ABCPandasArray):
        unboxed = unboxed.to_numpy()
    # mypy: the declared return type does not cover the passthrough of
    # arbitrary objects, hence the ignore.
    return unboxed  # type: ignore[return-value]
def sanitize_array(
    data,
    index: Optional[Index],
    dtype: Optional[DtypeObj] = None,
    copy: bool = False,
    raise_cast_failure: bool = False,
) -> ArrayLike:
    """
    Sanitize input data to an ndarray or ExtensionArray, copy if specified,
    coerce to the dtype if specified.

    Parameters
    ----------
    data : object
        Masked arrays, ndarrays, ExtensionArrays, list-likes, ranges and
        scalars are each handled by a dedicated branch below.
    index : Index or None
        When given, scalar `data` is broadcast to ``len(index)``.
    dtype : np.dtype, ExtensionDtype or None
        Requested dtype; None means infer.
    copy : bool, default False
        Whether to force a copy of the input.
    raise_cast_failure : bool, default False
        Forwarded to ``_try_cast``; when True a failed cast to an explicit
        dtype raises instead of producing an object array.

    Returns
    -------
    ndarray or ExtensionArray
    """
    # Masked arrays: fill masked entries with an upcast fill value so the
    # result is a regular ndarray.
    if isinstance(data, ma.MaskedArray):
        mask = ma.getmaskarray(data)
        if mask.any():
            data, fill_value = maybe_upcast(data, copy=True)
            data.soften_mask()  # set hardmask False if it was True
            data[mask] = fill_value
        else:
            data = data.copy()
    # extract ndarray or ExtensionArray, ensure we have no PandasArray
    data = extract_array(data, extract_numpy=True)
    # GH#846
    if isinstance(data, np.ndarray):
        if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
            # possibility of nan -> garbage
            try:
                subarr = _try_cast(data, dtype, copy, True)
            except ValueError:
                if copy:
                    subarr = data.copy()
                else:
                    subarr = np.array(data, copy=False)
        else:
            # we will try to copy be-definition here
            subarr = _try_cast(data, dtype, copy, raise_cast_failure)
    elif isinstance(data, ABCExtensionArray):
        # it is already ensured above this is not a PandasArray
        subarr = data
        if dtype is not None:
            subarr = subarr.astype(dtype, copy=copy)
        elif copy:
            subarr = subarr.copy()
        # Early return: the ndim / string post-processing below does not
        # apply to ExtensionArrays.
        return subarr
    elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0:
        if isinstance(data, set):
            # Raise only for unordered sets, e.g., not for dict_keys
            raise TypeError("Set type is unordered")
        data = list(data)
        if dtype is not None:
            subarr = _try_cast(data, dtype, copy, raise_cast_failure)
        else:
            subarr = maybe_convert_platform(data)
        subarr = maybe_cast_to_datetime(subarr, dtype)
    elif isinstance(data, range):
        # GH#16804
        arr = np.arange(data.start, data.stop, data.step, dtype="int64")
        subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
    elif lib.is_scalar(data) and index is not None and dtype is not None:
        data = maybe_cast_to_datetime(data, dtype)
        if not lib.is_scalar(data):
            data = data[0]
        subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype)
    else:
        subarr = _try_cast(data, dtype, copy, raise_cast_failure)
    # scalar like, GH
    if getattr(subarr, "ndim", 0) == 0:
        if isinstance(data, list):  # pragma: no cover
            subarr = np.array(data, dtype=object)
        elif index is not None:
            value = data
            # figure out the dtype from the value (upcast if necessary)
            if dtype is None:
                dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
            else:
                # need to possibly convert the value here
                value = maybe_cast_to_datetime(value, dtype)
            # Broadcast the scalar to the length of the index.
            subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
        else:
            return subarr.item()
    # the result that we want
    elif subarr.ndim == 1:
        if index is not None:
            # a 1-element ndarray
            if len(subarr) != len(index) and len(subarr) == 1:
                subarr = construct_1d_arraylike_from_scalar(
                    subarr[0], len(index), subarr.dtype
                )
    elif subarr.ndim > 1:
        if isinstance(data, np.ndarray):
            raise ValueError("Data must be 1-dimensional")
        else:
            subarr = com.asarray_tuplesafe(data, dtype=dtype)
    if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)):
        # This is to prevent mixed-type Series getting all casted to
        # NumPy string type, e.g. NaN --> '-1#IND'.
        if issubclass(subarr.dtype.type, str):
            # GH#16605
            # If not empty convert the data to dtype
            # GH#19853: If data is a scalar, subarr has already the result
            if not lib.is_scalar(data):
                if not np.all(isna(data)):
                    data = np.array(data, dtype=dtype, copy=False)
                subarr = np.array(data, dtype=object, copy=copy)
    is_object_or_str_dtype = is_object_dtype(dtype) or is_string_dtype(dtype)
    if is_object_dtype(subarr.dtype) and not is_object_or_str_dtype:
        # Re-box interval / period scalars into their dedicated
        # ExtensionArrays.
        inferred = lib.infer_dtype(subarr, skipna=False)
        if inferred in {"interval", "period"}:
            subarr = array(subarr)
    return subarr
def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool):
    """
    Convert input to numpy ndarray and optionally cast to a given dtype.

    Parameters
    ----------
    arr : ndarray, scalar, list, tuple, iterator (catchall)
        Excludes: ExtensionArray, Series, Index.
    dtype : np.dtype, ExtensionDtype or None
    copy : bool
        If False, don't copy the data if not needed.
    raise_cast_failure : bool
        If True, and if a dtype is specified, raise errors during casting.
        Otherwise an object array is returned.

    Returns
    -------
    ndarray or ExtensionArray
        Falls back to an object-dtype ndarray when casting fails and
        ``raise_cast_failure`` does not apply.
    """
    # perf shortcut as this is the most common case
    if isinstance(arr, np.ndarray):
        if maybe_castable(arr) and not copy and dtype is None:
            return arr
    if isinstance(dtype, ExtensionDtype) and (dtype.kind != "M" or is_sparse(dtype)):
        # create an extension array from its dtype
        # DatetimeTZ case needs to go through maybe_cast_to_datetime but
        # SparseDtype does not
        array_type = dtype.construct_array_type()._from_sequence
        subarr = array_type(arr, dtype=dtype, copy=copy)
        return subarr
    try:
        # GH#15832: Check if we are requesting a numeric dtype and
        # that we can convert the data to the requested dtype.
        if is_integer_dtype(dtype):
            # this will raise if we have e.g. floats
            maybe_cast_to_integer_array(arr, dtype)
            subarr = arr
        else:
            subarr = maybe_cast_to_datetime(arr, dtype)
        # Take care in creating object arrays (but iterators are not
        # supported):
        if is_object_dtype(dtype) and (
            is_list_like(subarr)
            and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
        ):
            subarr = construct_1d_object_array_from_listlike(subarr)
        elif not is_extension_array_dtype(subarr):
            subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
    except OutOfBoundsDatetime:
        # in case of out of bound datetime64 -> always raise
        raise
    except (ValueError, TypeError):
        if dtype is not None and raise_cast_failure:
            raise
        else:
            subarr = np.array(arr, dtype=object, copy=copy)
    return subarr
def is_empty_data(data: Any) -> bool:
    """
    Check whether `data` would create an empty Series that carries no
    dtype information.

    Parameters
    ----------
    data : array-like, Iterable, dict, or scalar value
        Contains data stored in Series.

    Returns
    -------
    bool
        True for None and for empty list-likes without a `dtype`
        attribute (e.g. ``[]``, ``()``, ``{}``); False otherwise.
    """
    if data is None:
        return True
    if is_list_like(data) and not hasattr(data, "dtype"):
        # Empty list-likes are falsy; anything exposing a dtype (e.g. an
        # empty ndarray) still carries type information and is not
        # considered "empty data" here.
        return not data
    return False
def create_series_with_explicit_dtype(
    data: Any = None,
    index: Optional[Union[ArrayLike, Index]] = None,
    dtype: Optional[Dtype] = None,
    name: Optional[str] = None,
    copy: bool = False,
    fastpath: bool = False,
    dtype_if_empty: Dtype = object,
) -> Series:
    """
    Construct a Series, forcing ``dtype_if_empty`` when `data` is empty.

    This silences a DeprecationWarning described in GitHub-17261.

    Parameters
    ----------
    data : Mirrored from Series.__init__
    index : Mirrored from Series.__init__
    dtype : Mirrored from Series.__init__
    name : Mirrored from Series.__init__
    copy : Mirrored from Series.__init__
    fastpath : Mirrored from Series.__init__
    dtype_if_empty : str, numpy.dtype, or ExtensionDtype
        Dtype passed explicitly when `data` is empty and no dtype was
        requested.

    Returns
    -------
    Series
    """
    from pandas.core.series import Series

    effective_dtype = dtype
    if effective_dtype is None and is_empty_data(data):
        effective_dtype = dtype_if_empty
    return Series(
        data=data,
        index=index,
        dtype=effective_dtype,
        name=name,
        copy=copy,
        fastpath=fastpath,
    )
| 33.578867 | 88 | 0.626123 | from __future__ import annotations
from collections import abc
from typing import TYPE_CHECKING, Any, Optional, Sequence, Union, cast
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._libs.tslibs import IncompatibleFrequency, OutOfBoundsDatetime
from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj
from pandas.core.dtypes.base import ExtensionDtype, registry
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
construct_1d_object_array_from_listlike,
infer_dtype_from_scalar,
maybe_cast_to_datetime,
maybe_cast_to_integer_array,
maybe_castable,
maybe_convert_platform,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_datetime64_ns_dtype,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_iterator,
is_list_like,
is_object_dtype,
is_sparse,
is_string_dtype,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.generic import (
ABCExtensionArray,
ABCIndexClass,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
import pandas.core.common as com
if TYPE_CHECKING:
from pandas import ExtensionArray, Index, Series
def array(
    data: Union[Sequence[object], AnyArrayLike],
    dtype: Optional[Dtype] = None,
    copy: bool = True,
) -> ExtensionArray:
    """
    Create an ExtensionArray from 1-dimensional data.

    NOTE(review): this duplicated copy had its indentation stripped,
    which made it invalid Python; formatting restored here.

    Parameters
    ----------
    data : Sequence of objects or array-like
        Must be 1-dimensional; Series/Index are unboxed first.
    dtype : str, np.dtype, or ExtensionDtype, optional
        Target dtype; when None, a dedicated extension dtype is inferred
        where possible, otherwise a PandasArray is returned.
    copy : bool, default True
        Whether to copy `data`, even if not strictly required.

    Returns
    -------
    ExtensionArray

    Raises
    ------
    ValueError
        When `data` is a scalar.
    """
    from pandas.core.arrays import (
        BooleanArray,
        DatetimeArray,
        FloatingArray,
        IntegerArray,
        IntervalArray,
        PandasArray,
        StringArray,
        TimedeltaArray,
        period_array,
    )

    if lib.is_scalar(data):
        msg = f"Cannot pass scalar '{data}' to 'pandas.array'."
        raise ValueError(msg)
    if dtype is None and isinstance(
        data, (ABCSeries, ABCIndexClass, ABCExtensionArray)
    ):
        dtype = data.dtype
    data = extract_array(data, extract_numpy=True)
    # registry.find returns None for dtype strings it does not know.
    if isinstance(dtype, str):
        dtype = registry.find(dtype) or dtype
    if is_extension_array_dtype(dtype):
        cls = cast(ExtensionDtype, dtype).construct_array_type()
        return cls._from_sequence(data, dtype=dtype, copy=copy)
    if dtype is None:
        inferred_dtype = lib.infer_dtype(data, skipna=True)
        if inferred_dtype == "period":
            try:
                return period_array(data, copy=copy)
            except IncompatibleFrequency:
                # Mixed frequencies: fall back to an ndarray.
                pass
        elif inferred_dtype == "interval":
            try:
                return IntervalArray(data, copy=copy)
            except ValueError:
                # Mixed `closed` values: fall back to an ndarray.
                pass
        elif inferred_dtype.startswith("datetime"):
            try:
                return DatetimeArray._from_sequence(data, copy=copy)
            except ValueError:
                # Mixed timezones: fall back to PandasArray below.
                pass
        elif inferred_dtype.startswith("timedelta"):
            return TimedeltaArray._from_sequence(data, copy=copy)
        elif inferred_dtype == "string":
            return StringArray._from_sequence(data, copy=copy)
        elif inferred_dtype == "integer":
            return IntegerArray._from_sequence(data, copy=copy)
        elif inferred_dtype in ("floating", "mixed-integer-float"):
            return FloatingArray._from_sequence(data, copy=copy)
        elif inferred_dtype == "boolean":
            return BooleanArray._from_sequence(data, copy=copy)
    # datetime64[ns] / timedelta64[ns] always get the dedicated arrays,
    # for symmetry with timezone-aware data.
    if is_datetime64_ns_dtype(dtype):
        return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
    elif is_timedelta64_ns_dtype(dtype):
        return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)
    result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
    return result
def extract_array(obj: AnyArrayLike, extract_numpy: bool = False) -> ArrayLike:
    """
    Unbox the ndarray or ExtensionArray stored inside a Series or Index.

    NOTE(review): indentation of this duplicated copy was stripped,
    making it invalid Python; formatting restored here.

    Parameters
    ----------
    obj : object
        For Series / Index, the underlying ExtensionArray is unboxed.
    extract_numpy : bool, default False
        Also unwrap a PandasArray down to its backing ndarray.

    Returns
    -------
    arr : object
        Any other object is returned unchanged.
    """
    if isinstance(obj, (ABCIndexClass, ABCSeries)):
        obj = obj.array
    if extract_numpy and isinstance(obj, ABCPandasArray):
        obj = obj.to_numpy()
    return obj
def sanitize_array(
    data,
    index: Optional[Index],
    dtype: Optional[DtypeObj] = None,
    copy: bool = False,
    raise_cast_failure: bool = False,
) -> ArrayLike:
    """
    Sanitize input data to an ndarray or ExtensionArray, copy if specified,
    coerce to the dtype if specified.

    NOTE(review): this duplicated copy was corrupted — indentation was
    stripped and the ``if not lib.is_scalar(data):`` guard in the string
    post-processing section was partially deleted (leaving ``f not
    np.all(...)``).  Both restored here from the intact first copy.

    Parameters
    ----------
    data : object
        Masked arrays, ndarrays, ExtensionArrays, list-likes, ranges and
        scalars are each handled by a dedicated branch.
    index : Index or None
        When given, scalar `data` is broadcast to ``len(index)``.
    dtype : np.dtype, ExtensionDtype or None
    copy : bool, default False
    raise_cast_failure : bool, default False
        Forwarded to ``_try_cast``.

    Returns
    -------
    ndarray or ExtensionArray
    """
    # Masked arrays: fill masked entries with an upcast fill value.
    if isinstance(data, ma.MaskedArray):
        mask = ma.getmaskarray(data)
        if mask.any():
            data, fill_value = maybe_upcast(data, copy=True)
            data.soften_mask()  # set hardmask False if it was True
            data[mask] = fill_value
        else:
            data = data.copy()
    # Extract ndarray or ExtensionArray; ensure we have no PandasArray.
    data = extract_array(data, extract_numpy=True)
    if isinstance(data, np.ndarray):
        if dtype is not None and is_float_dtype(data.dtype) and is_integer_dtype(dtype):
            # possibility of nan -> garbage
            try:
                subarr = _try_cast(data, dtype, copy, True)
            except ValueError:
                if copy:
                    subarr = data.copy()
                else:
                    subarr = np.array(data, copy=False)
        else:
            subarr = _try_cast(data, dtype, copy, raise_cast_failure)
    elif isinstance(data, ABCExtensionArray):
        # Already ensured above this is not a PandasArray.
        subarr = data
        if dtype is not None:
            subarr = subarr.astype(dtype, copy=copy)
        elif copy:
            subarr = subarr.copy()
        return subarr
    elif isinstance(data, (list, tuple, abc.Set, abc.ValuesView)) and len(data) > 0:
        if isinstance(data, set):
            # Raise only for unordered sets, e.g., not for dict_keys.
            raise TypeError("Set type is unordered")
        data = list(data)
        if dtype is not None:
            subarr = _try_cast(data, dtype, copy, raise_cast_failure)
        else:
            subarr = maybe_convert_platform(data)
        subarr = maybe_cast_to_datetime(subarr, dtype)
    elif isinstance(data, range):
        arr = np.arange(data.start, data.stop, data.step, dtype="int64")
        subarr = _try_cast(arr, dtype, copy, raise_cast_failure)
    elif lib.is_scalar(data) and index is not None and dtype is not None:
        data = maybe_cast_to_datetime(data, dtype)
        if not lib.is_scalar(data):
            data = data[0]
        subarr = construct_1d_arraylike_from_scalar(data, len(index), dtype)
    else:
        subarr = _try_cast(data, dtype, copy, raise_cast_failure)
    # Scalar-like result: broadcast against the index if we have one.
    if getattr(subarr, "ndim", 0) == 0:
        if isinstance(data, list):  # pragma: no cover
            subarr = np.array(data, dtype=object)
        elif index is not None:
            value = data
            # Figure out the dtype from the value (upcast if necessary).
            if dtype is None:
                dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
            else:
                value = maybe_cast_to_datetime(value, dtype)
            subarr = construct_1d_arraylike_from_scalar(value, len(index), dtype)
        else:
            return subarr.item()
    elif subarr.ndim == 1:
        if index is not None:
            # A 1-element ndarray is broadcast to the index length.
            if len(subarr) != len(index) and len(subarr) == 1:
                subarr = construct_1d_arraylike_from_scalar(
                    subarr[0], len(index), subarr.dtype
                )
    elif subarr.ndim > 1:
        if isinstance(data, np.ndarray):
            raise ValueError("Data must be 1-dimensional")
        else:
            subarr = com.asarray_tuplesafe(data, dtype=dtype)
    if not (is_extension_array_dtype(subarr.dtype) or is_extension_array_dtype(dtype)):
        # Prevent mixed-type Series getting all casted to NumPy string
        # type, e.g. NaN --> '-1#IND'.
        if issubclass(subarr.dtype.type, str):
            # If data is a scalar, subarr already has the result.
            if not lib.is_scalar(data):
                if not np.all(isna(data)):
                    data = np.array(data, dtype=dtype, copy=False)
                subarr = np.array(data, dtype=object, copy=copy)
    is_object_or_str_dtype = is_object_dtype(dtype) or is_string_dtype(dtype)
    if is_object_dtype(subarr.dtype) and not is_object_or_str_dtype:
        # Re-box interval / period scalars into their ExtensionArrays.
        inferred = lib.infer_dtype(subarr, skipna=False)
        if inferred in {"interval", "period"}:
            subarr = array(subarr)
    return subarr
def _try_cast(arr, dtype: Optional[DtypeObj], copy: bool, raise_cast_failure: bool):
    """
    Convert input to numpy ndarray and optionally cast to a given dtype.

    NOTE(review): this duplicated copy was corrupted — indentation was
    stripped and the ``if is_integer_dtype(dtype):`` line inside the
    ``try`` was deleted, leaving an orphaned ``else:``.  Restored here
    from the intact first copy.

    Parameters
    ----------
    arr : ndarray, scalar, list, tuple, iterator (catchall)
        Excludes: ExtensionArray, Series, Index.
    dtype : np.dtype, ExtensionDtype or None
    copy : bool
        If False, don't copy the data if not needed.
    raise_cast_failure : bool
        If True, and if a dtype is specified, raise errors during casting.
        Otherwise an object array is returned.

    Returns
    -------
    ndarray or ExtensionArray
    """
    # Perf shortcut as this is the most common case.
    if isinstance(arr, np.ndarray):
        if maybe_castable(arr) and not copy and dtype is None:
            return arr
    if isinstance(dtype, ExtensionDtype) and (dtype.kind != "M" or is_sparse(dtype)):
        # Create an extension array from its dtype; DatetimeTZ needs to
        # go through maybe_cast_to_datetime but SparseDtype does not.
        array_type = dtype.construct_array_type()._from_sequence
        subarr = array_type(arr, dtype=dtype, copy=copy)
        return subarr
    try:
        if is_integer_dtype(dtype):
            # This will raise if we have e.g. floats.
            maybe_cast_to_integer_array(arr, dtype)
            subarr = arr
        else:
            subarr = maybe_cast_to_datetime(arr, dtype)
        # Take care in creating object arrays (iterators not supported).
        if is_object_dtype(dtype) and (
            is_list_like(subarr)
            and not (is_iterator(subarr) or isinstance(subarr, np.ndarray))
        ):
            subarr = construct_1d_object_array_from_listlike(subarr)
        elif not is_extension_array_dtype(subarr):
            subarr = construct_1d_ndarray_preserving_na(subarr, dtype, copy=copy)
    except OutOfBoundsDatetime:
        # Out-of-bounds datetime64 always raises.
        raise
    except (ValueError, TypeError):
        if dtype is not None and raise_cast_failure:
            raise
        else:
            subarr = np.array(arr, dtype=object, copy=copy)
    return subarr
def is_empty_data(data: Any) -> bool:
    """
    Utility to check if a Series is instantiated with empty data,
    which does not contain dtype information.

    NOTE(review): indentation of this duplicated copy was stripped,
    making it invalid Python; formatting restored here.

    Parameters
    ----------
    data : array-like, Iterable, dict, or scalar value
        Contains data stored in Series.

    Returns
    -------
    bool
        True for None and for empty list-likes without a `dtype` attribute.
    """
    is_none = data is None
    is_list_like_without_dtype = is_list_like(data) and not hasattr(data, "dtype")
    is_simple_empty = is_list_like_without_dtype and not data
    return is_none or is_simple_empty
def create_series_with_explicit_dtype(
    data: Any = None,
    index: Optional[Union[ArrayLike, Index]] = None,
    dtype: Optional[Dtype] = None,
    name: Optional[str] = None,
    copy: bool = False,
    fastpath: bool = False,
    dtype_if_empty: Dtype = object,
) -> Series:
    """
    Helper to pass an explicit dtype when instantiating an empty Series.

    NOTE(review): indentation of this duplicated copy was stripped,
    making it invalid Python; formatting restored here.

    Parameters
    ----------
    data : Mirrored from Series.__init__
    index : Mirrored from Series.__init__
    dtype : Mirrored from Series.__init__
    name : Mirrored from Series.__init__
    copy : Mirrored from Series.__init__
    fastpath : Mirrored from Series.__init__
    dtype_if_empty : str, numpy.dtype, or ExtensionDtype
        Dtype passed explicitly when `data` is empty and no dtype given.

    Returns
    -------
    Series
    """
    from pandas.core.series import Series

    if is_empty_data(data) and dtype is None:
        dtype = dtype_if_empty
    return Series(
        data=data, index=index, dtype=dtype, name=name, copy=copy, fastpath=fastpath
    )
| true | true |
7901e1b6faf4a2c2a57f79c8da7a355cdd49b556 | 10,176 | py | Python | test/api_gw_test.py | fredliporace/cbers-2-stac | c92924b19289e0896ee676064250f9c8a758b674 | [
"Apache-2.0"
] | 11 | 2018-08-18T04:47:58.000Z | 2021-12-10T18:12:30.000Z | test/api_gw_test.py | fredliporace/cbers-2-stac | c92924b19289e0896ee676064250f9c8a758b674 | [
"Apache-2.0"
] | 71 | 2018-06-07T14:29:58.000Z | 2022-03-03T14:38:14.000Z | test/api_gw_test.py | fredliporace/cbers-2-stac | c92924b19289e0896ee676064250f9c8a758b674 | [
"Apache-2.0"
] | 1 | 2020-08-09T03:57:16.000Z | 2020-08-09T03:57:16.000Z | """api_gw_test"""
# Remove warnings when using pytest fixtures
# pylint: disable=redefined-outer-name
import json
from test.conftest import ENDPOINT_URL
# warning disabled, this is used as a pylint fixture
from test.elasticsearch_test import ( # pylint: disable=unused-import
es_client,
populate_es_test_case_1,
)
from urllib.parse import urlencode
import boto3
import pytest
import requests
def to_localstack_url(api_id: str, url: str):
    """
    Convert an API Gateway invoke URL into its localstack equivalent.

    Rewrites the port segment to include the REST API id and routes the
    stage through localstack's ``_user_request_`` path.
    """
    with_api_id = url.replace("4566", f"4566/restapis/{api_id}")
    return with_api_id.replace("dev", "dev/_user_request_")
def api_gw_lambda_integrate_deploy(
    api_client,
    api: dict,
    api_resource: dict,
    lambda_func: dict,
    http_method: str = "GET",
) -> str:
    """
    Wire a lambda into an API Gateway method and deploy the API.

    Returns the localstack invocation URL for the deployed resource.
    """
    integration_uri = (
        "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
        f"{lambda_func['FunctionArn']}/invocations"
    )
    api_client.put_integration(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        httpMethod=http_method,
        type="AWS",
        integrationHttpMethod="POST",
        uri=integration_uri,
    )
    api_client.create_deployment(restApiId=api["id"], stageName="dev")
    return (
        f"http://localhost:4566/restapis/{api['id']}"
        f"/dev/_user_request_{api_resource['path']}"
    )
@pytest.fixture
def api_gw_method(request):
    """
    Create a throwaway API Gateway REST API for a test.

    Reads method configuration from the test's ``api_gw_method_args``
    marker and returns ``(api_client, api, api_resource)`` with a
    ``/test`` resource and one method already configured.  The API is
    deleted again by the finalizer after the test.
    """
    marker = request.node.get_closest_marker("api_gw_method_args")
    put_method_args = marker.args[0]["put_method_args"]
    put_method_response_args = marker.args[0]["put_method_response_args"]
    # `api` starts as None so the finalizer is a no-op if creation below
    # fails before the API exists; `fin` closes over the later rebinding.
    api = None
    def fin():
        """fixture finalizer"""
        if api:
            api_client.delete_rest_api(restApiId=api["id"])
    # Hook teardown (finalizer) code
    request.addfinalizer(fin)
    api_client = boto3.client("apigateway", endpoint_url=ENDPOINT_URL)
    api = api_client.create_rest_api(name="testapi")
    # A new REST API has exactly one (root) resource; attach /test to it.
    root_resource_id = api_client.get_resources(restApiId=api["id"])["items"][0]["id"]
    api_resource = api_client.create_resource(
        restApiId=api["id"], parentId=root_resource_id, pathPart="test"
    )
    api_client.put_method(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        authorizationType="NONE",
        **put_method_args,
    )
    api_client.put_method_response(
        restApiId=api["id"],
        resourceId=api_resource["id"],
        statusCode="200",
        **put_method_response_args,
    )
    return api_client, api, api_resource
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "GET",},
        "put_method_response_args": {"httpMethod": "GET",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "stac_endpoint",
        "handler": "code.handler",
        "environment": {"CBERS_STAC_BUCKET": "bucket",},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_root(api_gw_method, lambda_function):
    """
    Smoke test: the STAC root endpoint lambda, wired into API Gateway,
    answers a plain GET with HTTP 200.
    """
    # Based on
    # https://stackoverflow.com/questions/58859917/creating-aws-lambda-integrated-api-gateway-resource-with-boto3
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function  # pylint: disable=unused-variable
    url = api_gw_lambda_integrate_deploy(api_client, api, api_resource, lambda_func)
    req = requests.get(url)
    assert req.status_code == 200
@pytest.mark.api_gw_method_args(
    {
        "put_method_args": {"httpMethod": "GET",},
        "put_method_response_args": {"httpMethod": "GET",},
    }
)
@pytest.mark.lambda_function_args(
    {
        "name": "elasticsearch",
        "handler": "es.stac_search_endpoint_handler",
        "environment": {},
        "timeout": 30,
        "layers": (
            {
                "output_dir": "./test",
                "layer_dir": "./cbers2stac/layers/common",
                "tag": "common",
            },
        ),
    }
)
def test_item_search_get(
    api_gw_method, lambda_function, es_client
):  # pylint: disable=too-many-locals,too-many-statements
    """
    Exercise the GET /search endpoint against an ES index populated by
    populate_es_test_case_1 (two items, one CBERS4-MUX and one
    CBERS4-AWFI): collection filtering, paging, id filtering and the
    query extension.
    """
    api_client, api, api_resource = api_gw_method
    lambda_client, lambda_func = lambda_function  # pylint: disable=unused-variable
    # ES_ENDPOINT is set by lambda_function; point the lambda at the
    # localstack Elasticsearch port without SSL.
    lambda_client.update_function_configuration(
        FunctionName=lambda_func["FunctionName"],
        Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
    )
    populate_es_test_case_1(es_client)
    # Empty GET, return all 2 items
    original_url = api_gw_lambda_integrate_deploy(
        api_client, api, api_resource, lambda_func
    )
    req = requests.get(original_url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 2
    # Single collection, return single item
    url = f"{original_url}?collections=CBERS4-MUX"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["collection"] == "CBERS4-MUX"
    # Two collections, return all items
    url = f"{original_url}?collections=CBERS4-MUX,CBERS4-AWFI"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 2
    # Paging, no next case: both items fit in one page, so no "links"
    url = f"{original_url}"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    # Paging, next page: limit=1 forces a single "next" link
    url = f"{original_url}?limit=1"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" in fcol.keys()
    assert len(fcol["links"]) == 1
    # The link points at the AWS-style URL; rewrite it for localstack.
    next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
    req = requests.get(next_href)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert "links" not in fcol.keys()
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
    # ids
    url = f"{original_url}?ids=CBERS_4_MUX_20170528_090_084_L2"
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
    # query extension: filter on a CBERS-specific property
    url = f"{original_url}?"
    url += urlencode({"query": '{"cbers:data_type": {"eq":"L4"}}'})
    req = requests.get(url)
    assert req.status_code == 200, req.text
    fcol = json.loads(req.text)
    assert len(fcol["features"]) == 1
    assert fcol["features"][0]["id"] == "CBERS_4_AWFI_20170409_167_123_L4"
@pytest.mark.api_gw_method_args(
{
"put_method_args": {"httpMethod": "POST",},
"put_method_response_args": {"httpMethod": "POST",},
}
)
@pytest.mark.lambda_function_args(
{
"name": "elasticsearch",
"handler": "es.stac_search_endpoint_handler",
"environment": {},
"timeout": 30,
"layers": (
{
"output_dir": "./test",
"layer_dir": "./cbers2stac/layers/common",
"tag": "common",
},
),
}
)
def test_item_search_post(
api_gw_method, lambda_function, es_client
): # pylint: disable=too-many-locals
"""
test_item_search_post
"""
api_client, api, api_resource = api_gw_method
lambda_client, lambda_func = lambda_function # pylint: disable=unused-variable
# ES_ENDPOINT is set by lambda_function
lambda_client.update_function_configuration(
FunctionName=lambda_func["FunctionName"],
Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
)
populate_es_test_case_1(es_client)
url = api_gw_lambda_integrate_deploy(
api_client, api, api_resource, lambda_func, http_method="POST"
)
# POST with invalid bbox order, check error status code and message
req = requests.post(
url,
data=json.dumps(
{
"collections": ["mycollection"],
"bbox": [160.6, -55.95, -170, -25.89],
"limit": 100,
"datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
}
),
)
assert req.status_code == 400, req.text
assert "First lon corner is not western" in req.text
# Same as above with fixed bbox
req = requests.post(
url,
data=json.dumps(
{
"collections": ["mycollection"],
"bbox": [-170, -25.89, 160.6, -55.95],
"limit": 100,
"datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
}
),
)
assert req.status_code == 200, req.text
# Paging, no next case
req = requests.post(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
# Paging, next page
body = {"limit": 1}
req = requests.post(url, data=json.dumps(body))
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" in fcol.keys()
assert len(fcol["links"]) == 1
next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
req = requests.post(
next_href, data=json.dumps({**body, **fcol["links"][0]["body"]})
)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
# ids
body = {"ids": ["CBERS_4_MUX_20170528_090_084_L2"]}
req = requests.post(url, data=json.dumps(body))
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
| 30.376119 | 113 | 0.622445 |
import json
from test.conftest import ENDPOINT_URL
from test.elasticsearch_test import (
es_client,
populate_es_test_case_1,
)
from urllib.parse import urlencode
import boto3
import pytest
import requests
def to_localstack_url(api_id: str, url: str):
return url.replace("4566", f"4566/restapis/{api_id}").replace(
"dev", "dev/_user_request_"
)
def api_gw_lambda_integrate_deploy(
api_client,
api: dict,
api_resource: dict,
lambda_func: dict,
http_method: str = "GET",
) -> str:
lambda_integration_arn = (
"arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/"
f"{lambda_func['FunctionArn']}/invocations"
)
api_client.put_integration(
restApiId=api["id"],
resourceId=api_resource["id"],
httpMethod=http_method,
type="AWS",
integrationHttpMethod="POST",
uri=lambda_integration_arn,
)
api_client.create_deployment(
restApiId=api["id"], stageName="dev",
)
return f"http://localhost:4566/restapis/{api['id']}/dev/_user_request_{api_resource['path']}"
@pytest.fixture
def api_gw_method(request):
marker = request.node.get_closest_marker("api_gw_method_args")
put_method_args = marker.args[0]["put_method_args"]
put_method_response_args = marker.args[0]["put_method_response_args"]
api = None
def fin():
if api:
api_client.delete_rest_api(restApiId=api["id"])
request.addfinalizer(fin)
api_client = boto3.client("apigateway", endpoint_url=ENDPOINT_URL)
api = api_client.create_rest_api(name="testapi")
root_resource_id = api_client.get_resources(restApiId=api["id"])["items"][0]["id"]
api_resource = api_client.create_resource(
restApiId=api["id"], parentId=root_resource_id, pathPart="test"
)
api_client.put_method(
restApiId=api["id"],
resourceId=api_resource["id"],
authorizationType="NONE",
**put_method_args,
)
api_client.put_method_response(
restApiId=api["id"],
resourceId=api_resource["id"],
statusCode="200",
**put_method_response_args,
)
return api_client, api, api_resource
@pytest.mark.api_gw_method_args(
{
"put_method_args": {"httpMethod": "GET",},
"put_method_response_args": {"httpMethod": "GET",},
}
)
@pytest.mark.lambda_function_args(
{
"name": "stac_endpoint",
"handler": "code.handler",
"environment": {"CBERS_STAC_BUCKET": "bucket",},
"timeout": 30,
"layers": (
{
"output_dir": "./test",
"layer_dir": "./cbers2stac/layers/common",
"tag": "common",
},
),
}
)
def test_root(api_gw_method, lambda_function):
api_client, api, api_resource = api_gw_method
lambda_client, lambda_func = lambda_function
url = api_gw_lambda_integrate_deploy(api_client, api, api_resource, lambda_func)
req = requests.get(url)
assert req.status_code == 200
@pytest.mark.api_gw_method_args(
{
"put_method_args": {"httpMethod": "GET",},
"put_method_response_args": {"httpMethod": "GET",},
}
)
@pytest.mark.lambda_function_args(
{
"name": "elasticsearch",
"handler": "es.stac_search_endpoint_handler",
"environment": {},
"timeout": 30,
"layers": (
{
"output_dir": "./test",
"layer_dir": "./cbers2stac/layers/common",
"tag": "common",
},
),
}
)
def test_item_search_get(
api_gw_method, lambda_function, es_client
):
api_client, api, api_resource = api_gw_method
lambda_client, lambda_func = lambda_function
lambda_client.update_function_configuration(
FunctionName=lambda_func["FunctionName"],
Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
)
populate_es_test_case_1(es_client)
original_url = api_gw_lambda_integrate_deploy(
api_client, api, api_resource, lambda_func
)
req = requests.get(original_url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 2
url = f"{original_url}?collections=CBERS4-MUX"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["collection"] == "CBERS4-MUX"
url = f"{original_url}?collections=CBERS4-MUX,CBERS4-AWFI"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 2
url = f"{original_url}"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
url = f"{original_url}?limit=1"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" in fcol.keys()
assert len(fcol["links"]) == 1
next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
req = requests.get(next_href)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
url = f"{original_url}?ids=CBERS_4_MUX_20170528_090_084_L2"
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
url = f"{original_url}?"
url += urlencode({"query": '{"cbers:data_type": {"eq":"L4"}}'})
req = requests.get(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["id"] == "CBERS_4_AWFI_20170409_167_123_L4"
@pytest.mark.api_gw_method_args(
{
"put_method_args": {"httpMethod": "POST",},
"put_method_response_args": {"httpMethod": "POST",},
}
)
@pytest.mark.lambda_function_args(
{
"name": "elasticsearch",
"handler": "es.stac_search_endpoint_handler",
"environment": {},
"timeout": 30,
"layers": (
{
"output_dir": "./test",
"layer_dir": "./cbers2stac/layers/common",
"tag": "common",
},
),
}
)
def test_item_search_post(
api_gw_method, lambda_function, es_client
):
api_client, api, api_resource = api_gw_method
lambda_client, lambda_func = lambda_function
lambda_client.update_function_configuration(
FunctionName=lambda_func["FunctionName"],
Environment={"Variables": {"ES_PORT": "4571", "ES_SSL": "NO",}},
)
populate_es_test_case_1(es_client)
url = api_gw_lambda_integrate_deploy(
api_client, api, api_resource, lambda_func, http_method="POST"
)
req = requests.post(
url,
data=json.dumps(
{
"collections": ["mycollection"],
"bbox": [160.6, -55.95, -170, -25.89],
"limit": 100,
"datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
}
),
)
assert req.status_code == 400, req.text
assert "First lon corner is not western" in req.text
req = requests.post(
url,
data=json.dumps(
{
"collections": ["mycollection"],
"bbox": [-170, -25.89, 160.6, -55.95],
"limit": 100,
"datetime": "2019-01-01T00:00:00Z/2019-01-01T23:59:59Z",
}
),
)
assert req.status_code == 200, req.text
req = requests.post(url)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
body = {"limit": 1}
req = requests.post(url, data=json.dumps(body))
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" in fcol.keys()
assert len(fcol["links"]) == 1
next_href = to_localstack_url(api["id"], fcol["links"][0]["href"])
req = requests.post(
next_href, data=json.dumps({**body, **fcol["links"][0]["body"]})
)
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert "links" not in fcol.keys()
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
body = {"ids": ["CBERS_4_MUX_20170528_090_084_L2"]}
req = requests.post(url, data=json.dumps(body))
assert req.status_code == 200, req.text
fcol = json.loads(req.text)
assert len(fcol["features"]) == 1
assert fcol["features"][0]["id"] == "CBERS_4_MUX_20170528_090_084_L2"
| true | true |
7901e1c5f397246379f72ceab53eea570b7b21af | 3,647 | py | Python | fastai/callback/neptune.py | PalaashAgrawal/fastai | 6148ff303d9b8a7fa8730ec01e81820af0515be3 | [
"Apache-2.0"
] | 23,140 | 2017-09-09T18:23:40.000Z | 2022-03-31T11:49:36.000Z | fastai/callback/neptune.py | PalaashAgrawal/fastai | 6148ff303d9b8a7fa8730ec01e81820af0515be3 | [
"Apache-2.0"
] | 3,077 | 2017-09-16T07:08:31.000Z | 2022-03-31T20:14:16.000Z | fastai/callback/neptune.py | PalaashAgrawal/fastai | 6148ff303d9b8a7fa8730ec01e81820af0515be3 | [
"Apache-2.0"
] | 8,740 | 2017-09-11T02:19:40.000Z | 2022-03-31T11:29:18.000Z | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/72_callback.neptune.ipynb (unless otherwise specified).
__all__ = ['NeptuneCallback']
# Cell
import tempfile
from ..basics import *
from ..learner import Callback
# Cell
import neptune
# Cell
class NeptuneCallback(Callback):
"Log losses, metrics, model weights, model architecture summary to neptune"
order = Recorder.order+1
def __init__(self, log_model_weights=True, keep_experiment_running=False):
self.log_model_weights = log_model_weights
self.keep_experiment_running = keep_experiment_running
self.experiment = None
if neptune.project is None:
raise ValueError('You did not initialize project in neptune.\n',
'Please invoke `neptune.init("USERNAME/PROJECT_NAME")` before this callback.')
def before_fit(self):
try:
self.experiment = neptune.get_experiment()
except ValueError:
print('No active experiment. Please invoke `neptune.create_experiment()` before this callback.')
try:
self.experiment.set_property('n_epoch', str(self.learn.n_epoch))
self.experiment.set_property('model_class', str(type(self.learn.model)))
except: print(f'Did not log all properties. Check properties in the {neptune.get_experiment()}.')
try:
with tempfile.NamedTemporaryFile(mode='w') as f:
with open(f.name, 'w') as g: g.write(repr(self.learn.model))
self.experiment.log_artifact(f.name, 'model_summary.txt')
except: print('Did not log model summary. Check if your model is PyTorch model.')
if self.log_model_weights and not hasattr(self.learn, 'save_model'):
print('Unable to log model to Neptune.\n',
'Use "SaveModelCallback" to save model checkpoints that will be logged to Neptune.')
def after_batch(self):
# log loss and opt.hypers
if self.learn.training:
self.experiment.log_metric('batch__smooth_loss', self.learn.smooth_loss)
self.experiment.log_metric('batch__loss', self.learn.loss)
self.experiment.log_metric('batch__train_iter', self.learn.train_iter)
for i, h in enumerate(self.learn.opt.hypers):
for k, v in h.items(): self.experiment.log_metric(f'batch__opt.hypers.{k}', v)
def after_epoch(self):
# log metrics
for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
if n not in ['epoch', 'time']: self.experiment.log_metric(f'epoch__{n}', v)
if n == 'time': self.experiment.log_text(f'epoch__{n}', str(v))
# log model weights
if self.log_model_weights and hasattr(self.learn, 'save_model'):
if self.learn.save_model.every_epoch:
_file = join_path_file(f'{self.learn.save_model.fname}_{self.learn.save_model.epoch}',
self.learn.path / self.learn.model_dir, ext='.pth')
else:
_file = join_path_file(self.learn.save_model.fname,
self.learn.path / self.learn.model_dir, ext='.pth')
self.experiment.log_artifact(_file)
def after_fit(self):
if not self.keep_experiment_running:
try: self.experiment.stop()
except: print('No neptune experiment to stop.')
else:
print(f'Your experiment (id: {self.experiment.id}, name: {self.experiment.name}) is left in the running state.\n',
'You can log more data to it, like this: `neptune.log_metric()`') | 46.75641 | 126 | 0.639155 |
__all__ = ['NeptuneCallback']
import tempfile
from ..basics import *
from ..learner import Callback
import neptune
class NeptuneCallback(Callback):
order = Recorder.order+1
def __init__(self, log_model_weights=True, keep_experiment_running=False):
self.log_model_weights = log_model_weights
self.keep_experiment_running = keep_experiment_running
self.experiment = None
if neptune.project is None:
raise ValueError('You did not initialize project in neptune.\n',
'Please invoke `neptune.init("USERNAME/PROJECT_NAME")` before this callback.')
def before_fit(self):
try:
self.experiment = neptune.get_experiment()
except ValueError:
print('No active experiment. Please invoke `neptune.create_experiment()` before this callback.')
try:
self.experiment.set_property('n_epoch', str(self.learn.n_epoch))
self.experiment.set_property('model_class', str(type(self.learn.model)))
except: print(f'Did not log all properties. Check properties in the {neptune.get_experiment()}.')
try:
with tempfile.NamedTemporaryFile(mode='w') as f:
with open(f.name, 'w') as g: g.write(repr(self.learn.model))
self.experiment.log_artifact(f.name, 'model_summary.txt')
except: print('Did not log model summary. Check if your model is PyTorch model.')
if self.log_model_weights and not hasattr(self.learn, 'save_model'):
print('Unable to log model to Neptune.\n',
'Use "SaveModelCallback" to save model checkpoints that will be logged to Neptune.')
def after_batch(self):
if self.learn.training:
self.experiment.log_metric('batch__smooth_loss', self.learn.smooth_loss)
self.experiment.log_metric('batch__loss', self.learn.loss)
self.experiment.log_metric('batch__train_iter', self.learn.train_iter)
for i, h in enumerate(self.learn.opt.hypers):
for k, v in h.items(): self.experiment.log_metric(f'batch__opt.hypers.{k}', v)
def after_epoch(self):
for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
if n not in ['epoch', 'time']: self.experiment.log_metric(f'epoch__{n}', v)
if n == 'time': self.experiment.log_text(f'epoch__{n}', str(v))
if self.log_model_weights and hasattr(self.learn, 'save_model'):
if self.learn.save_model.every_epoch:
_file = join_path_file(f'{self.learn.save_model.fname}_{self.learn.save_model.epoch}',
self.learn.path / self.learn.model_dir, ext='.pth')
else:
_file = join_path_file(self.learn.save_model.fname,
self.learn.path / self.learn.model_dir, ext='.pth')
self.experiment.log_artifact(_file)
def after_fit(self):
if not self.keep_experiment_running:
try: self.experiment.stop()
except: print('No neptune experiment to stop.')
else:
print(f'Your experiment (id: {self.experiment.id}, name: {self.experiment.name}) is left in the running state.\n',
'You can log more data to it, like this: `neptune.log_metric()`') | true | true |
7901e25f00983d62b794779f94b90fceddb8c4ec | 3,226 | py | Python | test.py | tangku006/cnn-text-classification-tf-master | 510a39d2726a23b0b94a5e6f4fc83014b0e0fa30 | [
"Apache-2.0"
] | null | null | null | test.py | tangku006/cnn-text-classification-tf-master | 510a39d2726a23b0b94a5e6f4fc83014b0e0fa30 | [
"Apache-2.0"
] | null | null | null | test.py | tangku006/cnn-text-classification-tf-master | 510a39d2726a23b0b94a5e6f4fc83014b0e0fa30 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
# Parameters
# ==================================================
# Data loading params 语料文件路径定义
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")
# Model Hyperparameters 定义网络超参数
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# Data Preparation
# ==================================================
# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
# Build vocabulary
max_document_length = max([len(x.split(" ")) for x in x_text])
# 将词向量填充至max_length的长度
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
print(x[:10])
# Randomly shuffle data
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
del x, y, x_shuffled, y_shuffled
print("Vocabulary: ", vocab_processor.vocabulary_)
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
print(x_train.shape[0], x_train.shape[1]) | 42.447368 | 125 | 0.725976 |
import tensorflow as tf
import numpy as np
import os
import time
import datetime
import data_helpers
from text_cnn import TextCNN
from tensorflow.contrib import learn
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (default: 0.0)")
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
max_document_length = max([len(x.split(" ")) for x in x_text])
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))
print(x[:10])
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled = x[shuffle_indices]
y_shuffled = y[shuffle_indices]
dev_sample_index = -1 * int(FLAGS.dev_sample_percentage * float(len(y)))
x_train, x_dev = x_shuffled[:dev_sample_index], x_shuffled[dev_sample_index:]
y_train, y_dev = y_shuffled[:dev_sample_index], y_shuffled[dev_sample_index:]
del x, y, x_shuffled, y_shuffled
print("Vocabulary: ", vocab_processor.vocabulary_)
print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
print("Train/Dev split: {:d}/{:d}".format(len(y_train), len(y_dev)))
print(x_train.shape[0], x_train.shape[1]) | true | true |
7901e3292be28204f718d38f7f0373aaa955271b | 572 | py | Python | Session1_2018/coinChange.py | vedantc6/LCode | 43aec4da9cc22ef43e877a16dbee380b98d9089f | [
"MIT"
] | 1 | 2018-09-21T10:51:15.000Z | 2018-09-21T10:51:15.000Z | Session1_2018/coinChange.py | vedantc6/LCode | 43aec4da9cc22ef43e877a16dbee380b98d9089f | [
"MIT"
] | null | null | null | Session1_2018/coinChange.py | vedantc6/LCode | 43aec4da9cc22ef43e877a16dbee380b98d9089f | [
"MIT"
] | null | null | null | class Solution:
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
# max value taken as amount+1 because in worst case, it can be amount - when denoms of only 1
res = [amount+1]*(amount+1)
res[0] = 0
for i in range(1, amount+1):
for j in coins:
if j <= i:
res[i] = min(res[i], res[i-j] + 1)
if res[amount] > amount:
return -1
else:
return res[amount]
| 27.238095 | 101 | 0.461538 | class Solution:
def coinChange(self, coins, amount):
res = [amount+1]*(amount+1)
res[0] = 0
for i in range(1, amount+1):
for j in coins:
if j <= i:
res[i] = min(res[i], res[i-j] + 1)
if res[amount] > amount:
return -1
else:
return res[amount]
| true | true |
7901e4b7f0c9a43f1cbd6ffb932d2b71f1d325e0 | 1,194 | py | Python | cracking-code-interview/chapter_03/3-3_stack_of_plates.py | italo-batista/problems-solving | f83ad34f0abebd52925c4020635556f20743ba06 | [
"MIT"
] | null | null | null | cracking-code-interview/chapter_03/3-3_stack_of_plates.py | italo-batista/problems-solving | f83ad34f0abebd52925c4020635556f20743ba06 | [
"MIT"
] | null | null | null | cracking-code-interview/chapter_03/3-3_stack_of_plates.py | italo-batista/problems-solving | f83ad34f0abebd52925c4020635556f20743ba06 | [
"MIT"
] | null | null | null | import sys
sys.path.append('./datastructures')
from datastructures import Stack, StackNode
class SetOfStacks:
LIMIT_PER_STACK = 2
def __init__(self):
self.main_stack = Stack()
def pop(self):
if self.is_empty():
return None
elif self._top_stack().is_empty():
self.main_stack.pop()
self.pop()
return self._top_stack().pop()
def push(self, item):
if self.is_empty():
self.main_stack.push(Stack())
self._top_stack().push(item)
def is_empty(self):
return self.main_stack.is_empty()
def peek(self):
if self.is_empty():
return None
return self._top_stack().peek().value
def _top_stack(self):
return self.main_stack.peek()
if __name__ == '__main__': # tests
stacks = SetOfStacks()
assert stacks.peek() is None
stacks.push(StackNode(1))
assert stacks.peek() == 1
stacks.push(StackNode(2))
assert stacks.peek() == 2
stacks.push(StackNode(3))
assert stacks.pop().value == 3
assert stacks.pop().value == 2
assert stacks.pop().value == 1
assert stacks.is_empty() is not None | 20.586207 | 45 | 0.600503 | import sys
sys.path.append('./datastructures')
from datastructures import Stack, StackNode
class SetOfStacks:
LIMIT_PER_STACK = 2
def __init__(self):
self.main_stack = Stack()
def pop(self):
if self.is_empty():
return None
elif self._top_stack().is_empty():
self.main_stack.pop()
self.pop()
return self._top_stack().pop()
def push(self, item):
if self.is_empty():
self.main_stack.push(Stack())
self._top_stack().push(item)
def is_empty(self):
return self.main_stack.is_empty()
def peek(self):
if self.is_empty():
return None
return self._top_stack().peek().value
def _top_stack(self):
return self.main_stack.peek()
if __name__ == '__main__':
stacks = SetOfStacks()
assert stacks.peek() is None
stacks.push(StackNode(1))
assert stacks.peek() == 1
stacks.push(StackNode(2))
assert stacks.peek() == 2
stacks.push(StackNode(3))
assert stacks.pop().value == 3
assert stacks.pop().value == 2
assert stacks.pop().value == 1
assert stacks.is_empty() is not None | true | true |
7901e4cdbc5cbcad0949ab16d8975c98613f9659 | 785 | py | Python | pyp/wxrobot-master/config.py | ChriXChan/tools | 5c989d71d8ef621a9c36f3c0c92951ab59f01548 | [
"MIT"
] | null | null | null | pyp/wxrobot-master/config.py | ChriXChan/tools | 5c989d71d8ef621a9c36f3c0c92951ab59f01548 | [
"MIT"
] | null | null | null | pyp/wxrobot-master/config.py | ChriXChan/tools | 5c989d71d8ef621a9c36f3c0c92951ab59f01548 | [
"MIT"
] | null | null | null | """项目配置"""
# 图灵机器人,99元一月付费版,尽情享用!
tuling_api_key = '88f17f853d974387af64955bed9466f4'
# 自动回复
is_friend_auto_reply = False # 好友自动回复
is_group_reply = False # 此项表示群中是否回复
is_group_at_reply = False # 上一项开启后此项才生效
is_forward_revoke_msg = True # 开启防撤回模式
is_forward_group_at_msg = False # 转发群@我的消息
# 机器人主人
bot_master_name = '' # 使用备注名更安全,只允许一个,可远程控制机器人,如果不设置(空)则将文件助手设置为管理员,但不具备远程控制功能
# 监听某些好友群聊,如老板
is_listen_friend = False
listen_friend_names = '猪哥' # 需要监听的人名称,使用备注名更安全,允许多个用|分隔,如:主管|项目经理|产品狗
listen_friend_groups = 'Python新手交流' # 在这些群里监听好友说的话,匹配模式:包含“唯一集团工作群”的群
# 转发信息至群
is_forward_mode = False # 打开转发模式,主人发送给机器人的消息都将转发至forward_groups群
forward_groups = 'Python新手交流' # 需要将消息转发的群,匹配模式同上
# 群分享监控
is_listen_sharing = False
listen_sharing_groups = 'Python新手交流' # 监控群分享,匹配模式同上
| 25.322581 | 79 | 0.782166 |
tuling_api_key = '88f17f853d974387af64955bed9466f4'
is_friend_auto_reply = False
is_group_reply = False
is_group_at_reply = False
is_forward_revoke_msg = True
is_forward_group_at_msg = False
bot_master_name = ''
is_listen_friend = False
listen_friend_names = '猪哥'
listen_friend_groups = 'Python新手交流'
is_forward_mode = False
forward_groups = 'Python新手交流'
is_listen_sharing = False
listen_sharing_groups = 'Python新手交流'
| true | true |
7901e5694b45feb02e1544ec860de25f4dc1e15a | 32,665 | py | Python | mtools/util/logevent.py | sindbach/mtools | e65ce879cbcd6b7795ff6fd29e26a6d3b42a6c82 | [
"Apache-2.0"
] | null | null | null | mtools/util/logevent.py | sindbach/mtools | e65ce879cbcd6b7795ff6fd29e26a6d3b42a6c82 | [
"Apache-2.0"
] | 1 | 2018-04-17T05:37:29.000Z | 2018-04-17T05:37:29.000Z | mtools/util/logevent.py | sindbach/mtools | e65ce879cbcd6b7795ff6fd29e26a6d3b42a6c82 | [
"Apache-2.0"
] | null | null | null | #!/bin/python
import json
import re
import sys
from datetime import datetime
import dateutil.parser
from dateutil.tz import tzutc
from six.moves import range
from mtools.util.pattern import json2pattern
class DateTimeEncoder(json.JSONEncoder):
"""Custom datetime encoder for json output."""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
class LogEvent(object):
"""
Extract information from log line and store properties/variables.
line_str: the original line string
split_tokens: a list of string tokens after splitting line_str using
whitespace as split points
datetime: a datetime object for the logevent. For logfiles created with
version 2.4+, it also contains micro-seconds
duration: the duration of a timed operation in ms
thread: the thread name (e.g. "conn1234") as string
operation: insert, update, remove, query, command, getmore, None
namespace: the namespace of the operation, or None
command: the type of command, if the operation was a "command"
pattern: the query pattern for queries, updates, counts, etc
...
Certain operations also add the number of affected/scanned documents.
If applicable, the following variables are also set, otherwise the
default is None: nscanned, ntoreturn, nreturned, ninserted, nupdated
For performance reason, all fields are evaluated lazily upon first
request.
"""
# datetime handler for json encoding
dthandler = lambda obj: obj.isoformat() if isinstance(obj,
datetime) else None
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
log_operations = ['query', 'insert', 'update', 'remove', 'getmore',
'command']
log_levels = ['D', 'F', 'E', 'W', 'I', 'U']
log_components = ['-', 'ACCESS', 'COMMAND', 'CONTROL', 'GEO', 'INDEX',
'NETWORK', 'QUERY', 'REPL', 'SHARDING', 'STORAGE',
'JOURNAL', 'WRITE', 'TOTAL']
def __init__(self, doc_or_str):
self._year_rollover = False
if isinstance(doc_or_str, bytes):
doc_or_str = doc_or_str.decode("utf-8")
if isinstance(doc_or_str, str) or (sys.version_info.major == 2 and
isinstance(doc_or_str, unicode)):
# create from string, remove line breaks at end of _line_str
self.from_string = True
self._line_str = doc_or_str.rstrip()
self._profile_doc = None
self._reset()
else:
self.from_string = False
self._profile_doc = doc_or_str
# docs don't need to be parsed lazily, they are fast
self._parse_document()
def _reset(self):
self._split_tokens_calculated = False
self._split_tokens = None
self._duration_calculated = False
self._duration = None
self._datetime_calculated = False
self._datetime = None
self._datetime_nextpos = None
self._datetime_format = None
self._datetime_str = ''
self._thread_calculated = False
self._thread = None
self._operation_calculated = False
self._operation = None
self._namespace = None
self._pattern = None
self._sort_pattern = None
self._command_calculated = False
self._command = None
self._counters_calculated = False
# TODO: refactor from the legacy names to modern
# (eg: nscanned => keysExamined). Currently _extract_counters()
# maps newer property names into legacy equivalents for
# broader log file support.
self._nscanned = None # keysExamined
self._nscannedObjects = None # docsExamined
self._ntoreturn = None
self._nupdated = None # nModified
self._nreturned = None # nReturned or nMatched (updates)
self._ninserted = None # nInserted
self._ndeleted = None # nDeleted
self._numYields = None
self._planSummary = None
self._writeConflicts = None
self._r = None
self._w = None
self._conn = None
self._level_calculated = False
self._level = None
self._component = None
self.merge_marker_str = ''
def set_line_str(self, line_str):
"""
Set line_str.
Line_str is only writeable if LogEvent was created from a string,
not from a system.profile documents.
"""
if not self.from_string:
raise ValueError("can't set line_str for LogEvent created from "
"system.profile documents.")
if line_str != self._line_str:
self._line_str = line_str.rstrip()
self._reset()
def get_line_str(self):
"""Return line_str depending on source, logfile or system.profile."""
if self.from_string:
return ' '.join([s for s in [self.merge_marker_str,
self._datetime_str,
self._line_str] if s])
else:
return ' '.join([s for s in [self._datetime_str,
self._line_str] if s])
line_str = property(get_line_str, set_line_str)
@property
def split_tokens(self):
"""Split string into tokens (lazy)."""
if not self._split_tokens_calculated:
# split into items (whitespace split)
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens
@property
def duration(self):
"""Calculate duration if available (lazy)."""
if not self._duration_calculated:
self._duration_calculated = True
# split_tokens = self.split_tokens
line_str = self.line_str
if (line_str
and line_str.endswith('ms')
and 'Scheduled new oplog query' not in line_str):
try:
# find duration from end
space_pos = line_str.rfind(" ")
if space_pos == -1:
return
self._duration = int(line_str[line_str.rfind(" ") +
1:-2].replace(',', ''))
except ValueError:
self._duration = None
elif "flushing" in self.line_str:
matchobj = re.search(r'flushing mmaps took (\d+)ms',
self.line_str)
if matchobj:
self._duration = int(matchobj.group(1))
return self._duration
    @property
    def datetime(self):
        """Extract datetime if available (lazy).

        Side effects on first successful match: strips the timestamp
        tokens off self._line_str, records the token index after the
        timestamp in self._datetime_nextpos, and reformats
        self._datetime_str via _reformat_timestamp().
        """
        if not self._datetime_calculated:
            self._datetime_calculated = True
            # if no datetime after 10 tokens, break to avoid parsing
            # very long lines
            split_tokens = self.split_tokens[:10]
            for offs in range(len(split_tokens)):
                dt = self._match_datetime_pattern(split_tokens[offs:offs + 4])
                if dt:
                    self._datetime = dt
                    self._datetime_nextpos = offs
                    # iso8601 timestamps occupy one token, ctime four
                    if self._datetime_format.startswith("iso8601"):
                        self._datetime_nextpos += 1
                    else:
                        self._datetime_nextpos += 4
                    # separate datetime str and linestr
                    self._line_str = (' '.join(self.split_tokens
                                               [self._datetime_nextpos:]))
                    # level + component (if present) occupy two more tokens
                    if self.level:
                        self._datetime_nextpos += 2
                    self._reformat_timestamp(self._datetime_format)
                    break
        return self._datetime
@property
def datetime_format(self):
if not self._datetime_calculated:
_ = self.datetime
return self._datetime_format
@property
def datetime_nextpos(self):
if self._datetime_nextpos is None and not self._datetime_calculated:
_ = self.datetime
return self._datetime_nextpos
    def set_datetime_hint(self, format, nextpos, rollover):
        """Prime timestamp parsing with hints from a previous line.

        Returns True if the hinted format still looks plausible for this
        line; returns False (after forcing a full datetime evaluation)
        when the format appears to have changed.
        """
        self._datetime_format = format
        self._datetime_nextpos = nextpos
        self._year_rollover = rollover
        # Fast check if timestamp format changed.
        # If it has, trigger datetime evaluation.
        if format.startswith('ctime'):
            # ctime lines start with a weekday token four positions before
            # the hinted next position
            if (len(self.split_tokens) < 4 or
                    self.split_tokens[self._datetime_nextpos - 4] not in
                    self.weekdays):
                _ = self.datetime
                return False
            return True
        else:
            if len(self.split_tokens) == 0:
                # empty line, no need to parse datetime
                self._datetime_calculated = True
                return False
            try:
                # iso8601 timestamps begin with a digit
                if not (self.split_tokens[self._datetime_nextpos - 1][0]
                        .isdigit()):
                    # not the timestamp format that was hinted
                    _ = self.datetime
                    return False
            except Exception:
                # index/attribute problems: keep the hint optimistically
                pass
            return True
    def _match_datetime_pattern(self, tokens):
        """
        Match the datetime pattern at the beginning of the token list.

        There are several formats that this method needs to understand
        and distinguish between (see MongoDB's SERVER-7965):

        ctime-pre2.4    Wed Dec 31 19:00:00
        ctime           Wed Dec 31 19:00:00.000
        iso8601-utc     1970-01-01T00:00:00.000Z
        iso8601-local   1969-12-31T19:00:00.000+0500

        Returns a timezone-aware datetime, or None if no match. Also sets
        self._datetime_format as a side effect.
        """
        # first check: less than 4 tokens can't be ctime
        assume_iso8601_format = len(tokens) < 4
        # check for ctime-pre-2.4 or ctime format
        if not assume_iso8601_format:
            weekday, month, day, time = tokens[:4]
            if (len(tokens) < 4 or (weekday not in self.weekdays) or
                    (month not in self.months) or not day.isdigit()):
                assume_iso8601_format = True
        if assume_iso8601_format:
            # sanity check, because the dateutil parser could interpret
            # any numbers as a valid date
            if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}',
                            tokens[0]):
                return None
            # convinced that this is a ISO-8601 format, the dateutil parser
            # will do the rest
            dt = dateutil.parser.parse(tokens[0])
            self._datetime_format = "iso8601-utc" \
                if tokens[0].endswith('Z') else "iso8601-local"
        else:
            # assume current year unless self.year_rollover
            # is set (from LogFile)
            year = datetime.now().year
            dt = dateutil.parser.parse(' '.join(tokens[: 4]),
                                       default=datetime(year, 1, 1))
            if dt.tzinfo is None:
                dt = dt.replace(tzinfo=tzutc())
            # a timestamp later than the rollover boundary must belong to
            # the previous year
            if self._year_rollover and dt > self._year_rollover:
                dt = dt.replace(year=year - 1)
            # ctime with milliseconds has a '.' in the time token
            self._datetime_format = "ctime" \
                if '.' in tokens[3] else "ctime-pre2.4"
        return dt
    @property
    def thread(self):
        """Extract thread name if available (lazy).

        The thread is the bracketed token right after the timestamp,
        e.g. "[conn1234]". As a side effect also populates self._conn
        for connection-related lines.
        """
        if not self._thread_calculated:
            self._thread_calculated = True
            split_tokens = self.split_tokens
            # datetime must have been located first; note the flag stays
            # set even on these early returns, so the result is cached
            if not self.datetime_nextpos:
                return None
            if len(split_tokens) <= self.datetime_nextpos:
                return None
            connection_token = split_tokens[self.datetime_nextpos]
            match = re.match(r'^\[([^\]]*)\]$', connection_token)
            if match:
                self._thread = match.group(1)
            if self._thread is not None:
                if self._thread in ['initandlisten', 'mongosMain']:
                    # 'connection accepted' lines carry '#<n>' five tokens
                    # from the end — presumably always; TODO confirm for
                    # all server versions
                    if len(split_tokens) >= 5 and split_tokens[-5][0] == '#':
                        self._conn = 'conn' + split_tokens[-5][1:]
                elif self._thread.startswith('conn'):
                    self._conn = self._thread
        return self._thread
@property
def conn(self):
r"""
Extract conn name if available (lazy).
This value is None for all lines except the log lines related to
connections, that is lines matching '\[conn[0-9]+\]' or
'\[(initandlisten|mongosMain)\] .* connection accepted from'.
"""
self.thread
return self._conn
@property
def operation(self):
"""
Extract operation if available (lazy).
Operations: query, insert, update, remove, getmore, command
"""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._operation
@property
def namespace(self):
"""Extract namespace if available (lazy)."""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._namespace
    def _extract_operation_and_namespace(self):
        """
        Helper method to extract both operation and namespace from a logevent.

        It doesn't make sense to only extract one as they appear back to back
        in the token list: operation is the token after the thread name,
        namespace the one after that.
        """
        split_tokens = self.split_tokens
        if not self._datetime_nextpos:
            # force evaluation of thread to get access to datetime_offset and
            # to protect from changes due to line truncation.
            _ = self.thread
        if not self._datetime_nextpos or (len(split_tokens) <=
                                          self._datetime_nextpos + 2):
            return
        op = split_tokens[self._datetime_nextpos + 1].lower()
        if op == 'warning:':
            # check if this log line got truncated
            if ("warning: log line attempted" in self._line_str and
                    "over max size" in self._line_str):
                # the real operation resumes after the '...' marker
                self._datetime_nextpos = split_tokens.index('...')
                op = split_tokens[self._datetime_nextpos + 1]
            else:
                # unknown warning, bail out
                return
        if op in self.log_operations:
            self._operation = op
            self._namespace = split_tokens[self._datetime_nextpos + 2]
@property
def pattern(self):
"""Extract query pattern from operations."""
if not self._pattern:
# trigger evaluation of operation
if (self.operation in ['query', 'getmore', 'update', 'remove'] or
self.command in ['count', 'findandmodify']):
self._pattern = self._find_pattern('query: ')
elif self.command == 'find':
self._pattern = self._find_pattern('filter: ')
return self._pattern
@property
def sort_pattern(self):
"""Extract query pattern from operations."""
if not self._sort_pattern:
# trigger evaluation of operation
if self.operation in ['query', 'getmore']:
self._sort_pattern = self._find_pattern('orderby: ')
return self._sort_pattern
    @property
    def command(self):
        """Extract the command name (lower-cased) if available (lazy).

        Only populated for lines whose operation is 'command'; the name
        is the token following 'command:'.
        """
        if not self._command_calculated:
            self._command_calculated = True
            if self.operation == 'command':
                try:
                    command_idx = self.split_tokens.index('command:')
                    command = self.split_tokens[command_idx + 1]
                    if command == '{':
                        # workaround for <= 2.2 log files,
                        # where command was not listed separately
                        command = self.split_tokens[command_idx + 2][:-1]
                    self._command = command.lower()
                except ValueError:
                    # no 'command:' token on this line
                    pass
        return self._command
@property
def nscanned(self):
"""Extract nscanned or keysExamined counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscanned
@property
def nscannedObjects(self):
"""
Extract counters if available (lazy).
Looks for nscannedObjects or docsExamined.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscannedObjects
@property
def ntoreturn(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ntoreturn
@property
def writeConflicts(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._writeConflicts
@property
def nreturned(self):
"""
Extract counters if available (lazy).
Looks for nreturned, nReturned, or nMatched counter.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nreturned
@property
def ninserted(self):
"""Extract ninserted or nInserted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ninserted
@property
def ndeleted(self):
"""Extract ndeleted or nDeleted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ndeleted
@property
def nupdated(self):
"""Extract nupdated or nModified counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nupdated
@property
def numYields(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._numYields
@property
def planSummary(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._planSummary
@property
def r(self):
"""Extract read lock (r) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._r
@property
def w(self):
"""Extract write lock (w) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._w
    def _extract_counters(self):
        """Extract counters like nscanned and nreturned from the logevent.

        Scans the tokens after the namespace for 'name:value' pairs and
        stores each one on the matching '_name' attribute (current server
        counter names are remapped to their legacy equivalents first).
        """
        # extract counters (if present)
        counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned',
                    'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields',
                    'planSummary', 'writeConflicts', 'keyUpdates']
        # TODO: refactor mtools to use current counter names throughout
        # Transitionary hack: mapping of current names into prior equivalents
        counter_equiv = {
            'docsExamined': 'nscannedObjects',
            'keysExamined': 'nscanned',
            'nDeleted': 'ndeleted',
            'nInserted': 'ninserted',
            'nMatched': 'nreturned',
            'nModified': 'nupdated'
        }
        counters.extend(counter_equiv.keys())
        split_tokens = self.split_tokens
        # trigger operation evaluation to get access to offset
        if self.operation:
            for t, token in enumerate(split_tokens[self.datetime_nextpos +
                                                   2:]):
                for counter in counters:
                    if token.startswith('%s:' % counter):
                        try:
                            # Remap counter to standard name, if applicable
                            counter = counter_equiv.get(counter, counter)
                            vars(self)['_' + counter] = int((token.split(':')
                                                             [-1]).replace(',',
                                                                           ''))
                        except ValueError:
                            # see if this is a pre-2.5.2 numYields with space
                            # in between (e.g. "numYields: 2")
                            # https://jira.mongodb.org/browse/SERVER-10101
                            if (counter == 'numYields' and
                                    token.startswith('numYields')):
                                try:
                                    # value is the next token; 't' indexes the
                                    # sliced list, so re-add the slice offset
                                    self._numYields = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
                                except ValueError:
                                    pass
                            # planSummary's value is a word, not an int, so it
                            # always lands here via the ValueError above
                            if (counter == 'planSummary' and
                                    token.startswith('planSummary')):
                                try:
                                    self._planSummary = split_tokens[t + 1 + self.datetime_nextpos + 2]
                                except ValueError:
                                    pass
                        # token not parsable, skip
                        break
@property
def level(self):
"""Extract log level if available (lazy)."""
if not self._level_calculated:
self._level_calculated = True
self._extract_level()
return self._level
@property
def component(self):
"""Extract log component if available (lazy)."""
self.level
return self._component
def _extract_level(self):
"""Extract level and component if available (lazy)."""
if self._level is None:
split_tokens = self.split_tokens
if not split_tokens:
self._level = False
self._component = False
return
x = (self.log_levels.index(split_tokens[1])
if split_tokens[1] in self.log_levels else None)
if x is not None:
self._level = split_tokens[1]
self._component = split_tokens[2]
else:
self._level = False
self._component = False
def parse_all(self):
"""
Trigger extraction of all information.
These values are usually evaluated lazily.
"""
tokens = self.split_tokens
duration = self.duration
datetime = self.datetime
thread = self.thread
operation = self.operation
namespace = self.namespace
pattern = self.pattern
nscanned = self.nscanned
nscannedObjects = self.nscannedObjects
ntoreturn = self.ntoreturn
nreturned = self.nreturned
ninserted = self.ninserted
ndeleted = self.ndeleted
nupdated = self.nupdated
numYields = self.numYields
w = self.w
r = self.r
    def _find_pattern(self, trigger):
        """Extract the JSON-like document that follows *trigger* in the line.

        Scans forward from the last occurrence of *trigger*, balancing
        braces to find the end of the document, and converts the matched
        substring via json2pattern(). Returns None if no pattern is found.
        """
        # get start of json query pattern
        start_idx = self.line_str.rfind(trigger)
        if start_idx == -1:
            # no query pattern found
            return None
        stop_idx = 0
        brace_counter = 0
        search_str = self.line_str[start_idx + len(trigger):]
        # walk the braces until they balance out; stop_idx then marks the
        # closing brace of the outermost document
        for match in re.finditer(r'{|}', search_str):
            stop_idx = match.start()
            if search_str[stop_idx] == '{':
                brace_counter += 1
            else:
                brace_counter -= 1
            if brace_counter == 0:
                break
        search_str = search_str[:stop_idx + 1].strip()
        if search_str:
            return json2pattern(search_str)
        else:
            return None
def _reformat_timestamp(self, format, force=False):
if format not in ['ctime', 'ctime-pre2.4', 'iso8601-utc',
'iso8601-local']:
raise ValueError('invalid datetime format %s, choose from ctime, '
'ctime-pre2.4, iso8601-utc, iso8601-local.')
if ((self.datetime_format is None or
(self.datetime_format == format and
self._datetime_str != '')) and not force):
return
elif self.datetime is None:
return
elif format.startswith('ctime'):
dt_string = (self.weekdays[self.datetime.weekday()] + ' ' +
self.datetime.strftime("%b %d %H:%M:%S"))
# remove zero-padding from day number
tokens = dt_string.split(' ')
if tokens[2].startswith('0'):
tokens[2] = tokens[2].replace('0', ' ', 1)
dt_string = ' '.join(tokens)
if format == 'ctime':
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)
elif format == 'iso8601-local':
dt_string = self.datetime.isoformat()
if self.datetime.utcoffset() is None:
dt_string += '+00:00'
ms_str = str(int(self.datetime.microsecond / 1000)).zfill(3)[:3]
# change isoformat string to have 3 digit milliseconds and no :
# in offset
dt_string = re.sub(r'(\.\d+)?([+-])(\d\d):(\d\d)',
'.%s\\2\\3\\4' % ms_str, dt_string, count=1)
elif format == 'iso8601-utc':
if self.datetime.utcoffset():
dt_string = self.datetime.astimezone(tzutc()).strftime("%Y-%m-"
"%dT%H:"
"%M:%S")
else:
dt_string = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)[:3] + 'Z'
# set new string and format
self._datetime_str = dt_string
self._datetime_format = format
    def __str__(self):
        """Default string conversion for LogEvent object is its line_str."""
        # line_str reassembles merge marker / datetime / body as needed
        return str(self.line_str)
def to_dict(self, labels=None):
"""Convert LogEvent object to a dictionary."""
output = {}
if labels is None:
labels = ['line_str', 'split_tokens', 'datetime', 'operation',
'thread', 'namespace', 'nscanned', 'ntoreturn',
'nreturned', 'ninserted', 'nupdated', 'ndeleted',
'duration', 'r', 'w', 'numYields']
for label in labels:
value = getattr(self, label, None)
if value is not None:
output[label] = value
return output
def to_json(self, labels=None):
"""Convert LogEvent object to valid JSON."""
output = self.to_dict(labels)
return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False)
def _parse_document(self):
"""Parse system.profile doc, copy all values to member variables."""
self._reset()
doc = self._profile_doc
self._split_tokens_calculated = True
self._split_tokens = None
self._duration_calculated = True
self._duration = doc[u'millis']
self._datetime_calculated = True
self._datetime = doc[u'ts']
if self._datetime.tzinfo is None:
self._datetime = self._datetime.replace(tzinfo=tzutc())
self._datetime_format = None
self._reformat_timestamp('ctime', force=True)
self._thread_calculated = True
self._thread = doc['thread']
self._operation_calculated = True
self._operation = doc[u'op']
self._namespace = doc[u'ns']
self._command_calculated = True
if self.operation == 'command':
self._command = doc[u'command'].keys()[0]
# query pattern for system.profile events, all three cases.
# See SERVER-13245
if 'query' in doc:
if 'query' in doc['query'] and isinstance(doc['query']['query'],
dict):
self._pattern = str(doc['query']['query']).replace("'", '"')
elif '$query' in doc['query']:
self._pattern = str(doc['query']['$query']).replace("'", '"')
else:
self._pattern = str(doc['query']).replace("'", '"')
# sort pattern
if ('orderby' in doc['query'] and
isinstance(doc['query']['orderby'], dict)):
self._sort_pattern = str(doc['query']
['orderby']).replace("'", '"')
elif '$orderby' in doc['query']:
self._sort_pattern = str(doc['query']
['$orderby']).replace("'", '"')
else:
self._sort_pattern = None
self._counters_calculated = True
self._nscanned = doc[u'nscanned'] if 'nscanned' in doc else None
self._ntoreturn = doc[u'ntoreturn'] if 'ntoreturn' in doc else None
self._nupdated = doc[u'nupdated'] if 'nupdated' in doc else None
self._nreturned = doc[u'nreturned'] if 'nreturned' in doc else None
self._ninserted = doc[u'ninserted'] if 'ninserted' in doc else None
self._ndeleted = doc[u'ndeleted'] if 'ndeleted' in doc else None
self._numYields = doc[u'numYield'] if 'numYield' in doc else None
if u'lockStats' in doc:
self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
locks = 'w:%i' % self.w if self.w is not None else 'r:%i' % self.r
elif u'locks' in doc:
locks = json.dumps(doc[u'locks'])
else:
locks = ''
# build a fake line_str
payload = ''
if 'query' in doc:
payload += ('query: %s' % str(doc[u'query'])
.replace("u'", "'").replace("'", '"'))
if 'command' in doc:
payload += ('command: %s' % str(doc[u'command'])
.replace("u'", "'").replace("'", '"'))
if 'updateobj' in doc:
payload += (' update: %s' % str(doc[u'updateobj'])
.replace("u'", "'").replace("'", '"'))
scanned = 'nscanned:%i' % self._nscanned if 'nscanned' in doc else ''
yields = 'numYields:%i' % self._numYields if 'numYield' in doc else ''
duration = '%ims' % self.duration if self.duration is not None else ''
self._line_str = ("[{thread}] {operation} {namespace} {payload} "
"{scanned} {yields} locks(micros) {locks} "
"{duration}".format(datetime=self.datetime,
thread=self.thread,
operation=self.operation,
namespace=self.namespace,
payload=payload, scanned=scanned,
yields=yields, locks=locks,
duration=duration))
| 36.661055 | 125 | 0.54015 |
import json
import re
import sys
from datetime import datetime
import dateutil.parser
from dateutil.tz import tzutc
from six.moves import range
from mtools.util.pattern import json2pattern
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
class LogEvent(object):
dthandler = lambda obj: obj.isoformat() if isinstance(obj,
datetime) else None
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
log_operations = ['query', 'insert', 'update', 'remove', 'getmore',
'command']
log_levels = ['D', 'F', 'E', 'W', 'I', 'U']
log_components = ['-', 'ACCESS', 'COMMAND', 'CONTROL', 'GEO', 'INDEX',
'NETWORK', 'QUERY', 'REPL', 'SHARDING', 'STORAGE',
'JOURNAL', 'WRITE', 'TOTAL']
def __init__(self, doc_or_str):
self._year_rollover = False
if isinstance(doc_or_str, bytes):
doc_or_str = doc_or_str.decode("utf-8")
if isinstance(doc_or_str, str) or (sys.version_info.major == 2 and
isinstance(doc_or_str, unicode)):
self.from_string = True
self._line_str = doc_or_str.rstrip()
self._profile_doc = None
self._reset()
else:
self.from_string = False
self._profile_doc = doc_or_str
self._parse_document()
def _reset(self):
self._split_tokens_calculated = False
self._split_tokens = None
self._duration_calculated = False
self._duration = None
self._datetime_calculated = False
self._datetime = None
self._datetime_nextpos = None
self._datetime_format = None
self._datetime_str = ''
self._thread_calculated = False
self._thread = None
self._operation_calculated = False
self._operation = None
self._namespace = None
self._pattern = None
self._sort_pattern = None
self._command_calculated = False
self._command = None
self._counters_calculated = False
# TODO: refactor from the legacy names to modern
# (eg: nscanned => keysExamined). Currently _extract_counters()
# maps newer property names into legacy equivalents for
# broader log file support.
self._nscanned = None # keysExamined
self._nscannedObjects = None # docsExamined
self._ntoreturn = None
self._nupdated = None # nModified
self._nreturned = None # nReturned or nMatched (updates)
self._ninserted = None # nInserted
self._ndeleted = None # nDeleted
self._numYields = None
self._planSummary = None
self._writeConflicts = None
self._r = None
self._w = None
self._conn = None
self._level_calculated = False
self._level = None
self._component = None
self.merge_marker_str = ''
def set_line_str(self, line_str):
if not self.from_string:
raise ValueError("can't set line_str for LogEvent created from "
"system.profile documents.")
if line_str != self._line_str:
self._line_str = line_str.rstrip()
self._reset()
def get_line_str(self):
if self.from_string:
return ' '.join([s for s in [self.merge_marker_str,
self._datetime_str,
self._line_str] if s])
else:
return ' '.join([s for s in [self._datetime_str,
self._line_str] if s])
line_str = property(get_line_str, set_line_str)
@property
def split_tokens(self):
if not self._split_tokens_calculated:
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens
@property
def duration(self):
if not self._duration_calculated:
self._duration_calculated = True
line_str = self.line_str
if (line_str
and line_str.endswith('ms')
and 'Scheduled new oplog query' not in line_str):
try:
space_pos = line_str.rfind(" ")
if space_pos == -1:
return
self._duration = int(line_str[line_str.rfind(" ") +
1:-2].replace(',', ''))
except ValueError:
self._duration = None
elif "flushing" in self.line_str:
matchobj = re.search(r'flushing mmaps took (\d+)ms',
self.line_str)
if matchobj:
self._duration = int(matchobj.group(1))
return self._duration
@property
def datetime(self):
if not self._datetime_calculated:
self._datetime_calculated = True
split_tokens = self.split_tokens[:10]
for offs in range(len(split_tokens)):
dt = self._match_datetime_pattern(split_tokens[offs:offs + 4])
if dt:
self._datetime = dt
self._datetime_nextpos = offs
if self._datetime_format.startswith("iso8601"):
self._datetime_nextpos += 1
else:
self._datetime_nextpos += 4
self._line_str = (' '.join(self.split_tokens
[self._datetime_nextpos:]))
if self.level:
self._datetime_nextpos += 2
self._reformat_timestamp(self._datetime_format)
break
return self._datetime
@property
def datetime_format(self):
if not self._datetime_calculated:
_ = self.datetime
return self._datetime_format
@property
def datetime_nextpos(self):
if self._datetime_nextpos is None and not self._datetime_calculated:
_ = self.datetime
return self._datetime_nextpos
def set_datetime_hint(self, format, nextpos, rollover):
self._datetime_format = format
self._datetime_nextpos = nextpos
self._year_rollover = rollover
if format.startswith('ctime'):
if (len(self.split_tokens) < 4 or
self.split_tokens[self._datetime_nextpos - 4] not in
self.weekdays):
_ = self.datetime
return False
return True
else:
if len(self.split_tokens) == 0:
self._datetime_calculated = True
return False
try:
if not (self.split_tokens[self._datetime_nextpos - 1][0]
.isdigit()):
_ = self.datetime
return False
except Exception:
pass
return True
def _match_datetime_pattern(self, tokens):
assume_iso8601_format = len(tokens) < 4
# check for ctime-pre-2.4 or ctime format
if not assume_iso8601_format:
weekday, month, day, time = tokens[:4]
if (len(tokens) < 4 or (weekday not in self.weekdays) or
(month not in self.months) or not day.isdigit()):
assume_iso8601_format = True
if assume_iso8601_format:
# sanity check, because the dateutil parser could interpret
# any numbers as a valid date
if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}',
tokens[0]):
return None
# convinced that this is a ISO-8601 format, the dateutil parser
# will do the rest
dt = dateutil.parser.parse(tokens[0])
self._datetime_format = "iso8601-utc" \
if tokens[0].endswith('Z') else "iso8601-local"
else:
# assume current year unless self.year_rollover
# is set (from LogFile)
year = datetime.now().year
dt = dateutil.parser.parse(' '.join(tokens[: 4]),
default=datetime(year, 1, 1))
if dt.tzinfo is None:
dt = dt.replace(tzinfo=tzutc())
if self._year_rollover and dt > self._year_rollover:
dt = dt.replace(year=year - 1)
self._datetime_format = "ctime" \
if '.' in tokens[3] else "ctime-pre2.4"
return dt
@property
def thread(self):
if not self._thread_calculated:
self._thread_calculated = True
split_tokens = self.split_tokens
if not self.datetime_nextpos:
return None
if len(split_tokens) <= self.datetime_nextpos:
return None
connection_token = split_tokens[self.datetime_nextpos]
match = re.match(r'^\[([^\]]*)\]$', connection_token)
if match:
self._thread = match.group(1)
if self._thread is not None:
if self._thread in ['initandlisten', 'mongosMain']:
if len(split_tokens) >= 5 and split_tokens[-5][0] == '
self._conn = 'conn' + split_tokens[-5][1:]
elif self._thread.startswith('conn'):
self._conn = self._thread
return self._thread
@property
def conn(self):
self.thread
return self._conn
@property
def operation(self):
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._operation
@property
def namespace(self):
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._namespace
def _extract_operation_and_namespace(self):
split_tokens = self.split_tokens
if not self._datetime_nextpos:
# force evaluation of thread to get access to datetime_offset and
# to protect from changes due to line truncation.
_ = self.thread
if not self._datetime_nextpos or (len(split_tokens) <=
self._datetime_nextpos + 2):
return
op = split_tokens[self._datetime_nextpos + 1].lower()
if op == 'warning:':
# check if this log line got truncated
if ("warning: log line attempted" in self._line_str and
"over max size" in self._line_str):
self._datetime_nextpos = split_tokens.index('...')
op = split_tokens[self._datetime_nextpos + 1]
else:
# unknown warning, bail out
return
if op in self.log_operations:
self._operation = op
self._namespace = split_tokens[self._datetime_nextpos + 2]
@property
def pattern(self):
if not self._pattern:
# trigger evaluation of operation
if (self.operation in ['query', 'getmore', 'update', 'remove'] or
self.command in ['count', 'findandmodify']):
self._pattern = self._find_pattern('query: ')
elif self.command == 'find':
self._pattern = self._find_pattern('filter: ')
return self._pattern
@property
def sort_pattern(self):
if not self._sort_pattern:
# trigger evaluation of operation
if self.operation in ['query', 'getmore']:
self._sort_pattern = self._find_pattern('orderby: ')
return self._sort_pattern
@property
def command(self):
if not self._command_calculated:
self._command_calculated = True
if self.operation == 'command':
try:
command_idx = self.split_tokens.index('command:')
command = self.split_tokens[command_idx + 1]
if command == '{':
# workaround for <= 2.2 log files,
# where command was not listed separately
command = self.split_tokens[command_idx + 2][:-1]
self._command = command.lower()
except ValueError:
pass
return self._command
@property
def nscanned(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscanned
@property
def nscannedObjects(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscannedObjects
@property
def ntoreturn(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ntoreturn
@property
def writeConflicts(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._writeConflicts
@property
def nreturned(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nreturned
@property
def ninserted(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ninserted
@property
def ndeleted(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ndeleted
@property
def nupdated(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nupdated
@property
def numYields(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._numYields
@property
def planSummary(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._planSummary
@property
def r(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._r
@property
def w(self):
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._w
def _extract_counters(self):
# extract counters (if present)
counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned',
'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields',
'planSummary', 'writeConflicts', 'keyUpdates']
# TODO: refactor mtools to use current counter names throughout
# Transitionary hack: mapping of current names into prior equivalents
counter_equiv = {
'docsExamined': 'nscannedObjects',
'keysExamined': 'nscanned',
'nDeleted': 'ndeleted',
'nInserted': 'ninserted',
'nMatched': 'nreturned',
'nModified': 'nupdated'
}
counters.extend(counter_equiv.keys())
split_tokens = self.split_tokens
# trigger operation evaluation to get access to offset
if self.operation:
for t, token in enumerate(split_tokens[self.datetime_nextpos +
2:]):
for counter in counters:
if token.startswith('%s:' % counter):
try:
# Remap counter to standard name, if applicable
counter = counter_equiv.get(counter, counter)
vars(self)['_' + counter] = int((token.split(':')
[-1]).replace(',',
''))
except ValueError:
# see if this is a pre-2.5.2 numYields with space
# in between (e.g. "numYields: 2")
# https://jira.mongodb.org/browse/SERVER-10101
if (counter == 'numYields' and
token.startswith('numYields')):
try:
self._numYields = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
except ValueError:
pass
if (counter == 'planSummary' and
token.startswith('planSummary')):
try:
self._planSummary = split_tokens[t + 1 + self.datetime_nextpos + 2]
except ValueError:
pass
# token not parsable, skip
break
@property
def level(self):
    """Return the log level token (2.6+ format), lazily parsed.

    False when the line has no recognizable level.
    """
    if not self._level_calculated:
        self._level_calculated = True
        self._extract_level()
    return self._level

@property
def component(self):
    """Return the log component token; False when not present."""
    # Accessing self.level triggers _extract_level(), which also
    # populates self._component as a side effect.
    self.level
    return self._component
def _extract_level(self):
if self._level is None:
split_tokens = self.split_tokens
if not split_tokens:
self._level = False
self._component = False
return
x = (self.log_levels.index(split_tokens[1])
if split_tokens[1] in self.log_levels else None)
if x is not None:
self._level = split_tokens[1]
self._component = split_tokens[2]
else:
self._level = False
self._component = False
def parse_all(self):
    """Force evaluation of every lazily-parsed field of this event.

    Each attribute access below triggers the corresponding lazy parser;
    the values themselves are discarded.
    """
    for field in ('split_tokens', 'duration', 'datetime', 'thread',
                  'operation', 'namespace', 'pattern', 'nscanned',
                  'nscannedObjects', 'ntoreturn', 'nreturned', 'ninserted',
                  'ndeleted', 'nupdated', 'numYields', 'w', 'r'):
        getattr(self, field)
def _find_pattern(self, trigger):
# get start of json query pattern
start_idx = self.line_str.rfind(trigger)
if start_idx == -1:
# no query pattern found
return None
stop_idx = 0
brace_counter = 0
search_str = self.line_str[start_idx + len(trigger):]
for match in re.finditer(r'{|}', search_str):
stop_idx = match.start()
if search_str[stop_idx] == '{':
brace_counter += 1
else:
brace_counter -= 1
if brace_counter == 0:
break
search_str = search_str[:stop_idx + 1].strip()
if search_str:
return json2pattern(search_str)
else:
return None
def _reformat_timestamp(self, format, force=False):
if format not in ['ctime', 'ctime-pre2.4', 'iso8601-utc',
'iso8601-local']:
raise ValueError('invalid datetime format %s, choose from ctime, '
'ctime-pre2.4, iso8601-utc, iso8601-local.')
if ((self.datetime_format is None or
(self.datetime_format == format and
self._datetime_str != '')) and not force):
return
elif self.datetime is None:
return
elif format.startswith('ctime'):
dt_string = (self.weekdays[self.datetime.weekday()] + ' ' +
self.datetime.strftime("%b %d %H:%M:%S"))
# remove zero-padding from day number
tokens = dt_string.split(' ')
if tokens[2].startswith('0'):
tokens[2] = tokens[2].replace('0', ' ', 1)
dt_string = ' '.join(tokens)
if format == 'ctime':
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)
elif format == 'iso8601-local':
dt_string = self.datetime.isoformat()
if self.datetime.utcoffset() is None:
dt_string += '+00:00'
ms_str = str(int(self.datetime.microsecond / 1000)).zfill(3)[:3]
# change isoformat string to have 3 digit milliseconds and no :
# in offset
dt_string = re.sub(r'(\.\d+)?([+-])(\d\d):(\d\d)',
'.%s\\2\\3\\4' % ms_str, dt_string, count=1)
elif format == 'iso8601-utc':
if self.datetime.utcoffset():
dt_string = self.datetime.astimezone(tzutc()).strftime("%Y-%m-"
"%dT%H:"
"%M:%S")
else:
dt_string = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)[:3] + 'Z'
# set new string and format
self._datetime_str = dt_string
self._datetime_format = format
def __str__(self):
    """Return the full raw log line."""
    return str(self.line_str)
def to_dict(self, labels=None):
    """Return selected parsed fields of this event as a dict.

    labels: optional list of attribute names to export; defaults to the
    standard set of parsed fields. Attributes that are missing or whose
    value is None are omitted.
    """
    if labels is None:
        labels = ['line_str', 'split_tokens', 'datetime', 'operation',
                  'thread', 'namespace', 'nscanned', 'ntoreturn',
                  'nreturned', 'ninserted', 'nupdated', 'ndeleted',
                  'duration', 'r', 'w', 'numYields']

    pairs = ((label, getattr(self, label, None)) for label in labels)
    return {label: value for label, value in pairs if value is not None}
def to_json(self, labels=None):
    """Return the event as a JSON string (see to_dict for ``labels``).

    Uses DateTimeEncoder so datetime values serialize cleanly.
    """
    output = self.to_dict(labels)
    return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False)
def _parse_document(self):
    """Populate this event from a system.profile document.

    Profile documents are already structured, so instead of tokenizing a
    log line this fills in the lazily-computed members directly and then
    synthesizes a fake ``line_str`` resembling a regular log line.
    """
    self._reset()
    doc = self._profile_doc

    self._split_tokens_calculated = True
    self._split_tokens = None

    self._duration_calculated = True
    self._duration = doc[u'millis']

    self._datetime_calculated = True
    self._datetime = doc[u'ts']
    if self._datetime.tzinfo is None:
        # assume naive profile timestamps are UTC — TODO confirm upstream
        self._datetime = self._datetime.replace(tzinfo=tzutc())
    self._datetime_format = None
    self._reformat_timestamp('ctime', force=True)

    self._thread_calculated = True
    self._thread = doc['thread']

    self._operation_calculated = True
    self._operation = doc[u'op']
    self._namespace = doc[u'ns']

    self._command_calculated = True
    if self.operation == 'command':
        # BUG FIX: dict.keys() is not subscriptable on Python 3; take the
        # first key via an iterator instead of keys()[0].
        self._command = next(iter(doc[u'command']))

    # query pattern for system.profile events, all three cases.
    # See SERVER-13245
    if 'query' in doc:
        if 'query' in doc['query'] and isinstance(doc['query']['query'],
                                                  dict):
            self._pattern = str(doc['query']['query']).replace("'", '"')
        elif '$query' in doc['query']:
            self._pattern = str(doc['query']['$query']).replace("'", '"')
        else:
            self._pattern = str(doc['query']).replace("'", '"')

        # sort pattern
        if ('orderby' in doc['query'] and
                isinstance(doc['query']['orderby'], dict)):
            self._sort_pattern = str(doc['query']
                                     ['orderby']).replace("'", '"')
        elif '$orderby' in doc['query']:
            self._sort_pattern = str(doc['query']
                                     ['$orderby']).replace("'", '"')
        else:
            self._sort_pattern = None

    self._counters_calculated = True
    self._nscanned = doc[u'nscanned'] if 'nscanned' in doc else None
    self._ntoreturn = doc[u'ntoreturn'] if 'ntoreturn' in doc else None
    self._nupdated = doc[u'nupdated'] if 'nupdated' in doc else None
    self._nreturned = doc[u'nreturned'] if 'nreturned' in doc else None
    self._ninserted = doc[u'ninserted'] if 'ninserted' in doc else None
    self._ndeleted = doc[u'ndeleted'] if 'ndeleted' in doc else None
    self._numYields = doc[u'numYield'] if 'numYield' in doc else None

    if u'lockStats' in doc:
        self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
        self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
        self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
        self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
        locks = 'w:%i' % self.w if self.w is not None else 'r:%i' % self.r
    elif u'locks' in doc:
        locks = json.dumps(doc[u'locks'])
    else:
        locks = ''

    # build a fake line_str
    payload = ''
    if 'query' in doc:
        payload += ('query: %s' % str(doc[u'query'])
                    .replace("u'", "'").replace("'", '"'))
    if 'command' in doc:
        payload += ('command: %s' % str(doc[u'command'])
                    .replace("u'", "'").replace("'", '"'))
    if 'updateobj' in doc:
        payload += (' update: %s' % str(doc[u'updateobj'])
                    .replace("u'", "'").replace("'", '"'))

    scanned = 'nscanned:%i' % self._nscanned if 'nscanned' in doc else ''
    yields = 'numYields:%i' % self._numYields if 'numYield' in doc else ''
    duration = '%ims' % self.duration if self.duration is not None else ''

    self._line_str = ("[{thread}] {operation} {namespace} {payload} "
                      "{scanned} {yields} locks(micros) {locks} "
                      "{duration}".format(datetime=self.datetime,
                                          thread=self.thread,
                                          operation=self.operation,
                                          namespace=self.namespace,
                                          payload=payload, scanned=scanned,
                                          yields=yields, locks=locks,
                                          duration=duration))
| true | true |
7901e5a0cbbea36c770fee5295b4b112fcac68f8 | 222,664 | py | Python | mesonbuild/interpreter.py | tolnaisz/meson | 37bade6f8760a4e443a8daddbcf6acd4e84b5eab | [
"Apache-2.0"
] | null | null | null | mesonbuild/interpreter.py | tolnaisz/meson | 37bade6f8760a4e443a8daddbcf6acd4e84b5eab | [
"Apache-2.0"
] | null | null | null | mesonbuild/interpreter.py | tolnaisz/meson | 37bade6f8760a4e443a8daddbcf6acd4e84b5eab | [
"Apache-2.0"
] | null | null | null | # Copyright 2012-2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import mparser
from . import environment
from . import coredata
from . import dependencies
from . import mlog
from . import build
from . import optinterpreter
from . import compilers
from .wrap import wrap, WrapMode
from . import mesonlib
from .mesonlib import FileMode, MachineChoice, Popen_safe, listify, extract_as_list, has_path_sep, unholder
from .dependencies import ExternalProgram
from .dependencies import InternalDependency, Dependency, NotFoundDependency, DependencyException
from .depfile import DepFile
from .interpreterbase import InterpreterBase
from .interpreterbase import check_stringlist, flatten, noPosargs, noKwargs, stringArgs, permittedKwargs, noArgsFlattening
from .interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest
from .interpreterbase import InterpreterObject, MutableInterpreterObject, Disabler, disablerIfNotFound
from .interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs
from .interpreterbase import ObjectHolder
from .modules import ModuleReturnValue
from .cmake import CMakeInterpreter
from .backend.backends import TestProtocol
from pathlib import Path, PurePath
import os
import shutil
import uuid
import re
import shlex
import subprocess
import collections
import functools
import typing as T
import importlib
# Keyword arguments accepted by holder methods, keyed by method name
# (used by the @permittedKwargs decorators below).
permitted_method_kwargs = {
    'partial_dependency': {'compile_args', 'link_args', 'links', 'includes',
                           'sources'},
}
def stringifyUserArguments(args):
    """Render a user-supplied value as Meson-language source text.

    Supports strings, integers, and (recursively) lists and
    dictionaries thereof; raises InvalidArguments for anything else.
    """
    if isinstance(args, list):
        return '[%s]' % ', '.join([stringifyUserArguments(x) for x in args])
    elif isinstance(args, dict):
        return '{%s}' % ', '.join(['%s : %s' % (stringifyUserArguments(k), stringifyUserArguments(v)) for k, v in args.items()])
    elif isinstance(args, int):
        return str(args)
    elif isinstance(args, str):
        return "'%s'" % args
    # BUG FIX: dictionaries are accepted above but were missing from the
    # error message.
    raise InvalidArguments('Function accepts only strings, integers, lists, dictionaries and lists thereof.')
class OverrideProgram(dependencies.ExternalProgram):
    # Marker subclass with no added behavior; presumably used to tag
    # programs that override a find_program() result — verify at callers.
    pass
class FeatureOptionHolder(InterpreterObject, ObjectHolder):
    """Holds a feature user option and exposes enabled/disabled/auto."""

    def __init__(self, env, name, option):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, option)
        if option.is_auto():
            # 'auto' features resolve to the global 'auto_features'
            # builtin option instead of their own value.
            self.held_object = env.coredata.builtins['auto_features']
        self.name = name
        self.methods.update({'enabled': self.enabled_method,
                             'disabled': self.disabled_method,
                             'auto': self.auto_method,
                             })

    @noPosargs
    @permittedKwargs({})
    def enabled_method(self, args, kwargs):
        # True when the feature is explicitly enabled.
        return self.held_object.is_enabled()

    @noPosargs
    @permittedKwargs({})
    def disabled_method(self, args, kwargs):
        # True when the feature is explicitly disabled.
        return self.held_object.is_disabled()

    @noPosargs
    @permittedKwargs({})
    def auto_method(self, args, kwargs):
        # True when the feature is left as 'auto'.
        return self.held_object.is_auto()
def extract_required_kwarg(kwargs, subproject, feature_check=None, default=True):
    """Interpret the 'required' keyword, which may be a boolean or a
    feature option.

    Returns (disabled, required, feature_name) and rewrites
    kwargs['required'] back to a plain boolean.
    """
    val = kwargs.get('required', default)
    disabled = False
    required = False
    feature = None

    if isinstance(val, FeatureOptionHolder):
        if not feature_check:
            feature_check = FeatureNew('User option "feature"', '0.47.0')
        feature_check.use(subproject)
        feature = val.name
        option = val.held_object
        if option.is_disabled():
            disabled = True
        elif option.is_enabled():
            required = True
    elif isinstance(val, bool):
        required = val
    else:
        raise InterpreterException('required keyword argument must be boolean or a feature option')

    # Keep boolean value in kwargs to simplify other places where this
    # kwarg is checked.
    kwargs['required'] = required
    return disabled, required, feature
def extract_search_dirs(kwargs):
    """Validate the 'dirs' keyword and return it as absolute path strings.

    Raises InvalidCode for relative paths; '/'-rooted Unix-style paths
    are tolerated (not rejected) on Windows for cross-platform arrays.
    """
    dirs = [Path(d).expanduser()
            for d in mesonlib.stringlistify(kwargs.get('dirs', []))]
    for d in dirs:
        if mesonlib.is_windows() and d.root.startswith('\\'):
            # a Unix-path starting with `/` that is not absolute on Windows.
            # discard without failing for end-user ease of cross-platform
            # directory arrays
            continue
        if not d.is_absolute():
            raise InvalidCode('Search directory {} is not an absolute path.'.format(d))
    return [str(d) for d in dirs]
class TryRunResultHolder(InterpreterObject):
    """Holds a run result, exposing returncode/compiled/stdout/stderr."""

    def __init__(self, res):
        super().__init__()
        self.res = res
        self.methods.update({'returncode': self.returncode_method,
                             'compiled': self.compiled_method,
                             'stdout': self.stdout_method,
                             'stderr': self.stderr_method,
                             })

    @noPosargs
    @permittedKwargs({})
    def returncode_method(self, args, kwargs):
        return self.res.returncode

    @noPosargs
    @permittedKwargs({})
    def compiled_method(self, args, kwargs):
        return self.res.compiled

    @noPosargs
    @permittedKwargs({})
    def stdout_method(self, args, kwargs):
        return self.res.stdout

    @noPosargs
    @permittedKwargs({})
    def stderr_method(self, args, kwargs):
        return self.res.stderr
class RunProcess(InterpreterObject):
    """Runs an external command at configure time and records its result."""

    def __init__(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir=False, check=False, capture=True):
        super().__init__()
        if not isinstance(cmd, ExternalProgram):
            raise AssertionError('BUG: RunProcess must be passed an ExternalProgram')
        self.capture = capture
        pc, self.stdout, self.stderr = self.run_command(cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check)
        self.returncode = pc.returncode
        self.methods.update({'returncode': self.returncode_method,
                             'stdout': self.stdout_method,
                             'stderr': self.stderr_method,
                             })

    def run_command(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check=False):
        """Execute cmd+args and return (Popen, stdout, stderr).

        The child inherits the current environment augmented with the
        MESON_* helper variables; cwd is the current subdir inside the
        build tree (in_builddir=True) or the source tree. Raises
        InterpreterException if the command cannot be executed or, with
        check=True, exits non-zero.
        """
        command_array = cmd.get_command() + args
        menv = {'MESON_SOURCE_ROOT': source_dir,
                'MESON_BUILD_ROOT': build_dir,
                'MESON_SUBDIR': subdir,
                'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in mesonintrospect]),
                }
        if in_builddir:
            cwd = os.path.join(build_dir, subdir)
        else:
            cwd = os.path.join(source_dir, subdir)
        child_env = os.environ.copy()
        child_env.update(menv)
        child_env = env.get_env(child_env)
        stdout = subprocess.PIPE if self.capture else subprocess.DEVNULL
        mlog.debug('Running command:', ' '.join(command_array))
        try:
            p, o, e = Popen_safe(command_array, stdout=stdout, env=child_env, cwd=cwd)
            if self.capture:
                mlog.debug('--- stdout ---')
                mlog.debug(o)
            else:
                o = ''
                mlog.debug('--- stdout disabled ---')
            mlog.debug('--- stderr ---')
            mlog.debug(e)
            mlog.debug('')

            if check and p.returncode != 0:
                raise InterpreterException('Command "{}" failed with status {}.'.format(' '.join(command_array), p.returncode))

            return p, o, e
        except FileNotFoundError:
            raise InterpreterException('Could not execute command "%s".' % ' '.join(command_array))

    @noPosargs
    @permittedKwargs({})
    def returncode_method(self, args, kwargs):
        return self.returncode

    @noPosargs
    @permittedKwargs({})
    def stdout_method(self, args, kwargs):
        return self.stdout

    @noPosargs
    @permittedKwargs({})
    def stderr_method(self, args, kwargs):
        return self.stderr
class ConfigureFileHolder(InterpreterObject, ObjectHolder):
    """Holds a build.ConfigureFile built from the given pieces."""

    def __init__(self, subdir, sourcename, targetname, configuration_data):
        InterpreterObject.__init__(self)
        obj = build.ConfigureFile(subdir, sourcename, targetname, configuration_data)
        ObjectHolder.__init__(self, obj)
class EnvironmentVariablesHolder(MutableInterpreterObject, ObjectHolder):
    """Holds a build.EnvironmentVariables object and exposes the DSL
    methods set(), append() and prepend().

    initial_values may be a dict of name -> value, or a list of
    'key=val' strings.
    """

    def __init__(self, initial_values=None):
        MutableInterpreterObject.__init__(self)
        ObjectHolder.__init__(self, build.EnvironmentVariables())
        self.methods.update({'set': self.set_method,
                             'append': self.append_method,
                             'prepend': self.prepend_method,
                             })
        if isinstance(initial_values, dict):
            for k, v in initial_values.items():
                self.set_method([k, v], {})
        elif isinstance(initial_values, list):
            for e in initial_values:
                if '=' not in e:
                    raise InterpreterException('Env var definition must be of type key=val.')
                (k, val) = e.split('=', 1)
                k = k.strip()
                val = val.strip()
                if ' ' in k:
                    raise InterpreterException('Env var key must not have spaces in it.')
                self.set_method([k, val], {})
        elif initial_values:
            raise AssertionError('Unsupported EnvironmentVariablesHolder initial_values')

    def __repr__(self):
        repr_str = "<{0}: {1}>"
        return repr_str.format(self.__class__.__name__, self.held_object.envvars)

    def add_var(self, method, args, kwargs):
        """Shared validation and dispatch for set/append/prepend."""
        if not isinstance(kwargs.get("separator", ""), str):
            raise InterpreterException("EnvironmentVariablesHolder methods 'separator'"
                                       " argument needs to be a string.")
        if len(args) < 2:
            # BUG FIX: the concatenated literals lacked spaces, so the
            # message rendered as "at least2 arguments ... and following".
            raise InterpreterException("EnvironmentVariablesHolder methods require at least "
                                       "2 arguments, first is the name of the variable and "
                                       "following one are values")
        # Warn when someone tries to use append() or prepend() on an env var
        # which already has an operation set on it. People seem to think that
        # multiple append/prepend operations stack, but they don't.
        if method != self.held_object.set and self.held_object.has_name(args[0]):
            mlog.warning('Overriding previous value of environment variable {!r} with a new one'
                         .format(args[0]), location=self.current_node)
        self.held_object.add_var(method, args[0], args[1:], kwargs)

    @stringArgs
    @permittedKwargs({'separator'})
    def set_method(self, args, kwargs):
        self.add_var(self.held_object.set, args, kwargs)

    @stringArgs
    @permittedKwargs({'separator'})
    def append_method(self, args, kwargs):
        self.add_var(self.held_object.append, args, kwargs)

    @stringArgs
    @permittedKwargs({'separator'})
    def prepend_method(self, args, kwargs):
        self.add_var(self.held_object.prepend, args, kwargs)
class ConfigurationDataHolder(MutableInterpreterObject, ObjectHolder):
    """Holds a build.ConfigurationData for use with configure_file()."""

    def __init__(self, pv, initial_values=None):
        MutableInterpreterObject.__init__(self)
        self.used = False # These objects become immutable after use in configure_file.
        ObjectHolder.__init__(self, build.ConfigurationData(), pv)
        self.methods.update({'set': self.set_method,
                             'set10': self.set10_method,
                             'set_quoted': self.set_quoted_method,
                             'has': self.has_method,
                             'get': self.get_method,
                             'get_unquoted': self.get_unquoted_method,
                             'merge_from': self.merge_from_method,
                             })
        if isinstance(initial_values, dict):
            for k, v in initial_values.items():
                self.set_method([k, v], {})
        elif initial_values:
            raise AssertionError('Unsupported ConfigurationDataHolder initial_values')

    def is_used(self):
        # True once this object has been consumed by configure_file().
        return self.used

    def mark_used(self):
        self.used = True

    def validate_args(self, args, kwargs):
        """Validate the (name, value) arguments shared by the set*
        methods; returns (name, value, description)."""
        if len(args) == 1 and isinstance(args[0], list) and len(args[0]) == 2:
            mlog.deprecation('Passing a list as the single argument to '
                             'configuration_data.set is deprecated. This will '
                             'become a hard error in the future.',
                             location=self.current_node)
            args = args[0]

        if len(args) != 2:
            raise InterpreterException("Configuration set requires 2 arguments.")
        if self.used:
            raise InterpreterException("Can not set values on configuration object that has been used.")
        name, val = args
        if not isinstance(val, (int, str)):
            msg = 'Setting a configuration data value to {!r} is invalid, ' \
                  'and will fail at configure_file(). If you are using it ' \
                  'just to store some values, please use a dict instead.'
            mlog.deprecation(msg.format(val), location=self.current_node)
        desc = kwargs.get('description', None)
        if not isinstance(name, str):
            raise InterpreterException("First argument to set must be a string.")
        if desc is not None and not isinstance(desc, str):
            raise InterpreterException('Description must be a string.')

        return name, val, desc

    @noArgsFlattening
    def set_method(self, args, kwargs):
        (name, val, desc) = self.validate_args(args, kwargs)
        self.held_object.values[name] = (val, desc)

    def set_quoted_method(self, args, kwargs):
        # Stores the value wrapped in (escaped) double quotes.
        (name, val, desc) = self.validate_args(args, kwargs)
        if not isinstance(val, str):
            raise InterpreterException("Second argument to set_quoted must be a string.")
        escaped_val = '\\"'.join(val.split('"'))
        self.held_object.values[name] = ('"' + escaped_val + '"', desc)

    def set10_method(self, args, kwargs):
        # Stores 1 for truthy values, 0 otherwise.
        (name, val, desc) = self.validate_args(args, kwargs)
        if val:
            self.held_object.values[name] = (1, desc)
        else:
            self.held_object.values[name] = (0, desc)

    def has_method(self, args, kwargs):
        return args[0] in self.held_object.values

    @FeatureNew('configuration_data.get()', '0.38.0')
    @noArgsFlattening
    def get_method(self, args, kwargs):
        # Optional second positional argument is the fallback value.
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Get method takes one or two arguments.')
        name = args[0]
        if name in self.held_object:
            return self.held_object.get(name)[0]
        if len(args) > 1:
            return args[1]
        raise InterpreterException('Entry %s not in configuration data.' % name)

    @FeatureNew('configuration_data.get_unquoted()', '0.44.0')
    def get_unquoted_method(self, args, kwargs):
        # Like get(), but strips surrounding double quotes if present.
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Get method takes one or two arguments.')
        name = args[0]
        if name in self.held_object:
            val = self.held_object.get(name)[0]
        elif len(args) > 1:
            val = args[1]
        else:
            raise InterpreterException('Entry %s not in configuration data.' % name)
        if val[0] == '"' and val[-1] == '"':
            return val[1:-1]
        return val

    def get(self, name):
        return self.held_object.values[name] # (val, desc)

    def keys(self):
        return self.held_object.values.keys()

    def merge_from_method(self, args, kwargs):
        # Copies all entries from another configuration data object.
        if len(args) != 1:
            raise InterpreterException('Merge_from takes one positional argument.')
        from_object = args[0]
        if not isinstance(from_object, ConfigurationDataHolder):
            raise InterpreterException('Merge_from argument must be a configuration data object.')
        from_object = from_object.held_object
        for k, v in from_object.values.items():
            self.held_object.values[k] = v
# Interpreter objects cannot be pickled, so we must have
# these wrapper classes.
class DependencyHolder(InterpreterObject, ObjectHolder):
    """Holds a Dependency and exposes its DSL methods."""

    def __init__(self, dep, pv):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, dep, pv)
        self.methods.update({'found': self.found_method,
                             'type_name': self.type_name_method,
                             'version': self.version_method,
                             'name': self.name_method,
                             'get_pkgconfig_variable': self.pkgconfig_method,
                             'get_configtool_variable': self.configtool_method,
                             'get_variable': self.variable_method,
                             'partial_dependency': self.partial_dependency_method,
                             'include_type': self.include_type_method,
                             'as_system': self.as_system_method,
                             })

    def found(self):
        return self.found_method([], {})

    @noPosargs
    @permittedKwargs({})
    def type_name_method(self, args, kwargs):
        return self.held_object.type_name

    @noPosargs
    @permittedKwargs({})
    def found_method(self, args, kwargs):
        # Internal (sub-project) dependencies always count as found.
        if self.held_object.type_name == 'internal':
            return True
        return self.held_object.found()

    @noPosargs
    @permittedKwargs({})
    def version_method(self, args, kwargs):
        return self.held_object.get_version()

    @noPosargs
    @permittedKwargs({})
    def name_method(self, args, kwargs):
        return self.held_object.get_name()

    @permittedKwargs({'define_variable', 'default'})
    def pkgconfig_method(self, args, kwargs):
        # Look up a pkg-config variable of this dependency.
        args = listify(args)
        if len(args) != 1:
            raise InterpreterException('get_pkgconfig_variable takes exactly one argument.')
        varname = args[0]
        if not isinstance(varname, str):
            raise InterpreterException('Variable name must be a string.')
        return self.held_object.get_pkgconfig_variable(varname, kwargs)

    @FeatureNew('dep.get_configtool_variable', '0.44.0')
    @permittedKwargs({})
    def configtool_method(self, args, kwargs):
        # Look up a config-tool variable of this dependency.
        args = listify(args)
        if len(args) != 1:
            raise InterpreterException('get_configtool_variable takes exactly one argument.')
        varname = args[0]
        if not isinstance(varname, str):
            raise InterpreterException('Variable name must be a string.')
        return self.held_object.get_configtool_variable(varname)

    @FeatureNew('dep.partial_dependency', '0.46.0')
    @noPosargs
    @permittedKwargs(permitted_method_kwargs['partial_dependency'])
    def partial_dependency_method(self, args, kwargs):
        # Return a copy limited to the requested parts (compile_args,
        # link_args, ...).
        pdep = self.held_object.get_partial_dependency(**kwargs)
        return DependencyHolder(pdep, self.subproject)

    @FeatureNew('dep.get_variable', '0.51.0')
    @noPosargs
    @permittedKwargs({'cmake', 'pkgconfig', 'configtool', 'internal', 'default_value', 'pkgconfig_define'})
    @FeatureNewKwargs('dep.get_variable', '0.54.0', ['internal'])
    def variable_method(self, args, kwargs):
        return self.held_object.get_variable(**kwargs)

    @FeatureNew('dep.include_type', '0.52.0')
    @noPosargs
    @permittedKwargs({})
    def include_type_method(self, args, kwargs):
        return self.held_object.get_include_type()

    @FeatureNew('dep.as_system', '0.52.0')
    @permittedKwargs({})
    def as_system_method(self, args, kwargs):
        # Return a copy with a different include type ('system' unless
        # overridden by the optional positional argument).
        args = listify(args)
        new_is_system = 'system'
        if len(args) > 1:
            raise InterpreterException('as_system takes only one optional value')
        if len(args) == 1:
            new_is_system = args[0]
        new_dep = self.held_object.generate_system_dependency(new_is_system)
        return DependencyHolder(new_dep, self.subproject)
class ExternalProgramHolder(InterpreterObject, ObjectHolder):
    """Holds an ExternalProgram, or an Executable build target used as
    a program (e.g. via meson.override_find_program)."""

    def __init__(self, ep, subproject, backend=None):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, ep)
        self.subproject = subproject
        self.backend = backend
        self.methods.update({'found': self.found_method,
                             'path': self.path_method,
                             'full_path': self.full_path_method})
        # Filled lazily by get_version().
        self.cached_version = None

    @noPosargs
    @permittedKwargs({})
    def found_method(self, args, kwargs):
        return self.found()

    @noPosargs
    @permittedKwargs({})
    def path_method(self, args, kwargs):
        mlog.deprecation('path() method is deprecated and replaced by full_path()')
        return self._full_path()

    @noPosargs
    @permittedKwargs({})
    @FeatureNew('ExternalProgram.full_path', '0.55.0')
    def full_path_method(self, args, kwargs):
        return self._full_path()

    def _full_path(self):
        exe = self.held_object
        # Build targets need the backend to resolve their output path.
        if isinstance(exe, build.Executable):
            return self.backend.get_target_filename_abs(exe)
        return exe.get_path()

    def found(self):
        # An Executable build target is always considered found.
        return isinstance(self.held_object, build.Executable) or self.held_object.found()

    def get_command(self):
        return self.held_object.get_command()

    def get_name(self):
        exe = self.held_object
        if isinstance(exe, build.Executable):
            return exe.name
        return exe.get_name()

    def get_version(self, interpreter):
        """Return the program's version, running '<prog> --version' once
        and caching the first number-like token of its output."""
        if isinstance(self.held_object, build.Executable):
            return self.held_object.project_version
        if not self.cached_version:
            raw_cmd = self.get_command() + ['--version']
            # run_command_impl takes the holder itself as the command head
            cmd = [self, '--version']
            res = interpreter.run_command_impl(interpreter.current_node, cmd, {}, True)
            if res.returncode != 0:
                m = 'Running {!r} failed'
                raise InterpreterException(m.format(raw_cmd))
            output = res.stdout.strip()
            if not output:
                output = res.stderr.strip()
            match = re.search(r'([0-9][0-9\.]+)', output)
            if not match:
                m = 'Could not find a version number in output of {!r}'
                raise InterpreterException(m.format(raw_cmd))
            self.cached_version = match.group(1)
        return self.cached_version
class ExternalLibraryHolder(InterpreterObject, ObjectHolder):
    """Holds an external library dependency."""

    def __init__(self, el, pv):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, el, pv)
        self.methods.update({'found': self.found_method,
                             'type_name': self.type_name_method,
                             'partial_dependency': self.partial_dependency_method,
                             })

    def found(self):
        return self.held_object.found()

    @noPosargs
    @permittedKwargs({})
    def type_name_method(self, args, kwargs):
        return self.held_object.type_name

    @noPosargs
    @permittedKwargs({})
    def found_method(self, args, kwargs):
        return self.found()

    def get_name(self):
        return self.held_object.name

    def get_compile_args(self):
        return self.held_object.get_compile_args()

    def get_link_args(self):
        return self.held_object.get_link_args()

    def get_exe_args(self):
        return self.held_object.get_exe_args()

    @FeatureNew('dep.partial_dependency', '0.46.0')
    @noPosargs
    @permittedKwargs(permitted_method_kwargs['partial_dependency'])
    def partial_dependency_method(self, args, kwargs):
        # Return a copy limited to the requested parts.
        pdep = self.held_object.get_partial_dependency(**kwargs)
        return DependencyHolder(pdep, self.subproject)
class GeneratorHolder(InterpreterObject, ObjectHolder):
    """Holds a build.Generator and exposes its process() method."""

    @FeatureNewKwargs('generator', '0.43.0', ['capture'])
    def __init__(self, interp, args, kwargs):
        self.interpreter = interp
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, build.Generator(args, kwargs), interp.subproject)
        self.methods.update({'process': self.process_method})

    @FeatureNewKwargs('generator.process', '0.45.0', ['preserve_path_from'])
    @permittedKwargs({'extra_args', 'preserve_path_from'})
    def process_method(self, args, kwargs):
        """Run the generator on the given sources; returns a
        GeneratedListHolder."""
        extras = mesonlib.stringlistify(kwargs.get('extra_args', []))
        if 'preserve_path_from' in kwargs:
            preserve_path_from = kwargs['preserve_path_from']
            if not isinstance(preserve_path_from, str):
                raise InvalidArguments('Preserve_path_from must be a string.')
            preserve_path_from = os.path.normpath(preserve_path_from)
            if not os.path.isabs(preserve_path_from):
                # This is a bit of a hack. Fix properly before merging.
                raise InvalidArguments('Preserve_path_from must be an absolute path for now. Sorry.')
        else:
            preserve_path_from = None
        gl = self.held_object.process_files('Generator', args, self.interpreter,
                                            preserve_path_from, extra_args=extras)
        return GeneratedListHolder(gl)
class GeneratedListHolder(InterpreterObject, ObjectHolder):
    """Holds a build.GeneratedList.

    arg1 may be a GeneratorHolder (a new list is created from it) or an
    already-built GeneratedList.
    """

    def __init__(self, arg1, extra_args=None):
        InterpreterObject.__init__(self)
        if isinstance(arg1, GeneratorHolder):
            ObjectHolder.__init__(self, build.GeneratedList(arg1.held_object, extra_args if extra_args is not None else []))
        else:
            ObjectHolder.__init__(self, arg1)

    def __repr__(self):
        r = '<{}: {!r}>'
        return r.format(self.__class__.__name__, self.held_object.get_outputs())

    def add_file(self, a):
        self.held_object.add_file(a)
# A machine that's statically known from the cross file
class MachineHolder(InterpreterObject, ObjectHolder):
    """Holds static machine information (from the cross/native file),
    exposing system/cpu/cpu_family/endian."""

    def __init__(self, machine_info):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, machine_info)
        self.methods.update({'system': self.system_method,
                             'cpu': self.cpu_method,
                             'cpu_family': self.cpu_family_method,
                             'endian': self.endian_method,
                             })

    @noPosargs
    @permittedKwargs({})
    def cpu_family_method(self, args, kwargs):
        return self.held_object.cpu_family

    @noPosargs
    @permittedKwargs({})
    def cpu_method(self, args, kwargs):
        return self.held_object.cpu

    @noPosargs
    @permittedKwargs({})
    def system_method(self, args, kwargs):
        return self.held_object.system

    @noPosargs
    @permittedKwargs({})
    def endian_method(self, args, kwargs):
        return self.held_object.endian
class IncludeDirsHolder(InterpreterObject, ObjectHolder):
    """Holds a build.IncludeDirs object."""

    def __init__(self, idobj):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, idobj)
class Headers(InterpreterObject):
    """Represents a set of header files to install (install_headers())."""

    def __init__(self, sources, kwargs):
        InterpreterObject.__init__(self)
        self.sources = sources
        self.install_subdir = kwargs.get('subdir', '')
        if os.path.isabs(self.install_subdir):
            mlog.deprecation('Subdir keyword must not be an absolute path. This will be a hard error in the next release.')
        self.custom_install_dir = kwargs.get('install_dir', None)
        self.custom_install_mode = kwargs.get('install_mode', None)
        if self.custom_install_dir is not None:
            if not isinstance(self.custom_install_dir, str):
                raise InterpreterException('Custom_install_dir must be a string.')

    def set_install_subdir(self, subdir):
        self.install_subdir = subdir

    def get_install_subdir(self):
        return self.install_subdir

    def get_sources(self):
        return self.sources

    def get_custom_install_dir(self):
        return self.custom_install_dir

    def get_custom_install_mode(self):
        return self.custom_install_mode
class DataHolder(InterpreterObject, ObjectHolder):
    """Holds a data-files installation object."""

    def __init__(self, data):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, data)

    def get_source_subdir(self):
        return self.held_object.source_subdir

    def get_sources(self):
        return self.held_object.sources

    def get_install_dir(self):
        return self.held_object.install_dir
class InstallDir(InterpreterObject):
    """Represents a directory to install (install_subdir())."""

    def __init__(self, src_subdir, inst_subdir, install_dir, install_mode, exclude, strip_directory):
        InterpreterObject.__init__(self)
        self.source_subdir = src_subdir
        self.installable_subdir = inst_subdir
        self.install_dir = install_dir
        self.install_mode = install_mode
        self.exclude = exclude
        self.strip_directory = strip_directory
class Man(InterpreterObject):
    """Represents man pages to install; file extensions must be a
    section number between 1 and 8."""

    def __init__(self, sources, kwargs):
        InterpreterObject.__init__(self)
        self.sources = sources
        self.validate_sources()
        self.custom_install_dir = kwargs.get('install_dir', None)
        self.custom_install_mode = kwargs.get('install_mode', None)
        if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str):
            raise InterpreterException('Custom_install_dir must be a string.')

    def validate_sources(self):
        # Reject any source whose extension is not a man section (1-8).
        for s in self.sources:
            try:
                num = int(s.split('.')[-1])
            except (IndexError, ValueError):
                num = 0
            if num < 1 or num > 8:
                raise InvalidArguments('Man file must have a file extension of a number between 1 and 8')

    def get_custom_install_dir(self):
        return self.custom_install_dir

    def get_custom_install_mode(self):
        return self.custom_install_mode

    def get_sources(self):
        return self.sources
class GeneratedObjectsHolder(InterpreterObject, ObjectHolder):
    """Interpreter wrapper around extracted object files (the return value of
    extract_objects()/extract_all_objects())."""
    def __init__(self, held_object):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, held_object)
class TargetHolder(InterpreterObject, ObjectHolder):
    """Common base for holders of build targets.  Unlike the simpler holders
    above, it records the owning subproject and keeps a reference to the
    interpreter so methods can reach the backend."""
    def __init__(self, target, interp):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, target, interp.subproject)
        self.interpreter = interp
class BuildTargetHolder(TargetHolder):
    """Holder for compiled build targets (executables, libraries, ...).

    Exposes the target's meson.build-visible methods (full_path(),
    extract_objects(), ...) via the methods dict.
    """
    def __init__(self, target, interp):
        super().__init__(target, interp)
        self.methods.update({'extract_objects': self.extract_objects_method,
                             'extract_all_objects': self.extract_all_objects_method,
                             'name': self.name_method,
                             'get_id': self.get_id_method,
                             'outdir': self.outdir_method,
                             'full_path': self.full_path_method,
                             'private_dir_include': self.private_dir_include_method,
                             })
    def __repr__(self):
        r = '<{} {}: {}>'
        h = self.held_object
        return r.format(self.__class__.__name__, h.get_id(), h.filename)
    def is_cross(self):
        """Return True when the target is built for a machine other than the build machine."""
        return not self.held_object.environment.machines.matches_build_machine(self.held_object.for_machine)
    @noPosargs
    @permittedKwargs({})
    def private_dir_include_method(self, args, kwargs):
        """Return an include_directories object for the target's private build dir."""
        return IncludeDirsHolder(build.IncludeDirs('', [], False,
                                                   [self.interpreter.backend.get_target_private_dir(self.held_object)]))
    @noPosargs
    @permittedKwargs({})
    def full_path_method(self, args, kwargs):
        """Return the absolute path of the built output file."""
        return self.interpreter.backend.get_target_filename_abs(self.held_object)
    @noPosargs
    @permittedKwargs({})
    def outdir_method(self, args, kwargs):
        """Return the directory (relative to the build root) the output is written to."""
        return self.interpreter.backend.get_target_dir(self.held_object)
    @permittedKwargs({})
    def extract_objects_method(self, args, kwargs):
        """Return the object files produced from the given sources of this target."""
        gobjs = self.held_object.extract_objects(args)
        return GeneratedObjectsHolder(gobjs)
    @FeatureNewKwargs('extract_all_objects', '0.46.0', ['recursive'])
    @noPosargs
    @permittedKwargs({'recursive'})
    def extract_all_objects_method(self, args, kwargs):
        """Return all object files of this target; 'recursive' also descends into objects
        taken from other targets."""
        recursive = kwargs.get('recursive', False)
        gobjs = self.held_object.extract_all_objects(recursive)
        # Warn when the default (non-recursive) is relied upon implicitly,
        # since the default is slated to change.
        if gobjs.objlist and 'recursive' not in kwargs:
            mlog.warning('extract_all_objects called without setting recursive '
                         'keyword argument. Meson currently defaults to '
                         'non-recursive to maintain backward compatibility but '
                         'the default will be changed in the future.',
                         location=self.current_node)
        return GeneratedObjectsHolder(gobjs)
    @noPosargs
    @permittedKwargs({})
    def get_id_method(self, args, kwargs):
        """Return the internal unique id of the target."""
        return self.held_object.get_id()
    @FeatureNew('name', '0.54.0')
    @noPosargs
    @permittedKwargs({})
    def name_method(self, args, kwargs):
        """Return the target's name as given in meson.build."""
        return self.held_object.name
class ExecutableHolder(BuildTargetHolder):
    """Holder for executable build targets.

    The previous explicit __init__ only forwarded its arguments unchanged to
    the parent class, so it was a useless override and has been removed;
    construction behavior is identical.
    """
class StaticLibraryHolder(BuildTargetHolder):
    """Holder for static library build targets.

    The previous explicit __init__ only forwarded its arguments unchanged to
    the parent class, so it was a useless override and has been removed;
    construction behavior is identical.
    """
class SharedLibraryHolder(BuildTargetHolder):
    """Holder for shared library build targets."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
        # Set to True only when called from self.func_shared_lib().
        target.shared_library_only = False
class BothLibrariesHolder(BuildTargetHolder):
    """Holder returned by both_libraries(): wraps a shared and a static
    variant of the same library and lets build files pick either one."""
    def __init__(self, shared_holder, static_holder, interp):
        # FIXME: This build target always represents the shared library, but
        # that should be configurable.
        super().__init__(shared_holder.held_object, interp)
        self.shared_holder = shared_holder
        self.static_holder = static_holder
        self.methods.update({'get_shared_lib': self.get_shared_lib_method,
                             'get_static_lib': self.get_static_lib_method,
                             })
    def __repr__(self):
        r = '<{} {}: {}, {}: {}>'
        h1 = self.shared_holder.held_object
        h2 = self.static_holder.held_object
        return r.format(self.__class__.__name__, h1.get_id(), h1.filename, h2.get_id(), h2.filename)
    @noPosargs
    @permittedKwargs({})
    def get_shared_lib_method(self, args, kwargs):
        """Return the shared-library variant."""
        return self.shared_holder
    @noPosargs
    @permittedKwargs({})
    def get_static_lib_method(self, args, kwargs):
        """Return the static-library variant."""
        return self.static_holder
class SharedModuleHolder(BuildTargetHolder):
    """Holder for shared module build targets.

    The previous explicit __init__ only forwarded its arguments unchanged to
    the parent class, so it was a useless override and has been removed;
    construction behavior is identical.
    """
class JarHolder(BuildTargetHolder):
    """Holder for Java jar build targets.

    The previous explicit __init__ only forwarded its arguments unchanged to
    the parent class, so it was a useless override and has been removed;
    construction behavior is identical.
    """
class CustomTargetIndexHolder(TargetHolder):
    """Holder for a single indexed output of a custom target
    (custom_target[i]); only full_path() is exposed."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
        self.methods.update({'full_path': self.full_path_method,
                             })
    @FeatureNew('custom_target[i].full_path', '0.54.0')
    @noPosargs
    @permittedKwargs({})
    def full_path_method(self, args, kwargs):
        """Return the absolute path of this indexed output file."""
        return self.interpreter.backend.get_target_filename_abs(self.held_object)
class CustomTargetHolder(TargetHolder):
    """Holder for custom_target() objects.  Supports read-only indexing to
    access individual outputs; mutation through indexing is rejected."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
        self.methods.update({'full_path': self.full_path_method,
                             'to_list': self.to_list_method,
                             })
    def __repr__(self):
        r = '<{} {}: {}>'
        h = self.held_object
        return r.format(self.__class__.__name__, h.get_id(), h.command)
    @noPosargs
    @permittedKwargs({})
    def full_path_method(self, args, kwargs):
        """Return the absolute path of the custom target's output."""
        return self.interpreter.backend.get_target_filename_abs(self.held_object)
    @FeatureNew('custom_target.to_list', '0.54.0')
    @noPosargs
    @permittedKwargs({})
    def to_list_method(self, args, kwargs):
        """Return a list with one CustomTargetIndexHolder per output."""
        result = []
        for i in self.held_object:
            result.append(CustomTargetIndexHolder(i, self.interpreter))
        return result
    def __getitem__(self, index):
        return CustomTargetIndexHolder(self.held_object[index], self.interpreter)
    def __setitem__(self, index, value): # lgtm[py/unexpected-raise-in-special-method]
        raise InterpreterException('Cannot set a member of a CustomTarget')
    def __delitem__(self, index): # lgtm[py/unexpected-raise-in-special-method]
        raise InterpreterException('Cannot delete a member of a CustomTarget')
    def outdir_include(self):
        """Return an include_directories object pointing at the target's output dir
        (anchored at @BUILD_ROOT@)."""
        return IncludeDirsHolder(build.IncludeDirs('', [], False,
                                                   [os.path.join('@BUILD_ROOT@', self.interpreter.backend.get_target_dir(self.held_object))]))
class RunTargetHolder(TargetHolder):
    """Holder for run_target() objects."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
    def __repr__(self):
        r = '<{} {}: {}>'
        h = self.held_object
        return r.format(self.__class__.__name__, h.get_id(), h.command)
class Test(InterpreterObject):
    """In-memory representation of one test() / benchmark() definition,
    later serialized for the test runner."""
    def __init__(self, name: str, project: str, suite: T.List[str], exe: build.Executable,
                 depends: T.List[T.Union[build.CustomTarget, build.BuildTarget]],
                 is_parallel: bool, cmd_args: T.List[str], env: build.EnvironmentVariables,
                 should_fail: bool, timeout: int, workdir: T.Optional[str], protocol: str,
                 priority: int):
        InterpreterObject.__init__(self)
        self.name = name
        self.suite = suite
        self.project_name = project
        self.exe = exe
        self.depends = depends
        self.is_parallel = is_parallel
        self.cmd_args = cmd_args
        self.env = env
        self.should_fail = should_fail
        self.timeout = timeout
        self.workdir = workdir
        # Stored as a TestProtocol enum, not the raw string.
        self.protocol = TestProtocol.from_str(protocol)
        self.priority = priority
    def get_exe(self) -> build.Executable:
        """Return the executable this test runs."""
        return self.exe
    def get_name(self) -> str:
        """Return the test's name."""
        return self.name
class SubprojectHolder(InterpreterObject, ObjectHolder):
    """Holder for the result of a subproject() call.

    held_object is the sub-interpreter when the subproject was configured,
    or None when it was disabled/failed (found() then returns False and the
    failure details are kept in disabled_feature/exception).
    """
    def __init__(self, subinterpreter, subproject_dir, name, warnings=0, disabled_feature=None,
                 exception=None):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, subinterpreter)
        self.name = name
        self.warnings = warnings
        self.disabled_feature = disabled_feature
        self.exception = exception
        self.subproject_dir = subproject_dir
        self.methods.update({'get_variable': self.get_variable_method,
                             'found': self.found_method,
                             })
    @noPosargs
    @permittedKwargs({})
    def found_method(self, args, kwargs):
        return self.found()
    def found(self):
        """Return True when the subproject was successfully configured."""
        return self.held_object is not None
    @permittedKwargs({})
    @noArgsFlattening
    def get_variable_method(self, args, kwargs):
        """Look up a variable in the subproject; the optional second argument
        is returned as a fallback when the variable does not exist."""
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Get_variable takes one or two arguments.')
        if not self.found():
            raise InterpreterException('Subproject "%s/%s" disabled can\'t get_variable on it.' % (
                self.subproject_dir, self.name))
        varname = args[0]
        if not isinstance(varname, str):
            raise InterpreterException('Get_variable first argument must be a string.')
        try:
            return self.held_object.variables[varname]
        except KeyError:
            pass
        # Variable missing: fall back to the default if one was supplied.
        if len(args) == 2:
            return args[1]
        raise InvalidArguments('Requested variable "{0}" not found.'.format(varname))
# Keyword arguments accepted by the compiler header-check methods
# (check_header(), has_header(), has_header_symbol()).
header_permitted_kwargs = {
    'required',
    'prefix',
    'no_builtin_args',
    'include_directories',
    'args',
    'dependencies',
}
# Keyword arguments accepted by compiler.find_library().  Every header-check
# kwarg is additionally allowed with a 'header_' prefix so the has_headers
# probe can be customized.
find_library_permitted_kwargs = {
    'has_headers',
    'required',
    'dirs',
    'static',
}
find_library_permitted_kwargs |= {'header_' + k for k in header_permitted_kwargs}
class CompilerHolder(InterpreterObject):
    """Interpreter object exposing one compiler to meson.build files.

    Wraps a per-language Compiler instance together with the Environment and
    the calling subproject, and publishes the compiler introspection API
    (compiles(), has_header(), find_library(), sizeof(), ...) via the
    methods dict.
    """
    def __init__(self, compiler, env, subproject):
        InterpreterObject.__init__(self)
        self.compiler = compiler
        self.environment = env
        self.subproject = subproject
        self.methods.update({'compiles': self.compiles_method,
                             'links': self.links_method,
                             'get_id': self.get_id_method,
                             'get_linker_id': self.get_linker_id_method,
                             'compute_int': self.compute_int_method,
                             'sizeof': self.sizeof_method,
                             'get_define': self.get_define_method,
                             'check_header': self.check_header_method,
                             'has_header': self.has_header_method,
                             'has_header_symbol': self.has_header_symbol_method,
                             'run': self.run_method,
                             'has_function': self.has_function_method,
                             'has_member': self.has_member_method,
                             'has_members': self.has_members_method,
                             'has_type': self.has_type_method,
                             'alignment': self.alignment_method,
                             'version': self.version_method,
                             'cmd_array': self.cmd_array_method,
                             'find_library': self.find_library_method,
                             'has_argument': self.has_argument_method,
                             'has_function_attribute': self.has_func_attribute_method,
                             'get_supported_function_attributes': self.get_supported_function_attributes_method,
                             'has_multi_arguments': self.has_multi_arguments_method,
                             'get_supported_arguments': self.get_supported_arguments_method,
                             'first_supported_argument': self.first_supported_argument_method,
                             'has_link_argument': self.has_link_argument_method,
                             'has_multi_link_arguments': self.has_multi_link_arguments_method,
                             'get_supported_link_arguments': self.get_supported_link_arguments_method,
                             'first_supported_link_argument': self.first_supported_link_argument_method,
                             'unittest_args': self.unittest_args_method,
                             'symbols_have_underscore_prefix': self.symbols_have_underscore_prefix_method,
                             'get_argument_syntax': self.get_argument_syntax_method,
                             })
    def _dep_msg(self, deps, endl):
        """Format the 'with dependency X' suffix used in check log lines."""
        msg_single = 'with dependency {}'
        msg_many = 'with dependencies {}'
        if not deps:
            return endl
        if endl is None:
            endl = ''
        tpl = msg_many if len(deps) > 1 else msg_single
        names = []
        for d in deps:
            if isinstance(d, dependencies.ExternalLibrary):
                name = '-l' + d.name
            else:
                name = d.name
            names.append(name)
        return tpl.format(', '.join(names)) + endl
    @noPosargs
    @permittedKwargs({})
    def version_method(self, args, kwargs):
        """Return the compiler's version string."""
        return self.compiler.version
    @noPosargs
    @permittedKwargs({})
    def cmd_array_method(self, args, kwargs):
        """Return the command array used to invoke the compiler."""
        return self.compiler.exelist
    def determine_args(self, kwargs, mode='link'):
        """Build the extra argument list for a check from the common kwargs
        (no_builtin_args, include_directories, args)."""
        nobuiltins = kwargs.get('no_builtin_args', False)
        if not isinstance(nobuiltins, bool):
            raise InterpreterException('Type of no_builtin_args not a boolean.')
        args = []
        incdirs = extract_as_list(kwargs, 'include_directories')
        for i in incdirs:
            if not isinstance(i, IncludeDirsHolder):
                raise InterpreterException('Include directories argument must be an include_directories object.')
            for idir in i.held_object.get_incdirs():
                # Include dirs are resolved relative to the source tree.
                idir = os.path.join(self.environment.get_source_dir(),
                                    i.held_object.get_curdir(), idir)
                args += self.compiler.get_include_args(idir, False)
        if not nobuiltins:
            for_machine = Interpreter.machine_from_native_kwarg(kwargs)
            opts = self.environment.coredata.compiler_options[for_machine][self.compiler.language]
            args += self.compiler.get_option_compile_args(opts)
            if mode == 'link':
                args += self.compiler.get_option_link_args(opts)
        args += mesonlib.stringlistify(kwargs.get('args', []))
        return args
    def determine_dependencies(self, kwargs, endl=':'):
        """Extract and validate the 'dependencies' kwarg; return (deps, log message)."""
        deps = kwargs.get('dependencies', None)
        if deps is not None:
            deps = listify(deps)
            final_deps = []
            for d in deps:
                try:
                    d = d.held_object
                except Exception:
                    pass
                if isinstance(d, InternalDependency) or not isinstance(d, Dependency):
                    raise InterpreterException('Dependencies must be external dependencies')
                final_deps.append(d)
            deps = final_deps
        return deps, self._dep_msg(deps, endl)
    @permittedKwargs({
        'prefix',
        'args',
        'dependencies',
    })
    def alignment_method(self, args, kwargs):
        """Return the alignment of the given type name, in bytes."""
        if len(args) != 1:
            raise InterpreterException('Alignment method takes exactly one positional argument.')
        check_stringlist(args)
        typename = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of alignment must be a string.')
        extra_args = mesonlib.stringlistify(kwargs.get('args', []))
        deps, msg = self.determine_dependencies(kwargs)
        result = self.compiler.alignment(typename, prefix, self.environment,
                                         extra_args=extra_args,
                                         dependencies=deps)
        mlog.log('Checking for alignment of', mlog.bold(typename, True), msg, result)
        return result
    @permittedKwargs({
        'name',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def run_method(self, args, kwargs):
        """Compile and run the given code snippet; return a TryRunResultHolder."""
        if len(args) != 1:
            raise InterpreterException('Run method takes exactly one positional argument.')
        code = args[0]
        if isinstance(code, mesonlib.File):
            code = mesonlib.File.from_absolute_file(
                code.rel_to_builddir(self.environment.source_dir))
        elif not isinstance(code, str):
            raise InvalidArguments('Argument must be string or file.')
        testname = kwargs.get('name', '')
        if not isinstance(testname, str):
            raise InterpreterException('Testname argument must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs, endl=None)
        result = self.compiler.run(code, self.environment, extra_args=extra_args,
                                   dependencies=deps)
        if len(testname) > 0:
            if not result.compiled:
                h = mlog.red('DID NOT COMPILE')
            elif result.returncode == 0:
                h = mlog.green('YES')
            else:
                h = mlog.red('NO (%d)' % result.returncode)
            mlog.log('Checking if', mlog.bold(testname, True), msg, 'runs:', h)
        return TryRunResultHolder(result)
    @noPosargs
    @permittedKwargs({})
    def get_id_method(self, args, kwargs):
        """Return the compiler's id string (e.g. 'gcc')."""
        return self.compiler.get_id()
    @noPosargs
    @permittedKwargs({})
    @FeatureNew('compiler.get_linker_id', '0.53.0')
    def get_linker_id_method(self, args, kwargs):
        """Return the linker's id string."""
        return self.compiler.get_linker_id()
    @noPosargs
    @permittedKwargs({})
    def symbols_have_underscore_prefix_method(self, args, kwargs):
        '''
        Check if the compiler prefixes _ (underscore) to global C symbols
        See: https://en.wikipedia.org/wiki/Name_mangling#C
        '''
        return self.compiler.symbols_have_underscore_prefix(self.environment)
    @noPosargs
    @permittedKwargs({})
    def unittest_args_method(self, args, kwargs):
        '''
        This function is deprecated and should not be used.
        It can be removed in a future version of Meson.
        '''
        if not hasattr(self.compiler, 'get_feature_args'):
            raise InterpreterException('This {} compiler has no feature arguments.'.format(self.compiler.get_display_language()))
        build_to_src = os.path.relpath(self.environment.get_source_dir(), self.environment.get_build_dir())
        return self.compiler.get_feature_args({'unittest': 'true'}, build_to_src)
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def has_member_method(self, args, kwargs):
        """Check whether a type has a given member; returns a boolean."""
        if len(args) != 2:
            raise InterpreterException('Has_member takes exactly two arguments.')
        check_stringlist(args)
        typename, membername = args
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_member must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        had, cached = self.compiler.has_members(typename, [membername], prefix,
                                                self.environment,
                                                extra_args=extra_args,
                                                dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if had:
            hadtxt = mlog.green('YES')
        else:
            hadtxt = mlog.red('NO')
        mlog.log('Checking whether type', mlog.bold(typename, True),
                 'has member', mlog.bold(membername, True), msg, hadtxt, cached)
        return had
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def has_members_method(self, args, kwargs):
        """Check whether a type has all of the given members; returns a boolean."""
        if len(args) < 2:
            raise InterpreterException('Has_members needs at least two arguments.')
        check_stringlist(args)
        typename, *membernames = args
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_members must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        had, cached = self.compiler.has_members(typename, membernames, prefix,
                                                self.environment,
                                                extra_args=extra_args,
                                                dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if had:
            hadtxt = mlog.green('YES')
        else:
            hadtxt = mlog.red('NO')
        members = mlog.bold(', '.join(['"{}"'.format(m) for m in membernames]))
        mlog.log('Checking whether type', mlog.bold(typename, True),
                 'has members', members, msg, hadtxt, cached)
        return had
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def has_function_method(self, args, kwargs):
        """Check whether a function is available; returns a boolean."""
        if len(args) != 1:
            raise InterpreterException('Has_function takes exactly one argument.')
        check_stringlist(args)
        funcname = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_function must be a string.')
        extra_args = self.determine_args(kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        had, cached = self.compiler.has_function(funcname, prefix, self.environment,
                                                 extra_args=extra_args,
                                                 dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if had:
            hadtxt = mlog.green('YES')
        else:
            hadtxt = mlog.red('NO')
        mlog.log('Checking for function', mlog.bold(funcname, True), msg, hadtxt, cached)
        return had
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def has_type_method(self, args, kwargs):
        """Check whether a type exists; returns a boolean."""
        if len(args) != 1:
            raise InterpreterException('Has_type takes exactly one argument.')
        check_stringlist(args)
        typename = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_type must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        had, cached = self.compiler.has_type(typename, prefix, self.environment,
                                             extra_args=extra_args, dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if had:
            hadtxt = mlog.green('YES')
        else:
            hadtxt = mlog.red('NO')
        mlog.log('Checking for type', mlog.bold(typename, True), msg, hadtxt, cached)
        return had
    @FeatureNew('compiler.compute_int', '0.40.0')
    @permittedKwargs({
        'prefix',
        'low',
        'high',
        'guess',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def compute_int_method(self, args, kwargs):
        """Compute the value of an integer expression at configure time."""
        if len(args) != 1:
            raise InterpreterException('Compute_int takes exactly one argument.')
        check_stringlist(args)
        expression = args[0]
        prefix = kwargs.get('prefix', '')
        low = kwargs.get('low', None)
        high = kwargs.get('high', None)
        guess = kwargs.get('guess', None)
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of compute_int must be a string.')
        if low is not None and not isinstance(low, int):
            raise InterpreterException('Low argument of compute_int must be an int.')
        if high is not None and not isinstance(high, int):
            raise InterpreterException('High argument of compute_int must be an int.')
        if guess is not None and not isinstance(guess, int):
            raise InterpreterException('Guess argument of compute_int must be an int.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        res = self.compiler.compute_int(expression, low, high, guess, prefix,
                                        self.environment, extra_args=extra_args,
                                        dependencies=deps)
        mlog.log('Computing int of', mlog.bold(expression, True), msg, res)
        return res
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def sizeof_method(self, args, kwargs):
        """Return sizeof() of the given type/expression name in bytes."""
        if len(args) != 1:
            raise InterpreterException('Sizeof takes exactly one argument.')
        check_stringlist(args)
        element = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of sizeof must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        esize = self.compiler.sizeof(element, prefix, self.environment,
                                     extra_args=extra_args, dependencies=deps)
        mlog.log('Checking for size of', mlog.bold(element, True), msg, esize)
        return esize
    @FeatureNew('compiler.get_define', '0.40.0')
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def get_define_method(self, args, kwargs):
        """Return the value of a preprocessor define as a string."""
        if len(args) != 1:
            raise InterpreterException('get_define() takes exactly one argument.')
        check_stringlist(args)
        element = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of get_define() must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        value, cached = self.compiler.get_define(element, prefix, self.environment,
                                                 extra_args=extra_args,
                                                 dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        mlog.log('Fetching value of define', mlog.bold(element, True), msg, value, cached)
        return value
    @permittedKwargs({
        'name',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def compiles_method(self, args, kwargs):
        """Check that the given code snippet compiles; returns a boolean."""
        if len(args) != 1:
            raise InterpreterException('compiles method takes exactly one argument.')
        code = args[0]
        if isinstance(code, mesonlib.File):
            code = mesonlib.File.from_absolute_file(
                code.rel_to_builddir(self.environment.source_dir))
        elif not isinstance(code, str):
            raise InvalidArguments('Argument must be string or file.')
        testname = kwargs.get('name', '')
        if not isinstance(testname, str):
            raise InterpreterException('Testname argument must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs, endl=None)
        result, cached = self.compiler.compiles(code, self.environment,
                                                extra_args=extra_args,
                                                dependencies=deps)
        if len(testname) > 0:
            if result:
                h = mlog.green('YES')
            else:
                h = mlog.red('NO')
            cached = mlog.blue('(cached)') if cached else ''
            mlog.log('Checking if', mlog.bold(testname, True), msg, 'compiles:', h, cached)
        return result
    @permittedKwargs({
        'name',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def links_method(self, args, kwargs):
        """Check that the given code snippet compiles and links; returns a boolean."""
        if len(args) != 1:
            raise InterpreterException('links method takes exactly one argument.')
        code = args[0]
        if isinstance(code, mesonlib.File):
            code = mesonlib.File.from_absolute_file(
                code.rel_to_builddir(self.environment.source_dir))
        elif not isinstance(code, str):
            raise InvalidArguments('Argument must be string or file.')
        testname = kwargs.get('name', '')
        if not isinstance(testname, str):
            raise InterpreterException('Testname argument must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs, endl=None)
        result, cached = self.compiler.links(code, self.environment,
                                             extra_args=extra_args,
                                             dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if len(testname) > 0:
            if result:
                h = mlog.green('YES')
            else:
                h = mlog.red('NO')
            mlog.log('Checking if', mlog.bold(testname, True), msg, 'links:', h, cached)
        return result
    @FeatureNew('compiler.check_header', '0.47.0')
    @FeatureNewKwargs('compiler.check_header', '0.50.0', ['required'])
    @permittedKwargs(header_permitted_kwargs)
    def check_header_method(self, args, kwargs):
        """Check that a header exists AND is usable (compiles when included)."""
        if len(args) != 1:
            raise InterpreterException('check_header method takes exactly one argument.')
        check_stringlist(args)
        hname = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_header must be a string.')
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False)
        if disabled:
            mlog.log('Check usable header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled')
            return False
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        haz, cached = self.compiler.check_header(hname, prefix, self.environment,
                                                 extra_args=extra_args,
                                                 dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if required and not haz:
            raise InterpreterException('{} header {!r} not usable'.format(self.compiler.get_display_language(), hname))
        elif haz:
            h = mlog.green('YES')
        else:
            h = mlog.red('NO')
        mlog.log('Check usable header', mlog.bold(hname, True), msg, h, cached)
        return haz
    @FeatureNewKwargs('compiler.has_header', '0.50.0', ['required'])
    @permittedKwargs(header_permitted_kwargs)
    def has_header_method(self, args, kwargs):
        """Check that a header exists (preprocessor-level check only)."""
        if len(args) != 1:
            raise InterpreterException('has_header method takes exactly one argument.')
        check_stringlist(args)
        hname = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_header must be a string.')
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False)
        if disabled:
            mlog.log('Has header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled')
            return False
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        haz, cached = self.compiler.has_header(hname, prefix, self.environment,
                                               extra_args=extra_args, dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if required and not haz:
            raise InterpreterException('{} header {!r} not found'.format(self.compiler.get_display_language(), hname))
        elif haz:
            h = mlog.green('YES')
        else:
            h = mlog.red('NO')
        mlog.log('Has header', mlog.bold(hname, True), msg, h, cached)
        return haz
    @FeatureNewKwargs('compiler.has_header_symbol', '0.50.0', ['required'])
    @permittedKwargs(header_permitted_kwargs)
    def has_header_symbol_method(self, args, kwargs):
        """Check that a header declares the given symbol."""
        if len(args) != 2:
            raise InterpreterException('has_header_symbol method takes exactly two arguments.')
        check_stringlist(args)
        hname, symbol = args
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_header_symbol must be a string.')
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False)
        if disabled:
            mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), 'skipped: feature', mlog.bold(feature), 'disabled')
            return False
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        haz, cached = self.compiler.has_header_symbol(hname, symbol, prefix, self.environment,
                                                      extra_args=extra_args,
                                                      dependencies=deps)
        if required and not haz:
            raise InterpreterException('{} symbol {} not found in header {}'.format(self.compiler.get_display_language(), symbol, hname))
        elif haz:
            h = mlog.green('YES')
        else:
            h = mlog.red('NO')
        cached = mlog.blue('(cached)') if cached else ''
        mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), msg, h, cached)
        return haz
    def notfound_library(self, libname):
        """Return a silent not-found ExternalLibrary holder for libname."""
        lib = dependencies.ExternalLibrary(libname, None,
                                           self.environment,
                                           self.compiler.language,
                                           silent=True)
        return ExternalLibraryHolder(lib, self.subproject)
    @FeatureNewKwargs('compiler.find_library', '0.51.0', ['static'])
    @FeatureNewKwargs('compiler.find_library', '0.50.0', ['has_headers'])
    @FeatureNewKwargs('compiler.find_library', '0.49.0', ['disabler'])
    @disablerIfNotFound
    @permittedKwargs(find_library_permitted_kwargs)
    def find_library_method(self, args, kwargs):
        """Search for an external library, optionally verifying headers first."""
        # TODO add dependencies support?
        if len(args) != 1:
            raise InterpreterException('find_library method takes one argument.')
        libname = args[0]
        if not isinstance(libname, str):
            raise InterpreterException('Library name not a string.')
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            mlog.log('Library', mlog.bold(libname), 'skipped: feature', mlog.bold(feature), 'disabled')
            return self.notfound_library(libname)
        # 'header_'-prefixed kwargs are forwarded (with the prefix stripped)
        # to has_header_method for each entry in has_headers.
        has_header_kwargs = {k[7:]: v for k, v in kwargs.items() if k.startswith('header_')}
        has_header_kwargs['required'] = required
        headers = mesonlib.stringlistify(kwargs.get('has_headers', []))
        for h in headers:
            if not self.has_header_method([h], has_header_kwargs):
                return self.notfound_library(libname)
        search_dirs = extract_search_dirs(kwargs)
        libtype = mesonlib.LibType.PREFER_SHARED
        if 'static' in kwargs:
            if not isinstance(kwargs['static'], bool):
                raise InterpreterException('static must be a boolean')
            libtype = mesonlib.LibType.STATIC if kwargs['static'] else mesonlib.LibType.SHARED
        linkargs = self.compiler.find_library(libname, self.environment, search_dirs, libtype)
        if required and not linkargs:
            raise InterpreterException(
                '{} library {!r} not found'.format(self.compiler.get_display_language(), libname))
        lib = dependencies.ExternalLibrary(libname, linkargs, self.environment,
                                           self.compiler.language)
        return ExternalLibraryHolder(lib, self.subproject)
    @permittedKwargs({})
    def has_argument_method(self, args: T.Sequence[str], kwargs) -> bool:
        """Check whether the compiler accepts a single compile argument."""
        args = mesonlib.stringlistify(args)
        if len(args) != 1:
            raise InterpreterException('has_argument takes exactly one argument.')
        return self.has_multi_arguments_method(args, kwargs)
    @permittedKwargs({})
    def has_multi_arguments_method(self, args: T.Sequence[str], kwargs: dict):
        """Check whether the compiler accepts all of the given compile arguments at once."""
        args = mesonlib.stringlistify(args)
        result, cached = self.compiler.has_multi_arguments(args, self.environment)
        if result:
            h = mlog.green('YES')
        else:
            h = mlog.red('NO')
        cached = mlog.blue('(cached)') if cached else ''
        mlog.log(
            'Compiler for {} supports arguments {}:'.format(
                self.compiler.get_display_language(), ' '.join(args)),
            h, cached)
        return result
    @FeatureNew('compiler.get_supported_arguments', '0.43.0')
    @permittedKwargs({})
    def get_supported_arguments_method(self, args, kwargs):
        """Return the subset of the given compile arguments the compiler accepts."""
        args = mesonlib.stringlistify(args)
        supported_args = []
        for arg in args:
            if self.has_argument_method(arg, kwargs):
                supported_args.append(arg)
        return supported_args
    @permittedKwargs({})
    def first_supported_argument_method(self, args: T.Sequence[str], kwargs: dict) -> T.List[str]:
        """Return a one-element list with the first accepted argument, or []."""
        for arg in mesonlib.stringlistify(args):
            if self.has_argument_method(arg, kwargs):
                mlog.log('First supported argument:', mlog.bold(arg))
                return [arg]
        mlog.log('First supported argument:', mlog.red('None'))
        return []
    @FeatureNew('compiler.has_link_argument', '0.46.0')
    @permittedKwargs({})
    def has_link_argument_method(self, args, kwargs):
        """Check whether the linker accepts a single link argument."""
        args = mesonlib.stringlistify(args)
        if len(args) != 1:
            raise InterpreterException('has_link_argument takes exactly one argument.')
        return self.has_multi_link_arguments_method(args, kwargs)
    @FeatureNew('compiler.has_multi_link_argument', '0.46.0')
    @permittedKwargs({})
    def has_multi_link_arguments_method(self, args, kwargs):
        """Check whether the linker accepts all of the given link arguments at once."""
        args = mesonlib.stringlistify(args)
        result, cached = self.compiler.has_multi_link_arguments(args, self.environment)
        cached = mlog.blue('(cached)') if cached else ''
        if result:
            h = mlog.green('YES')
        else:
            h = mlog.red('NO')
        mlog.log(
            'Compiler for {} supports link arguments {}:'.format(
                self.compiler.get_display_language(), ' '.join(args)),
            h, cached)
        return result
    @FeatureNew('compiler.get_supported_link_arguments_method', '0.46.0')
    @permittedKwargs({})
    def get_supported_link_arguments_method(self, args, kwargs):
        """Return the subset of the given link arguments the linker accepts."""
        args = mesonlib.stringlistify(args)
        supported_args = []
        for arg in args:
            if self.has_link_argument_method(arg, kwargs):
                supported_args.append(arg)
        return supported_args
    @FeatureNew('compiler.first_supported_link_argument_method', '0.46.0')
    @permittedKwargs({})
    def first_supported_link_argument_method(self, args, kwargs):
        """Return a one-element list with the first accepted link argument, or []."""
        for i in mesonlib.stringlistify(args):
            if self.has_link_argument_method(i, kwargs):
                mlog.log('First supported link argument:', mlog.bold(i))
                return [i]
        mlog.log('First supported link argument:', mlog.red('None'))
        return []
    @FeatureNew('compiler.has_function_attribute', '0.48.0')
    @permittedKwargs({})
    def has_func_attribute_method(self, args, kwargs):
        """Check whether the compiler supports the given function attribute."""
        args = mesonlib.stringlistify(args)
        if len(args) != 1:
            raise InterpreterException('has_func_attribute takes exactly one argument.')
        result, cached = self.compiler.has_func_attribute(args[0], self.environment)
        cached = mlog.blue('(cached)') if cached else ''
        h = mlog.green('YES') if result else mlog.red('NO')
        mlog.log('Compiler for {} supports function attribute {}:'.format(self.compiler.get_display_language(), args[0]), h, cached)
        return result
    @FeatureNew('compiler.get_supported_function_attributes', '0.48.0')
    @permittedKwargs({})
    def get_supported_function_attributes_method(self, args, kwargs):
        """Return the subset of the given function attributes the compiler supports."""
        args = mesonlib.stringlistify(args)
        return [a for a in args if self.has_func_attribute_method(a, kwargs)]
    @FeatureNew('compiler.get_argument_syntax_method', '0.49.0')
    @noPosargs
    @noKwargs
    def get_argument_syntax_method(self, args, kwargs):
        """Return the compiler's argument syntax family (e.g. 'gcc' or 'msvc')."""
        return self.compiler.get_argument_syntax()
# Read-only snapshot of the interpreter's current state that is handed to
# extension-module functions (constructed in ModuleHolder.method_call below).
ModuleState = collections.namedtuple('ModuleState', [
    'source_root', 'build_to_src', 'subproject', 'subdir', 'current_lineno', 'environment',
    'project_name', 'project_version', 'backend', 'targets',
    'data', 'headers', 'man', 'global_args', 'project_args', 'build_machine',
    'host_machine', 'target_machine', 'current_node'])
class ModuleHolder(InterpreterObject, ObjectHolder):
    """Holder that dispatches meson.build method calls to an imported
    extension module, passing it a ModuleState snapshot."""
    def __init__(self, modname, module, interpreter):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, module)
        self.modname = modname
        self.interpreter = interpreter
    def method_call(self, method_name, args, kwargs):
        """Look up and invoke *method_name* on the held module.

        Raises InvalidArguments for unknown or private (underscore-prefixed)
        methods and InterpreterException if a non-snippet module mutated the
        target list behind the interpreter's back.
        """
        try:
            fn = getattr(self.held_object, method_name)
        except AttributeError:
            raise InvalidArguments('Module %s does not have method %s.' % (self.modname, method_name))
        if method_name.startswith('_'):
            raise InvalidArguments('Function {!r} in module {!r} is private.'.format(method_name, self.modname))
        # Modules may opt out of argument flattening via this attribute.
        if not getattr(fn, 'no-args-flattening', False):
            args = flatten(args)
        # This is not 100% reliable but we can't use hash()
        # because the Build object contains dicts and lists.
        num_targets = len(self.interpreter.build.targets)
        state = ModuleState(
            source_root = self.interpreter.environment.get_source_dir(),
            build_to_src=mesonlib.relpath(self.interpreter.environment.get_source_dir(),
                                          self.interpreter.environment.get_build_dir()),
            subproject=self.interpreter.subproject,
            subdir=self.interpreter.subdir,
            current_lineno=self.interpreter.current_lineno,
            environment=self.interpreter.environment,
            project_name=self.interpreter.build.project_name,
            project_version=self.interpreter.build.dep_manifest[self.interpreter.active_projectname],
            # The backend object is under-used right now, but we will need it:
            # https://github.com/mesonbuild/meson/issues/1419
            backend=self.interpreter.backend,
            targets=self.interpreter.build.targets,
            data=self.interpreter.build.data,
            headers=self.interpreter.build.get_headers(),
            man=self.interpreter.build.get_man(),
            #global_args_for_build = self.interpreter.build.global_args.build,
            global_args = self.interpreter.build.global_args.host,
            #project_args_for_build = self.interpreter.build.projects_args.build.get(self.interpreter.subproject, {}),
            project_args = self.interpreter.build.projects_args.host.get(self.interpreter.subproject, {}),
            build_machine=self.interpreter.builtin['build_machine'].held_object,
            host_machine=self.interpreter.builtin['host_machine'].held_object,
            target_machine=self.interpreter.builtin['target_machine'].held_object,
            # NOTE(review): self.current_node is not set in __init__; presumably
            # assigned externally before method_call is invoked -- verify.
            current_node=self.current_node
        )
        # Many modules do for example self.interpreter.find_program_impl(),
        # so we have to ensure they use the current interpreter and not the one
        # that first imported that module, otherwise it will use outdated
        # overrides.
        self.held_object.interpreter = self.interpreter
        if self.held_object.is_snippet(method_name):
            value = fn(self.interpreter, state, args, kwargs)
            return self.interpreter.holderify(value)
        else:
            value = fn(state, args, kwargs)
            if num_targets != len(self.interpreter.build.targets):
                raise InterpreterException('Extension module altered internal state illegally.')
            return self.interpreter.module_method_callback(value)
class Summary:
    """Collects summary() entries per section and pretty-prints them at the
    end of configuration."""
    def __init__(self, project_name, project_version):
        self.project_name = project_name
        self.project_version = project_version
        # section name -> {key: (formatted_values, list_sep)}
        self.sections = collections.defaultdict(dict)
        # Longest key seen so far; used to align values in dump().
        self.max_key_len = 0
    def add_section(self, section, values, kwargs):
        """Add *values* (a dict) under *section*.

        Supported kwargs: bool_yn (render bools as colored YES/NO) and
        list_sep (separator string between list values). Raises
        InterpreterException on bad kwarg types, duplicate keys, or
        non-str/int/bool values.
        """
        bool_yn = kwargs.get('bool_yn', False)
        if not isinstance(bool_yn, bool):
            raise InterpreterException('bool_yn keyword argument must be boolean')
        list_sep = kwargs.get('list_sep')
        if list_sep is not None and not isinstance(list_sep, str):
            raise InterpreterException('list_sep keyword argument must be string')
        for k, v in values.items():
            if k in self.sections[section]:
                raise InterpreterException('Summary section {!r} already have key {!r}'.format(section, k))
            formatted_values = []
            for i in listify(v):
                # bool passes this check because it is a subclass of int.
                if not isinstance(i, (str, int)):
                    m = 'Summary value in section {!r}, key {!r}, must be string, integer or boolean'
                    raise InterpreterException(m.format(section, k))
                if bool_yn and isinstance(i, bool):
                    formatted_values.append(mlog.green('YES') if i else mlog.red('NO'))
                else:
                    formatted_values.append(i)
            self.sections[section][k] = (formatted_values, list_sep)
            self.max_key_len = max(self.max_key_len, len(k))
    def dump(self):
        """Print all collected sections, with keys right-aligned to
        max_key_len and list values joined by the chosen separator."""
        mlog.log(self.project_name, mlog.normal_cyan(self.project_version))
        for section, values in self.sections.items():
            mlog.log('') # newline
            if section:
                mlog.log(' ', mlog.bold(section))
            for k, v in values.items():
                v, list_sep = v
                indent = self.max_key_len - len(k) + 3
                end = ' ' if v else ''
                mlog.log(' ' * indent, k + ':', end=end)
                if list_sep is None:
                    # Default: one value per line, aligned under the first.
                    indent = self.max_key_len + 6
                    list_sep = '\n' + ' ' * indent
                mlog.log(*v, sep=list_sep)
        mlog.log('') # newline
class MesonMain(InterpreterObject):
    """Implements the `meson` builtin object available in meson.build files.

    Each entry registered in self.methods corresponds to a
    `meson.<name>()` call in the build definition language.
    """
    def __init__(self, build, interpreter):
        InterpreterObject.__init__(self)
        self.build = build
        self.interpreter = interpreter
        # Cache of resolved script programs, keyed by (prog, search_dir).
        self._found_source_scripts = {}
        self.methods.update({'get_compiler': self.get_compiler_method,
                             'is_cross_build': self.is_cross_build_method,
                             'has_exe_wrapper': self.has_exe_wrapper_method,
                             'is_unity': self.is_unity_method,
                             'is_subproject': self.is_subproject_method,
                             'current_source_dir': self.current_source_dir_method,
                             'current_build_dir': self.current_build_dir_method,
                             'source_root': self.source_root_method,
                             'build_root': self.build_root_method,
                             'add_install_script': self.add_install_script_method,
                             'add_postconf_script': self.add_postconf_script_method,
                             'add_dist_script': self.add_dist_script_method,
                             'install_dependency_manifest': self.install_dependency_manifest_method,
                             'override_dependency': self.override_dependency_method,
                             'override_find_program': self.override_find_program_method,
                             'project_version': self.project_version_method,
                             'project_license': self.project_license_method,
                             'version': self.version_method,
                             'project_name': self.project_name_method,
                             'get_cross_property': self.get_cross_property_method,
                             'get_external_property': self.get_external_property_method,
                             'backend': self.backend_method,
                             })
    def _find_source_script(self, prog: T.Union[str, ExecutableHolder], args):
        """Resolve *prog* (built executable, external program, or a name to
        look up in the current source dir) into a build.RunScript."""
        if isinstance(prog, ExecutableHolder):
            prog_path = self.interpreter.backend.get_target_filename(prog.held_object)
            return build.RunScript([prog_path], args)
        elif isinstance(prog, ExternalProgramHolder):
            return build.RunScript(prog.get_command(), args)
        # Prefer scripts in the current source directory
        search_dir = os.path.join(self.interpreter.environment.source_dir,
                                  self.interpreter.subdir)
        key = (prog, search_dir)
        if key in self._found_source_scripts:
            found = self._found_source_scripts[key]
        else:
            found = dependencies.ExternalProgram(prog, search_dir=search_dir)
            if found.found():
                self._found_source_scripts[key] = found
            else:
                m = 'Script or command {!r} not found or not executable'
                raise InterpreterException(m.format(prog))
        return build.RunScript(found.get_command(), args)
    def _process_script_args(
            self, name: str, args: T.List[T.Union[
                str, mesonlib.File, CustomTargetHolder,
                CustomTargetIndexHolder, ConfigureFileHolder,
                ExternalProgramHolder, ExecutableHolder,
            ]], allow_built: bool = False) -> T.List[str]:
        """Convert heterogeneous script arguments to plain strings.

        Raises InterpreterException for built artifacts when allow_built is
        False and for unsupported argument types; records a FeatureNew use
        when any non-string argument type is seen.
        """
        script_args = [] # T.List[str]
        new = False
        for a in args:
            a = unholder(a)
            if isinstance(a, str):
                script_args.append(a)
            elif isinstance(a, mesonlib.File):
                new = True
                script_args.append(a.rel_to_builddir(self.interpreter.environment.source_dir))
            elif isinstance(a, (build.BuildTarget, build.CustomTarget, build.CustomTargetIndex)):
                if not allow_built:
                    raise InterpreterException('Arguments to {} cannot be built'.format(name))
                new = True
                script_args.extend([os.path.join(a.get_subdir(), o) for o in a.get_outputs()])
                # This feels really hacky, but I'm not sure how else to fix
                # this without completely rewriting install script handling.
                # This is complicated by the fact that the install target
                # depends on all.
                if isinstance(a, build.CustomTargetIndex):
                    a.target.build_by_default = True
                else:
                    a.build_by_default = True
            elif isinstance(a, build.ConfigureFile):
                new = True
                script_args.append(os.path.join(a.subdir, a.targetname))
            elif isinstance(a, dependencies.ExternalProgram):
                script_args.extend(a.command)
                new = True
            else:
                raise InterpreterException(
                    'Arguments to {} must be strings, Files, CustomTargets, '
                    'Indexes of CustomTargets, or ConfigureFiles'.format(name))
        if new:
            FeatureNew('Calling "{}" with File, CustomTaget, Index of CustomTarget, ConfigureFile, Executable, or ExternalProgram'.format(name), '0.55.0').use(
                self.interpreter.subproject)
        return script_args
    @permittedKwargs(set())
    def add_install_script_method(self, args: 'T.Tuple[T.Union[str, ExecutableHolder], T.Union[str, mesonlib.File, CustomTargetHolder, CustomTargetIndexHolder, ConfigureFileHolder], ...]', kwargs):
        """meson.add_install_script(): register a script to run at install time."""
        if len(args) < 1:
            raise InterpreterException('add_install_script takes one or more arguments')
        script_args = self._process_script_args('add_install_script', args[1:], allow_built=True)
        script = self._find_source_script(args[0], script_args)
        self.build.install_scripts.append(script)
    @permittedKwargs(set())
    def add_postconf_script_method(self, args, kwargs):
        """meson.add_postconf_script(): register a script to run after configuration."""
        if len(args) < 1:
            raise InterpreterException('add_postconf_script takes one or more arguments')
        script_args = self._process_script_args('add_postconf_script', args[1:], allow_built=True)
        script = self._find_source_script(args[0], script_args)
        self.build.postconf_scripts.append(script)
    @permittedKwargs(set())
    def add_dist_script_method(self, args, kwargs):
        """meson.add_dist_script(): register a script to run when creating a dist
        archive. Not allowed inside subprojects."""
        if len(args) < 1:
            raise InterpreterException('add_dist_script takes one or more arguments')
        if len(args) > 1:
            FeatureNew('Calling "add_dist_script" with multiple arguments', '0.49.0').use(self.interpreter.subproject)
        if self.interpreter.subproject != '':
            raise InterpreterException('add_dist_script may not be used in a subproject.')
        script_args = self._process_script_args('add_dist_script', args[1:], allow_built=True)
        script = self._find_source_script(args[0], script_args)
        self.build.dist_scripts.append(script)
    @noPosargs
    @permittedKwargs({})
    def current_source_dir_method(self, args, kwargs):
        """meson.current_source_dir(): absolute path of the current source subdir."""
        src = self.interpreter.environment.source_dir
        sub = self.interpreter.subdir
        if sub == '':
            return src
        return os.path.join(src, sub)
    @noPosargs
    @permittedKwargs({})
    def current_build_dir_method(self, args, kwargs):
        """meson.current_build_dir(): absolute path of the current build subdir."""
        src = self.interpreter.environment.build_dir
        sub = self.interpreter.subdir
        if sub == '':
            return src
        return os.path.join(src, sub)
    @noPosargs
    @permittedKwargs({})
    def backend_method(self, args, kwargs):
        """meson.backend(): name of the backend in use."""
        return self.interpreter.backend.name
    @noPosargs
    @permittedKwargs({})
    def source_root_method(self, args, kwargs):
        """meson.source_root(): top-level source directory."""
        return self.interpreter.environment.source_dir
    @noPosargs
    @permittedKwargs({})
    def build_root_method(self, args, kwargs):
        """meson.build_root(): top-level build directory."""
        return self.interpreter.environment.build_dir
    @noPosargs
    @permittedKwargs({})
    def has_exe_wrapper_method(self, args, kwargs):
        """meson.has_exe_wrapper(): False only when cross-compiling, a wrapper
        is needed, and none is configured."""
        if self.is_cross_build_method(None, None) and \
           self.build.environment.need_exe_wrapper():
            if self.build.environment.exe_wrapper is None:
                return False
        # We return True when exe_wrap is defined, when it's not needed, and
        # when we're compiling natively. The last two are semantically confusing.
        # Need to revisit this.
        return True
    @noPosargs
    @permittedKwargs({})
    def is_cross_build_method(self, args, kwargs):
        """meson.is_cross_build(): whether this is a cross build."""
        return self.build.environment.is_cross_build()
    @permittedKwargs({'native'})
    def get_compiler_method(self, args, kwargs):
        """meson.get_compiler(lang): return the CompilerHolder for *lang* on
        the machine selected by the 'native' kwarg."""
        if len(args) != 1:
            raise InterpreterException('get_compiler_method must have one and only one argument.')
        cname = args[0]
        for_machine = Interpreter.machine_from_native_kwarg(kwargs)
        clist = self.interpreter.coredata.compilers[for_machine]
        if cname in clist:
            return CompilerHolder(clist[cname], self.build.environment, self.interpreter.subproject)
        raise InterpreterException('Tried to access compiler for unspecified language "%s".' % cname)
    @noPosargs
    @permittedKwargs({})
    def is_unity_method(self, args, kwargs):
        """meson.is_unity(): whether unity builds are enabled for this project."""
        optval = self.interpreter.environment.coredata.get_builtin_option('unity')
        if optval == 'on' or (optval == 'subprojects' and self.interpreter.is_subproject()):
            return True
        return False
    @noPosargs
    @permittedKwargs({})
    def is_subproject_method(self, args, kwargs):
        """meson.is_subproject(): whether we are interpreting a subproject."""
        return self.interpreter.is_subproject()
    @permittedKwargs({})
    def install_dependency_manifest_method(self, args, kwargs):
        """meson.install_dependency_manifest(name): set the manifest file name."""
        if len(args) != 1:
            raise InterpreterException('Must specify manifest install file name')
        if not isinstance(args[0], str):
            raise InterpreterException('Argument must be a string.')
        self.build.dep_manifest_name = args[0]
    @FeatureNew('meson.override_find_program', '0.46.0')
    @permittedKwargs({})
    def override_find_program_method(self, args, kwargs):
        """meson.override_find_program(name, exe): make future find_program(name)
        calls resolve to *exe* (a file, external program, or built executable)."""
        if len(args) != 2:
            raise InterpreterException('Override needs two arguments')
        name, exe = args
        if not isinstance(name, str):
            raise InterpreterException('First argument must be a string')
        if hasattr(exe, 'held_object'):
            exe = exe.held_object
        if isinstance(exe, mesonlib.File):
            abspath = exe.absolute_path(self.interpreter.environment.source_dir,
                                        self.interpreter.environment.build_dir)
            if not os.path.exists(abspath):
                raise InterpreterException('Tried to override %s with a file that does not exist.' % name)
            exe = OverrideProgram(abspath)
        if not isinstance(exe, (dependencies.ExternalProgram, build.Executable)):
            raise InterpreterException('Second argument must be an external program or executable.')
        self.interpreter.add_find_program_override(name, exe)
    @FeatureNew('meson.override_dependency', '0.54.0')
    @permittedKwargs({'native'})
    def override_dependency_method(self, args, kwargs):
        """meson.override_dependency(name, dep): make future dependency(name)
        calls resolve to *dep*. Fails if already resolved or overridden."""
        if len(args) != 2:
            raise InterpreterException('Override needs two arguments')
        name = args[0]
        dep = args[1]
        if not isinstance(name, str) or not name:
            raise InterpreterException('First argument must be a string and cannot be empty')
        if hasattr(dep, 'held_object'):
            dep = dep.held_object
        if not isinstance(dep, dependencies.Dependency):
            raise InterpreterException('Second argument must be a dependency object')
        identifier = dependencies.get_dep_identifier(name, kwargs)
        for_machine = self.interpreter.machine_from_native_kwarg(kwargs)
        override = self.build.dependency_overrides[for_machine].get(identifier)
        if override:
            m = 'Tried to override dependency {!r} which has already been resolved or overridden at {}'
            location = mlog.get_error_location_string(override.node.filename, override.node.lineno)
            raise InterpreterException(m.format(name, location))
        self.build.dependency_overrides[for_machine][identifier] = \
            build.DependencyOverride(dep, self.interpreter.current_node)
    @noPosargs
    @permittedKwargs({})
    def project_version_method(self, args, kwargs):
        """meson.project_version(): version string of the active project."""
        return self.build.dep_manifest[self.interpreter.active_projectname]['version']
    @FeatureNew('meson.project_license()', '0.45.0')
    @noPosargs
    @permittedKwargs({})
    def project_license_method(self, args, kwargs):
        """meson.project_license(): license list of the active project."""
        return self.build.dep_manifest[self.interpreter.active_projectname]['license']
    @noPosargs
    @permittedKwargs({})
    def version_method(self, args, kwargs):
        """meson.version(): the running Meson version."""
        return coredata.version
    @noPosargs
    @permittedKwargs({})
    def project_name_method(self, args, kwargs):
        """meson.project_name(): name of the active project."""
        return self.interpreter.active_projectname
    @noArgsFlattening
    @permittedKwargs({})
    def get_cross_property_method(self, args, kwargs) -> str:
        """meson.get_cross_property(name[, fallback]): look up a host-machine
        property; return the fallback (if given) on a missing key."""
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Must have one or two arguments.')
        propname = args[0]
        if not isinstance(propname, str):
            raise InterpreterException('Property name must be string.')
        try:
            props = self.interpreter.environment.properties.host
            return props[propname]
        except Exception:
            if len(args) == 2:
                return args[1]
            raise InterpreterException('Unknown cross property: %s.' % propname)
    @noArgsFlattening
    @permittedKwargs({'native'})
    @FeatureNew('meson.get_external_property', '0.54.0')
    def get_external_property_method(self, args: T.Sequence[str], kwargs: dict) -> str:
        """meson.get_external_property(name[, fallback]): machine-aware property
        lookup; the 'native' kwarg selects build vs host machine properties."""
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Must have one or two positional arguments.')
        propname = args[0]
        if not isinstance(propname, str):
            raise InterpreterException('Property name must be string.')
        def _get_native() -> str:
            # Build-machine lookup with the same fallback behavior as
            # get_cross_property_method.
            try:
                props = self.interpreter.environment.properties.build
                return props[propname]
            except Exception:
                if len(args) == 2:
                    return args[1]
                raise InterpreterException('Unknown native property: %s.' % propname)
        if 'native' in kwargs:
            if kwargs['native']:
                return _get_native()
            else:
                return self.get_cross_property_method(args, {})
        else: # native: not specified
            if self.build.environment.is_cross_build():
                return self.get_cross_property_method(args, kwargs)
            else:
                return _get_native()
# Keyword arguments accepted by both shared and static library targets.
known_library_kwargs = (
    build.known_shlib_kwargs |
    build.known_stlib_kwargs
)
# Union of all keyword arguments accepted by any build_target() type.
known_build_target_kwargs = (
    known_library_kwargs |
    build.known_exe_kwargs |
    build.known_jar_kwargs |
    {'target_type'}
)
# Keyword arguments shared by test() and benchmark().
_base_test_args = {'args', 'depends', 'env', 'should_fail', 'timeout', 'workdir', 'suite', 'priority', 'protocol'}
# Maps each interpreter function name to the set of keyword arguments it
# accepts; enforced via @permittedKwargs(permitted_kwargs['<name>']).
permitted_kwargs = {'add_global_arguments': {'language', 'native'},
                    'add_global_link_arguments': {'language', 'native'},
                    'add_languages': {'required', 'native'},
                    'add_project_link_arguments': {'language', 'native'},
                    'add_project_arguments': {'language', 'native'},
                    'add_test_setup': {'exe_wrapper', 'gdb', 'timeout_multiplier', 'env', 'is_default'},
                    'benchmark': _base_test_args,
                    'build_target': known_build_target_kwargs,
                    'configure_file': {'input',
                                       'output',
                                       'configuration',
                                       'command',
                                       'copy',
                                       'depfile',
                                       'install_dir',
                                       'install_mode',
                                       'capture',
                                       'install',
                                       'format',
                                       'output_format',
                                       'encoding'},
                    'custom_target': {'input',
                                      'output',
                                      'command',
                                      'install',
                                      'install_dir',
                                      'install_mode',
                                      'build_always',
                                      'capture',
                                      'depends',
                                      'depend_files',
                                      'depfile',
                                      'build_by_default',
                                      'build_always_stale',
                                      'console'},
                    'dependency': {'default_options',
                                   'embed',
                                   'fallback',
                                   'language',
                                   'main',
                                   'method',
                                   'modules',
                                   'components',
                                   'cmake_module_path',
                                   'optional_modules',
                                   'native',
                                   'not_found_message',
                                   'required',
                                   'static',
                                   'version',
                                   'private_headers',
                                   'cmake_args',
                                   'include_type',
                                   },
                    'declare_dependency': {'include_directories',
                                           'link_with',
                                           'sources',
                                           'dependencies',
                                           'compile_args',
                                           'link_args',
                                           'link_whole',
                                           'version',
                                           'variables',
                                           },
                    'executable': build.known_exe_kwargs,
                    'find_program': {'required', 'native', 'version', 'dirs'},
                    'generator': {'arguments',
                                  'output',
                                  'depends',
                                  'depfile',
                                  'capture',
                                  'preserve_path_from'},
                    'include_directories': {'is_system'},
                    'install_data': {'install_dir', 'install_mode', 'rename', 'sources'},
                    'install_headers': {'install_dir', 'install_mode', 'subdir'},
                    'install_man': {'install_dir', 'install_mode'},
                    'install_subdir': {'exclude_files', 'exclude_directories', 'install_dir', 'install_mode', 'strip_directory'},
                    'jar': build.known_jar_kwargs,
                    'project': {'version', 'meson_version', 'default_options', 'license', 'subproject_dir'},
                    'run_command': {'check', 'capture', 'env'},
                    'run_target': {'command', 'depends'},
                    'shared_library': build.known_shlib_kwargs,
                    'shared_module': build.known_shmod_kwargs,
                    'static_library': build.known_stlib_kwargs,
                    'both_libraries': known_library_kwargs,
                    'library': known_library_kwargs,
                    'subdir': {'if_found'},
                    'subproject': {'version', 'default_options', 'required'},
                    'test': set.union(_base_test_args, {'is_parallel'}),
                    'vcs_tag': {'input', 'output', 'fallback', 'command', 'replace_string'},
                    }
class Interpreter(InterpreterBase):
    def __init__(self, build, backend=None, subproject='', subdir='', subproject_dir='subprojects',
                 modules = None, default_project_options=None, mock=False, ast=None):
        """Set up interpreter state and, unless mocking, parse the project.

        mock=True skips loading and parsing; a pre-built *ast* can be supplied
        instead of reading the root meson.build file.
        """
        super().__init__(build.environment.get_source_dir(), subdir, subproject)
        self.an_unpicklable_object = mesonlib.an_unpicklable_object
        self.build = build
        self.environment = build.environment
        self.coredata = self.environment.get_coredata()
        self.backend = backend
        self.summary = {}
        if modules is None:
            self.modules = {}
        else:
            self.modules = modules
        # Subproject directory is usually the name of the subproject, but can
        # be different for dependencies provided by wrap files.
        self.subproject_directory_name = subdir.split(os.path.sep)[-1]
        self.subproject_dir = subproject_dir
        self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt')
        if not mock and ast is None:
            self.load_root_meson_file()
            self.sanity_check_ast()
        elif ast is not None:
            self.ast = ast
            self.sanity_check_ast()
        self.builtin.update({'meson': MesonMain(build, self)})
        self.generators = []
        self.visited_subdirs = {}
        self.project_args_frozen = False
        self.global_args_frozen = False # implies self.project_args_frozen
        self.subprojects = {}
        self.subproject_stack = []
        self.configure_file_outputs = {}
        # Passed from the outside, only used in subprojects.
        if default_project_options:
            self.default_project_options = default_project_options.copy()
        else:
            self.default_project_options = {}
        self.project_default_options = {}
        self.build_func_dict()
        # build_def_files needs to be defined before parse_project is called
        self.build_def_files = [os.path.join(self.subdir, environment.build_filename)]
        if not mock:
            self.parse_project()
        self._redetect_machines()
    def _redetect_machines(self):
        """Refresh machine info and the machine builtin objects after
        compilers are known."""
        # Re-initialize machine descriptions. We can do a better job now because we
        # have the compilers needed to gain more knowledge, so wipe out old
        # inference and start over.
        machines = self.build.environment.machines.miss_defaulting()
        machines.build = environment.detect_machine_info(self.coredata.compilers.build)
        self.build.environment.machines = machines.default_missing()
        assert self.build.environment.machines.build.cpu is not None
        assert self.build.environment.machines.host.cpu is not None
        assert self.build.environment.machines.target.cpu is not None
        self.builtin['build_machine'] = \
            MachineHolder(self.build.environment.machines.build)
        self.builtin['host_machine'] = \
            MachineHolder(self.build.environment.machines.host)
        self.builtin['target_machine'] = \
            MachineHolder(self.build.environment.machines.target)
def get_non_matching_default_options(self):
env = self.environment
for def_opt_name, def_opt_value in self.project_default_options.items():
for opts in env.coredata.get_all_options():
cur_opt_value = opts.get(def_opt_name)
if cur_opt_value is not None:
def_opt_value = env.coredata.validate_option_value(def_opt_name, def_opt_value)
if def_opt_value != cur_opt_value.value:
yield (def_opt_name, def_opt_value, cur_opt_value)
    def build_func_dict(self):
        """Register all meson.build language functions in self.funcs."""
        self.funcs.update({'add_global_arguments': self.func_add_global_arguments,
                           'add_project_arguments': self.func_add_project_arguments,
                           'add_global_link_arguments': self.func_add_global_link_arguments,
                           'add_project_link_arguments': self.func_add_project_link_arguments,
                           'add_test_setup': self.func_add_test_setup,
                           'add_languages': self.func_add_languages,
                           'alias_target': self.func_alias_target,
                           'assert': self.func_assert,
                           'benchmark': self.func_benchmark,
                           'build_target': self.func_build_target,
                           'configuration_data': self.func_configuration_data,
                           'configure_file': self.func_configure_file,
                           'custom_target': self.func_custom_target,
                           'declare_dependency': self.func_declare_dependency,
                           'dependency': self.func_dependency,
                           'disabler': self.func_disabler,
                           'environment': self.func_environment,
                           'error': self.func_error,
                           'executable': self.func_executable,
                           'generator': self.func_generator,
                           'gettext': self.func_gettext,
                           'get_option': self.func_get_option,
                           'get_variable': self.func_get_variable,
                           'files': self.func_files,
                           'find_library': self.func_find_library,
                           'find_program': self.func_find_program,
                           'include_directories': self.func_include_directories,
                           'import': self.func_import,
                           'install_data': self.func_install_data,
                           'install_headers': self.func_install_headers,
                           'install_man': self.func_install_man,
                           'install_subdir': self.func_install_subdir,
                           'is_disabler': self.func_is_disabler,
                           'is_variable': self.func_is_variable,
                           'jar': self.func_jar,
                           'join_paths': self.func_join_paths,
                           'library': self.func_library,
                           'message': self.func_message,
                           'warning': self.func_warning,
                           'option': self.func_option,
                           'project': self.func_project,
                           'run_target': self.func_run_target,
                           'run_command': self.func_run_command,
                           'set_variable': self.func_set_variable,
                           'subdir': self.func_subdir,
                           'subdir_done': self.func_subdir_done,
                           'subproject': self.func_subproject,
                           'summary': self.func_summary,
                           'shared_library': self.func_shared_lib,
                           'shared_module': self.func_shared_module,
                           'static_library': self.func_static_lib,
                           'both_libraries': self.func_both_lib,
                           'test': self.func_test,
                           'vcs_tag': self.func_vcs_tag
                           })
        # The 'exception' function is only exposed when running unit tests.
        if 'MESON_UNIT_TEST' in os.environ:
            self.funcs.update({'exception': self.func_exception})
    def holderify(self, item):
        """Wrap a raw build/dependency object (or container of them) in its
        interpreter holder class; primitives pass through unchanged.

        Raises InterpreterException for types no holder exists for, and
        RuntimeError for RunTarget/RunScript which modules must not return.
        """
        if isinstance(item, list):
            return [self.holderify(x) for x in item]
        if isinstance(item, dict):
            return {k: self.holderify(v) for k, v in item.items()}
        if isinstance(item, build.CustomTarget):
            return CustomTargetHolder(item, self)
        elif isinstance(item, (int, str, bool, Disabler)) or item is None:
            return item
        elif isinstance(item, build.Executable):
            return ExecutableHolder(item, self)
        elif isinstance(item, build.GeneratedList):
            return GeneratedListHolder(item)
        elif isinstance(item, build.RunTarget):
            raise RuntimeError('This is not a pipe.')
        elif isinstance(item, build.RunScript):
            raise RuntimeError('Do not do this.')
        elif isinstance(item, build.Data):
            return DataHolder(item)
        elif isinstance(item, dependencies.Dependency):
            return DependencyHolder(item, self.subproject)
        elif isinstance(item, dependencies.ExternalProgram):
            return ExternalProgramHolder(item, self.subproject)
        elif hasattr(item, 'held_object'):
            # Already a holder; return as-is.
            return item
        else:
            raise InterpreterException('Module returned a value of unknown type.')
    def process_new_values(self, invalues):
        """Register objects returned by a module call with the build: targets
        are added, scripts and data installed, primitives ignored.

        Raises InterpreterException for values of unknown type.
        """
        invalues = listify(invalues)
        for v in invalues:
            if isinstance(v, (RunTargetHolder, CustomTargetHolder, BuildTargetHolder)):
                v = v.held_object
            if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)):
                self.add_target(v.name, v)
            elif isinstance(v, list):
                self.module_method_callback(v)
            elif isinstance(v, build.GeneratedList):
                pass
            elif isinstance(v, build.RunScript):
                self.build.install_scripts.append(v)
            elif isinstance(v, build.Data):
                self.build.data.append(v)
            elif isinstance(v, dependencies.ExternalProgram):
                # NOTE(review): this returns from inside the loop, so any
                # values after an ExternalProgram are never processed -- verify
                # whether that early return is intentional.
                return ExternalProgramHolder(v, self.subproject)
            elif isinstance(v, dependencies.InternalDependency):
                # FIXME: This is special cased and not ideal:
                # The first source is our new VapiTarget, the rest are deps
                self.process_new_values(v.sources[0])
            elif hasattr(v, 'held_object'):
                pass
            elif isinstance(v, (int, str, bool, Disabler)):
                pass
            else:
                raise InterpreterException('Module returned a value of unknown type.')
def module_method_callback(self, return_object):
if not isinstance(return_object, ModuleReturnValue):
raise InterpreterException('Bug in module, it returned an invalid object')
invalues = return_object.new_objects
self.process_new_values(invalues)
return self.holderify(return_object.return_value)
    def get_build_def_files(self):
        """Return the list of files that define the build (for regen tracking)."""
        return self.build_def_files
    def add_build_def_file(self, f):
        """Record *f* (str path or mesonlib.File) as a build-definition file so
        changes to it trigger reconfiguration; duplicates and built files are
        skipped."""
        # Use relative path for files within source directory, and absolute path
        # for system files. Skip files within build directory. Also skip not regular
        # files (e.g. /dev/stdout) Normalize the path to avoid duplicates, this
        # is especially important to convert '/' to '\' on Windows.
        if isinstance(f, mesonlib.File):
            if f.is_built:
                return
            f = os.path.normpath(f.relative_name())
        elif os.path.isfile(f) and not f.startswith('/dev'):
            srcdir = Path(self.environment.get_source_dir())
            builddir = Path(self.environment.get_build_dir())
            f = Path(f).resolve()
            if builddir in f.parents:
                return
            if srcdir in f.parents:
                f = f.relative_to(srcdir)
            f = str(f)
        else:
            # Not a regular file (or a /dev pseudo-file): ignore.
            return
        if f not in self.build_def_files:
            self.build_def_files.append(f)
    def get_variables(self):
        """Return the interpreter's variable namespace dict."""
        return self.variables
    def check_stdlibs(self):
        """For each configured language, resolve any '<lang>_stdlib' machine
        property into a subproject dependency stored in build.stdlibs.host.

        Missing or malformed stdlib properties are silently ignored
        (KeyError/InvalidArguments are swallowed).
        """
        for for_machine in MachineChoice:
            props = self.build.environment.properties[for_machine]
            for l in self.coredata.compilers[for_machine].keys():
                try:
                    di = mesonlib.stringlistify(props.get_stdlib(l))
                    if len(di) != 2:
                        raise InterpreterException('Stdlib definition for %s should have exactly two elements.'
                                                   % l)
                    projname, depname = di
                    subproj = self.do_subproject(projname, 'meson', {})
                    self.build.stdlibs.host[l] = subproj.get_variable_method([depname], {})
                except KeyError:
                    # No stdlib property defined for this language.
                    pass
                except InvalidArguments:
                    pass
    @stringArgs
    @noKwargs
    def func_import(self, node, args, kwargs):
        """Implement import(): load a mesonbuild.modules submodule (caching it
        in self.modules) and return it wrapped in a ModuleHolder.

        'unstable-foo' names map to the 'unstable_foo' module and emit a
        stability warning. Raises InvalidCode/InvalidArguments on bad input.
        """
        if len(args) != 1:
            raise InvalidCode('Import takes one argument.')
        modname = args[0]
        if modname.startswith('unstable-'):
            plainname = modname.split('-', 1)[1]
            mlog.warning('Module %s has no backwards or forwards compatibility and might not exist in future releases.' % modname, location=node)
            modname = 'unstable_' + plainname
        if modname not in self.modules:
            try:
                module = importlib.import_module('mesonbuild.modules.' + modname)
            except ImportError:
                raise InvalidArguments('Module "%s" does not exist' % (modname, ))
            self.modules[modname] = module.initialize(self)
        return ModuleHolder(modname, self.modules[modname], self)
@stringArgs
@noKwargs
def func_files(self, node, args, kwargs):
return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args]
@FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole'])
@FeatureNewKwargs('declare_dependency', '0.54.0', ['variables'])
@permittedKwargs(permitted_kwargs['declare_dependency'])
@noPosargs
def func_declare_dependency(self, node, args, kwargs):
version = kwargs.get('version', self.project_version)
if not isinstance(version, str):
raise InterpreterException('Version must be a string.')
incs = self.extract_incdirs(kwargs)
libs = unholder(extract_as_list(kwargs, 'link_with'))
libs_whole = unholder(extract_as_list(kwargs, 'link_whole'))
sources = extract_as_list(kwargs, 'sources')
sources = unholder(listify(self.source_strings_to_files(sources)))
deps = unholder(extract_as_list(kwargs, 'dependencies'))
compile_args = mesonlib.stringlistify(kwargs.get('compile_args', []))
link_args = mesonlib.stringlistify(kwargs.get('link_args', []))
variables = kwargs.get('variables', {})
if not isinstance(variables, dict):
raise InterpreterException('variables must be a dict.')
if not all(isinstance(v, str) for v in variables.values()):
# Because that is how they will come from pkg-config and cmake
raise InterpreterException('variables values be strings.')
final_deps = []
for d in deps:
try:
d = d.held_object
except Exception:
pass
if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)):
raise InterpreterException('Dependencies must be external deps')
final_deps.append(d)
for l in libs:
if isinstance(l, dependencies.Dependency):
raise InterpreterException('''Entries in "link_with" may only be self-built targets,
external dependencies (including libraries) must go to "dependencies".''')
dep = dependencies.InternalDependency(version, incs, compile_args,
link_args, libs, libs_whole, sources, final_deps,
variables)
return DependencyHolder(dep, self.subproject)
    @noKwargs
    def func_assert(self, node, args, kwargs):
        """Implement assert(): raise InterpreterException if the (boolean)
        first argument is false; the optional second argument is the message.

        With no message, the failing expression's source text is reconstructed
        via the AST printer and used as the message.
        """
        if len(args) == 1:
            FeatureNew('assert function without message argument', '0.53.0').use(self.subproject)
            value = args[0]
            message = None
        elif len(args) == 2:
            value, message = args
            if not isinstance(message, str):
                raise InterpreterException('Assert message not a string.')
        else:
            raise InterpreterException('Assert takes between one and two arguments')
        if not isinstance(value, bool):
            raise InterpreterException('Assert value not bool.')
        if not value:
            if message is None:
                from .ast import AstPrinter
                printer = AstPrinter()
                node.args.arguments[0].accept(printer)
                message = printer.result
            raise InterpreterException('Assert failed: ' + message)
def validate_arguments(self, args, argcount, arg_types):
    """Check positional argument count and per-position types.

    `argcount` of None skips the count check; a None entry in `arg_types`
    skips the type check for that position. Raises InvalidArguments.
    """
    if argcount is not None and argcount != len(args):
        raise InvalidArguments('Expected %d arguments, got %d.' %
                               (argcount, len(args)))
    for actual, wanted in zip(args, arg_types):
        if wanted is not None and not isinstance(actual, wanted):
            raise InvalidArguments('Incorrect argument type.')
@FeatureNewKwargs('run_command', '0.50.0', ['env'])
@FeatureNewKwargs('run_command', '0.47.0', ['check', 'capture'])
@permittedKwargs(permitted_kwargs['run_command'])
def func_run_command(self, node, args, kwargs):
    """Implement the run_command() builtin by delegating to run_command_impl()."""
    return self.run_command_impl(node, args, kwargs)
def run_command_impl(self, node, args, kwargs, in_builddir=False):
    """Run an external command at configure time and return a RunProcess.

    The first positional argument may be a string, a File, a found program
    holder, or a compiler object; the remaining arguments become the command
    line. Raises InterpreterException for bad input or missing programs.
    """
    if len(args) < 1:
        raise InterpreterException('Not enough arguments')
    cmd, *cargs = args
    capture = kwargs.get('capture', True)
    srcdir = self.environment.get_source_dir()
    builddir = self.environment.get_build_dir()
    check = kwargs.get('check', False)
    if not isinstance(check, bool):
        raise InterpreterException('Check must be boolean.')
    env = self.unpack_env_kwarg(kwargs)
    # Shared suffix for "bad first argument / bad argument" error messages.
    m = 'must be a string, or the output of find_program(), files() '\
        'or configure_file(), or a compiler object; not {!r}'
    expanded_args = []
    if isinstance(cmd, ExternalProgramHolder):
        cmd = cmd.held_object
        if isinstance(cmd, build.Executable):
            # A compiled target cannot run during configuration: it is not built yet.
            progname = node.args.arguments[0].value
            msg = 'Program {!r} was overridden with the compiled executable {!r}'\
                  ' and therefore cannot be used during configuration'
            raise InterpreterException(msg.format(progname, cmd.description()))
        if not cmd.found():
            raise InterpreterException('command {!r} not found or not executable'.format(cmd.get_name()))
    elif isinstance(cmd, CompilerHolder):
        # The first element of the compiler command line is the program;
        # the rest become implicit leading arguments.
        exelist = cmd.compiler.get_exelist()
        cmd = exelist[0]
        prog = ExternalProgram(cmd, silent=True)
        if not prog.found():
            raise InterpreterException('Program {!r} not found '
                                       'or not executable'.format(cmd))
        cmd = prog
        expanded_args = exelist[1:]
    else:
        if isinstance(cmd, mesonlib.File):
            cmd = cmd.absolute_path(srcdir, builddir)
        elif not isinstance(cmd, str):
            raise InterpreterException('First argument ' + m.format(cmd))
        # Prefer scripts in the current source directory
        search_dir = os.path.join(srcdir, self.subdir)
        prog = ExternalProgram(cmd, silent=True, search_dir=search_dir)
        if not prog.found():
            raise InterpreterException('Program or command {!r} not found '
                                       'or not executable'.format(cmd))
        cmd = prog
    # Expand remaining arguments: strings pass through, Files become absolute
    # paths, found programs contribute their path.
    for a in listify(cargs):
        if isinstance(a, str):
            expanded_args.append(a)
        elif isinstance(a, mesonlib.File):
            expanded_args.append(a.absolute_path(srcdir, builddir))
        elif isinstance(a, ExternalProgramHolder):
            expanded_args.append(a.held_object.get_path())
        else:
            raise InterpreterException('Arguments ' + m.format(a))
    # If any file that was used as an argument to the command
    # changes, we must re-run the configuration step.
    self.add_build_def_file(cmd.get_path())
    for a in expanded_args:
        if not os.path.isabs(a):
            a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a)
        self.add_build_def_file(a)
    return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir,
                      self.environment.get_build_command() + ['introspect'],
                      in_builddir=in_builddir, check=check, capture=capture)
@stringArgs
def func_gettext(self, nodes, args, kwargs):
    """Removed builtin: direct users to the i18n module replacement."""
    msg = ('Gettext() function has been moved to module i18n. '
           'Import it and use i18n.gettext() instead')
    raise InterpreterException(msg)
def func_option(self, nodes, args, kwargs):
    """option() is only legal in the option file, never in a build file."""
    err = ('Tried to call option() in build description file. '
           'All options must be in the option file.')
    raise InterpreterException(err)
@FeatureNewKwargs('subproject', '0.38.0', ['default_options'])
@permittedKwargs(permitted_kwargs['subproject'])
@stringArgs
def func_subproject(self, nodes, args, kwargs):
    """Entry point for subproject(); delegates to do_subproject() with the meson method."""
    if len(args) != 1:
        raise InterpreterException('Subproject takes exactly one argument')
    return self.do_subproject(args[0], 'meson', kwargs)
def disabled_subproject(self, dirname, disabled_feature=None, exception=None):
    """Register and return a placeholder holder for a subproject that was not run."""
    holder = SubprojectHolder(None, self.subproject_dir, dirname,
                              disabled_feature=disabled_feature,
                              exception=exception)
    self.subprojects[dirname] = holder
    return holder
def do_subproject(self, dirname: str, method: str, kwargs):
    """Resolve and configure the subproject `dirname` via `method` ('meson' or 'cmake').

    Returns a SubprojectHolder. When the subproject is disabled, or fails and
    is not required, a not-found holder is returned instead of raising.
    """
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        mlog.log('Subproject', mlog.bold(dirname), ':', 'skipped: feature', mlog.bold(feature), 'disabled')
        return self.disabled_subproject(dirname, disabled_feature=feature)
    default_options = mesonlib.stringlistify(kwargs.get('default_options', []))
    default_options = coredata.create_options_dict(default_options)
    # Validate the name so it cannot escape the subprojects directory.
    if dirname == '':
        raise InterpreterException('Subproject dir name must not be empty.')
    if dirname[0] == '.':
        raise InterpreterException('Subproject dir name must not start with a period.')
    if '..' in dirname:
        raise InterpreterException('Subproject name must not contain a ".." path segment.')
    if os.path.isabs(dirname):
        raise InterpreterException('Subproject name must not be an absolute path.')
    if has_path_sep(dirname):
        mlog.warning('Subproject name has a path separator. This may cause unexpected behaviour.',
                     location=self.current_node)
    if dirname in self.subproject_stack:
        fullstack = self.subproject_stack + [dirname]
        incpath = ' => '.join(fullstack)
        raise InvalidCode('Recursive include of subprojects: %s.' % incpath)
    if dirname in self.subprojects:
        # Already configured earlier in this run; reuse the existing holder.
        subproject = self.subprojects[dirname]
        if required and not subproject.found():
            raise InterpreterException('Subproject "%s/%s" required but not found.' % (
                                       self.subproject_dir, dirname))
        return subproject
    subproject_dir_abs = os.path.join(self.environment.get_source_dir(), self.subproject_dir)
    r = wrap.Resolver(subproject_dir_abs, self.coredata.get_builtin_option('wrap_mode'))
    try:
        resolved = r.resolve(dirname, method)
    except wrap.WrapException as e:
        subprojdir = os.path.join(self.subproject_dir, r.directory)
        if isinstance(e, wrap.WrapNotFoundException):
            # if the reason subproject execution failed was because
            # the directory doesn't exist, try to give some helpful
            # advice if it's a nested subproject that needs
            # promotion...
            self.print_nested_info(dirname)
        if not required:
            mlog.log(e)
            mlog.log('Subproject ', mlog.bold(subprojdir), 'is buildable:', mlog.red('NO'), '(disabling)')
            return self.disabled_subproject(dirname, exception=e)
        raise e
    subdir = os.path.join(self.subproject_dir, resolved)
    subdir_abs = os.path.join(subproject_dir_abs, resolved)
    os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True)
    # Subprojects must not be affected by global args added after this point.
    self.global_args_frozen = True
    mlog.log()
    with mlog.nested():
        mlog.log('Executing subproject', mlog.bold(dirname), 'method', mlog.bold(method), '\n')
    try:
        if method == 'meson':
            return self._do_subproject_meson(dirname, subdir, default_options, kwargs)
        elif method == 'cmake':
            return self._do_subproject_cmake(dirname, subdir, subdir_abs, default_options, kwargs)
        else:
            raise InterpreterException('The method {} is invalid for the subproject {}'.format(method, dirname))
    # Invalid code is always an error
    except InvalidCode:
        raise
    except Exception as e:
        if not required:
            with mlog.nested():
                # Suppress the 'ERROR:' prefix because this exception is not
                # fatal and VS CI treat any logs with "ERROR:" as fatal.
                mlog.exception(e, prefix=mlog.yellow('Exception:'))
            mlog.log('\nSubproject', mlog.bold(dirname), 'is buildable:', mlog.red('NO'), '(disabling)')
            return self.disabled_subproject(dirname, exception=e)
        raise e
def _do_subproject_meson(self, dirname, subdir, default_options, kwargs, ast=None, build_def_files=None):
    """Run a nested Interpreter over a Meson subproject and merge its results.

    `ast`/`build_def_files` are supplied by the CMake path when the build
    definition was generated rather than read from disk.
    """
    with mlog.nested():
        new_build = self.build.copy()
        subi = Interpreter(new_build, self.backend, dirname, subdir, self.subproject_dir,
                           self.modules, default_options, ast=ast)
        subi.subprojects = self.subprojects
        subi.subproject_stack = self.subproject_stack + [dirname]
        current_active = self.active_projectname
        # Count warnings emitted by the subproject separately; mlog's counter
        # is global, so save and restore ours around the nested run.
        current_warnings_counter = mlog.log_warnings_counter
        mlog.log_warnings_counter = 0
        subi.run()
        subi_warnings = mlog.log_warnings_counter
        mlog.log_warnings_counter = current_warnings_counter
        mlog.log('Subproject', mlog.bold(dirname), 'finished.')
    mlog.log()
    if 'version' in kwargs:
        pv = subi.project_version
        wanted = kwargs['version']
        if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
            raise InterpreterException('Subproject %s version is %s but %s required.' % (dirname, pv, wanted))
    self.active_projectname = current_active
    self.subprojects.update(subi.subprojects)
    self.subprojects[dirname] = SubprojectHolder(subi, self.subproject_dir, dirname,
                                                 warnings=subi_warnings)
    # Duplicates are possible when subproject uses files from project root
    if build_def_files:
        self.build_def_files = list(set(self.build_def_files + build_def_files))
    else:
        self.build_def_files = list(set(self.build_def_files + subi.build_def_files))
    self.build.merge(subi.build)
    self.build.subprojects[dirname] = subi.project_version
    self.summary.update(subi.summary)
    return self.subprojects[dirname]
def _do_subproject_cmake(self, dirname, subdir, subdir_abs, default_options, kwargs):
    """Configure a CMake subproject by translating it to a Meson AST.

    The CMakeInterpreter analyses the CMake project, produces an equivalent
    meson AST, and the result is executed through _do_subproject_meson().
    """
    with mlog.nested():
        new_build = self.build.copy()
        prefix = self.coredata.builtins['prefix'].value
        cmake_options = mesonlib.stringlistify(kwargs.get('cmake_options', []))
        cm_int = CMakeInterpreter(new_build, subdir, subdir_abs, prefix, new_build.environment, self.backend)
        cm_int.initialise(cmake_options)
        cm_int.analyse()
        # Generate a meson ast and execute it with the normal do_subproject_meson
        ast = cm_int.pretend_to_be_meson()
        mlog.log()
        with mlog.nested():
            mlog.log('Processing generated meson AST')
            # Debug print the generated meson file
            from .ast import AstIndentationGenerator, AstPrinter
            printer = AstPrinter()
            ast.accept(AstIndentationGenerator())
            ast.accept(printer)
            printer.post_process()
            # Write the rendered meson.build into the build dir so users can
            # inspect what the CMake project was translated into.
            meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build')
            with open(meson_filename, "w") as f:
                f.write(printer.result)
            mlog.log('Build file:', meson_filename)
            mlog.cmd_ci_include(meson_filename)
            mlog.log()
        result = self._do_subproject_meson(dirname, subdir, default_options, kwargs, ast, cm_int.bs_files)
        # Keep the CMake interpreter around for later queries (e.g. targets).
        result.cm_interpreter = cm_int
    mlog.log()
    return result
def get_option_internal(self, optname):
    """Look up an option by name, honouring subproject prefixes and yielding.

    In a subproject the name is first tried with the 'subproject:' prefix;
    yielding options fall back to the parent project's value. Raises
    InterpreterException for unknown options.
    """
    raw_optname = optname
    if self.is_subproject():
        optname = self.subproject + ':' + optname
    # Built-in, base and per-machine compiler options: prefer the
    # subproject-prefixed name unless it is absent or yields to the parent.
    for opts in [
        self.coredata.base_options, compilers.base_options, self.coredata.builtins,
        dict(self.coredata.get_prefixed_options_per_machine(self.coredata.builtins_per_machine)),
        dict(self.coredata.flatten_lang_iterator(
            self.coredata.get_prefixed_options_per_machine(self.coredata.compiler_options))),
    ]:
        v = opts.get(optname)
        if v is None or v.yielding:
            v = opts.get(raw_optname)
        if v is not None:
            return v
    try:
        opt = self.coredata.user_options[optname]
        if opt.yielding and ':' in optname and raw_optname in self.coredata.user_options:
            popt = self.coredata.user_options[raw_optname]
            if type(opt) is type(popt):
                opt = popt
            else:
                # Get class name, then option type as a string
                opt_type = opt.__class__.__name__[4:][:-6].lower()
                popt_type = popt.__class__.__name__[4:][:-6].lower()
                # This is not a hard error to avoid dependency hell, the workaround
                # when this happens is to simply set the subproject's option directly.
                mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield '
                             'to parent option of type {3!r}, ignoring parent value. '
                             'Use -D{2}:{0}=value to set the value for this option manually'
                             '.'.format(raw_optname, opt_type, self.subproject, popt_type),
                             location=self.current_node)
        return opt
    except KeyError:
        pass
    raise InterpreterException('Tried to access unknown option "%s".' % optname)
@stringArgs
@noKwargs
def func_get_option(self, nodes, args, kwargs):
    """Implement get_option(); feature options get a holder, user options their value."""
    if len(args) != 1:
        raise InterpreterException('Argument required for get_option.')
    name = args[0]
    if ':' in name:
        raise InterpreterException('Having a colon in option name is forbidden, '
                                   'projects are not allowed to directly access '
                                   'options of other subprojects.')
    result = self.get_option_internal(name)
    # Order of the isinstance checks matters: test the more specific
    # feature-option type before the generic user-option type.
    if isinstance(result, coredata.UserFeatureOption):
        return FeatureOptionHolder(self.environment, name, result)
    if isinstance(result, coredata.UserOption):
        return result.value
    return result
@noKwargs
def func_configuration_data(self, node, args, kwargs):
    """Implement configuration_data(), optionally seeded from a dictionary (0.49+)."""
    if len(args) > 1:
        raise InterpreterException('configuration_data takes only one optional positional arguments')
    initial_values = {}
    if args:
        FeatureNew('configuration_data dictionary', '0.49.0').use(self.subproject)
        initial_values = args[0]
        if not isinstance(initial_values, dict):
            raise InterpreterException('configuration_data first argument must be a dictionary')
    return ConfigurationDataHolder(self.subproject, initial_values)
def set_backend(self):
    """Instantiate the build backend from the 'backend' builtin option.

    May auto-upgrade a generic 'vs' selection to the detected Visual Studio
    version and writes the resolved name back into coredata.
    """
    # The backend is already set when parsing subprojects
    if self.backend is not None:
        return
    backend = self.coredata.get_builtin_option('backend')
    from .backend import backends
    self.backend = backends.get_backend_from_name(backend, self.build, self)
    if self.backend is None:
        raise InterpreterException('Unknown backend "%s".' % backend)
    if backend != self.backend.name:
        if self.backend.name.startswith('vs'):
            mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name))
        self.coredata.set_builtin_option('backend', self.backend.name)
    # Only init backend options on first invocation otherwise it would
    # override values previously set from command line.
    if self.environment.first_invocation:
        self.coredata.init_backend_options(backend)
    # Forward any backend_* options given on the command line.
    options = {k: v for k, v in self.environment.cmd_line_options.items() if k.startswith('backend_')}
    self.coredata.set_options(options)
@stringArgs
@permittedKwargs(permitted_kwargs['project'])
def func_project(self, node, args, kwargs):
    """Implement project(): name/language registration, option loading,
    default options, version checks, language setup and backend selection.
    """
    if len(args) < 1:
        raise InvalidArguments('Not enough arguments to project(). Needs at least the project name.')
    proj_name, *proj_langs = args
    if ':' in proj_name:
        raise InvalidArguments("Project name {!r} must not contain ':'".format(proj_name))
    # Enforce the project's minimum Meson version before doing anything else.
    if 'meson_version' in kwargs:
        cv = coredata.version
        pv = kwargs['meson_version']
        if not mesonlib.version_compare(cv, pv):
            raise InterpreterException('Meson version is %s but project requires %s' % (cv, pv))
    # Load and merge the project's option file, if present.
    if os.path.exists(self.option_file):
        oi = optinterpreter.OptionInterpreter(self.subproject)
        oi.process(self.option_file)
        self.coredata.merge_user_options(oi.options)
        self.add_build_def_file(self.option_file)
    # Do not set default_options on reconfigure otherwise it would override
    # values previously set from command line. That means that changing
    # default_options in a project will trigger a reconfigure but won't
    # have any effect.
    self.project_default_options = mesonlib.stringlistify(kwargs.get('default_options', []))
    self.project_default_options = coredata.create_options_dict(self.project_default_options)
    if self.environment.first_invocation:
        default_options = self.project_default_options
        default_options.update(self.default_project_options)
        self.coredata.init_builtins(self.subproject)
    else:
        default_options = {}
    self.coredata.set_default_options(default_options, self.subproject, self.environment)
    if not self.is_subproject():
        self.build.project_name = proj_name
    self.active_projectname = proj_name
    self.project_version = kwargs.get('version', 'undefined')
    if self.build.project_version is None:
        self.build.project_version = self.project_version
    proj_license = mesonlib.stringlistify(kwargs.get('license', 'unknown'))
    self.build.dep_manifest[proj_name] = {'version': self.project_version,
                                          'license': proj_license}
    if self.subproject in self.build.projects:
        raise InvalidCode('Second call to project().')
    # subproject_dir may only be customized by the top-level project.
    if not self.is_subproject() and 'subproject_dir' in kwargs:
        spdirname = kwargs['subproject_dir']
        if not isinstance(spdirname, str):
            raise InterpreterException('Subproject_dir must be a string')
        if os.path.isabs(spdirname):
            raise InterpreterException('Subproject_dir must not be an absolute path.')
        if spdirname.startswith('.'):
            raise InterpreterException('Subproject_dir must not begin with a period.')
        if '..' in spdirname:
            raise InterpreterException('Subproject_dir must not contain a ".." segment.')
        self.subproject_dir = spdirname
    self.build.subproject_dir = self.subproject_dir
    mesonlib.project_meson_versions[self.subproject] = ''
    if 'meson_version' in kwargs:
        mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version']
    self.build.projects[self.subproject] = proj_name
    mlog.log('Project name:', mlog.bold(proj_name))
    mlog.log('Project version:', mlog.bold(self.project_version))
    self.add_languages(proj_langs, True, MachineChoice.BUILD)
    self.add_languages(proj_langs, True, MachineChoice.HOST)
    self.set_backend()
    if not self.is_subproject():
        self.check_stdlibs()
@FeatureNewKwargs('add_languages', '0.54.0', ['native'])
@permittedKwargs(permitted_kwargs['add_languages'])
@stringArgs
def func_add_languages(self, node, args, kwargs):
    """Implement add_languages(); returns True when all languages were added."""
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        for lang in sorted(args, key=compilers.sort_clink):
            mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled')
        return False
    if 'native' in kwargs:
        return self.add_languages(args, required, self.machine_from_native_kwarg(kwargs))
    else:
        # absent 'native' means 'both' for backwards compatibility
        mlog.warning('add_languages is missing native:, assuming languages are wanted for both host and build.',
                     location=self.current_node)
        # Build-machine compilers are best-effort (required=False) here.
        success = self.add_languages(args, False, MachineChoice.BUILD)
        success &= self.add_languages(args, required, MachineChoice.HOST)
        return success
def get_message_string_arg(self, arg):
    """Convert a message()/warning()/summary() argument to its display string.

    Lists and dicts are rendered with stringifyUserArguments, strings pass
    through unchanged, and integers are converted with str(). Any other
    type raises InvalidArguments.
    """
    # The original list and dict branches were identical; fold them together.
    if isinstance(arg, (list, dict)):
        return stringifyUserArguments(arg)
    if isinstance(arg, str):
        return arg
    if isinstance(arg, int):
        return str(arg)
    raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.')
@noArgsFlattening
@noKwargs
def func_message(self, node, args, kwargs):
    """Implement message(); more than one argument requires meson 0.54."""
    if len(args) > 1:
        FeatureNew('message with more than one argument', '0.54.0').use(self.subproject)
    rendered = [self.get_message_string_arg(arg) for arg in args]
    self.message_impl(rendered)
def message_impl(self, args):
    """Write a user message to the log with the standard bold 'Message:' prefix."""
    prefix = mlog.bold('Message:')
    mlog.log(prefix, *args)
@noArgsFlattening
@FeatureNewKwargs('summary', '0.54.0', ['list_sep'])
@permittedKwargs({'section', 'bool_yn', 'list_sep'})
@FeatureNew('summary', '0.53.0')
def func_summary(self, node, args, kwargs):
    """Implement summary(): record key/value pairs for the end-of-run report."""
    nargs = len(args)
    if nargs == 2:
        key, value = args
        if not isinstance(key, str):
            raise InterpreterException('Summary first argument must be string.')
        values = {key: value}
    elif nargs == 1:
        values = args[0]
        if not isinstance(values, dict):
            raise InterpreterException('Summary first argument must be dictionary.')
    else:
        raise InterpreterException('Summary accepts at most 2 arguments.')
    section = kwargs.get('section', '')
    if not isinstance(section, str):
        raise InterpreterException('Summary\'s section keyword argument must be string.')
    self.summary_impl(section, values, kwargs)
def summary_impl(self, section, values, kwargs):
    """Record summary values for the current subproject, creating its Summary lazily."""
    subp = self.subproject
    if subp not in self.summary:
        self.summary[subp] = Summary(self.active_projectname, self.project_version)
    self.summary[subp].add_section(section, values, kwargs)
def _print_summary(self):
    """Print all accumulated summaries, the main project's summary last."""
    # Add automatic 'Subprojects' section in main project.
    all_subprojects = collections.OrderedDict()
    for name, subp in sorted(self.subprojects.items()):
        value = subp.found()
        # Annotate the found/not-found flag with the reason or warning count.
        if subp.disabled_feature:
            value = [value, 'Feature {!r} disabled'.format(subp.disabled_feature)]
        elif subp.exception:
            value = [value, str(subp.exception)]
        elif subp.warnings > 0:
            value = [value, '{} warnings'.format(subp.warnings)]
        all_subprojects[name] = value
    if all_subprojects:
        self.summary_impl('Subprojects', all_subprojects,
                          {'bool_yn': True,
                           'list_sep': ' ',
                           })
    # Print all summaries, main project last.
    mlog.log('')  # newline
    # The main project is stored under the empty subproject name.
    main_summary = self.summary.pop('', None)
    for _, summary in sorted(self.summary.items()):
        summary.dump()
    if main_summary:
        main_summary.dump()
@noArgsFlattening
@FeatureNew('warning', '0.44.0')
@noKwargs
def func_warning(self, node, args, kwargs):
    """Implement warning(); logs through mlog with the calling node as location."""
    if len(args) > 1:
        FeatureNew('warning with more than one argument', '0.54.0').use(self.subproject)
    rendered = [self.get_message_string_arg(arg) for arg in args]
    mlog.warning(*rendered, location=node)
@noKwargs
def func_error(self, node, args, kwargs):
    """Implement error(): abort configuration with the given message."""
    self.validate_arguments(args, 1, [str])
    msg = 'Problem encountered: ' + args[0]
    raise InterpreterException(msg)
@noKwargs
def func_exception(self, node, args, kwargs):
    # Takes no arguments and raises a bare Exception — presumably an internal
    # debugging/testing hook for exercising crash handling; confirm intent
    # before relying on or removing it.
    self.validate_arguments(args, 0, [])
    raise Exception()
def add_languages(self, args: T.Sequence[str], required: bool, for_machine: MachineChoice) -> bool:
    """Add compilers for the given languages on one machine; returns success."""
    ok = self.add_languages_for(args, required, for_machine)
    if not self.coredata.is_cross_build():
        # Native build: build-machine options mirror the regular ones.
        self.coredata.copy_build_options_from_regular_ones()
    self._redetect_machines()
    return ok
def should_skip_sanity_check(self, for_machine: MachineChoice) -> bool:
    """Whether the cross file asked us to skip the host compiler sanity check."""
    # Only the host machine in a cross build can opt out.
    if for_machine != MachineChoice.HOST or not self.environment.is_cross_build():
        return False
    skip = self.environment.properties.host.get('skip_sanity_check', False)
    if not isinstance(skip, bool):
        raise InterpreterException('Option skip_sanity_check must be a boolean.')
    return skip
def add_languages_for(self, args, required, for_machine: MachineChoice):
    """Detect and sanity-check compilers for each requested language.

    Returns False (without raising) for languages that cannot be found when
    `required` is falsy; otherwise re-raises the detection error.
    """
    langs = set(self.coredata.compilers[for_machine].keys())
    langs.update(args)
    # Vala is transpiled through C, so C must be enabled alongside it.
    if 'vala' in langs:
        if 'c' not in langs:
            raise InterpreterException('Compiling Vala requires C. Add C to your project languages and rerun Meson.')
    success = True
    for lang in sorted(args, key=compilers.sort_clink):
        lang = lang.lower()
        clist = self.coredata.compilers[for_machine]
        machine_name = for_machine.get_lower_case_name()
        if lang in clist:
            # Already detected in a previous call; reuse it.
            comp = clist[lang]
        else:
            try:
                comp = self.environment.detect_compiler_for(lang, for_machine)
                if comp is None:
                    raise InvalidArguments('Tried to use unknown language "%s".' % lang)
                if self.should_skip_sanity_check(for_machine):
                    mlog.log_once('Cross compiler sanity tests disabled via the cross file.')
                else:
                    comp.sanity_check(self.environment.get_scratch_dir(), self.environment)
            except Exception:
                if not required:
                    mlog.log('Compiler for language',
                             mlog.bold(lang), 'for the', machine_name,
                             'machine not found.')
                    success = False
                    continue
                else:
                    raise
        # Build-machine compilers in a native build are only debug-logged.
        if for_machine == MachineChoice.HOST or self.environment.is_cross_build():
            logger_fun = mlog.log
        else:
            logger_fun = mlog.debug
        logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:',
                   mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string())
        if comp.linker is not None:
            logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:',
                       mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version)
        self.build.ensure_static_linker(comp)
    return success
def program_from_file_for(self, for_machine, prognames, silent):
    """Look the program names up in the machine file's binary entries; None if absent."""
    for candidate in unholder(prognames):
        if isinstance(candidate, mesonlib.File):
            # Files always point to a local (i.e. self generated) file;
            # machine files cannot override those.
            continue
        if not isinstance(candidate, str):
            raise InterpreterException('Executable name must be a string')
        found = ExternalProgram.from_bin_list(self.environment, for_machine, candidate)
        if found.found():
            return ExternalProgramHolder(found, self.subproject)
    return None
def program_from_system(self, args, search_dirs, silent=False):
    """Search the system (and given dirs) for the first matching program.

    Returns an ExternalProgramHolder, or implicitly None when nothing is
    found or `args` is empty.
    """
    # Search for scripts relative to current subdir.
    # Do not cache found programs because find_program('foobar')
    # might give different results when run from different source dirs.
    source_dir = os.path.join(self.environment.get_source_dir(), self.subdir)
    for exename in args:
        if isinstance(exename, mesonlib.File):
            # A File is searched next to where it lives (build or source tree);
            # extra user-supplied dirs do not apply to it.
            if exename.is_built:
                search_dir = os.path.join(self.environment.get_build_dir(),
                                          exename.subdir)
            else:
                search_dir = os.path.join(self.environment.get_source_dir(),
                                          exename.subdir)
            exename = exename.fname
            extra_search_dirs = []
        elif isinstance(exename, str):
            search_dir = source_dir
            extra_search_dirs = search_dirs
        else:
            raise InvalidArguments('find_program only accepts strings and '
                                   'files, not {!r}'.format(exename))
        extprog = dependencies.ExternalProgram(exename, search_dir=search_dir,
                                               extra_search_dirs=extra_search_dirs,
                                               silent=silent)
        progobj = ExternalProgramHolder(extprog, self.subproject)
        if progobj.found():
            return progobj
    # Falls through (returning None) when no candidate was found.
def program_from_overrides(self, command_names, silent=False):
    """Return a holder for the first name registered via add_find_program_override(), else None."""
    for candidate in command_names:
        if not isinstance(candidate, str):
            continue
        override = self.build.find_overrides.get(candidate)
        if override is not None:
            if not silent:
                mlog.log('Program', mlog.bold(candidate), 'found:', mlog.green('YES'),
                         '(overridden: %s)' % override.description())
            return ExternalProgramHolder(override, self.subproject, self.backend)
    return None
def store_name_lookups(self, command_names):
    """Remember every string name that was searched, for later override-conflict checks."""
    self.build.searched_programs.update(
        name for name in command_names if isinstance(name, str))
def add_find_program_override(self, name, exe):
    """Make future find_program(name) lookups return `exe`, rejecting conflicts."""
    if name in self.build.searched_programs:
        # Too late: a lookup for this name already happened.
        msg = ('Tried to override finding of executable "%s" which has already been found.'
               % name)
        raise InterpreterException(msg)
    if name in self.build.find_overrides:
        msg = ('Tried to override executable "%s" which has already been overridden.'
               % name)
        raise InterpreterException(msg)
    self.build.find_overrides[name] = exe
# TODO update modules to always pass `for_machine`. It is bad-form to assume
# the host machine.
def find_program_impl(self, args, for_machine: MachineChoice = MachineChoice.HOST,
                      required=True, silent=True, wanted='', search_dirs=None):
    """Core of find_program(): overrides, then machine files, then the system.

    Returns an ExternalProgramHolder (wrapping NonExistingExternalProgram
    when not found and not required). Raises InvalidArguments when required
    and missing, or when a `wanted` version constraint fails.
    """
    if not isinstance(args, list):
        args = [args]
    # Lookup order: explicit overrides, machine-file binaries, system search.
    progobj = self.program_from_overrides(args, silent=silent)
    if progobj is None:
        progobj = self.program_from_file_for(for_machine, args, silent=silent)
    if progobj is None:
        progobj = self.program_from_system(args, search_dirs, silent=silent)
    if progobj is None and args[0].endswith('python3'):
        # Last resort for python3: the interpreter running Meson itself.
        prog = dependencies.ExternalProgram('python3', mesonlib.python_command, silent=True)
        progobj = ExternalProgramHolder(prog, self.subproject)
    if required and (progobj is None or not progobj.found()):
        raise InvalidArguments('Program(s) {!r} not found or not executable'.format(args))
    if progobj is None:
        return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject)
    # Only store successful lookups
    self.store_name_lookups(args)
    if wanted:
        version = progobj.get_version(self)
        is_found, not_found, found = mesonlib.version_compare_many(version, wanted)
        if not is_found:
            mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'),
                     'found {!r} but need:'.format(version),
                     ', '.join(["'{}'".format(e) for e in not_found]))
            if required:
                m = 'Invalid version of program, need {!r} {!r} found {!r}.'
                raise InvalidArguments(m.format(progobj.get_name(), not_found, version))
            return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject)
    return progobj
@FeatureNewKwargs('find_program', '0.53.0', ['dirs'])
@FeatureNewKwargs('find_program', '0.52.0', ['version'])
@FeatureNewKwargs('find_program', '0.49.0', ['disabler'])
@disablerIfNotFound
@permittedKwargs(permitted_kwargs['find_program'])
def func_find_program(self, node, args, kwargs):
    """Implement find_program(), honouring required/disabled state and version checks."""
    if not args:
        raise InterpreterException('No program name specified.')
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        mlog.log('Program', mlog.bold(' '.join(args)), 'skipped: feature',
                 mlog.bold(feature), 'disabled')
        return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)),
                                     self.subproject)
    dirs = extract_search_dirs(kwargs)
    version_req = mesonlib.stringlistify(kwargs.get('version', []))
    machine = self.machine_from_native_kwarg(kwargs)
    return self.find_program_impl(args, machine, required=required,
                                  silent=False, wanted=version_req,
                                  search_dirs=dirs)
def func_find_library(self, node, args, kwargs):
    """find_library() was removed from the global namespace; raise with guidance."""
    msg = ('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n'
           'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n'
           'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n'
           )
    raise InvalidCode(msg)
def _find_cached_dep(self, name, display_name, kwargs):
    """Check dependency overrides and the coredata cache for `name`.

    Returns (identifier, dep). `dep` is None on a cache miss or a cached
    version mismatch; a NotFoundDependency is returned when an override
    exists but fails the version check.
    """
    # Check if we want this as a build-time / build machine or run-time /
    # host machine dep.
    for_machine = self.machine_from_native_kwarg(kwargs)
    identifier = dependencies.get_dep_identifier(name, kwargs)
    wanted_vers = mesonlib.stringlistify(kwargs.get('version', []))
    override = self.build.dependency_overrides[for_machine].get(identifier)
    if override:
        info = [mlog.blue('(overridden)' if override.explicit else '(cached)')]
        cached_dep = override.dep
        # We don't implicitly override not-found dependencies, but user could
        # have explicitly called meson.override_dependency() with a not-found
        # dep.
        if not cached_dep.found():
            mlog.log('Dependency', mlog.bold(display_name),
                     'found:', mlog.red('NO'), *info)
            return identifier, cached_dep
        found_vers = cached_dep.get_version()
        if not self.check_version(wanted_vers, found_vers):
            # An override with the wrong version is a hard "not found".
            mlog.log('Dependency', mlog.bold(name),
                     'found:', mlog.red('NO'),
                     'found', mlog.normal_cyan(found_vers), 'but need:',
                     mlog.bold(', '.join(["'{}'".format(e) for e in wanted_vers])),
                     *info)
            return identifier, NotFoundDependency(self.environment)
    else:
        info = [mlog.blue('(cached)')]
        cached_dep = self.coredata.deps[for_machine].get(identifier)
        if cached_dep:
            found_vers = cached_dep.get_version()
            if not self.check_version(wanted_vers, found_vers):
                # Stale cache entry: let the caller do a fresh lookup.
                return identifier, None
    if cached_dep:
        if found_vers:
            info = [mlog.normal_cyan(found_vers), *info]
        mlog.log('Dependency', mlog.bold(display_name),
                 'found:', mlog.green('YES'), *info)
        return identifier, cached_dep
    return identifier, None
@staticmethod
def check_version(wanted, found):
if not wanted:
return True
if found == 'undefined' or not mesonlib.version_compare_many(found, wanted)[0]:
return False
return True
def notfound_dependency(self):
    """Return a holder wrapping a not-found dependency."""
    missing = NotFoundDependency(self.environment)
    return DependencyHolder(missing, self.subproject)
def verify_fallback_consistency(self, dirname, varname, cached_dep):
    """Ensure a cached/overridden dep still matches the subproject variable it came from."""
    sub = self.subprojects.get(dirname)
    # Nothing to verify without a found cached dep, a variable name and the
    # subproject itself.
    if not (cached_dep and varname and sub and cached_dep.found()):
        return
    exported = sub.get_variable_method([varname], {})
    if exported.held_object != cached_dep:
        m = 'Inconsistency: Subproject has overridden the dependency with another variable than {!r}'
        raise DependencyException(m.format(varname))
def get_subproject_dep(self, name, display_name, dirname, varname, kwargs):
    """Fetch a dependency provided by an already-configured subproject.

    `varname` of None means the subproject is expected to have overridden
    the dependency. Honours `required` and `version` kwargs; returns a
    not-found DependencyHolder instead of raising when not required.
    """
    required = kwargs.get('required', True)
    wanted = mesonlib.stringlistify(kwargs.get('version', []))
    subproj_path = os.path.join(self.subproject_dir, dirname)
    dep = self.notfound_dependency()
    try:
        subproject = self.subprojects[dirname]
        _, cached_dep = self._find_cached_dep(name, display_name, kwargs)
        if varname is None:
            # Assuming the subproject overridden the dependency we want
            if cached_dep:
                if required and not cached_dep.found():
                    m = 'Dependency {!r} is not satisfied'
                    raise DependencyException(m.format(display_name))
                return DependencyHolder(cached_dep, self.subproject)
            else:
                m = 'Subproject {} did not override dependency {}'
                raise DependencyException(m.format(subproj_path, display_name))
        if subproject.found():
            self.verify_fallback_consistency(dirname, varname, cached_dep)
            dep = self.subprojects[dirname].get_variable_method([varname], {})
    except InvalidArguments:
        # Missing variable in the subproject: fall through with the
        # not-found placeholder assigned above.
        pass
    if not isinstance(dep, DependencyHolder):
        raise InvalidCode('Fetched variable {!r} in the subproject {!r} is '
                          'not a dependency object.'.format(varname, dirname))
    if not dep.found():
        if required:
            raise DependencyException('Could not find dependency {} in subproject {}'
                                      ''.format(varname, dirname))
        # If the dependency is not required, don't raise an exception
        mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                 mlog.bold(subproj_path), 'found:', mlog.red('NO'))
        return dep
    found = dep.held_object.get_version()
    if not self.check_version(wanted, found):
        if required:
            raise DependencyException('Version {} of subproject dependency {} already '
                                      'cached, requested incompatible version {} for '
                                      'dep {}'.format(found, dirname, wanted, display_name))
        mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                 mlog.bold(subproj_path), 'found:', mlog.red('NO'),
                 'found', mlog.normal_cyan(found), 'but need:',
                 mlog.bold(', '.join(["'{}'".format(e) for e in wanted])))
        return self.notfound_dependency()
    found = mlog.normal_cyan(found) if found else None
    mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
             mlog.bold(subproj_path), 'found:', mlog.green('YES'), found)
    return dep
def _handle_featurenew_dependencies(self, name):
    'Do a feature check on dependencies used by this subproject'
    # Table of dependency names mapped to (feature label, meson version
    # that introduced support for them).
    version_map = {
        'mpi': ('MPI Dependency', '0.42.0'),
        'pcap': ('Pcap Dependency', '0.42.0'),
        'vulkan': ('Vulkan Dependency', '0.42.0'),
        'libwmf': ('LibWMF Dependency', '0.44.0'),
        'openmp': ('OpenMP Dependency', '0.46.0'),
    }
    entry = version_map.get(name)
    if entry is not None:
        feature, version = entry
        FeatureNew(feature, version).use(self.subproject)
@FeatureNewKwargs('dependency', '0.54.0', ['components'])
@FeatureNewKwargs('dependency', '0.52.0', ['include_type'])
@FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args'])
@FeatureNewKwargs('dependency', '0.49.0', ['disabler'])
@FeatureNewKwargs('dependency', '0.40.0', ['method'])
@FeatureNewKwargs('dependency', '0.38.0', ['default_options'])
@disablerIfNotFound
@permittedKwargs(permitted_kwargs['dependency'])
def func_dependency(self, node, args, kwargs):
    """Implement the dependency() function.

    Validates arguments, delegates the actual lookup to dependency_impl(),
    prints the user-supplied 'not_found_message' when the lookup fails, and
    records an implicit override for found dependencies so later lookups of
    the same name are consistent.
    """
    self.validate_arguments(args, 1, [str])
    name = args[0]
    display_name = name if name else '(anonymous)'
    mods = extract_as_list(kwargs, 'modules')
    if mods:
        display_name += ' (modules: {})'.format(', '.join(str(i) for i in mods))
    not_found_message = kwargs.get('not_found_message', '')
    if not isinstance(not_found_message, str):
        raise InvalidArguments('The not_found_message must be a string.')
    try:
        d = self.dependency_impl(name, display_name, kwargs)
    except Exception:
        if not_found_message:
            self.message_impl([not_found_message])
        raise
    if not d.found() and not_found_message:
        # Bug fix: the message was previously emitted twice by a duplicated
        # call; print it exactly once.
        self.message_impl([not_found_message])
    # Override this dependency to have consistent results in subsequent
    # dependency lookups.
    if name and d.found():
        for_machine = self.machine_from_native_kwarg(kwargs)
        identifier = dependencies.get_dep_identifier(name, kwargs)
        if identifier not in self.build.dependency_overrides[for_machine]:
            self.build.dependency_overrides[for_machine][identifier] = \
                build.DependencyOverride(d.held_object, node, explicit=False)
    return d
def dependency_impl(self, name, display_name, kwargs):
    """Shared lookup logic behind dependency().

    Resolution order: feature-option disable check, the per-machine cache,
    an already-configured fallback subproject, an external (system) lookup,
    and finally configuring the fallback subproject.  Found external deps
    are cached in self.coredata.deps; fallback deps are never cached.
    """
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        mlog.log('Dependency', mlog.bold(display_name), 'skipped: feature', mlog.bold(feature), 'disabled')
        return self.notfound_dependency()
    has_fallback = 'fallback' in kwargs
    if 'default_options' in kwargs and not has_fallback:
        # Fixed typo in the warning text: 'keyworg' -> 'keyword'.
        mlog.warning('The "default_options" keyword argument does nothing without a "fallback" keyword argument.',
                     location=self.current_node)
    # writing just "dependency('')" is an error, because it can only fail
    if name == '' and required and not has_fallback:
        raise InvalidArguments('Dependency is both required and not-found')
    if '<' in name or '>' in name or '=' in name:
        # Fixed missing space between 'specify' and 'version' in the message.
        raise InvalidArguments('Characters <, > and = are forbidden in dependency names. To specify '
                               'version\nrequirements use the \'version\' keyword argument instead.')
    identifier, cached_dep = self._find_cached_dep(name, display_name, kwargs)
    if cached_dep:
        if has_fallback:
            dirname, varname = self.get_subproject_infos(kwargs)
            self.verify_fallback_consistency(dirname, varname, cached_dep)
        if required and not cached_dep.found():
            m = 'Dependency {!r} was already checked and was not found'
            raise DependencyException(m.format(display_name))
        return DependencyHolder(cached_dep, self.subproject)
    # If the dependency has already been configured, possibly by
    # a higher level project, try to use it first.
    if has_fallback:
        dirname, varname = self.get_subproject_infos(kwargs)
        if dirname in self.subprojects:
            return self.get_subproject_dep(name, display_name, dirname, varname, kwargs)
    wrap_mode = self.coredata.get_builtin_option('wrap_mode')
    forcefallback = wrap_mode == WrapMode.forcefallback and has_fallback
    if name != '' and not forcefallback:
        self._handle_featurenew_dependencies(name)
        # A dep with a fallback is not hard-required at this stage, because
        # the fallback subproject may still provide it.
        kwargs['required'] = required and not has_fallback
        dep = dependencies.find_external_dependency(name, self.environment, kwargs)
        kwargs['required'] = required
        # Only store found-deps in the cache
        # Never add fallback deps to self.coredata.deps since we
        # cannot cache them. They must always be evaluated else
        # we won't actually read all the build files.
        if dep.found():
            for_machine = self.machine_from_native_kwarg(kwargs)
            self.coredata.deps[for_machine].put(identifier, dep)
            return DependencyHolder(dep, self.subproject)
    if has_fallback:
        return self.dependency_fallback(name, display_name, kwargs)
    return self.notfound_dependency()
@FeatureNew('disabler', '0.44.0')
@noKwargs
@noPosargs
def func_disabler(self, node, args, kwargs):
    # disabler() takes no arguments and simply returns a fresh Disabler object.
    return Disabler()
def print_nested_info(self, dependency_name):
    # Warn that the dependency exists in a sub-subproject and show the
    # `meson wrap promote` command(s) that would make it usable here.
    sprojs = mesonlib.detect_subprojects('subprojects', self.source_root)
    if dependency_name not in sprojs:
        return
    candidates = sprojs[dependency_name]
    message = ['Dependency', mlog.bold(dependency_name),
               'not found but it is available in a sub-subproject.\n'
               'To use it in the current project, promote it by going in the project source\n'
               'root and issuing']
    if len(candidates) > 1:
        message.append('one of the following commands:')
    else:
        message.append('the following command:')
    for candidate in candidates:
        # Strip the source root prefix (plus the path separator) so the
        # command uses a project-relative path.
        relpath = candidate[len(self.source_root) + 1:]
        message.append(mlog.bold('\nmeson wrap promote {}'.format(relpath)))
    mlog.warning(*message, location=self.current_node)
def get_subproject_infos(self, kwargs):
    # Parse the 'fallback' kwarg into a (subproject_dir, variable_name)
    # pair.  A single-element list yields None for the variable name.
    fbinfo = mesonlib.stringlistify(kwargs['fallback'])
    count = len(fbinfo)
    if count == 2:
        return fbinfo
    if count == 1:
        FeatureNew('Fallback without variable name', '0.53.0').use(self.subproject)
        return fbinfo[0], None
    raise InterpreterException('Fallback info must have one or two items.')
def dependency_fallback(self, name, display_name, kwargs):
    """Satisfy a dependency by configuring its fallback subproject.

    Honors the global 'wrap_mode' builtin option: 'nofallback' skips the
    fallback entirely (raising when the dependency was required), while
    'forcefallback' merely changes the logged reason.
    """
    required = kwargs.get('required', True)
    if self.coredata.get_builtin_option('wrap_mode') == WrapMode.nofallback:
        mlog.log('Not looking for a fallback subproject for the dependency',
                 mlog.bold(display_name), 'because:\nUse of fallback '
                 'dependencies is disabled.')
        if required:
            m = 'Dependency {!r} not found and fallback is disabled'
            raise DependencyException(m.format(display_name))
        return self.notfound_dependency()
    elif self.coredata.get_builtin_option('wrap_mode') == WrapMode.forcefallback:
        mlog.log('Looking for a fallback subproject for the dependency',
                 mlog.bold(display_name), 'because:\nUse of fallback dependencies is forced.')
    else:
        mlog.log('Looking for a fallback subproject for the dependency',
                 mlog.bold(display_name))
    dirname, varname = self.get_subproject_infos(kwargs)
    sp_kwargs = {
        'default_options': kwargs.get('default_options', []),
        'required': required,
    }
    # Configure the subproject, then look the dependency up inside it.
    self.do_subproject(dirname, 'meson', sp_kwargs)
    return self.get_subproject_dep(name, display_name, dirname, varname, kwargs)
@FeatureNewKwargs('executable', '0.42.0', ['implib'])
@permittedKwargs(permitted_kwargs['executable'])
def func_executable(self, node, args, kwargs):
    # executable() is a thin wrapper over the generic build_target machinery.
    return self.build_target(node, args, kwargs, ExecutableHolder)
@permittedKwargs(permitted_kwargs['static_library'])
def func_static_lib(self, node, args, kwargs):
    # static_library() delegates to the generic build_target machinery.
    return self.build_target(node, args, kwargs, StaticLibraryHolder)
@permittedKwargs(permitted_kwargs['shared_library'])
def func_shared_lib(self, node, args, kwargs):
    # Build a shared library and flag the target as shared-only
    # (shared_library_only is read elsewhere in the build machinery).
    holder = self.build_target(node, args, kwargs, SharedLibraryHolder)
    holder.held_object.shared_library_only = True
    return holder
@permittedKwargs(permitted_kwargs['both_libraries'])
def func_both_lib(self, node, args, kwargs):
    # both_libraries() delegates to build_both_libraries.
    return self.build_both_libraries(node, args, kwargs)
@FeatureNew('shared_module', '0.37.0')
@permittedKwargs(permitted_kwargs['shared_module'])
def func_shared_module(self, node, args, kwargs):
    # shared_module() delegates to the generic build_target machinery.
    return self.build_target(node, args, kwargs, SharedModuleHolder)
@permittedKwargs(permitted_kwargs['library'])
def func_library(self, node, args, kwargs):
    # library() delegates to build_library, which picks the concrete kind.
    return self.build_library(node, args, kwargs)
@permittedKwargs(permitted_kwargs['jar'])
def func_jar(self, node, args, kwargs):
    # jar() delegates to the generic build_target machinery.
    return self.build_target(node, args, kwargs, JarHolder)
@FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options'])
@permittedKwargs(permitted_kwargs['build_target'])
def func_build_target(self, node, args, kwargs):
    # Dispatch to the right target builder based on the mandatory
    # 'target_type' keyword argument.
    if 'target_type' not in kwargs:
        raise InterpreterException('Missing target_type keyword argument')
    target_type = kwargs.pop('target_type')
    if target_type == 'both_libraries':
        return self.build_both_libraries(node, args, kwargs)
    if target_type == 'library':
        return self.build_library(node, args, kwargs)
    if target_type == 'shared_module':
        # Using build_target() for shared modules is newer than the others.
        FeatureNew('build_target(target_type: \'shared_module\')',
                   '0.51.0').use(self.subproject)
        return self.build_target(node, args, kwargs, SharedModuleHolder)
    holder_map = {
        'executable': ExecutableHolder,
        'shared_library': SharedLibraryHolder,
        'static_library': StaticLibraryHolder,
        'jar': JarHolder,
    }
    if target_type not in holder_map:
        raise InterpreterException('Unknown target_type.')
    return self.build_target(node, args, kwargs, holder_map[target_type])
@permittedKwargs(permitted_kwargs['vcs_tag'])
def func_vcs_tag(self, node, args, kwargs):
    """Implement vcs_tag(): generate a file with the replace_string
    placeholder substituted by the VCS revision, falling back to the
    project version (or an explicit 'fallback') when no revision is
    available.  Implemented as a custom target running vcstagger."""
    if 'input' not in kwargs or 'output' not in kwargs:
        raise InterpreterException('Keyword arguments input and output must exist')
    if 'fallback' not in kwargs:
        FeatureNew('Optional fallback in vcs_tag', '0.41.0').use(self.subproject)
    fallback = kwargs.pop('fallback', self.project_version)
    if not isinstance(fallback, str):
        raise InterpreterException('Keyword argument fallback must be a string.')
    replace_string = kwargs.pop('replace_string', '@VCS_TAG@')
    regex_selector = '(.*)' # default regex selector for custom command: use complete output
    vcs_cmd = kwargs.get('command', None)
    if vcs_cmd and not isinstance(vcs_cmd, list):
        vcs_cmd = [vcs_cmd]
    source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir))
    if vcs_cmd:
        # Is the command an executable in path or maybe a script in the source tree?
        vcs_cmd[0] = shutil.which(vcs_cmd[0]) or os.path.join(source_dir, vcs_cmd[0])
    else:
        # No explicit command: auto-detect the VCS and use its rev command.
        vcs = mesonlib.detect_vcs(source_dir)
        if vcs:
            mlog.log('Found %s repository at %s' % (vcs['name'], vcs['wc_dir']))
            vcs_cmd = vcs['get_rev'].split()
            regex_selector = vcs['rev_regex']
        else:
            vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force to use the fallback string
    # vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command...
    kwargs['command'] = self.environment.get_build_command() + \
        ['--internal',
         'vcstagger',
         '@INPUT0@',
         '@OUTPUT0@',
         fallback,
         source_dir,
         replace_string,
         regex_selector] + vcs_cmd
    # Always rebuild so the tag tracks the working copy state.
    kwargs.setdefault('build_by_default', True)
    kwargs.setdefault('build_always_stale', True)
    return self._func_custom_target_impl(node, [kwargs['output']], kwargs)
@FeatureNew('subdir_done', '0.46.0')
@stringArgs
def func_subdir_done(self, node, args, kwargs):
    # subdir_done() accepts nothing; it unwinds evaluation of the current
    # build file via a dedicated control-flow exception.
    if kwargs:
        raise InterpreterException('exit does not take named arguments')
    if args:
        raise InterpreterException('exit does not take any arguments')
    raise SubdirDoneRequest()
@stringArgs
@FeatureNewKwargs('custom_target', '0.48.0', ['console'])
@FeatureNewKwargs('custom_target', '0.47.0', ['install_mode', 'build_always_stale'])
@FeatureNewKwargs('custom_target', '0.40.0', ['build_by_default'])
@permittedKwargs(permitted_kwargs['custom_target'])
def func_custom_target(self, node, args, kwargs):
    # Public entry point for custom_target(): validates arguments, runs
    # feature-version checks, then delegates to the shared implementation.
    if len(args) != 1:
        raise InterpreterException('custom_target: Only one positional argument is allowed, and it must be a string name')
    if 'depfile' in kwargs and ('@BASENAME@' in kwargs['depfile'] or '@PLAINNAME@' in kwargs['depfile']):
        FeatureNew('substitutions in custom_target depfile', '0.47.0').use(self.subproject)
    return self._func_custom_target_impl(node, args, kwargs)
def _func_custom_target_impl(self, node, args, kwargs):
    'Implementation-only, without FeatureNew checks, for internal use'
    name = args[0]
    kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
    if 'input' in kwargs:
        try:
            kwargs['input'] = self.source_strings_to_files(extract_as_list(kwargs, 'input'))
        except mesonlib.MesonException:
            # Unconvertible inputs are only a warning for now.
            mlog.warning('''Custom target input \'%s\' can\'t be converted to File object(s).
This will become a hard error in the future.''' % kwargs['input'], location=self.current_node)
    tg = CustomTargetHolder(build.CustomTarget(name, self.subdir, self.subproject, kwargs, backend=self.backend), self)
    self.add_target(name, tg.held_object)
    return tg
@permittedKwargs(permitted_kwargs['run_target'])
def func_run_target(self, node, args, kwargs):
    """Implement run_target(name, command: [...], depends: [...])."""
    if len(args) > 1:
        raise InvalidCode('Run_target takes only one positional argument: the target name.')
    elif len(args) == 1:
        if 'command' not in kwargs:
            raise InterpreterException('Missing "command" keyword argument')
        all_args = extract_as_list(kwargs, 'command')
        deps = unholder(extract_as_list(kwargs, 'depends'))
    else:
        raise InterpreterException('Run_target needs at least one positional argument.')
    cleaned_args = []
    for i in unholder(listify(all_args)):
        if not isinstance(i, (str, build.BuildTarget, build.CustomTarget, dependencies.ExternalProgram, mesonlib.File)):
            mlog.debug('Wrong type:', str(i))
            raise InterpreterException('Invalid argument to run_target.')
        if isinstance(i, dependencies.ExternalProgram) and not i.found():
            raise InterpreterException('Tried to use non-existing executable {!r}'.format(i.name))
        cleaned_args.append(i)
    name = args[0]
    if not isinstance(name, str):
        raise InterpreterException('First argument must be a string.')
    cleaned_deps = []
    for d in deps:
        if not isinstance(d, (build.BuildTarget, build.CustomTarget)):
            raise InterpreterException('Depends items must be build targets.')
        cleaned_deps.append(d)
    # First command element is the program, the rest are its arguments.
    command, *cmd_args = cleaned_args
    tg = RunTargetHolder(build.RunTarget(name, command, cmd_args, cleaned_deps, self.subdir, self.subproject), self)
    self.add_target(name, tg.held_object)
    full_name = (self.subproject, name)
    assert(full_name not in self.build.run_target_names)
    self.build.run_target_names.add(full_name)
    return tg
@FeatureNew('alias_target', '0.52.0')
@noKwargs
def func_alias_target(self, node, args, kwargs):
    # alias_target(name, dep1, ...) creates a meta-target that builds all
    # of its listed dependencies.
    if len(args) < 2:
        raise InvalidCode('alias_target takes at least 2 arguments.')
    name = args[0]
    if not isinstance(name, str):
        raise InterpreterException('First argument must be a string.')
    targets = unholder(listify(args[1:]))
    for target in targets:
        if not isinstance(target, (build.BuildTarget, build.CustomTarget)):
            raise InterpreterException('Depends items must be build targets.')
    alias = build.AliasTarget(name, targets, self.subdir, self.subproject)
    holder = RunTargetHolder(alias, self)
    self.add_target(name, holder.held_object)
    return holder
@permittedKwargs(permitted_kwargs['generator'])
def func_generator(self, node, args, kwargs):
    # Create a generator object and track it on the interpreter.
    gen = GeneratorHolder(self, args, kwargs)
    self.generators.append(gen)
    return gen
@FeatureNewKwargs('benchmark', '0.46.0', ['depends'])
@FeatureNewKwargs('benchmark', '0.52.0', ['priority'])
@permittedKwargs(permitted_kwargs['benchmark'])
def func_benchmark(self, node, args, kwargs):
    # is_parallel isn't valid for benchmarks, so silently drop it instead
    # of passing it on to add_test().
    kwargs.pop('is_parallel', None)
    self.add_test(node, args, kwargs, False)
@FeatureNewKwargs('test', '0.46.0', ['depends'])
@FeatureNewKwargs('test', '0.52.0', ['priority'])
@permittedKwargs(permitted_kwargs['test'])
def func_test(self, node, args, kwargs):
    # test() and benchmark() share add_test(); is_base_test=True marks
    # this entry as a test rather than a benchmark.
    if kwargs.get('protocol') == 'gtest':
        FeatureNew('"gtest" protocol for tests', '0.55.0').use(self.subproject)
    self.add_test(node, args, kwargs, True)
def unpack_env_kwarg(self, kwargs) -> build.EnvironmentVariables:
    # Normalize the 'env' kwarg, which may be an environment object, a
    # dict, or a list of strings, into a plain EnvironmentVariables value.
    raw = kwargs.get('env', EnvironmentVariablesHolder())
    if isinstance(raw, EnvironmentVariablesHolder):
        return raw.held_object
    if isinstance(raw, dict):
        FeatureNew('environment dictionary', '0.52.0').use(self.subproject)
        return EnvironmentVariablesHolder(raw).held_object
    # Convert from array to environment object
    return EnvironmentVariablesHolder(listify(raw)).held_object
def add_test(self, node, args, kwargs, is_base_test):
    """Shared implementation for test() and benchmark().

    is_base_test selects whether the resulting Test object is appended
    to self.build.tests or self.build.benchmarks.
    """
    if len(args) != 2:
        raise InterpreterException('test expects 2 arguments, {} given'.format(len(args)))
    if not isinstance(args[0], str):
        raise InterpreterException('First argument of test must be a string.')
    exe = args[1]
    if not isinstance(exe, (ExecutableHolder, JarHolder, ExternalProgramHolder)):
        if isinstance(exe, mesonlib.File):
            # A plain file argument is resolved via find_program.
            exe = self.func_find_program(node, args[1], {})
        else:
            raise InterpreterException('Second argument must be executable.')
    par = kwargs.get('is_parallel', True)
    if not isinstance(par, bool):
        raise InterpreterException('Keyword argument is_parallel must be a boolean.')
    cmd_args = unholder(extract_as_list(kwargs, 'args'))
    for i in cmd_args:
        if not isinstance(i, (str, mesonlib.File, build.Target)):
            raise InterpreterException('Command line arguments must be strings, files or targets.')
    env = self.unpack_env_kwarg(kwargs)
    should_fail = kwargs.get('should_fail', False)
    if not isinstance(should_fail, bool):
        raise InterpreterException('Keyword argument should_fail must be a boolean.')
    timeout = kwargs.get('timeout', 30)
    if 'workdir' in kwargs:
        workdir = kwargs['workdir']
        if not isinstance(workdir, str):
            raise InterpreterException('Workdir keyword argument must be a string.')
        if not os.path.isabs(workdir):
            raise InterpreterException('Workdir keyword argument must be an absolute path.')
    else:
        workdir = None
    if not isinstance(timeout, int):
        raise InterpreterException('Timeout must be an integer.')
    protocol = kwargs.get('protocol', 'exitcode')
    if protocol not in {'exitcode', 'tap', 'gtest'}:
        raise InterpreterException('Protocol must be "exitcode", "tap", or "gtest".')
    suite = []
    # Suite names are prefixed with the sanitized project/subproject name.
    prj = self.subproject if self.is_subproject() else self.build.project_name
    for s in mesonlib.stringlistify(kwargs.get('suite', '')):
        if len(s) > 0:
            s = ':' + s
        suite.append(prj.replace(' ', '_').replace(':', '_') + s)
    depends = unholder(extract_as_list(kwargs, 'depends'))
    for dep in depends:
        if not isinstance(dep, (build.CustomTarget, build.BuildTarget)):
            raise InterpreterException('Depends items must be build targets.')
    priority = kwargs.get('priority', 0)
    if not isinstance(priority, int):
        raise InterpreterException('Keyword argument priority must be an integer.')
    t = Test(args[0], prj, suite, exe.held_object, depends, par, cmd_args,
             env, should_fail, timeout, workdir, protocol, priority)
    if is_base_test:
        self.build.tests.append(t)
        mlog.debug('Adding test', mlog.bold(args[0], True))
    else:
        self.build.benchmarks.append(t)
        mlog.debug('Adding benchmark', mlog.bold(args[0], True))
@FeatureNewKwargs('install_headers', '0.47.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_headers'])
def func_install_headers(self, node, args, kwargs):
    # Register header files for installation and return the Headers object.
    source_files = self.source_strings_to_files(args)
    kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
    h = Headers(source_files, kwargs)
    self.build.headers.append(h)
    return h
@FeatureNewKwargs('install_man', '0.47.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_man'])
def func_install_man(self, node, args, kwargs):
    # Register man pages for installation and return the Man object.
    fargs = self.source_strings_to_files(args)
    kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
    m = Man(fargs, kwargs)
    self.build.man.append(m)
    return m
@FeatureNewKwargs('subdir', '0.44.0', ['if_found'])
@permittedKwargs(permitted_kwargs['subdir'])
def func_subdir(self, node, args, kwargs):
    """Implement subdir(): parse and evaluate the build file in a child
    directory.

    Guards against escaping the source tree, entering reserved
    directories, and re-entering an already-visited directory.  The
    'if_found' kwarg skips the subdir when any listed object reports
    not-found.
    """
    self.validate_arguments(args, 1, [str])
    mesonlib.check_direntry_issues(args)
    if '..' in args[0]:
        raise InvalidArguments('Subdir contains ..')
    if self.subdir == '' and args[0] == self.subproject_dir:
        raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.')
    if self.subdir == '' and args[0].startswith('meson-'):
        raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().')
    for i in mesonlib.extract_as_list(kwargs, 'if_found'):
        if not hasattr(i, 'found_method'):
            raise InterpreterException('Object used in if_found does not have a found method.')
        if not i.found_method([], {}):
            return
    prev_subdir = self.subdir
    subdir = os.path.join(prev_subdir, args[0])
    if os.path.isabs(subdir):
        raise InvalidArguments('Subdir argument must be a relative path.')
    absdir = os.path.join(self.environment.get_source_dir(), subdir)
    # Resolve symlinks so the same directory cannot be visited twice via
    # different paths.
    symlinkless_dir = os.path.realpath(absdir)
    if symlinkless_dir in self.visited_subdirs:
        raise InvalidArguments('Tried to enter directory "%s", which has already been visited.'
                               % subdir)
    self.visited_subdirs[symlinkless_dir] = True
    self.subdir = subdir
    os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True)
    buildfilename = os.path.join(self.subdir, environment.build_filename)
    self.build_def_files.append(buildfilename)
    absname = os.path.join(self.environment.get_source_dir(), buildfilename)
    if not os.path.isfile(absname):
        # Restore state before raising so the interpreter stays usable.
        self.subdir = prev_subdir
        raise InterpreterException("Non-existent build file '{!s}'".format(buildfilename))
    with open(absname, encoding='utf8') as f:
        code = f.read()
    assert(isinstance(code, str))
    try:
        codeblock = mparser.Parser(code, absname).parse()
    except mesonlib.MesonException as me:
        me.file = absname
        raise me
    try:
        self.evaluate_codeblock(codeblock)
    except SubdirDoneRequest:
        # subdir_done() only unwinds evaluation of this build file.
        pass
    self.subdir = prev_subdir
def _get_kwarg_install_mode(self, kwargs):
    # Translate the 'install_mode' kwarg into a FileMode, or None when
    # the kwarg is absent.  Entries given as `false` become None
    # placeholders so that position (perms, owner, group) is kept.
    if kwargs.get('install_mode', None) is None:
        return None
    raw = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int))
    install_mode = [None if m is False else m for m in raw]
    if len(install_mode) > 3:
        raise InvalidArguments('Keyword argument install_mode takes at '
                               'most 3 arguments.')
    perms = install_mode[0] if install_mode else None
    if perms is not None and not isinstance(perms, str):
        raise InvalidArguments('Keyword argument install_mode requires the '
                               'permissions arg to be a string or false')
    return FileMode(*install_mode)
@FeatureNewKwargs('install_data', '0.46.0', ['rename'])
@FeatureNewKwargs('install_data', '0.38.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_data'])
def func_install_data(self, node, args, kwargs):
    # Sources may be given positionally and/or via the 'sources' kwarg,
    # as either File objects or strings.
    kwsource = mesonlib.stringlistify(kwargs.get('sources', []))
    raw_sources = args + kwsource
    sources = []
    source_strings = []
    for s in raw_sources:
        if isinstance(s, mesonlib.File):
            sources.append(s)
        elif isinstance(s, str):
            source_strings.append(s)
        else:
            raise InvalidArguments('Argument must be string or file.')
    sources += self.source_strings_to_files(source_strings)
    install_dir = kwargs.get('install_dir', None)
    if not isinstance(install_dir, (str, type(None))):
        raise InvalidArguments('Keyword argument install_dir not a string.')
    install_mode = self._get_kwarg_install_mode(kwargs)
    rename = kwargs.get('rename', None)
    data = DataHolder(build.Data(sources, install_dir, install_mode, rename))
    self.build.data.append(data.held_object)
    return data
@FeatureNewKwargs('install_subdir', '0.42.0', ['exclude_files', 'exclude_directories'])
@FeatureNewKwargs('install_subdir', '0.38.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_subdir'])
@stringArgs
def func_install_subdir(self, node, args, kwargs):
    """Implement install_subdir(): install a whole source subdirectory.

    Returns the InstallDir object appended to the build's install list.
    Raises InvalidArguments / InterpreterException on malformed keyword
    arguments.
    """
    def extract_excludes(kwarg_name):
        # exclude_files and exclude_directories share identical validation
        # (previously duplicated inline): a list of relative-path strings.
        if kwarg_name not in kwargs:
            return set()
        entries = extract_as_list(kwargs, kwarg_name)
        for entry in entries:
            if not isinstance(entry, str):
                raise InvalidArguments('Exclude argument not a string.')
            elif os.path.isabs(entry):
                raise InvalidArguments('Exclude argument cannot be absolute.')
        return set(entries)

    if len(args) != 1:
        raise InvalidArguments('Install_subdir requires exactly one argument.')
    subdir = args[0]
    if 'install_dir' not in kwargs:
        raise InvalidArguments('Missing keyword argument install_dir')
    install_dir = kwargs['install_dir']
    if not isinstance(install_dir, str):
        raise InvalidArguments('Keyword argument install_dir not a string.')
    strip_directory = kwargs.get('strip_directory', False)
    if not isinstance(strip_directory, bool):
        raise InterpreterException('"strip_directory" keyword must be a boolean.')
    exclude = (extract_excludes('exclude_files'), extract_excludes('exclude_directories'))
    install_mode = self._get_kwarg_install_mode(kwargs)
    idir = InstallDir(self.subdir, subdir, install_dir, install_mode, exclude, strip_directory)
    self.build.install_dirs.append(idir)
    return idir
@FeatureNewKwargs('configure_file', '0.47.0', ['copy', 'output_format', 'install_mode', 'encoding'])
@FeatureNewKwargs('configure_file', '0.46.0', ['format'])
@FeatureNewKwargs('configure_file', '0.41.0', ['capture'])
@FeatureNewKwargs('configure_file', '0.50.0', ['install'])
@FeatureNewKwargs('configure_file', '0.52.0', ['depfile'])
@permittedKwargs(permitted_kwargs['configure_file'])
def func_configure_file(self, node, args, kwargs):
    """Implement configure_file().

    Exactly one of the 'configuration', 'command' or 'copy' keyword
    arguments selects the mode; the resulting file may optionally be
    installed.  Returns a mesonlib.File pointing at the built output.
    """
    if len(args) > 0:
        raise InterpreterException("configure_file takes only keyword arguments.")
    if 'output' not in kwargs:
        raise InterpreterException('Required keyword argument "output" not defined.')
    actions = set(['configuration', 'command', 'copy']).intersection(kwargs.keys())
    if len(actions) == 0:
        raise InterpreterException('Must specify an action with one of these '
                                   'keyword arguments: \'configuration\', '
                                   '\'command\', or \'copy\'.')
    elif len(actions) == 2:
        raise InterpreterException('Must not specify both {!r} and {!r} '
                                   'keyword arguments since they are '
                                   'mutually exclusive.'.format(*actions))
    elif len(actions) == 3:
        raise InterpreterException('Must specify one of {!r}, {!r}, and '
                                   '{!r} keyword arguments since they are '
                                   'mutually exclusive.'.format(*actions))
    if 'capture' in kwargs:
        if not isinstance(kwargs['capture'], bool):
            raise InterpreterException('"capture" keyword must be a boolean.')
        if 'command' not in kwargs:
            raise InterpreterException('"capture" keyword requires "command" keyword.')
    if 'format' in kwargs:
        fmt = kwargs['format']
        if not isinstance(fmt, str):
            raise InterpreterException('"format" keyword must be a string.')
    else:
        fmt = 'meson'
    if fmt not in ('meson', 'cmake', 'cmake@'):
        raise InterpreterException('"format" possible values are "meson", "cmake" or "cmake@".')
    if 'output_format' in kwargs:
        output_format = kwargs['output_format']
        if not isinstance(output_format, str):
            raise InterpreterException('"output_format" keyword must be a string.')
    else:
        output_format = 'c'
    if output_format not in ('c', 'nasm'):
        raise InterpreterException('"format" possible values are "c" or "nasm".')
    if 'depfile' in kwargs:
        depfile = kwargs['depfile']
        if not isinstance(depfile, str):
            raise InterpreterException('depfile file name must be a string')
    else:
        depfile = None
    # Validate input
    inputs = self.source_strings_to_files(extract_as_list(kwargs, 'input'))
    inputs_abs = []
    for f in inputs:
        if isinstance(f, mesonlib.File):
            inputs_abs.append(f.absolute_path(self.environment.source_dir,
                                              self.environment.build_dir))
            self.add_build_def_file(f)
        else:
            raise InterpreterException('Inputs can only be strings or file objects')
    # Validate output
    output = kwargs['output']
    if not isinstance(output, str):
        raise InterpreterException('Output file name must be a string')
    if inputs_abs:
        # Expand @BASENAME@/@PLAINNAME@-style templates in output/depfile.
        values = mesonlib.get_filenames_templates_dict(inputs_abs, None)
        outputs = mesonlib.substitute_values([output], values)
        output = outputs[0]
        if depfile:
            depfile = mesonlib.substitute_values([depfile], values)[0]
    # Warn (once per clash) when two configure_file() calls in the same
    # directory write the same output file.
    ofile_rpath = os.path.join(self.subdir, output)
    if ofile_rpath in self.configure_file_outputs:
        mesonbuildfile = os.path.join(self.subdir, 'meson.build')
        current_call = "{}:{}".format(mesonbuildfile, self.current_lineno)
        first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath])
        mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call)
    else:
        self.configure_file_outputs[ofile_rpath] = self.current_lineno
    if os.path.dirname(output) != '':
        raise InterpreterException('Output file name must not contain a subdirectory.')
    (ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output))
    ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname)
    # Perform the appropriate action
    if 'configuration' in kwargs:
        conf = kwargs['configuration']
        if isinstance(conf, dict):
            FeatureNew('configure_file.configuration dictionary', '0.49.0').use(self.subproject)
            conf = ConfigurationDataHolder(self.subproject, conf)
        elif not isinstance(conf, ConfigurationDataHolder):
            raise InterpreterException('Argument "configuration" is not of type configuration_data')
        mlog.log('Configuring', mlog.bold(output), 'using configuration')
        if len(inputs) > 1:
            raise InterpreterException('At most one input file can given in configuration mode')
        if inputs:
            os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
            file_encoding = kwargs.setdefault('encoding', 'utf-8')
            missing_variables, confdata_useless = \
                mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf.held_object,
                                      fmt, file_encoding)
            if missing_variables:
                var_list = ", ".join(map(repr, sorted(missing_variables)))
                mlog.warning(
                    "The variable(s) %s in the input file '%s' are not "
                    "present in the given configuration data." % (
                        var_list, inputs[0]), location=node)
            if confdata_useless:
                ifbase = os.path.basename(inputs_abs[0])
                mlog.warning('Got an empty configuration_data() object and found no '
                             'substitutions in the input file {!r}. If you want to '
                             'copy a file to the build dir, use the \'copy:\' keyword '
                             'argument added in 0.47.0'.format(ifbase), location=node)
        else:
            # No input file: emit a header containing only the conf data.
            mesonlib.dump_conf_header(ofile_abs, conf.held_object, output_format)
        conf.mark_used()
    elif 'command' in kwargs:
        if len(inputs) > 1:
            FeatureNew('multiple inputs in configure_file()', '0.52.0').use(self.subproject)
        # We use absolute paths for input and output here because the cwd
        # that the command is run from is 'unspecified', so it could change.
        # Currently it's builddir/subdir for in_builddir else srcdir/subdir.
        values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs])
        if depfile:
            depfile = os.path.join(self.environment.get_scratch_dir(), depfile)
            values['@DEPFILE@'] = depfile
        # Substitute @INPUT@, @OUTPUT@, etc here.
        cmd = mesonlib.substitute_values(kwargs['command'], values)
        mlog.log('Configuring', mlog.bold(output), 'with command')
        res = self.run_command_impl(node, cmd, {}, True)
        if res.returncode != 0:
            raise InterpreterException('Running configure command failed.\n%s\n%s' %
                                       (res.stdout, res.stderr))
        if 'capture' in kwargs and kwargs['capture']:
            # Write captured stdout to a temp file, then atomically
            # replace the output only if it changed.
            dst_tmp = ofile_abs + '~'
            file_encoding = kwargs.setdefault('encoding', 'utf-8')
            with open(dst_tmp, 'w', encoding=file_encoding) as f:
                f.writelines(res.stdout)
            if inputs_abs:
                shutil.copymode(inputs_abs[0], dst_tmp)
            mesonlib.replace_if_different(ofile_abs, dst_tmp)
        if depfile:
            mlog.log('Reading depfile:', mlog.bold(depfile))
            with open(depfile, 'r') as f:
                df = DepFile(f.readlines())
                deps = df.get_all_dependencies(ofile_fname)
                for dep in deps:
                    self.add_build_def_file(dep)
    elif 'copy' in kwargs:
        if len(inputs_abs) != 1:
            raise InterpreterException('Exactly one input file must be given in copy mode')
        os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
        shutil.copyfile(inputs_abs[0], ofile_abs)
        shutil.copystat(inputs_abs[0], ofile_abs)
    else:
        # Not reachable
        raise AssertionError
    # Install file if requested, we check for the empty string
    # for backwards compatibility. That was the behaviour before
    # 0.45.0 so preserve it.
    idir = kwargs.get('install_dir', '')
    if idir is False:
        idir = ''
        mlog.deprecation('Please use the new `install:` kwarg instead of passing '
                         '`false` to `install_dir:`', location=node)
    if not isinstance(idir, str):
        if isinstance(idir, list) and len(idir) == 0:
            mlog.deprecation('install_dir: kwarg must be a string and not an empty array. '
                             'Please use the install: kwarg to enable or disable installation. '
                             'This will be a hard error in the next release.')
        else:
            raise InterpreterException('"install_dir" must be a string')
    install = kwargs.get('install', idir != '')
    if not isinstance(install, bool):
        raise InterpreterException('"install" must be a boolean')
    if install:
        if not idir:
            raise InterpreterException('"install_dir" must be specified '
                                       'when "install" in a configure_file '
                                       'is true')
        cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname)
        install_mode = self._get_kwarg_install_mode(kwargs)
        self.build.data.append(build.Data([cfile], idir, install_mode))
    return mesonlib.File.from_built_file(self.subdir, output)
def extract_incdirs(self, kwargs):
prospectives = unholder(extract_as_list(kwargs, 'include_directories'))
result = []
for p in prospectives:
if isinstance(p, build.IncludeDirs):
result.append(p)
elif isinstance(p, str):
result.append(self.build_incdir_object([p]).held_object)
else:
raise InterpreterException('Include directory objects can only be created from strings or include directories.')
return result
    @permittedKwargs(permitted_kwargs['include_directories'])
    @stringArgs
    def func_include_directories(self, node, args, kwargs):
        """include_directories(...): build an IncludeDirs holder from the
        given directory strings; 'is_system' marks them as system includes."""
        return self.build_incdir_object(args, kwargs.get('is_system', False))
    def build_incdir_object(self, incdir_strings, is_system=False):
        """Create an IncludeDirsHolder for the given relative directory names.

        Rejects absolute paths into the source tree (portability hazard) and
        directories that exist in neither the source nor the build tree.
        """
        if not isinstance(is_system, bool):
            raise InvalidArguments('Is_system must be boolean.')
        src_root = self.environment.get_source_dir()
        build_root = self.environment.get_build_dir()
        absbase_src = os.path.join(src_root, self.subdir)
        absbase_build = os.path.join(build_root, self.subdir)
        for a in incdir_strings:
            # Absolute paths into the source tree would break out-of-tree builds.
            if a.startswith(src_root):
                raise InvalidArguments('Tried to form an absolute path to a source dir. '
                                       'You should not do that but use relative paths instead.'
                                       '''

To get include path to any directory relative to the current dir do

incdir = include_directories(dirname)

After this incdir will contain both the current source dir as well as the
corresponding build dir. It can then be used in any subdirectory and
Meson will take care of all the busywork to make paths work.

Dirname can even be '.' to mark the current directory. Though you should
remember that the current source and build directories are always
put in the include directories by default so you only need to do
include_directories('.') if you intend to use the result in a
different subdirectory.
''')
            # The directory may exist only in the build tree (generated headers).
            absdir_src = os.path.join(absbase_src, a)
            absdir_build = os.path.join(absbase_build, a)
            if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build):
                raise InvalidArguments('Include dir %s does not exist.' % a)
        i = IncludeDirsHolder(build.IncludeDirs(self.subdir, incdir_strings, is_system))
        return i
    @permittedKwargs(permitted_kwargs['add_test_setup'])
    @stringArgs
    def func_add_test_setup(self, node, args, kwargs):
        """add_test_setup(name, ...): register a named test setup.

        Validates the setup name, exe_wrapper, gdb, timeout_multiplier and
        is_default kwargs, then stores a build.TestSetup keyed by the
        fully qualified '<(sub)project>:<name>' string.
        """
        if len(args) != 1:
            raise InterpreterException('Add_test_setup needs one argument for the setup name.')
        setup_name = args[0]
        if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None:
            raise InterpreterException('Setup name may only contain alphanumeric characters.')
        if ":" not in setup_name:
            # Qualify bare names with the (sub)project so setups cannot clash.
            setup_name = (self.subproject if self.subproject else self.build.project_name) + ":" + setup_name
        try:
            inp = unholder(extract_as_list(kwargs, 'exe_wrapper'))
            exe_wrapper = []
            for i in inp:
                if isinstance(i, str):
                    exe_wrapper.append(i)
                elif isinstance(i, dependencies.ExternalProgram):
                    if not i.found():
                        raise InterpreterException('Tried to use non-found executable.')
                    exe_wrapper += i.get_command()
                else:
                    raise InterpreterException('Exe wrapper can only contain strings or external binaries.')
        except KeyError:
            # No exe_wrapper kwarg given.
            exe_wrapper = None
        gdb = kwargs.get('gdb', False)
        if not isinstance(gdb, bool):
            raise InterpreterException('Gdb option must be a boolean')
        timeout_multiplier = kwargs.get('timeout_multiplier', 1)
        if not isinstance(timeout_multiplier, int):
            raise InterpreterException('Timeout multiplier must be a number.')
        is_default = kwargs.get('is_default', False)
        if not isinstance(is_default, bool):
            raise InterpreterException('is_default option must be a boolean')
        if is_default:
            # Only one setup may be the default for the whole build.
            if self.build.test_setup_default_name is not None:
                raise InterpreterException('\'%s\' is already set as default. '
                                           'is_default can be set to true only once' % self.build.test_setup_default_name)
            self.build.test_setup_default_name = setup_name
        env = self.unpack_env_kwarg(kwargs)
        self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, gdb, timeout_multiplier, env)
    @permittedKwargs(permitted_kwargs['add_global_arguments'])
    @stringArgs
    def func_add_global_arguments(self, node, args, kwargs):
        """add_global_arguments(): record compile args applied to every
        target of the machine selected via the 'native' kwarg."""
        for_machine = self.machine_from_native_kwarg(kwargs)
        self.add_global_arguments(node, self.build.global_args[for_machine], args, kwargs)
    @permittedKwargs(permitted_kwargs['add_global_link_arguments'])
    @stringArgs
    def func_add_global_link_arguments(self, node, args, kwargs):
        """add_global_link_arguments(): record link args applied to every
        target of the machine selected via the 'native' kwarg."""
        for_machine = self.machine_from_native_kwarg(kwargs)
        self.add_global_arguments(node, self.build.global_link_args[for_machine], args, kwargs)
    @permittedKwargs(permitted_kwargs['add_project_arguments'])
    @stringArgs
    def func_add_project_arguments(self, node, args, kwargs):
        """add_project_arguments(): record compile args scoped to the
        current (sub)project for the selected machine."""
        for_machine = self.machine_from_native_kwarg(kwargs)
        self.add_project_arguments(node, self.build.projects_args[for_machine], args, kwargs)
    @permittedKwargs(permitted_kwargs['add_project_link_arguments'])
    @stringArgs
    def func_add_project_link_arguments(self, node, args, kwargs):
        """add_project_link_arguments(): record link args scoped to the
        current (sub)project for the selected machine."""
        for_machine = self.machine_from_native_kwarg(kwargs)
        self.add_project_arguments(node, self.build.projects_link_args[for_machine], args, kwargs)
def warn_about_builtin_args(self, args):
warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra', '-Wpedantic')
optargs = ('-O0', '-O2', '-O3', '-Os', '/O1', '/O2', '/Os')
for arg in args:
if arg in warnargs:
mlog.warning('Consider using the built-in warning_level option instead of using "{}".'.format(arg),
location=self.current_node)
elif arg in optargs:
mlog.warning('Consider using the built-in optimization level instead of using "{}".'.format(arg),
location=self.current_node)
elif arg == '-g':
mlog.warning('Consider using the built-in debug option instead of using "{}".'.format(arg),
location=self.current_node)
elif arg == '-pipe':
mlog.warning("You don't need to add -pipe, Meson will use it automatically when it is available.",
location=self.current_node)
elif arg.startswith('-fsanitize'):
mlog.warning('Consider using the built-in option for sanitizers instead of using "{}".'.format(arg),
location=self.current_node)
elif arg.startswith('-std=') or arg.startswith('/std:'):
mlog.warning('Consider using the built-in option for language standard version instead of using "{}".'.format(arg),
location=self.current_node)
    def add_global_arguments(self, node, argsdict, args, kwargs):
        """Validate and record global (whole-build) arguments.

        Forbidden in subprojects, and frozen once any build target or
        project-level argument has been declared."""
        if self.is_subproject():
            msg = 'Function \'{}\' cannot be used in subprojects because ' \
                  'there is no way to make that reliable.\nPlease only call ' \
                  'this if is_subproject() returns false. Alternatively, ' \
                  'define a variable that\ncontains your language-specific ' \
                  'arguments and add it to the appropriate *_args kwarg ' \
                  'in each target.'.format(node.func_name)
            raise InvalidCode(msg)
        frozen = self.project_args_frozen or self.global_args_frozen
        self.add_arguments(node, argsdict, frozen, args, kwargs)
    def add_project_arguments(self, node, argsdict, args, kwargs):
        """Record arguments scoped to the current (sub)project, creating its
        per-subproject sub-dict on first use."""
        if self.subproject not in argsdict:
            argsdict[self.subproject] = {}
        self.add_arguments(node, argsdict[self.subproject],
                           self.project_args_frozen, args, kwargs)
    def add_arguments(self, node, argsdict, args_frozen, args, kwargs):
        """Append *args* to argsdict[lang] for every language listed in the
        mandatory 'language' kwarg.

        Raises InvalidCode once a build target has been declared (args are
        frozen) or when the 'language' kwarg is missing."""
        if args_frozen:
            msg = 'Tried to use \'{}\' after a build target has been declared.\n' \
                  'This is not permitted. Please declare all ' \
                  'arguments before your targets.'.format(node.func_name)
            raise InvalidCode(msg)
        if 'language' not in kwargs:
            raise InvalidCode('Missing language definition in {}'.format(node.func_name))
        # Flag flags that duplicate built-in options (warning level etc.).
        self.warn_about_builtin_args(args)
        for lang in mesonlib.stringlistify(kwargs['language']):
            lang = lang.lower()
            argsdict[lang] = argsdict.get(lang, []) + args
@noKwargs
@noArgsFlattening
def func_environment(self, node, args, kwargs):
if len(args) > 1:
raise InterpreterException('environment takes only one optional positional arguments')
elif len(args) == 1:
FeatureNew('environment positional arguments', '0.52.0').use(self.subproject)
initial_values = args[0]
if not isinstance(initial_values, dict) and not isinstance(initial_values, list):
raise InterpreterException('environment first argument must be a dictionary or a list')
else:
initial_values = {}
return EnvironmentVariablesHolder(initial_values)
    @stringArgs
    @noKwargs
    def func_join_paths(self, node, args, kwargs):
        """join_paths(...): join the given path strings into one path."""
        return self.join_path_strings(args)
    def run(self):
        """Interpret the project, then report the target count, FeatureNew/
        FeatureDeprecated notices, and (top-level project only) extra
        warnings and the configuration summary."""
        super().run()
        mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets))))
        FeatureNew.report(self.subproject)
        FeatureDeprecated.report(self.subproject)
        if not self.is_subproject():
            self.print_extra_warnings()
        if self.subproject == '':
            self._print_summary()
    def print_extra_warnings(self):
        """Emit compiler-specific warnings; currently only the Clang
        sanitizer + b_lundef interaction check."""
        # TODO cross compilation
        for c in self.coredata.compilers.host.values():
            if c.get_id() == 'clang':
                self.check_clang_asan_lundef()
                break
    def check_clang_asan_lundef(self):
        """Warn when a sanitizer is enabled together with b_lundef on Clang,
        a combination that is known to be problematic."""
        if 'b_lundef' not in self.coredata.base_options:
            return
        if 'b_sanitize' not in self.coredata.base_options:
            return
        if (self.coredata.base_options['b_lundef'].value and
                self.coredata.base_options['b_sanitize'].value != 'none'):
            mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef.
This will probably not work.
Try setting b_lundef to false instead.'''.format(self.coredata.base_options['b_sanitize'].value),
                         location=self.current_node)
    def evaluate_subproject_info(self, path_from_source_root, subproject_dirname):
        """Return (depth, name): how many subproject levels the given path
        crosses, and the directory name recorded at the last crossing
        ('' when depth == 0)."""
        depth = 0
        subproj_name = ''
        segs = PurePath(path_from_source_root).parts
        segs_spd = PurePath(subproject_dirname).parts
        while segs and segs[0] == segs_spd[0]:
            if len(segs_spd) == 1:
                # Crossed into a subproject dir; the next path component is
                # the subproject's name.
                # NOTE(review): assumes a component follows the subproject
                # dir — segs[1] would raise IndexError for the bare dir;
                # confirm callers never pass it.
                subproj_name = segs[1]
                segs = segs[2:]
                depth += 1
            else:
                segs_spd = segs_spd[1:]
                segs = segs[1:]
        return (depth, subproj_name)
# Check that the indicated file is within the same subproject
# as we currently are. This is to stop people doing
# nasty things like:
#
# f = files('../../master_src/file.c')
#
# Note that this is validated only when the file
# object is generated. The result can be used in a different
# subproject than it is defined in (due to e.g. a
# declare_dependency).
    def validate_within_subproject(self, subdir, fname):
        """Raise a sandbox-violation error if *fname* (relative to *subdir*)
        lives in a different (sub)project than the current one.

        Absolute paths outside the source tree are allowed (vendor SDKs)."""
        norm = os.path.normpath(os.path.join(subdir, fname))
        if os.path.isabs(norm):
            if not norm.startswith(self.environment.source_dir):
                # Grabbing files outside the source tree is ok.
                # This is for vendor stuff like:
                #
                # /opt/vendorsdk/src/file_with_license_restrictions.c
                return
            norm = os.path.relpath(norm, self.environment.source_dir)
            assert(not os.path.isabs(norm))
        (num_sps, sproj_name) = self.evaluate_subproject_info(norm, self.subproject_dir)
        plain_filename = os.path.basename(norm)
        if num_sps == 0:
            # The file is in the main project: only valid when we are too.
            if not self.is_subproject():
                return
            raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename)
        if num_sps > 1:
            raise InterpreterException('Sandbox violation: Tried to grab file %s from a nested subproject.' % plain_filename)
        if sproj_name != self.subproject_directory_name:
            raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename)
    def source_strings_to_files(self, sources):
        """Convert source strings to mesonlib.File objects, passing through
        already-file-like holder objects unchanged.

        Strings are sandbox-validated against the current subproject."""
        results = []
        mesonlib.check_direntry_issues(sources)
        if not isinstance(sources, list):
            sources = [sources]
        for s in sources:
            if isinstance(s, (mesonlib.File, GeneratedListHolder,
                              TargetHolder, CustomTargetIndexHolder,
                              GeneratedObjectsHolder)):
                # Already a file-like object; keep as-is.
                pass
            elif isinstance(s, str):
                self.validate_within_subproject(self.subdir, s)
                s = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s)
            else:
                raise InterpreterException('Source item is {!r} instead of '
                                           'string or File-type object'.format(s))
            results.append(s)
        return results
    def add_target(self, name, tobj):
        """Register a built target under its unique id, validating the name
        against empty/whitespace/reserved values and duplicates."""
        if name == '':
            raise InterpreterException('Target name must not be empty.')
        if name.strip() == '':
            raise InterpreterException('Target name must not consist only of whitespace.')
        if name.startswith('meson-'):
            raise InvalidArguments("Target names starting with 'meson-' are reserved "
                                   "for Meson's internal use. Please rename.")
        if name in coredata.forbidden_target_names:
            raise InvalidArguments("Target name '%s' is reserved for Meson's "
                                   "internal use. Please rename." % name)
        # To permit an executable and a shared library to have the
        # same name, such as "foo.exe" and "libfoo.a".
        idname = tobj.get_id()
        if idname in self.build.targets:
            raise InvalidCode('Tried to create target "%s", but a target of that name already exists.' % name)
        self.build.targets[idname] = tobj
        if idname not in self.coredata.target_guids:
            # GUIDs are kept stable across reconfigurations.
            self.coredata.target_guids[idname] = str(uuid.uuid4()).upper()
    @FeatureNew('both_libraries', '0.46.0')
    def build_both_libraries(self, node, args, kwargs):
        """Build a shared and a static library from the same sources.

        When the static library is PIC, its objects are reused from the
        shared build to avoid compiling everything twice."""
        shared_holder = self.build_target(node, args, kwargs, SharedLibraryHolder)

        # Check if user forces non-PIC static library.
        pic = True
        if 'pic' in kwargs:
            pic = kwargs['pic']
        elif 'b_staticpic' in self.environment.coredata.base_options:
            pic = self.environment.coredata.base_options['b_staticpic'].value

        if pic:
            # Exclude sources from args and kwargs to avoid building them twice
            static_args = [args[0]]
            static_kwargs = kwargs.copy()
            static_kwargs['sources'] = []
            static_kwargs['objects'] = shared_holder.held_object.extract_all_objects()
        else:
            # Non-PIC static objects cannot be shared with the shared lib.
            static_args = args
            static_kwargs = kwargs

        static_holder = self.build_target(node, static_args, static_kwargs, StaticLibraryHolder)

        return BothLibrariesHolder(shared_holder, static_holder, self)
def build_library(self, node, args, kwargs):
default_library = self.coredata.get_builtin_option('default_library', self.subproject)
if default_library == 'shared':
return self.build_target(node, args, kwargs, SharedLibraryHolder)
elif default_library == 'static':
return self.build_target(node, args, kwargs, StaticLibraryHolder)
elif default_library == 'both':
return self.build_both_libraries(node, args, kwargs)
else:
raise InterpreterException('Unknown default_library value: %s.', default_library)
    def build_target(self, node, args, kwargs, targetholder):
        """Common implementation behind executable()/library()/jar() etc.

        Normalizes sources/objects/dependency kwargs, maps the holder class
        to the corresponding build.* target class, constructs the target,
        registers it and freezes project args. Returns the holder."""
        @FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories'])
        @FeatureNewKwargs('build target', '0.41.0', ['rust_args'])
        @FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
        @FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility'])
        def build_target_decorator_caller(self, node, args, kwargs):
            return True

        # Invoked only for its FeatureNewKwargs version-check side effects.
        build_target_decorator_caller(self, node, args, kwargs)

        if not args:
            raise InterpreterException('Target does not have a name.')
        name, *sources = args
        for_machine = self.machine_from_native_kwarg(kwargs)
        if 'sources' in kwargs:
            sources += listify(kwargs['sources'])
        sources = self.source_strings_to_files(sources)
        objs = extract_as_list(kwargs, 'objects')
        kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies')
        kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
        if 'extra_files' in kwargs:
            ef = extract_as_list(kwargs, 'extra_files')
            kwargs['extra_files'] = self.source_strings_to_files(ef)
        self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources)
        if targetholder == ExecutableHolder:
            targetclass = build.Executable
        elif targetholder == SharedLibraryHolder:
            targetclass = build.SharedLibrary
        elif targetholder == SharedModuleHolder:
            targetclass = build.SharedModule
        elif targetholder == StaticLibraryHolder:
            targetclass = build.StaticLibrary
        elif targetholder == JarHolder:
            targetclass = build.Jar
        else:
            mlog.debug('Unknown target type:', str(targetholder))
            raise RuntimeError('Unreachable code')
        self.kwarg_strings_to_includedirs(kwargs)

        # Filter out kwargs from other target types. For example 'soversion'
        # passed to library() when default_library == 'static'.
        kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs}

        kwargs['include_directories'] = self.extract_incdirs(kwargs)
        target = targetclass(name, self.subdir, self.subproject, for_machine, sources, objs, self.environment, kwargs)
        target.project_version = self.project_version

        if not self.environment.machines.matches_build_machine(for_machine):
            self.add_cross_stdlib_info(target)
        l = targetholder(target, self)
        self.add_target(name, l.held_object)
        # Declaring a target freezes add_project_arguments().
        self.project_args_frozen = True
        return l
    def kwarg_strings_to_includedirs(self, kwargs):
        """Convert string entries of the 'd_import_dirs' kwarg into
        IncludeDirs objects, in place."""
        if 'd_import_dirs' in kwargs:
            items = mesonlib.extract_as_list(kwargs, 'd_import_dirs')
            cleaned_items = []
            for i in items:
                if isinstance(i, str):
                    # BW compatibility. This was permitted so we must support it
                    # for a few releases so people can transition to "correct"
                    # path declarations.
                    if os.path.normpath(i).startswith(self.environment.get_source_dir()):
                        mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead.
This will become a hard error in the future.''', location=self.current_node)
                        i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir))
                    i = self.build_incdir_object([i])
                cleaned_items.append(i)
            kwargs['d_import_dirs'] = cleaned_items
def get_used_languages(self, target):
result = {}
for i in target.sources:
# TODO other platforms
for lang, c in self.coredata.compilers.host.items():
if c.can_compile(i):
result[lang] = True
break
return result
    def add_cross_stdlib_info(self, target):
        """Attach configured host standard-library dependencies to a host
        target, skipping the subproject that provides the stdlib itself."""
        if target.for_machine != MachineChoice.HOST:
            return
        for l in self.get_used_languages(target):
            props = self.environment.properties.host
            # Do not add the stdlib dep inside its own provider subproject.
            if props.has_stdlib(l) \
                    and self.subproject != props.get_stdlib(l)[0]:
                target.add_deps(self.build.stdlibs.host[l])
def check_sources_exist(self, subdir, sources):
for s in sources:
if not isinstance(s, str):
continue # This means a generated source and they always exist.
fname = os.path.join(subdir, s)
if not os.path.isfile(fname):
raise InterpreterException('Tried to add non-existing source file %s.' % s)
# Only permit object extraction from the same subproject
    def validate_extraction(self, buildtarget: InterpreterObject) -> None:
        """Ensure objects are only extracted from a target belonging to the
        same (sub)project as the current directory; raise otherwise."""
        if not self.subdir.startswith(self.subproject_dir):
            # We are in the main project.
            if buildtarget.subdir.startswith(self.subproject_dir):
                raise InterpreterException('Tried to extract objects from a subproject target.')
        else:
            # We are in a subproject; the target must be in the same one.
            if not buildtarget.subdir.startswith(self.subproject_dir):
                raise InterpreterException('Tried to extract objects from the main project from a subproject.')
            if self.subdir.split('/')[1] != buildtarget.subdir.split('/')[1]:
                raise InterpreterException('Tried to extract objects from a different subproject.')
    def is_subproject(self):
        """Return True when interpreting a subproject, not the main one."""
        return self.subproject != ''
    @noKwargs
    @noArgsFlattening
    def func_set_variable(self, node, args, kwargs):
        """set_variable(name, value): bind an interpreter variable."""
        if len(args) != 2:
            raise InvalidCode('Set_variable takes two arguments.')
        varname, value = args
        self.set_variable(varname, value)
@noKwargs
@noArgsFlattening
def func_get_variable(self, node, args, kwargs):
if len(args) < 1 or len(args) > 2:
raise InvalidCode('Get_variable takes one or two arguments.')
varname = args[0]
if isinstance(varname, Disabler):
return varname
if not isinstance(varname, str):
raise InterpreterException('First argument must be a string.')
try:
return self.variables[varname]
except KeyError:
pass
if len(args) == 2:
return args[1]
raise InterpreterException('Tried to get unknown variable "%s".' % varname)
@stringArgs
@noKwargs
def func_is_variable(self, node, args, kwargs):
if len(args) != 1:
raise InvalidCode('Is_variable takes two arguments.')
varname = args[0]
return varname in self.variables
@staticmethod
def machine_from_native_kwarg(kwargs: T.Dict[str, T.Any]) -> MachineChoice:
native = kwargs.get('native', False)
if not isinstance(native, bool):
raise InvalidArguments('Argument to "native" must be a boolean.')
return MachineChoice.BUILD if native else MachineChoice.HOST
    @FeatureNew('is_disabler', '0.52.0')
    @noKwargs
    def func_is_disabler(self, node, args, kwargs):
        """is_disabler(value): return True when the value is a Disabler."""
        if len(args) != 1:
            raise InvalidCode('Is_disabler takes one argument.')
        varname = args[0]
        return isinstance(varname, Disabler)
| 46.847044 | 197 | 0.609497 |
from . import mparser
from . import environment
from . import coredata
from . import dependencies
from . import mlog
from . import build
from . import optinterpreter
from . import compilers
from .wrap import wrap, WrapMode
from . import mesonlib
from .mesonlib import FileMode, MachineChoice, Popen_safe, listify, extract_as_list, has_path_sep, unholder
from .dependencies import ExternalProgram
from .dependencies import InternalDependency, Dependency, NotFoundDependency, DependencyException
from .depfile import DepFile
from .interpreterbase import InterpreterBase
from .interpreterbase import check_stringlist, flatten, noPosargs, noKwargs, stringArgs, permittedKwargs, noArgsFlattening
from .interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest
from .interpreterbase import InterpreterObject, MutableInterpreterObject, Disabler, disablerIfNotFound
from .interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs
from .interpreterbase import ObjectHolder
from .modules import ModuleReturnValue
from .cmake import CMakeInterpreter
from .backend.backends import TestProtocol
from pathlib import Path, PurePath
import os
import shutil
import uuid
import re
import shlex
import subprocess
import collections
import functools
import typing as T
import importlib
# Kwargs accepted by methods on interpreter holder objects, keyed by method
# name; currently only partial_dependency() is listed.
permitted_method_kwargs = {
    'partial_dependency': {'compile_args', 'link_args', 'links', 'includes',
                           'sources'},
}
def stringifyUserArguments(args):
    """Render a user-supplied value (str, int, list or dict, recursively)
    the way it would be written in a meson.build file."""
    if isinstance(args, str):
        return "'%s'" % args
    if isinstance(args, int):
        return str(args)
    if isinstance(args, list):
        return '[%s]' % ', '.join(stringifyUserArguments(item) for item in args)
    if isinstance(args, dict):
        pairs = ('%s : %s' % (stringifyUserArguments(k), stringifyUserArguments(v))
                 for k, v in args.items())
        return '{%s}' % ', '.join(pairs)
    raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.')
class OverrideProgram(dependencies.ExternalProgram):
    """Marker subclass distinguishing programs that override another
    program lookup from ordinary ExternalPrograms.

    NOTE(review): no behavior is added here; presumably used for
    isinstance() checks elsewhere — confirm against callers."""
    pass
class FeatureOptionHolder(InterpreterObject, ObjectHolder):
    """Holder for a user 'feature' option, exposing enabled()/disabled()/
    auto() to build files.

    Options in 'auto' state are resolved through the global 'auto_features'
    builtin option."""

    def __init__(self, env, name, option):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, option)
        if option.is_auto():
            # 'auto' features defer to the global auto_features option.
            self.held_object = env.coredata.builtins['auto_features']
        self.name = name
        self.methods.update({'enabled': self.enabled_method,
                             'disabled': self.disabled_method,
                             'auto': self.auto_method,
                             })

    @noPosargs
    @permittedKwargs({})
    def enabled_method(self, args, kwargs):
        """feature.enabled(): True when the option is enabled."""
        return self.held_object.is_enabled()

    @noPosargs
    @permittedKwargs({})
    def disabled_method(self, args, kwargs):
        """feature.disabled(): True when the option is disabled."""
        return self.held_object.is_disabled()

    @noPosargs
    @permittedKwargs({})
    def auto_method(self, args, kwargs):
        """feature.auto(): True when the option is still in auto state."""
        return self.held_object.is_auto()
def extract_required_kwarg(kwargs, subproject, feature_check=None, default=True):
    """Interpret the 'required' kwarg, which may be a bool or a feature option.

    Returns (disabled, required, feature_name) and normalizes
    kwargs['required'] back to a plain bool."""
    val = kwargs.get('required', default)
    disabled = False
    required = False
    feature = None
    if isinstance(val, FeatureOptionHolder):
        # Passing a feature option to 'required' is a 0.47.0 feature.
        if not feature_check:
            feature_check = FeatureNew('User option "feature"', '0.47.0')
        feature_check.use(subproject)
        option = val.held_object
        feature = val.name
        if option.is_disabled():
            disabled = True
        elif option.is_enabled():
            required = True
    elif isinstance(val, bool):
        required = val
    else:
        raise InterpreterException('required keyword argument must be boolean or a feature option')

    # TODO: extract_required_kwarg could be converted to a wrapper for
    # the kwargs dict, but we keep the normalization explicit for now.
    kwargs['required'] = required

    return disabled, required, feature
def extract_search_dirs(kwargs):
    """Return the 'dirs' kwarg as a list of absolute path strings.

    '~' is expanded. Relative paths raise InvalidCode, except Windows
    root-relative paths starting with '\\' which are passed through."""
    search_dirs = mesonlib.stringlistify(kwargs.get('dirs', []))
    search_dirs = [Path(d).expanduser() for d in search_dirs]
    for d in search_dirs:
        if mesonlib.is_windows() and d.root.startswith('\\'):
            # a Windows match in a directory separator that is not part of a drive
            continue
        if not d.is_absolute():
            raise InvalidCode('Search directory {} is not an absolute path.'.format(d))
    return list(map(str, search_dirs))
class TryRunResultHolder(InterpreterObject):
    """Holder for the result of a compiler try-run, exposing returncode(),
    compiled(), stdout() and stderr() to build files."""

    def __init__(self, res):
        super().__init__()
        self.res = res
        self.methods.update({'returncode': self.returncode_method,
                             'compiled': self.compiled_method,
                             'stdout': self.stdout_method,
                             'stderr': self.stderr_method,
                             })

    @noPosargs
    @permittedKwargs({})
    def returncode_method(self, args, kwargs):
        """Exit status of the executed test program."""
        return self.res.returncode

    @noPosargs
    @permittedKwargs({})
    def compiled_method(self, args, kwargs):
        """Whether the test program compiled successfully."""
        return self.res.compiled

    @noPosargs
    @permittedKwargs({})
    def stdout_method(self, args, kwargs):
        """Captured standard output of the test program."""
        return self.res.stdout

    @noPosargs
    @permittedKwargs({})
    def stderr_method(self, args, kwargs):
        """Captured standard error of the test program."""
        return self.res.stderr
class RunProcess(InterpreterObject):
    """Runs an external command at configure time (run_command()) and
    exposes returncode()/stdout()/stderr() to build files.

    The command runs with MESON_SOURCE_ROOT/MESON_BUILD_ROOT/MESON_SUBDIR/
    MESONINTROSPECT exported, cwd'd into the current subdir of either the
    source or the build tree."""

    def __init__(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir=False, check=False, capture=True):
        super().__init__()
        if not isinstance(cmd, ExternalProgram):
            raise AssertionError('BUG: RunProcess must be passed an ExternalProgram')
        self.capture = capture
        pc, self.stdout, self.stderr = self.run_command(cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check)
        self.returncode = pc.returncode
        self.methods.update({'returncode': self.returncode_method,
                             'stdout': self.stdout_method,
                             'stderr': self.stderr_method,
                             })

    def run_command(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check=False):
        """Execute the command; return (Popen, stdout, stderr).

        Raises InterpreterException when the binary cannot be executed, or
        (with check=True) when it exits non-zero."""
        command_array = cmd.get_command() + args
        menv = {'MESON_SOURCE_ROOT': source_dir,
                'MESON_BUILD_ROOT': build_dir,
                'MESON_SUBDIR': subdir,
                'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in mesonintrospect]),
                }
        if in_builddir:
            cwd = os.path.join(build_dir, subdir)
        else:
            cwd = os.path.join(source_dir, subdir)
        child_env = os.environ.copy()
        child_env.update(menv)
        child_env = env.get_env(child_env)
        # stdout is silently discarded unless capture was requested.
        stdout = subprocess.PIPE if self.capture else subprocess.DEVNULL
        mlog.debug('Running command:', ' '.join(command_array))
        try:
            p, o, e = Popen_safe(command_array, stdout=stdout, env=child_env, cwd=cwd)
            if self.capture:
                mlog.debug('--- stdout ---')
                mlog.debug(o)
            else:
                o = ''
                mlog.debug('--- stdout disabled ---')
            mlog.debug('--- stderr ---')
            mlog.debug(e)
            mlog.debug('')

            if check and p.returncode != 0:
                raise InterpreterException('Command "{}" failed with status {}.'.format(' '.join(command_array), p.returncode))

            return p, o, e
        except FileNotFoundError:
            raise InterpreterException('Could not execute command "%s".' % ' '.join(command_array))

    @noPosargs
    @permittedKwargs({})
    def returncode_method(self, args, kwargs):
        """Exit status of the command."""
        return self.returncode

    @noPosargs
    @permittedKwargs({})
    def stdout_method(self, args, kwargs):
        """Captured stdout ('' when capture was disabled)."""
        return self.stdout

    @noPosargs
    @permittedKwargs({})
    def stderr_method(self, args, kwargs):
        """Captured stderr."""
        return self.stderr
class ConfigureFileHolder(InterpreterObject, ObjectHolder):
    """Interpreter wrapper holding a build.ConfigureFile record."""

    def __init__(self, subdir, sourcename, targetname, configuration_data):
        InterpreterObject.__init__(self)
        obj = build.ConfigureFile(subdir, sourcename, targetname, configuration_data)
        ObjectHolder.__init__(self, obj)
class EnvironmentVariablesHolder(MutableInterpreterObject, ObjectHolder):
    """Mutable holder around build.EnvironmentVariables exposing
    set()/append()/prepend(), optionally seeded from a dict or a list of
    'KEY=val' strings."""

    def __init__(self, initial_values=None):
        MutableInterpreterObject.__init__(self)
        ObjectHolder.__init__(self, build.EnvironmentVariables())
        self.methods.update({'set': self.set_method,
                             'append': self.append_method,
                             'prepend': self.prepend_method,
                             })
        if isinstance(initial_values, dict):
            for k, v in initial_values.items():
                self.set_method([k, v], {})
        elif isinstance(initial_values, list):
            # List entries must look like 'KEY=val'; whitespace is stripped.
            for e in initial_values:
                if '=' not in e:
                    raise InterpreterException('Env var definition must be of type key=val.')
                (k, val) = e.split('=', 1)
                k = k.strip()
                val = val.strip()
                if ' ' in k:
                    raise InterpreterException('Env var key must not have spaces in it.')
                self.set_method([k, val], {})
        elif initial_values:
            raise AssertionError('Unsupported EnvironmentVariablesHolder initial_values')

    def __repr__(self):
        repr_str = "<{0}: {1}>"
        return repr_str.format(self.__class__.__name__, self.held_object.envvars)

    def add_var(self, method, args, kwargs):
        """Shared validation for set/append/prepend: args[0] is the variable
        name, the remaining args are values; 'separator' must be a string."""
        if not isinstance(kwargs.get("separator", ""), str):
            raise InterpreterException("EnvironmentVariablesHolder methods 'separator'"
                                       " argument needs to be a string.")
        if len(args) < 2:
            raise InterpreterException("EnvironmentVariablesHolder methods require at least"
                                       "2 arguments, first is the name of the variable and"
                                       " following one are values")
        # Warn when append/prepend touches a variable that is already set,
        # since the earlier value gets overridden.
        if method != self.held_object.set and self.held_object.has_name(args[0]):
            mlog.warning('Overriding previous value of environment variable {!r} with a new one'
                         .format(args[0]), location=self.current_node)
        self.held_object.add_var(method, args[0], args[1:], kwargs)

    @stringArgs
    @permittedKwargs({'separator'})
    def set_method(self, args, kwargs):
        """env.set(name, value...): set the variable, replacing any value."""
        self.add_var(self.held_object.set, args, kwargs)

    @stringArgs
    @permittedKwargs({'separator'})
    def append_method(self, args, kwargs):
        """env.append(name, value...): append to the variable."""
        self.add_var(self.held_object.append, args, kwargs)

    @stringArgs
    @permittedKwargs({'separator'})
    def prepend_method(self, args, kwargs):
        """env.prepend(name, value...): prepend to the variable."""
        self.add_var(self.held_object.prepend, args, kwargs)
class ConfigurationDataHolder(MutableInterpreterObject, ObjectHolder):
    """Mutable holder around build.ConfigurationData.

    Values are (value, description) pairs. The object becomes read-only
    once it has been consumed by configure_file()."""

    def __init__(self, pv, initial_values=None):
        MutableInterpreterObject.__init__(self)
        self.used = False # These objects become immutable after use in configure_file.
        ObjectHolder.__init__(self, build.ConfigurationData(), pv)
        self.methods.update({'set': self.set_method,
                             'set10': self.set10_method,
                             'set_quoted': self.set_quoted_method,
                             'has': self.has_method,
                             'get': self.get_method,
                             'get_unquoted': self.get_unquoted_method,
                             'merge_from': self.merge_from_method,
                             })
        if isinstance(initial_values, dict):
            for k, v in initial_values.items():
                self.set_method([k, v], {})
        elif initial_values:
            raise AssertionError('Unsupported ConfigurationDataHolder initial_values')

    def is_used(self):
        """True once configure_file() has consumed this object."""
        return self.used

    def mark_used(self):
        """Freeze the object; further set() calls will raise."""
        self.used = True

    def validate_args(self, args, kwargs):
        """Common validation for the set* methods; returns (name, val, desc)."""
        if len(args) == 1 and isinstance(args[0], list) and len(args[0]) == 2:
            mlog.deprecation('Passing a list as the single argument to '
                             'configuration_data.set is deprecated. This will '
                             'become a hard error in the future.',
                             location=self.current_node)
            args = args[0]

        if len(args) != 2:
            raise InterpreterException("Configuration set requires 2 arguments.")
        if self.used:
            raise InterpreterException("Can not set values on configuration object that has been used.")
        name, val = args
        if not isinstance(val, (int, str)):
            msg = 'Setting a configuration data value to {!r} is invalid, ' \
                  'and will fail at configure_file(). If you are using it ' \
                  'just to store some values, please use a dict instead.'
            mlog.deprecation(msg.format(val), location=self.current_node)
        desc = kwargs.get('description', None)
        if not isinstance(name, str):
            raise InterpreterException("First argument to set must be a string.")
        if desc is not None and not isinstance(desc, str):
            raise InterpreterException('Description must be a string.')

        return name, val, desc

    @noArgsFlattening
    def set_method(self, args, kwargs):
        """cfg.set(name, value): store a value verbatim."""
        (name, val, desc) = self.validate_args(args, kwargs)
        self.held_object.values[name] = (val, desc)

    def set_quoted_method(self, args, kwargs):
        """cfg.set_quoted(name, value): store the string wrapped in double
        quotes, with embedded double quotes backslash-escaped."""
        (name, val, desc) = self.validate_args(args, kwargs)
        if not isinstance(val, str):
            raise InterpreterException("Second argument to set_quoted must be a string.")
        escaped_val = '\\"'.join(val.split('"'))
        self.held_object.values[name] = ('"' + escaped_val + '"', desc)

    def set10_method(self, args, kwargs):
        """cfg.set10(name, value): store 1 for a truthy value, else 0."""
        (name, val, desc) = self.validate_args(args, kwargs)
        if val:
            self.held_object.values[name] = (1, desc)
        else:
            self.held_object.values[name] = (0, desc)

    def has_method(self, args, kwargs):
        """cfg.has(name): True when the key is set."""
        return args[0] in self.held_object.values

    @FeatureNew('configuration_data.get()', '0.38.0')
    @noArgsFlattening
    def get_method(self, args, kwargs):
        """cfg.get(name[, fallback]): return the stored value, the fallback
        when given, or raise."""
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Get method takes one or two arguments.')
        name = args[0]
        if name in self.held_object:
            return self.held_object.get(name)[0]
        if len(args) > 1:
            return args[1]
        raise InterpreterException('Entry %s not in configuration data.' % name)

    @FeatureNew('configuration_data.get_unquoted()', '0.44.0')
    def get_unquoted_method(self, args, kwargs):
        """cfg.get_unquoted(name[, fallback]): like get(), but strip one
        layer of surrounding double quotes from the value if present."""
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Get method takes one or two arguments.')
        name = args[0]
        if name in self.held_object:
            val = self.held_object.get(name)[0]
        elif len(args) > 1:
            val = args[1]
        else:
            raise InterpreterException('Entry %s not in configuration data.' % name)
        if val[0] == '"' and val[-1] == '"':
            return val[1:-1]
        return val

    def get(self, name):
        return self.held_object.values[name] # (val, desc)

    def keys(self):
        return self.held_object.values.keys()

    def merge_from_method(self, args, kwargs):
        """cfg.merge_from(other): copy every entry from another
        configuration data object, overwriting duplicates."""
        if len(args) != 1:
            raise InterpreterException('Merge_from takes one positional argument.')
        from_object = args[0]
        if not isinstance(from_object, ConfigurationDataHolder):
            raise InterpreterException('Merge_from argument must be a configuration data object.')
        from_object = from_object.held_object
        for k, v in from_object.values.items():
            self.held_object.values[k] = v
# Interpreter objects can not be pickled so we must have
# these wrappers.
class DependencyHolder(InterpreterObject, ObjectHolder):
    """Interpreter wrapper exposing the query methods of a Dependency object."""
    def __init__(self, dep, pv):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, dep, pv)
        self.methods.update({'found': self.found_method,
                             'type_name': self.type_name_method,
                             'version': self.version_method,
                             'name': self.name_method,
                             'get_pkgconfig_variable': self.pkgconfig_method,
                             'get_configtool_variable': self.configtool_method,
                             'get_variable': self.variable_method,
                             'partial_dependency': self.partial_dependency_method,
                             'include_type': self.include_type_method,
                             'as_system': self.as_system_method,
                             })
    def found(self):
        """Python-level convenience wrapper around found_method()."""
        return self.found_method([], {})
    @noPosargs
    @permittedKwargs({})
    def type_name_method(self, args, kwargs):
        """Return the dependency's type_name string."""
        return self.held_object.type_name
    @noPosargs
    @permittedKwargs({})
    def found_method(self, args, kwargs):
        """Return whether the dependency was found.

        Internal dependencies always count as found.
        """
        if self.held_object.type_name == 'internal':
            return True
        return self.held_object.found()
    @noPosargs
    @permittedKwargs({})
    def version_method(self, args, kwargs):
        """Return the dependency's version."""
        return self.held_object.get_version()
    @noPosargs
    @permittedKwargs({})
    def name_method(self, args, kwargs):
        """Return the dependency's name."""
        return self.held_object.get_name()
    @permittedKwargs({'define_variable', 'default'})
    def pkgconfig_method(self, args, kwargs):
        """Look up a pkg-config variable on the dependency."""
        args = listify(args)
        if len(args) != 1:
            raise InterpreterException('get_pkgconfig_variable takes exactly one argument.')
        varname = args[0]
        if not isinstance(varname, str):
            raise InterpreterException('Variable name must be a string.')
        return self.held_object.get_pkgconfig_variable(varname, kwargs)
    @FeatureNew('dep.get_configtool_variable', '0.44.0')
    @permittedKwargs({})
    def configtool_method(self, args, kwargs):
        """Look up a config-tool variable on the dependency."""
        args = listify(args)
        if len(args) != 1:
            raise InterpreterException('get_configtool_variable takes exactly one argument.')
        varname = args[0]
        if not isinstance(varname, str):
            raise InterpreterException('Variable name must be a string.')
        return self.held_object.get_configtool_variable(varname)
    @FeatureNew('dep.partial_dependency', '0.46.0')
    @noPosargs
    @permittedKwargs(permitted_method_kwargs['partial_dependency'])
    def partial_dependency_method(self, args, kwargs):
        """Return a holder for a partial copy selected by the kwargs."""
        pdep = self.held_object.get_partial_dependency(**kwargs)
        return DependencyHolder(pdep, self.subproject)
    @FeatureNew('dep.get_variable', '0.51.0')
    @noPosargs
    @permittedKwargs({'cmake', 'pkgconfig', 'configtool', 'internal', 'default_value', 'pkgconfig_define'})
    @FeatureNewKwargs('dep.get_variable', '0.54.0', ['internal'])
    def variable_method(self, args, kwargs):
        """Look up a variable via whichever backend kwargs select."""
        return self.held_object.get_variable(**kwargs)
    @FeatureNew('dep.include_type', '0.52.0')
    @noPosargs
    @permittedKwargs({})
    def include_type_method(self, args, kwargs):
        """Return the dependency's include type."""
        return self.held_object.get_include_type()
    @FeatureNew('dep.as_system', '0.52.0')
    @permittedKwargs({})
    def as_system_method(self, args, kwargs):
        """Return a copy of the dependency with a new include type.

        The optional positional argument defaults to 'system'.
        """
        args = listify(args)
        new_is_system = 'system'
        if len(args) > 1:
            raise InterpreterException('as_system takes only one optional value')
        if len(args) == 1:
            new_is_system = args[0]
        new_dep = self.held_object.generate_system_dependency(new_is_system)
        return DependencyHolder(new_dep, self.subproject)
class ExternalProgramHolder(InterpreterObject, ObjectHolder):
    """Holds an external program, or an Executable build target used as one."""
    def __init__(self, ep, subproject, backend=None):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, ep)
        self.subproject = subproject
        # Backend is only needed to resolve paths of Executable build targets.
        self.backend = backend
        self.methods.update({'found': self.found_method,
                             'path': self.path_method,
                             'full_path': self.full_path_method})
        # Lazily populated by get_version().
        self.cached_version = None
    @noPosargs
    @permittedKwargs({})
    def found_method(self, args, kwargs):
        """Return whether the program was found."""
        return self.found()
    @noPosargs
    @permittedKwargs({})
    def path_method(self, args, kwargs):
        """Deprecated alias of full_path()."""
        mlog.deprecation('path() method is deprecated and replaced by full_path()')
        return self._full_path()
    @noPosargs
    @permittedKwargs({})
    @FeatureNew('ExternalProgram.full_path', '0.55.0')
    def full_path_method(self, args, kwargs):
        """Return the absolute path of the program."""
        return self._full_path()
    def _full_path(self):
        # Executable build targets live in the build dir; ask the backend.
        exe = self.held_object
        if isinstance(exe, build.Executable):
            return self.backend.get_target_filename_abs(exe)
        return exe.get_path()
    def found(self):
        """Executable build targets always count as found."""
        return isinstance(self.held_object, build.Executable) or self.held_object.found()
    def get_command(self):
        """Return the program's command line as a list."""
        return self.held_object.get_command()
    def get_name(self):
        """Return the program's name."""
        exe = self.held_object
        if isinstance(exe, build.Executable):
            return exe.name
        return exe.get_name()
    def get_version(self, interpreter):
        """Return the program's version, probing `--version` once and caching it.

        Executable build targets report their project's version instead.
        """
        if isinstance(self.held_object, build.Executable):
            return self.held_object.project_version
        if not self.cached_version:
            # raw_cmd is only used for error messages; the holder itself is
            # passed to run_command_impl as the program to execute.
            raw_cmd = self.get_command() + ['--version']
            cmd = [self, '--version']
            res = interpreter.run_command_impl(interpreter.current_node, cmd, {}, True)
            if res.returncode != 0:
                m = 'Running {!r} failed'
                raise InterpreterException(m.format(raw_cmd))
            output = res.stdout.strip()
            if not output:
                output = res.stderr.strip()
            # First dotted number sequence in the output is taken as the version.
            match = re.search(r'([0-9][0-9\.]+)', output)
            if not match:
                m = 'Could not find a version number in output of {!r}'
                raise InterpreterException(m.format(raw_cmd))
            self.cached_version = match.group(1)
        return self.cached_version
class ExternalLibraryHolder(InterpreterObject, ObjectHolder):
    """Holds an ExternalLibrary found by compiler.find_library()."""
    def __init__(self, el, pv):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, el, pv)
        self.methods.update({'found': self.found_method,
                             'type_name': self.type_name_method,
                             'partial_dependency': self.partial_dependency_method,
                             })
    def found(self):
        """Return whether the library was found."""
        return self.held_object.found()
    @noPosargs
    @permittedKwargs({})
    def type_name_method(self, args, kwargs):
        """Return the library's type_name string."""
        return self.held_object.type_name
    @noPosargs
    @permittedKwargs({})
    def found_method(self, args, kwargs):
        """Return whether the library was found."""
        return self.found()
    def get_name(self):
        """Return the library's name."""
        return self.held_object.name
    def get_compile_args(self):
        """Return compiler arguments needed to use the library."""
        return self.held_object.get_compile_args()
    def get_link_args(self):
        """Return linker arguments needed to use the library."""
        return self.held_object.get_link_args()
    def get_exe_args(self):
        """Return arguments needed when running an executable using the library."""
        return self.held_object.get_exe_args()
    @FeatureNew('dep.partial_dependency', '0.46.0')
    @noPosargs
    @permittedKwargs(permitted_method_kwargs['partial_dependency'])
    def partial_dependency_method(self, args, kwargs):
        """Return a holder for a partial copy selected by the kwargs."""
        # NOTE(review): relies on ObjectHolder storing pv as self.subproject — confirm.
        pdep = self.held_object.get_partial_dependency(**kwargs)
        return DependencyHolder(pdep, self.subproject)
class GeneratorHolder(InterpreterObject, ObjectHolder):
    """Holds a build.Generator and exposes its process() method."""
    @FeatureNewKwargs('generator', '0.43.0', ['capture'])
    def __init__(self, interp, args, kwargs):
        self.interpreter = interp
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, build.Generator(args, kwargs), interp.subproject)
        self.methods.update({'process': self.process_method})
    @FeatureNewKwargs('generator.process', '0.45.0', ['preserve_path_from'])
    @permittedKwargs({'extra_args', 'preserve_path_from'})
    def process_method(self, args, kwargs):
        """Run the generator over the given sources and wrap the result."""
        extras = mesonlib.stringlistify(kwargs.get('extra_args', []))
        if 'preserve_path_from' in kwargs:
            preserve_path_from = kwargs['preserve_path_from']
            if not isinstance(preserve_path_from, str):
                raise InvalidArguments('Preserve_path_from must be a string.')
            preserve_path_from = os.path.normpath(preserve_path_from)
            if not os.path.isabs(preserve_path_from):
                # This is a bit of a hack. Fix properly before merging.
                raise InvalidArguments('Preserve_path_from must be an absolute path for now. Sorry.')
        else:
            preserve_path_from = None
        gl = self.held_object.process_files('Generator', args, self.interpreter,
                                            preserve_path_from, extra_args=extras)
        return GeneratedListHolder(gl)
class GeneratedListHolder(InterpreterObject, ObjectHolder):
    """Holds a build.GeneratedList, built either from a generator or pre-made."""
    def __init__(self, arg1, extra_args=None):
        InterpreterObject.__init__(self)
        if isinstance(arg1, GeneratorHolder):
            ObjectHolder.__init__(self, build.GeneratedList(arg1.held_object, extra_args if extra_args is not None else []))
        else:
            # Already a GeneratedList; hold it directly.
            ObjectHolder.__init__(self, arg1)
    def __repr__(self):
        r = '<{}: {!r}>'
        return r.format(self.__class__.__name__, self.held_object.get_outputs())
    def add_file(self, a):
        """Append a file to the held GeneratedList."""
        self.held_object.add_file(a)
# A machine that's statically known from the cross file
class MachineHolder(InterpreterObject, ObjectHolder):
    """Exposes a statically-known MachineInfo (e.g. from a cross file)."""
    def __init__(self, machine_info):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, machine_info)
        self.methods.update({'system': self.system_method,
                             'cpu': self.cpu_method,
                             'cpu_family': self.cpu_family_method,
                             'endian': self.endian_method,
                             })
    @noPosargs
    @permittedKwargs({})
    def cpu_family_method(self, args, kwargs):
        """Return the machine's CPU family."""
        return self.held_object.cpu_family
    @noPosargs
    @permittedKwargs({})
    def cpu_method(self, args, kwargs):
        """Return the machine's CPU name."""
        return self.held_object.cpu
    @noPosargs
    @permittedKwargs({})
    def system_method(self, args, kwargs):
        """Return the machine's operating system name."""
        return self.held_object.system
    @noPosargs
    @permittedKwargs({})
    def endian_method(self, args, kwargs):
        """Return the machine's endianness."""
        return self.held_object.endian
class IncludeDirsHolder(InterpreterObject, ObjectHolder):
    """Holds a build.IncludeDirs object; it exposes no interpreter methods."""
    def __init__(self, idobj):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, idobj)
class Headers(InterpreterObject):
    """Represents a set of header files queued for installation."""
    def __init__(self, sources, kwargs):
        InterpreterObject.__init__(self)
        self.sources = sources
        self.install_subdir = kwargs.get('subdir', '')
        if os.path.isabs(self.install_subdir):
            mlog.deprecation('Subdir keyword must not be an absolute path. This will be a hard error in the next release.')
        self.custom_install_dir = kwargs.get('install_dir', None)
        self.custom_install_mode = kwargs.get('install_mode', None)
        if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str):
            raise InterpreterException('Custom_install_dir must be a string.')
    def set_install_subdir(self, subdir):
        """Override the installation subdirectory."""
        self.install_subdir = subdir
    def get_install_subdir(self):
        """Return the installation subdirectory."""
        return self.install_subdir
    def get_sources(self):
        """Return the header sources."""
        return self.sources
    def get_custom_install_dir(self):
        """Return the user-supplied install_dir, or None."""
        return self.custom_install_dir
    def get_custom_install_mode(self):
        """Return the user-supplied install_mode, or None."""
        return self.custom_install_mode
class DataHolder(InterpreterObject, ObjectHolder):
    """Holds a Data object describing files queued for installation."""
    def __init__(self, data):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, data)
    def get_source_subdir(self):
        """Return the subdirectory the data files come from."""
        return self.held_object.source_subdir
    def get_sources(self):
        """Return the data file sources."""
        return self.held_object.sources
    def get_install_dir(self):
        """Return the installation directory."""
        return self.held_object.install_dir
class InstallDir(InterpreterObject):
    """Plain record describing a directory to install via install_subdir()."""
    def __init__(self, src_subdir, inst_subdir, install_dir, install_mode, exclude, strip_directory):
        InterpreterObject.__init__(self)
        self.source_subdir = src_subdir          # where the directory lives in the source tree
        self.installable_subdir = inst_subdir    # the directory to copy
        self.install_dir = install_dir           # destination
        self.install_mode = install_mode         # permissions to apply
        self.exclude = exclude                   # files/dirs to skip
        self.strip_directory = strip_directory   # whether to drop the top-level dir name
class Man(InterpreterObject):
    """Represents man pages queued for installation."""
    def __init__(self, sources, kwargs):
        InterpreterObject.__init__(self)
        self.sources = sources
        self.validate_sources()
        self.custom_install_dir = kwargs.get('install_dir', None)
        self.custom_install_mode = kwargs.get('install_mode', None)
        if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str):
            raise InterpreterException('Custom_install_dir must be a string.')
    def validate_sources(self):
        """Check every source name ends in a man-section number from 1 to 8."""
        for src in self.sources:
            try:
                section = int(src.split('.')[-1])
            except (IndexError, ValueError):
                section = 0
            if not 1 <= section <= 8:
                raise InvalidArguments('Man file must have a file extension of a number between 1 and 8')
    def get_custom_install_dir(self):
        """Return the user-supplied install_dir, or None."""
        return self.custom_install_dir
    def get_custom_install_mode(self):
        """Return the user-supplied install_mode, or None."""
        return self.custom_install_mode
    def get_sources(self):
        """Return the man page sources."""
        return self.sources
class GeneratedObjectsHolder(InterpreterObject, ObjectHolder):
    """Holds object files extracted from a build target; no interpreter methods."""
    def __init__(self, held_object):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, held_object)
class TargetHolder(InterpreterObject, ObjectHolder):
    """Common base for holders of build targets; keeps an interpreter reference."""
    def __init__(self, target, interp):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, target, interp.subproject)
        self.interpreter = interp
class BuildTargetHolder(TargetHolder):
    """Base holder for compiled build targets (executables, libraries, jars)."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
        self.methods.update({'extract_objects': self.extract_objects_method,
                             'extract_all_objects': self.extract_all_objects_method,
                             'name': self.name_method,
                             'get_id': self.get_id_method,
                             'outdir': self.outdir_method,
                             'full_path': self.full_path_method,
                             'private_dir_include': self.private_dir_include_method,
                             })
    def __repr__(self):
        r = '<{} {}: {}>'
        h = self.held_object
        return r.format(self.__class__.__name__, h.get_id(), h.filename)
    def is_cross(self):
        """Return True when the target is built for a machine other than the build machine."""
        return not self.held_object.environment.machines.matches_build_machine(self.held_object.for_machine)
    @noPosargs
    @permittedKwargs({})
    def private_dir_include_method(self, args, kwargs):
        """Return an include dir pointing at the target's private build directory."""
        return IncludeDirsHolder(build.IncludeDirs('', [], False,
                                                   [self.interpreter.backend.get_target_private_dir(self.held_object)]))
    @noPosargs
    @permittedKwargs({})
    def full_path_method(self, args, kwargs):
        """Return the absolute path of the target's output file."""
        return self.interpreter.backend.get_target_filename_abs(self.held_object)
    @noPosargs
    @permittedKwargs({})
    def outdir_method(self, args, kwargs):
        """Return the directory the target is built into."""
        return self.interpreter.backend.get_target_dir(self.held_object)
    @permittedKwargs({})
    def extract_objects_method(self, args, kwargs):
        """Wrap the object files produced from the named sources."""
        gobjs = self.held_object.extract_objects(args)
        return GeneratedObjectsHolder(gobjs)
    @FeatureNewKwargs('extract_all_objects', '0.46.0', ['recursive'])
    @noPosargs
    @permittedKwargs({'recursive'})
    def extract_all_objects_method(self, args, kwargs):
        """Wrap all object files of the target, optionally recursing into sub-objects."""
        recursive = kwargs.get('recursive', False)
        gobjs = self.held_object.extract_all_objects(recursive)
        # Only warn when recursion would have added anything and the user
        # did not make an explicit choice.
        if gobjs.objlist and 'recursive' not in kwargs:
            # NOTE(review): self.current_node is presumably set by the
            # interpreter before dispatch — confirm.
            mlog.warning('extract_all_objects called without setting recursive '
                         'keyword argument. Meson currently defaults to '
                         'non-recursive to maintain backward compatibility but '
                         'the default will be changed in the future.',
                         location=self.current_node)
        return GeneratedObjectsHolder(gobjs)
    @noPosargs
    @permittedKwargs({})
    def get_id_method(self, args, kwargs):
        """Return the target's unique id."""
        return self.held_object.get_id()
    @FeatureNew('name', '0.54.0')
    @noPosargs
    @permittedKwargs({})
    def name_method(self, args, kwargs):
        """Return the target's name."""
        return self.held_object.name
class ExecutableHolder(BuildTargetHolder):
    """Holder for executable build targets."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
class StaticLibraryHolder(BuildTargetHolder):
    """Holder for static library build targets."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
class SharedLibraryHolder(BuildTargetHolder):
    """Holder for shared library build targets."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
        # Marks that the target was declared with shared_library(), not library().
        target.shared_library_only = False
class BothLibrariesHolder(BuildTargetHolder):
    """Holder pairing the shared and static variants built by both_libraries().

    The shared variant doubles as the held object for inherited methods.
    """
    def __init__(self, shared_holder, static_holder, interp):
        super().__init__(shared_holder.held_object, interp)
        self.shared_holder = shared_holder
        self.static_holder = static_holder
        self.methods.update({'get_shared_lib': self.get_shared_lib_method,
                             'get_static_lib': self.get_static_lib_method,
                             })
    def __repr__(self):
        r = '<{} {}: {}, {}: {}>'
        h1 = self.shared_holder.held_object
        h2 = self.static_holder.held_object
        return r.format(self.__class__.__name__, h1.get_id(), h1.filename, h2.get_id(), h2.filename)
    @noPosargs
    @permittedKwargs({})
    def get_shared_lib_method(self, args, kwargs):
        """Return the shared library variant."""
        return self.shared_holder
    @noPosargs
    @permittedKwargs({})
    def get_static_lib_method(self, args, kwargs):
        """Return the static library variant."""
        return self.static_holder
class SharedModuleHolder(BuildTargetHolder):
    """Holder for shared module build targets."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
class JarHolder(BuildTargetHolder):
    """Holder for Java jar build targets."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
class CustomTargetIndexHolder(TargetHolder):
    """Holder for a single indexed output of a custom target."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
        self.methods.update({'full_path': self.full_path_method,
                             })
    @FeatureNew('custom_target[i].full_path', '0.54.0')
    @noPosargs
    @permittedKwargs({})
    def full_path_method(self, args, kwargs):
        """Return the absolute path of this output file."""
        return self.interpreter.backend.get_target_filename_abs(self.held_object)
class CustomTargetHolder(TargetHolder):
    """Holder for custom targets; supports indexing into individual outputs."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
        self.methods.update({'full_path': self.full_path_method,
                             'to_list': self.to_list_method,
                             })
    def __repr__(self):
        held = self.held_object
        return '<{} {}: {}>'.format(self.__class__.__name__, held.get_id(), held.command)
    @noPosargs
    @permittedKwargs({})
    def full_path_method(self, args, kwargs):
        """Return the absolute path of the target's output."""
        return self.interpreter.backend.get_target_filename_abs(self.held_object)
    @FeatureNew('custom_target.to_list', '0.54.0')
    @noPosargs
    @permittedKwargs({})
    def to_list_method(self, args, kwargs):
        """Return each output wrapped in its own CustomTargetIndexHolder."""
        return [CustomTargetIndexHolder(out, self.interpreter) for out in self.held_object]
    def __getitem__(self, index):
        return CustomTargetIndexHolder(self.held_object[index], self.interpreter)
    def __setitem__(self, index, value):
        raise InterpreterException('Cannot set a member of a CustomTarget')
    def __delitem__(self, index):
        raise InterpreterException('Cannot delete a member of a CustomTarget')
    def outdir_include(self):
        """Return an include dir pointing at the target's build directory."""
        target_dir = self.interpreter.backend.get_target_dir(self.held_object)
        return IncludeDirsHolder(build.IncludeDirs('', [], False,
                                                   [os.path.join('@BUILD_ROOT@', target_dir)]))
class RunTargetHolder(TargetHolder):
    """Holder for run targets; exposes no interpreter methods of its own."""
    def __init__(self, target, interp):
        super().__init__(target, interp)
    def __repr__(self):
        r = '<{} {}: {}>'
        h = self.held_object
        return r.format(self.__class__.__name__, h.get_id(), h.command)
class Test(InterpreterObject):
    """Record of everything needed to register and run a single test case."""
    def __init__(self, name: str, project: str, suite: T.List[str], exe: build.Executable,
                 depends: T.List[T.Union[build.CustomTarget, build.BuildTarget]],
                 is_parallel: bool, cmd_args: T.List[str], env: build.EnvironmentVariables,
                 should_fail: bool, timeout: int, workdir: T.Optional[str], protocol: str,
                 priority: int):
        InterpreterObject.__init__(self)
        self.name = name
        self.suite = suite
        self.project_name = project
        self.exe = exe
        self.depends = depends
        self.is_parallel = is_parallel
        self.cmd_args = cmd_args
        self.env = env
        self.should_fail = should_fail
        self.timeout = timeout
        self.workdir = workdir
        # The protocol string is normalized into a TestProtocol enum value.
        self.protocol = TestProtocol.from_str(protocol)
        self.priority = priority
    def get_exe(self):
        """Return the executable the test runs."""
        return self.exe
    def get_name(self):
        """Return the test's name."""
        return self.name
class SubprojectHolder(InterpreterObject, ObjectHolder):
    """Holds the interpreter of a subproject, or None when it is disabled."""
    def __init__(self, subinterpreter, subproject_dir, name, warnings=0, disabled_feature=None,
                 exception=None):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, subinterpreter)
        self.name = name
        self.warnings = warnings
        self.disabled_feature = disabled_feature
        self.exception = exception
        self.subproject_dir = subproject_dir
        self.methods.update({'get_variable': self.get_variable_method,
                             'found': self.found_method,
                             })
    @noPosargs
    @permittedKwargs({})
    def found_method(self, args, kwargs):
        """Return whether the subproject was configured successfully."""
        return self.found()
    def found(self):
        """A disabled subproject holds None instead of an interpreter."""
        return self.held_object is not None
    @permittedKwargs({})
    @noArgsFlattening
    def get_variable_method(self, args, kwargs):
        """Fetch a variable from the subproject, with an optional fallback."""
        if not 1 <= len(args) <= 2:
            raise InterpreterException('Get_variable takes one or two arguments.')
        if not self.found():
            raise InterpreterException('Subproject "%s/%s" disabled can\'t get_variable on it.' % (
                self.subproject_dir, self.name))
        varname = args[0]
        if not isinstance(varname, str):
            raise InterpreterException('Get_variable first argument must be a string.')
        variables = self.held_object.variables
        if varname in variables:
            return variables[varname]
        if len(args) == 2:
            return args[1]
        raise InvalidArguments('Requested variable "{0}" not found.'.format(varname))
# Keyword arguments accepted by the compiler header-check methods
# (check_header / has_header / has_header_symbol).
header_permitted_kwargs = {
    'required',
    'prefix',
    'no_builtin_args',
    'include_directories',
    'args',
    'dependencies',
}
# Keyword arguments accepted by compiler.find_library(); every header-check
# kwarg is also accepted with a 'header_' prefix for the has_headers check.
find_library_permitted_kwargs = {
    'has_headers',
    'required',
    'dirs',
    'static',
}
find_library_permitted_kwargs |= {'header_' + k for k in header_permitted_kwargs}
class CompilerHolder(InterpreterObject):
    """Interpreter object exposing compiler checks (has_function, compiles, ...)."""
    def __init__(self, compiler, env, subproject):
        InterpreterObject.__init__(self)
        self.compiler = compiler
        self.environment = env
        self.subproject = subproject
        self.methods.update({'compiles': self.compiles_method,
                             'links': self.links_method,
                             'get_id': self.get_id_method,
                             'get_linker_id': self.get_linker_id_method,
                             'compute_int': self.compute_int_method,
                             'sizeof': self.sizeof_method,
                             'get_define': self.get_define_method,
                             'check_header': self.check_header_method,
                             'has_header': self.has_header_method,
                             'has_header_symbol': self.has_header_symbol_method,
                             'run': self.run_method,
                             'has_function': self.has_function_method,
                             'has_member': self.has_member_method,
                             'has_members': self.has_members_method,
                             'has_type': self.has_type_method,
                             'alignment': self.alignment_method,
                             'version': self.version_method,
                             'cmd_array': self.cmd_array_method,
                             'find_library': self.find_library_method,
                             'has_argument': self.has_argument_method,
                             'has_function_attribute': self.has_func_attribute_method,
                             'get_supported_function_attributes': self.get_supported_function_attributes_method,
                             'has_multi_arguments': self.has_multi_arguments_method,
                             'get_supported_arguments': self.get_supported_arguments_method,
                             'first_supported_argument': self.first_supported_argument_method,
                             'has_link_argument': self.has_link_argument_method,
                             'has_multi_link_arguments': self.has_multi_link_arguments_method,
                             'get_supported_link_arguments': self.get_supported_link_arguments_method,
                             'first_supported_link_argument': self.first_supported_link_argument_method,
                             'unittest_args': self.unittest_args_method,
                             'symbols_have_underscore_prefix': self.symbols_have_underscore_prefix_method,
                             'get_argument_syntax': self.get_argument_syntax_method,
                             })
def _dep_msg(self, deps, endl):
msg_single = 'with dependency {}'
msg_many = 'with dependencies {}'
if not deps:
return endl
if endl is None:
endl = ''
tpl = msg_many if len(deps) > 1 else msg_single
names = []
for d in deps:
if isinstance(d, dependencies.ExternalLibrary):
name = '-l' + d.name
else:
name = d.name
names.append(name)
return tpl.format(', '.join(names)) + endl
    @noPosargs
    @permittedKwargs({})
    def version_method(self, args, kwargs):
        """Return the compiler's version string."""
        return self.compiler.version
    @noPosargs
    @permittedKwargs({})
    def cmd_array_method(self, args, kwargs):
        """Return the compiler's command line as a list of strings."""
        return self.compiler.exelist
    def determine_args(self, kwargs, mode='link'):
        """Assemble the extra compiler arguments for a check.

        Combines include directory args, built-in option args (unless
        no_builtin_args is set), option link args when mode == 'link',
        and finally any user-supplied 'args'.
        """
        nobuiltins = kwargs.get('no_builtin_args', False)
        if not isinstance(nobuiltins, bool):
            raise InterpreterException('Type of no_builtin_args not a boolean.')
        args = []
        incdirs = extract_as_list(kwargs, 'include_directories')
        for i in incdirs:
            if not isinstance(i, IncludeDirsHolder):
                raise InterpreterException('Include directories argument must be an include_directories object.')
            for idir in i.held_object.get_incdirs():
                # Include dirs are stored relative to the source tree.
                idir = os.path.join(self.environment.get_source_dir(),
                                    i.held_object.get_curdir(), idir)
                args += self.compiler.get_include_args(idir, False)
        if not nobuiltins:
            for_machine = Interpreter.machine_from_native_kwarg(kwargs)
            opts = self.environment.coredata.compiler_options[for_machine][self.compiler.language]
            args += self.compiler.get_option_compile_args(opts)
            if mode == 'link':
                args += self.compiler.get_option_link_args(opts)
        args += mesonlib.stringlistify(kwargs.get('args', []))
        return args
    def determine_dependencies(self, kwargs, endl=':'):
        """Extract external dependencies from kwargs.

        Returns (dependency list or None, log message suffix for _dep_msg).
        """
        deps = kwargs.get('dependencies', None)
        if deps is not None:
            deps = listify(deps)
            final_deps = []
            for d in deps:
                # Unwrap holder objects; plain Dependency instances pass through.
                try:
                    d = d.held_object
                except Exception:
                    pass
                # Internal dependencies cannot be used in compiler checks.
                if isinstance(d, InternalDependency) or not isinstance(d, Dependency):
                    raise InterpreterException('Dependencies must be external dependencies')
                final_deps.append(d)
            deps = final_deps
        return deps, self._dep_msg(deps, endl)
    @permittedKwargs({
        'prefix',
        'args',
        'dependencies',
    })
    def alignment_method(self, args, kwargs):
        """Compute the alignment of a type, logging the result."""
        if len(args) != 1:
            raise InterpreterException('Alignment method takes exactly one positional argument.')
        check_stringlist(args)
        typename = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of alignment must be a string.')
        # NOTE(review): unlike most other checks this passes plain stringlistified
        # args rather than a determine_args partial, so no_builtin_args /
        # include_directories are not honored here — confirm intended.
        extra_args = mesonlib.stringlistify(kwargs.get('args', []))
        deps, msg = self.determine_dependencies(kwargs)
        result = self.compiler.alignment(typename, prefix, self.environment,
                                         extra_args=extra_args,
                                         dependencies=deps)
        mlog.log('Checking for alignment of', mlog.bold(typename, True), msg, result)
        return result
    @permittedKwargs({
        'name',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def run_method(self, args, kwargs):
        """Compile and run a code snippet, returning a TryRunResultHolder."""
        if len(args) != 1:
            raise InterpreterException('Run method takes exactly one positional argument.')
        code = args[0]
        if isinstance(code, mesonlib.File):
            code = mesonlib.File.from_absolute_file(
                code.rel_to_builddir(self.environment.source_dir))
        elif not isinstance(code, str):
            raise InvalidArguments('Argument must be string or file.')
        testname = kwargs.get('name', '')
        if not isinstance(testname, str):
            raise InterpreterException('Testname argument must be a string.')
        # Deferred so the compiler resolves args lazily when it needs them.
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs, endl=None)
        result = self.compiler.run(code, self.environment, extra_args=extra_args,
                                   dependencies=deps)
        # Only log when the user supplied a name for the check.
        if len(testname) > 0:
            if not result.compiled:
                h = mlog.red('DID NOT COMPILE')
            elif result.returncode == 0:
                h = mlog.green('YES')
            else:
                h = mlog.red('NO (%d)' % result.returncode)
            mlog.log('Checking if', mlog.bold(testname, True), msg, 'runs:', h)
        return TryRunResultHolder(result)
    @noPosargs
    @permittedKwargs({})
    def get_id_method(self, args, kwargs):
        """Return the compiler's id string."""
        return self.compiler.get_id()
    @noPosargs
    @permittedKwargs({})
    @FeatureNew('compiler.get_linker_id', '0.53.0')
    def get_linker_id_method(self, args, kwargs):
        """Return the linker's id string."""
        return self.compiler.get_linker_id()
    @noPosargs
    @permittedKwargs({})
    def symbols_have_underscore_prefix_method(self, args, kwargs):
        """Return whether C symbols get an underscore prefix on this platform."""
        return self.compiler.symbols_have_underscore_prefix(self.environment)
    @noPosargs
    @permittedKwargs({})
    def unittest_args_method(self, args, kwargs):
        """Return the compiler arguments that enable unit-test compilation.

        Only compilers exposing get_feature_args (e.g. D) support this.
        """
        if not hasattr(self.compiler, 'get_feature_args'):
            raise InterpreterException('This {} compiler has no feature arguments.'.format(self.compiler.get_display_language()))
        build_to_src = os.path.relpath(self.environment.get_source_dir(), self.environment.get_build_dir())
        return self.compiler.get_feature_args({'unittest': 'true'}, build_to_src)
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def has_member_method(self, args, kwargs):
        """Check whether a type has a given member, logging the result."""
        if len(args) != 2:
            raise InterpreterException('Has_member takes exactly two arguments.')
        check_stringlist(args)
        typename, membername = args
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_member must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        # Single-member check is implemented on top of has_members.
        had, cached = self.compiler.has_members(typename, [membername], prefix,
                                                self.environment,
                                                extra_args=extra_args,
                                                dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if had:
            hadtxt = mlog.green('YES')
        else:
            hadtxt = mlog.red('NO')
        mlog.log('Checking whether type', mlog.bold(typename, True),
                 'has member', mlog.bold(membername, True), msg, hadtxt, cached)
        return had
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def has_members_method(self, args, kwargs):
        """Check whether a type has all the given members, logging the result."""
        if len(args) < 2:
            raise InterpreterException('Has_members needs at least two arguments.')
        check_stringlist(args)
        typename, *membernames = args
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_members must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        had, cached = self.compiler.has_members(typename, membernames, prefix,
                                                self.environment,
                                                extra_args=extra_args,
                                                dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if had:
            hadtxt = mlog.green('YES')
        else:
            hadtxt = mlog.red('NO')
        members = mlog.bold(', '.join(['"{}"'.format(m) for m in membernames]))
        mlog.log('Checking whether type', mlog.bold(typename, True),
                 'has members', members, msg, hadtxt, cached)
        return had
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def has_function_method(self, args, kwargs):
        """Check whether a function is available, logging the result."""
        if len(args) != 1:
            raise InterpreterException('Has_function takes exactly one argument.')
        check_stringlist(args)
        funcname = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_function must be a string.')
        # Args resolved eagerly here (not via functools.partial as in the
        # other checks).
        extra_args = self.determine_args(kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        had, cached = self.compiler.has_function(funcname, prefix, self.environment,
                                                 extra_args=extra_args,
                                                 dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if had:
            hadtxt = mlog.green('YES')
        else:
            hadtxt = mlog.red('NO')
        mlog.log('Checking for function', mlog.bold(funcname, True), msg, hadtxt, cached)
        return had
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def has_type_method(self, args, kwargs):
        """Check whether a type exists, logging the result."""
        if len(args) != 1:
            raise InterpreterException('Has_type takes exactly one argument.')
        check_stringlist(args)
        typename = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of has_type must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        had, cached = self.compiler.has_type(typename, prefix, self.environment,
                                             extra_args=extra_args, dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        if had:
            hadtxt = mlog.green('YES')
        else:
            hadtxt = mlog.red('NO')
        mlog.log('Checking for type', mlog.bold(typename, True), msg, hadtxt, cached)
        return had
    @FeatureNew('compiler.compute_int', '0.40.0')
    @permittedKwargs({
        'prefix',
        'low',
        'high',
        'guess',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def compute_int_method(self, args, kwargs):
        """Evaluate an integer expression at configure time, logging the result.

        low/high bound the search range and guess seeds it; all are optional.
        """
        if len(args) != 1:
            raise InterpreterException('Compute_int takes exactly one argument.')
        check_stringlist(args)
        expression = args[0]
        prefix = kwargs.get('prefix', '')
        low = kwargs.get('low', None)
        high = kwargs.get('high', None)
        guess = kwargs.get('guess', None)
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of compute_int must be a string.')
        if low is not None and not isinstance(low, int):
            raise InterpreterException('Low argument of compute_int must be an int.')
        if high is not None and not isinstance(high, int):
            raise InterpreterException('High argument of compute_int must be an int.')
        if guess is not None and not isinstance(guess, int):
            raise InterpreterException('Guess argument of compute_int must be an int.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        res = self.compiler.compute_int(expression, low, high, guess, prefix,
                                        self.environment, extra_args=extra_args,
                                        dependencies=deps)
        mlog.log('Computing int of', mlog.bold(expression, True), msg, res)
        return res
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def sizeof_method(self, args, kwargs):
        """Compute the size of a type or expression, logging the result."""
        if len(args) != 1:
            raise InterpreterException('Sizeof takes exactly one argument.')
        check_stringlist(args)
        element = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of sizeof must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        esize = self.compiler.sizeof(element, prefix, self.environment,
                                     extra_args=extra_args, dependencies=deps)
        mlog.log('Checking for size of', mlog.bold(element, True), msg, esize)
        return esize
    @FeatureNew('compiler.get_define', '0.40.0')
    @permittedKwargs({
        'prefix',
        'no_builtin_args',
        'include_directories',
        'args',
        'dependencies',
    })
    def get_define_method(self, args, kwargs):
        """Fetch the value of a preprocessor define, logging the result."""
        if len(args) != 1:
            raise InterpreterException('get_define() takes exactly one argument.')
        check_stringlist(args)
        element = args[0]
        prefix = kwargs.get('prefix', '')
        if not isinstance(prefix, str):
            raise InterpreterException('Prefix argument of get_define() must be a string.')
        extra_args = functools.partial(self.determine_args, kwargs)
        deps, msg = self.determine_dependencies(kwargs)
        value, cached = self.compiler.get_define(element, prefix, self.environment,
                                                 extra_args=extra_args,
                                                 dependencies=deps)
        cached = mlog.blue('(cached)') if cached else ''
        mlog.log('Fetching value of define', mlog.bold(element, True), msg, value, cached)
        return value
@permittedKwargs({
'name',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def compiles_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('compiles method takes exactly one argument.')
code = args[0]
if isinstance(code, mesonlib.File):
code = mesonlib.File.from_absolute_file(
code.rel_to_builddir(self.environment.source_dir))
elif not isinstance(code, str):
raise InvalidArguments('Argument must be string or file.')
testname = kwargs.get('name', '')
if not isinstance(testname, str):
raise InterpreterException('Testname argument must be a string.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs, endl=None)
result, cached = self.compiler.compiles(code, self.environment,
extra_args=extra_args,
dependencies=deps)
if len(testname) > 0:
if result:
h = mlog.green('YES')
else:
h = mlog.red('NO')
cached = mlog.blue('(cached)') if cached else ''
mlog.log('Checking if', mlog.bold(testname, True), msg, 'compiles:', h, cached)
return result
@permittedKwargs({
'name',
'no_builtin_args',
'include_directories',
'args',
'dependencies',
})
def links_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('links method takes exactly one argument.')
code = args[0]
if isinstance(code, mesonlib.File):
code = mesonlib.File.from_absolute_file(
code.rel_to_builddir(self.environment.source_dir))
elif not isinstance(code, str):
raise InvalidArguments('Argument must be string or file.')
testname = kwargs.get('name', '')
if not isinstance(testname, str):
raise InterpreterException('Testname argument must be a string.')
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs, endl=None)
result, cached = self.compiler.links(code, self.environment,
extra_args=extra_args,
dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
if len(testname) > 0:
if result:
h = mlog.green('YES')
else:
h = mlog.red('NO')
mlog.log('Checking if', mlog.bold(testname, True), msg, 'links:', h, cached)
return result
@FeatureNew('compiler.check_header', '0.47.0')
@FeatureNewKwargs('compiler.check_header', '0.50.0', ['required'])
@permittedKwargs(header_permitted_kwargs)
def check_header_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('check_header method takes exactly one argument.')
check_stringlist(args)
hname = args[0]
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of has_header must be a string.')
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False)
if disabled:
mlog.log('Check usable header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled')
return False
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
haz, cached = self.compiler.check_header(hname, prefix, self.environment,
extra_args=extra_args,
dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
if required and not haz:
raise InterpreterException('{} header {!r} not usable'.format(self.compiler.get_display_language(), hname))
elif haz:
h = mlog.green('YES')
else:
h = mlog.red('NO')
mlog.log('Check usable header', mlog.bold(hname, True), msg, h, cached)
return haz
@FeatureNewKwargs('compiler.has_header', '0.50.0', ['required'])
@permittedKwargs(header_permitted_kwargs)
def has_header_method(self, args, kwargs):
if len(args) != 1:
raise InterpreterException('has_header method takes exactly one argument.')
check_stringlist(args)
hname = args[0]
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of has_header must be a string.')
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False)
if disabled:
mlog.log('Has header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled')
return False
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
haz, cached = self.compiler.has_header(hname, prefix, self.environment,
extra_args=extra_args, dependencies=deps)
cached = mlog.blue('(cached)') if cached else ''
if required and not haz:
raise InterpreterException('{} header {!r} not found'.format(self.compiler.get_display_language(), hname))
elif haz:
h = mlog.green('YES')
else:
h = mlog.red('NO')
mlog.log('Has header', mlog.bold(hname, True), msg, h, cached)
return haz
@FeatureNewKwargs('compiler.has_header_symbol', '0.50.0', ['required'])
@permittedKwargs(header_permitted_kwargs)
def has_header_symbol_method(self, args, kwargs):
if len(args) != 2:
raise InterpreterException('has_header_symbol method takes exactly two arguments.')
check_stringlist(args)
hname, symbol = args
prefix = kwargs.get('prefix', '')
if not isinstance(prefix, str):
raise InterpreterException('Prefix argument of has_header_symbol must be a string.')
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False)
if disabled:
mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), 'skipped: feature', mlog.bold(feature), 'disabled')
return False
extra_args = functools.partial(self.determine_args, kwargs)
deps, msg = self.determine_dependencies(kwargs)
haz, cached = self.compiler.has_header_symbol(hname, symbol, prefix, self.environment,
extra_args=extra_args,
dependencies=deps)
if required and not haz:
raise InterpreterException('{} symbol {} not found in header {}'.format(self.compiler.get_display_language(), symbol, hname))
elif haz:
h = mlog.green('YES')
else:
h = mlog.red('NO')
cached = mlog.blue('(cached)') if cached else ''
mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), msg, h, cached)
return haz
def notfound_library(self, libname):
lib = dependencies.ExternalLibrary(libname, None,
self.environment,
self.compiler.language,
silent=True)
return ExternalLibraryHolder(lib, self.subproject)
@FeatureNewKwargs('compiler.find_library', '0.51.0', ['static'])
@FeatureNewKwargs('compiler.find_library', '0.50.0', ['has_headers'])
@FeatureNewKwargs('compiler.find_library', '0.49.0', ['disabler'])
@disablerIfNotFound
@permittedKwargs(find_library_permitted_kwargs)
def find_library_method(self, args, kwargs):
# TODO add dependencies support?
if len(args) != 1:
raise InterpreterException('find_library method takes one argument.')
libname = args[0]
if not isinstance(libname, str):
raise InterpreterException('Library name not a string.')
disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
if disabled:
mlog.log('Library', mlog.bold(libname), 'skipped: feature', mlog.bold(feature), 'disabled')
return self.notfound_library(libname)
has_header_kwargs = {k[7:]: v for k, v in kwargs.items() if k.startswith('header_')}
has_header_kwargs['required'] = required
headers = mesonlib.stringlistify(kwargs.get('has_headers', []))
for h in headers:
if not self.has_header_method([h], has_header_kwargs):
return self.notfound_library(libname)
search_dirs = extract_search_dirs(kwargs)
libtype = mesonlib.LibType.PREFER_SHARED
if 'static' in kwargs:
if not isinstance(kwargs['static'], bool):
raise InterpreterException('static must be a boolean')
libtype = mesonlib.LibType.STATIC if kwargs['static'] else mesonlib.LibType.SHARED
linkargs = self.compiler.find_library(libname, self.environment, search_dirs, libtype)
if required and not linkargs:
raise InterpreterException(
'{} library {!r} not found'.format(self.compiler.get_display_language(), libname))
lib = dependencies.ExternalLibrary(libname, linkargs, self.environment,
self.compiler.language)
return ExternalLibraryHolder(lib, self.subproject)
@permittedKwargs({})
def has_argument_method(self, args: T.Sequence[str], kwargs) -> bool:
args = mesonlib.stringlistify(args)
if len(args) != 1:
raise InterpreterException('has_argument takes exactly one argument.')
return self.has_multi_arguments_method(args, kwargs)
@permittedKwargs({})
def has_multi_arguments_method(self, args: T.Sequence[str], kwargs: dict):
args = mesonlib.stringlistify(args)
result, cached = self.compiler.has_multi_arguments(args, self.environment)
if result:
h = mlog.green('YES')
else:
h = mlog.red('NO')
cached = mlog.blue('(cached)') if cached else ''
mlog.log(
'Compiler for {} supports arguments {}:'.format(
self.compiler.get_display_language(), ' '.join(args)),
h, cached)
return result
@FeatureNew('compiler.get_supported_arguments', '0.43.0')
@permittedKwargs({})
def get_supported_arguments_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
supported_args = []
for arg in args:
if self.has_argument_method(arg, kwargs):
supported_args.append(arg)
return supported_args
@permittedKwargs({})
def first_supported_argument_method(self, args: T.Sequence[str], kwargs: dict) -> T.List[str]:
for arg in mesonlib.stringlistify(args):
if self.has_argument_method(arg, kwargs):
mlog.log('First supported argument:', mlog.bold(arg))
return [arg]
mlog.log('First supported argument:', mlog.red('None'))
return []
@FeatureNew('compiler.has_link_argument', '0.46.0')
@permittedKwargs({})
def has_link_argument_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
if len(args) != 1:
raise InterpreterException('has_link_argument takes exactly one argument.')
return self.has_multi_link_arguments_method(args, kwargs)
@FeatureNew('compiler.has_multi_link_argument', '0.46.0')
@permittedKwargs({})
def has_multi_link_arguments_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
result, cached = self.compiler.has_multi_link_arguments(args, self.environment)
cached = mlog.blue('(cached)') if cached else ''
if result:
h = mlog.green('YES')
else:
h = mlog.red('NO')
mlog.log(
'Compiler for {} supports link arguments {}:'.format(
self.compiler.get_display_language(), ' '.join(args)),
h, cached)
return result
@FeatureNew('compiler.get_supported_link_arguments_method', '0.46.0')
@permittedKwargs({})
def get_supported_link_arguments_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
supported_args = []
for arg in args:
if self.has_link_argument_method(arg, kwargs):
supported_args.append(arg)
return supported_args
@FeatureNew('compiler.first_supported_link_argument_method', '0.46.0')
@permittedKwargs({})
def first_supported_link_argument_method(self, args, kwargs):
for i in mesonlib.stringlistify(args):
if self.has_link_argument_method(i, kwargs):
mlog.log('First supported link argument:', mlog.bold(i))
return [i]
mlog.log('First supported link argument:', mlog.red('None'))
return []
@FeatureNew('compiler.has_function_attribute', '0.48.0')
@permittedKwargs({})
def has_func_attribute_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
if len(args) != 1:
raise InterpreterException('has_func_attribute takes exactly one argument.')
result, cached = self.compiler.has_func_attribute(args[0], self.environment)
cached = mlog.blue('(cached)') if cached else ''
h = mlog.green('YES') if result else mlog.red('NO')
mlog.log('Compiler for {} supports function attribute {}:'.format(self.compiler.get_display_language(), args[0]), h, cached)
return result
@FeatureNew('compiler.get_supported_function_attributes', '0.48.0')
@permittedKwargs({})
def get_supported_function_attributes_method(self, args, kwargs):
args = mesonlib.stringlistify(args)
return [a for a in args if self.has_func_attribute_method(a, kwargs)]
@FeatureNew('compiler.get_argument_syntax_method', '0.49.0')
@noPosargs
@noKwargs
def get_argument_syntax_method(self, args, kwargs):
return self.compiler.get_argument_syntax()
# Read-only snapshot of interpreter state handed to extension-module methods
# on every call (constructed in ModuleHolder.method_call).
ModuleState = collections.namedtuple('ModuleState', [
    'source_root', 'build_to_src', 'subproject', 'subdir', 'current_lineno', 'environment',
    'project_name', 'project_version', 'backend', 'targets',
    'data', 'headers', 'man', 'global_args', 'project_args', 'build_machine',
    'host_machine', 'target_machine', 'current_node'])
class ModuleHolder(InterpreterObject, ObjectHolder):
    """Wraps an imported extension module so the interpreter can dispatch
    Meson-level method calls to it."""
    def __init__(self, modname, module, interpreter):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, module)
        self.modname = modname
        self.interpreter = interpreter
    def method_call(self, method_name, args, kwargs):
        """Invoke *method_name* on the held module.

        Raises InvalidArguments for unknown or private (underscore-prefixed)
        methods.  A fresh ModuleState snapshot is built for every call.
        Non-snippet methods must not mutate the interpreter's target list;
        this is checked heuristically via a target-count comparison below.
        """
        try:
            fn = getattr(self.held_object, method_name)
        except AttributeError:
            raise InvalidArguments('Module %s does not have method %s.' % (self.modname, method_name))
        if method_name.startswith('_'):
            raise InvalidArguments('Function {!r} in module {!r} is private.'.format(method_name, self.modname))
        # Module methods can opt out of argument flattening by carrying a
        # 'no-args-flattening' attribute.
        if not getattr(fn, 'no-args-flattening', False):
            args = flatten(args)
        # Record the target count so illegal mutation can be detected after
        # the call. This is not 100% reliable but we can't use hash()
        num_targets = len(self.interpreter.build.targets)
        state = ModuleState(
            source_root = self.interpreter.environment.get_source_dir(),
            build_to_src=mesonlib.relpath(self.interpreter.environment.get_source_dir(),
                                          self.interpreter.environment.get_build_dir()),
            subproject=self.interpreter.subproject,
            subdir=self.interpreter.subdir,
            current_lineno=self.interpreter.current_lineno,
            environment=self.interpreter.environment,
            project_name=self.interpreter.build.project_name,
            project_version=self.interpreter.build.dep_manifest[self.interpreter.active_projectname],
            backend=self.interpreter.backend,
            targets=self.interpreter.build.targets,
            data=self.interpreter.build.data,
            headers=self.interpreter.build.get_headers(),
            man=self.interpreter.build.get_man(),
            global_args = self.interpreter.build.global_args.host,
            project_args = self.interpreter.build.projects_args.host.get(self.interpreter.subproject, {}),
            build_machine=self.interpreter.builtin['build_machine'].held_object,
            host_machine=self.interpreter.builtin['host_machine'].held_object,
            target_machine=self.interpreter.builtin['target_machine'].held_object,
            # NOTE(review): current_node is presumably set on this holder by the
            # interpreter base before method_call is invoked -- confirm.
            current_node=self.current_node
        )
        self.held_object.interpreter = self.interpreter
        if self.held_object.is_snippet(method_name):
            # Snippet methods get the interpreter itself and their result is
            # wrapped into holder objects.
            value = fn(self.interpreter, state, args, kwargs)
            return self.interpreter.holderify(value)
        else:
            value = fn(state, args, kwargs)
            if num_targets != len(self.interpreter.build.targets):
                raise InterpreterException('Extension module altered internal state illegally.')
            return self.interpreter.module_method_callback(value)
class Summary:
    """Accumulates key/value entries grouped into named sections and
    pretty-prints them (via dump()) at the end of configuration."""
    def __init__(self, project_name, project_version):
        self.project_name = project_name
        self.project_version = project_version
        self.sections = collections.defaultdict(dict)
        # Longest key seen so far, used for column alignment in dump().
        self.max_key_len = 0
    def add_section(self, section, values, kwargs):
        """Add *values* (a mapping) under *section*, validating kwargs.

        Supported kwargs: 'bool_yn' renders booleans as YES/NO, 'list_sep'
        overrides the separator used when printing list values.
        """
        bool_yn = kwargs.get('bool_yn', False)
        if not isinstance(bool_yn, bool):
            raise InterpreterException('bool_yn keyword argument must be boolean')
        list_sep = kwargs.get('list_sep')
        if list_sep is not None and not isinstance(list_sep, str):
            raise InterpreterException('list_sep keyword argument must be string')
        for key, value in values.items():
            if key in self.sections[section]:
                raise InterpreterException('Summary section {!r} already have key {!r}'.format(section, key))
            formatted = []
            for item in listify(value):
                # bool is a subclass of int, so booleans pass this check too.
                if not isinstance(item, (str, int)):
                    m = 'Summary value in section {!r}, key {!r}, must be string, integer or boolean'
                    raise InterpreterException(m.format(section, key))
                if bool_yn and isinstance(item, bool):
                    formatted.append(mlog.green('YES') if item else mlog.red('NO'))
                else:
                    formatted.append(item)
            self.sections[section][key] = (formatted, list_sep)
            self.max_key_len = max(self.max_key_len, len(key))
    def dump(self):
        """Print every collected section through mlog."""
        mlog.log(self.project_name, mlog.normal_cyan(self.project_version))
        for section, entries in self.sections.items():
            mlog.log('')
            if section:
                mlog.log(' ', mlog.bold(section))
            for key, (formatted, list_sep) in entries.items():
                pad = self.max_key_len - len(key) + 3
                mlog.log(' ' * pad, key + ':', end=' ' if formatted else '')
                if list_sep is None:
                    # Default: one value per line, aligned under the first.
                    list_sep = '\n' + ' ' * (self.max_key_len + 6)
                mlog.log(*formatted, sep=list_sep)
        mlog.log('')
class MesonMain(InterpreterObject):
    """Implementation of the 'meson' builtin object available in meson.build files."""
    def __init__(self, build, interpreter):
        InterpreterObject.__init__(self)
        self.build = build
        self.interpreter = interpreter
        # Cache of ExternalProgram lookups done by _find_source_script,
        # keyed by (program name, search directory).
        self._found_source_scripts = {}
        self.methods.update({'get_compiler': self.get_compiler_method,
                             'is_cross_build': self.is_cross_build_method,
                             'has_exe_wrapper': self.has_exe_wrapper_method,
                             'is_unity': self.is_unity_method,
                             'is_subproject': self.is_subproject_method,
                             'current_source_dir': self.current_source_dir_method,
                             'current_build_dir': self.current_build_dir_method,
                             'source_root': self.source_root_method,
                             'build_root': self.build_root_method,
                             'add_install_script': self.add_install_script_method,
                             'add_postconf_script': self.add_postconf_script_method,
                             'add_dist_script': self.add_dist_script_method,
                             'install_dependency_manifest': self.install_dependency_manifest_method,
                             'override_dependency': self.override_dependency_method,
                             'override_find_program': self.override_find_program_method,
                             'project_version': self.project_version_method,
                             'project_license': self.project_license_method,
                             'version': self.version_method,
                             'project_name': self.project_name_method,
                             'get_cross_property': self.get_cross_property_method,
                             'get_external_property': self.get_external_property_method,
                             'backend': self.backend_method,
                             })
    def _find_source_script(self, prog: T.Union[str, ExecutableHolder], args):
        # Build a RunScript from an executable/program holder, or look the
        # program up by name in the current source directory (with caching).
        if isinstance(prog, ExecutableHolder):
            prog_path = self.interpreter.backend.get_target_filename(prog.held_object)
            return build.RunScript([prog_path], args)
        elif isinstance(prog, ExternalProgramHolder):
            return build.RunScript(prog.get_command(), args)
        search_dir = os.path.join(self.interpreter.environment.source_dir,
                                  self.interpreter.subdir)
        key = (prog, search_dir)
        if key in self._found_source_scripts:
            found = self._found_source_scripts[key]
        else:
            found = dependencies.ExternalProgram(prog, search_dir=search_dir)
            if found.found():
                self._found_source_scripts[key] = found
            else:
                m = 'Script or command {!r} not found or not executable'
                raise InterpreterException(m.format(prog))
        return build.RunScript(found.get_command(), args)
    def _process_script_args(
            self, name: str, args: T.List[T.Union[
                str, mesonlib.File, CustomTargetHolder,
                CustomTargetIndexHolder, ConfigureFileHolder,
                ExternalProgramHolder, ExecutableHolder,
            ]], allow_built: bool = False) -> T.List[str]:
        # Convert heterogeneous script arguments to plain strings.  'new'
        # flips when a non-string argument type is seen so the FeatureNew
        # notice for the 0.55.0 behaviour is emitted exactly once.
        script_args = []
        new = False
        for a in args:
            a = unholder(a)
            if isinstance(a, str):
                script_args.append(a)
            elif isinstance(a, mesonlib.File):
                new = True
                script_args.append(a.rel_to_builddir(self.interpreter.environment.source_dir))
            elif isinstance(a, (build.BuildTarget, build.CustomTarget, build.CustomTargetIndex)):
                if not allow_built:
                    raise InterpreterException('Arguments to {} cannot be built'.format(name))
                new = True
                script_args.extend([os.path.join(a.get_subdir(), o) for o in a.get_outputs()])
                # This feels really hacky, but I'm not sure how to fix
                # this without completely rewriting install script handling.
                # This is complicated by the fact that the install target
                # depends on all.
                if isinstance(a, build.CustomTargetIndex):
                    a.target.build_by_default = True
                else:
                    a.build_by_default = True
            elif isinstance(a, build.ConfigureFile):
                new = True
                script_args.append(os.path.join(a.subdir, a.targetname))
            elif isinstance(a, dependencies.ExternalProgram):
                script_args.extend(a.command)
                new = True
            else:
                raise InterpreterException(
                    'Arguments to {} must be strings, Files, CustomTargets, '
                    'Indexes of CustomTargets, or ConfigureFiles'.format(name))
        if new:
            FeatureNew('Calling "{}" with File, CustomTaget, Index of CustomTarget, ConfigureFile, Executable, or ExternalProgram'.format(name), '0.55.0').use(
                self.interpreter.subproject)
        return script_args
    @permittedKwargs(set())
    def add_install_script_method(self, args: 'T.Tuple[T.Union[str, ExecutableHolder], T.Union[str, mesonlib.File, CustomTargetHolder, CustomTargetIndexHolder, ConfigureFileHolder], ...]', kwargs):
        """Implement meson.add_install_script(): register a script run at install time."""
        if len(args) < 1:
            raise InterpreterException('add_install_script takes one or more arguments')
        script_args = self._process_script_args('add_install_script', args[1:], allow_built=True)
        script = self._find_source_script(args[0], script_args)
        self.build.install_scripts.append(script)
    @permittedKwargs(set())
    def add_postconf_script_method(self, args, kwargs):
        """Implement meson.add_postconf_script(): register a script run after configuration."""
        if len(args) < 1:
            raise InterpreterException('add_postconf_script takes one or more arguments')
        script_args = self._process_script_args('add_postconf_script', args[1:], allow_built=True)
        script = self._find_source_script(args[0], script_args)
        self.build.postconf_scripts.append(script)
    @permittedKwargs(set())
    def add_dist_script_method(self, args, kwargs):
        """Implement meson.add_dist_script(): register a script run during 'meson dist'."""
        if len(args) < 1:
            raise InterpreterException('add_dist_script takes one or more arguments')
        if len(args) > 1:
            FeatureNew('Calling "add_dist_script" with multiple arguments', '0.49.0').use(self.interpreter.subproject)
        if self.interpreter.subproject != '':
            raise InterpreterException('add_dist_script may not be used in a subproject.')
        script_args = self._process_script_args('add_dist_script', args[1:], allow_built=True)
        script = self._find_source_script(args[0], script_args)
        self.build.dist_scripts.append(script)
    @noPosargs
    @permittedKwargs({})
    def current_source_dir_method(self, args, kwargs):
        """Implement meson.current_source_dir()."""
        src = self.interpreter.environment.source_dir
        sub = self.interpreter.subdir
        if sub == '':
            return src
        return os.path.join(src, sub)
    @noPosargs
    @permittedKwargs({})
    def current_build_dir_method(self, args, kwargs):
        """Implement meson.current_build_dir()."""
        src = self.interpreter.environment.build_dir
        sub = self.interpreter.subdir
        if sub == '':
            return src
        return os.path.join(src, sub)
    @noPosargs
    @permittedKwargs({})
    def backend_method(self, args, kwargs):
        """Implement meson.backend(): name of the configured backend."""
        return self.interpreter.backend.name
    @noPosargs
    @permittedKwargs({})
    def source_root_method(self, args, kwargs):
        """Implement meson.source_root()."""
        return self.interpreter.environment.source_dir
    @noPosargs
    @permittedKwargs({})
    def build_root_method(self, args, kwargs):
        """Implement meson.build_root()."""
        return self.interpreter.environment.build_dir
    @noPosargs
    @permittedKwargs({})
    def has_exe_wrapper_method(self, args, kwargs):
        """Implement meson.has_exe_wrapper()."""
        if self.is_cross_build_method(None, None) and \
           self.build.environment.need_exe_wrapper():
            if self.build.environment.exe_wrapper is None:
                return False
        # We return True when exe_wrap is defined, when it's not needed, and
        # when we're compiling natively. The last two are semantically confusing.
        # Need to revisit this.
        return True
    @noPosargs
    @permittedKwargs({})
    def is_cross_build_method(self, args, kwargs):
        """Implement meson.is_cross_build()."""
        return self.build.environment.is_cross_build()
    @permittedKwargs({'native'})
    def get_compiler_method(self, args, kwargs):
        """Implement meson.get_compiler(): look up the compiler object for a language."""
        if len(args) != 1:
            raise InterpreterException('get_compiler_method must have one and only one argument.')
        cname = args[0]
        for_machine = Interpreter.machine_from_native_kwarg(kwargs)
        clist = self.interpreter.coredata.compilers[for_machine]
        if cname in clist:
            return CompilerHolder(clist[cname], self.build.environment, self.interpreter.subproject)
        raise InterpreterException('Tried to access compiler for unspecified language "%s".' % cname)
    @noPosargs
    @permittedKwargs({})
    def is_unity_method(self, args, kwargs):
        """Implement meson.is_unity(): whether the 'unity' builtin option applies here."""
        optval = self.interpreter.environment.coredata.get_builtin_option('unity')
        if optval == 'on' or (optval == 'subprojects' and self.interpreter.is_subproject()):
            return True
        return False
    @noPosargs
    @permittedKwargs({})
    def is_subproject_method(self, args, kwargs):
        """Implement meson.is_subproject()."""
        return self.interpreter.is_subproject()
    @permittedKwargs({})
    def install_dependency_manifest_method(self, args, kwargs):
        """Implement meson.install_dependency_manifest(): record the manifest file name."""
        if len(args) != 1:
            raise InterpreterException('Must specify manifest install file name')
        if not isinstance(args[0], str):
            raise InterpreterException('Argument must be a string.')
        self.build.dep_manifest_name = args[0]
    @FeatureNew('meson.override_find_program', '0.46.0')
    @permittedKwargs({})
    def override_find_program_method(self, args, kwargs):
        """Implement meson.override_find_program(): substitute a program for find_program() lookups."""
        if len(args) != 2:
            raise InterpreterException('Override needs two arguments')
        name, exe = args
        if not isinstance(name, str):
            raise InterpreterException('First argument must be a string')
        if hasattr(exe, 'held_object'):
            exe = exe.held_object
        if isinstance(exe, mesonlib.File):
            abspath = exe.absolute_path(self.interpreter.environment.source_dir,
                                        self.interpreter.environment.build_dir)
            if not os.path.exists(abspath):
                raise InterpreterException('Tried to override %s with a file that does not exist.' % name)
            exe = OverrideProgram(abspath)
        if not isinstance(exe, (dependencies.ExternalProgram, build.Executable)):
            raise InterpreterException('Second argument must be an external program or executable.')
        self.interpreter.add_find_program_override(name, exe)
    @FeatureNew('meson.override_dependency', '0.54.0')
    @permittedKwargs({'native'})
    def override_dependency_method(self, args, kwargs):
        """Implement meson.override_dependency(): substitute a dependency object for dependency() lookups."""
        if len(args) != 2:
            raise InterpreterException('Override needs two arguments')
        name = args[0]
        dep = args[1]
        if not isinstance(name, str) or not name:
            raise InterpreterException('First argument must be a string and cannot be empty')
        if hasattr(dep, 'held_object'):
            dep = dep.held_object
        if not isinstance(dep, dependencies.Dependency):
            raise InterpreterException('Second argument must be a dependency object')
        identifier = dependencies.get_dep_identifier(name, kwargs)
        for_machine = self.interpreter.machine_from_native_kwarg(kwargs)
        # Refuse to override a dependency that has already been resolved or
        # overridden for this machine.
        override = self.build.dependency_overrides[for_machine].get(identifier)
        if override:
            m = 'Tried to override dependency {!r} which has already been resolved or overridden at {}'
            location = mlog.get_error_location_string(override.node.filename, override.node.lineno)
            raise InterpreterException(m.format(name, location))
        self.build.dependency_overrides[for_machine][identifier] = \
            build.DependencyOverride(dep, self.interpreter.current_node)
    @noPosargs
    @permittedKwargs({})
    def project_version_method(self, args, kwargs):
        """Implement meson.project_version()."""
        return self.build.dep_manifest[self.interpreter.active_projectname]['version']
    @FeatureNew('meson.project_license()', '0.45.0')
    @noPosargs
    @permittedKwargs({})
    def project_license_method(self, args, kwargs):
        """Implement meson.project_license()."""
        return self.build.dep_manifest[self.interpreter.active_projectname]['license']
    @noPosargs
    @permittedKwargs({})
    def version_method(self, args, kwargs):
        """Implement meson.version(): the running Meson version."""
        return coredata.version
    @noPosargs
    @permittedKwargs({})
    def project_name_method(self, args, kwargs):
        """Implement meson.project_name()."""
        return self.interpreter.active_projectname
    @noArgsFlattening
    @permittedKwargs({})
    def get_cross_property_method(self, args, kwargs) -> str:
        """Implement meson.get_cross_property(): look up a host-machine property.

        The optional second positional argument is returned as a fallback
        when the property is not defined.
        """
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Must have one or two arguments.')
        propname = args[0]
        if not isinstance(propname, str):
            raise InterpreterException('Property name must be string.')
        try:
            props = self.interpreter.environment.properties.host
            return props[propname]
        except Exception:
            if len(args) == 2:
                return args[1]
            raise InterpreterException('Unknown cross property: %s.' % propname)
    @noArgsFlattening
    @permittedKwargs({'native'})
    @FeatureNew('meson.get_external_property', '0.54.0')
    def get_external_property_method(self, args: T.Sequence[str], kwargs: dict) -> str:
        """Implement meson.get_external_property(): machine property for build or host.

        The 'native' kwarg selects the build machine (True) or host machine
        (False); when absent, the host machine is used for cross builds and
        the build machine otherwise.
        """
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Must have one or two positional arguments.')
        propname = args[0]
        if not isinstance(propname, str):
            raise InterpreterException('Property name must be string.')
        def _get_native() -> str:
            # Same fallback semantics as get_cross_property_method, but for
            # the build machine's properties.
            try:
                props = self.interpreter.environment.properties.build
                return props[propname]
            except Exception:
                if len(args) == 2:
                    return args[1]
                raise InterpreterException('Unknown native property: %s.' % propname)
        if 'native' in kwargs:
            if kwargs['native']:
                return _get_native()
            else:
                return self.get_cross_property_method(args, {})
        else: # native: not specified
            if self.build.environment.is_cross_build():
                return self.get_cross_property_method(args, kwargs)
            else:
                return _get_native()
# Keyword arguments accepted by the library()-family functions: the union of
# the shared- and static-library kwarg sets defined in build.py.
known_library_kwargs = (
    build.known_shlib_kwargs |
    build.known_stlib_kwargs
)
# build_target() can construct any target type, so it accepts the union of
# all per-type kwarg sets plus its own 'target_type' selector.
known_build_target_kwargs = (
    known_library_kwargs |
    build.known_exe_kwargs |
    build.known_jar_kwargs |
    {'target_type'}
)
# Keyword arguments shared by test() and benchmark().
_base_test_args = {'args', 'depends', 'env', 'should_fail', 'timeout', 'workdir', 'suite', 'priority', 'protocol'}
# Maps each interpreter function name to the set of keyword arguments it
# accepts; used to validate calls from meson.build files.
permitted_kwargs = {'add_global_arguments': {'language', 'native'},
                    'add_global_link_arguments': {'language', 'native'},
                    'add_languages': {'required', 'native'},
                    'add_project_link_arguments': {'language', 'native'},
                    'add_project_arguments': {'language', 'native'},
                    'add_test_setup': {'exe_wrapper', 'gdb', 'timeout_multiplier', 'env', 'is_default'},
                    'benchmark': _base_test_args,
                    'build_target': known_build_target_kwargs,
                    'configure_file': {'input',
                                       'output',
                                       'configuration',
                                       'command',
                                       'copy',
                                       'depfile',
                                       'install_dir',
                                       'install_mode',
                                       'capture',
                                       'install',
                                       'format',
                                       'output_format',
                                       'encoding'},
                    'custom_target': {'input',
                                      'output',
                                      'command',
                                      'install',
                                      'install_dir',
                                      'install_mode',
                                      'build_always',
                                      'capture',
                                      'depends',
                                      'depend_files',
                                      'depfile',
                                      'build_by_default',
                                      'build_always_stale',
                                      'console'},
                    'dependency': {'default_options',
                                   'embed',
                                   'fallback',
                                   'language',
                                   'main',
                                   'method',
                                   'modules',
                                   'components',
                                   'cmake_module_path',
                                   'optional_modules',
                                   'native',
                                   'not_found_message',
                                   'required',
                                   'static',
                                   'version',
                                   'private_headers',
                                   'cmake_args',
                                   'include_type',
                                   },
                    'declare_dependency': {'include_directories',
                                           'link_with',
                                           'sources',
                                           'dependencies',
                                           'compile_args',
                                           'link_args',
                                           'link_whole',
                                           'version',
                                           'variables',
                                           },
                    'executable': build.known_exe_kwargs,
                    'find_program': {'required', 'native', 'version', 'dirs'},
                    'generator': {'arguments',
                                  'output',
                                  'depends',
                                  'depfile',
                                  'capture',
                                  'preserve_path_from'},
                    'include_directories': {'is_system'},
                    'install_data': {'install_dir', 'install_mode', 'rename', 'sources'},
                    'install_headers': {'install_dir', 'install_mode', 'subdir'},
                    'install_man': {'install_dir', 'install_mode'},
                    'install_subdir': {'exclude_files', 'exclude_directories', 'install_dir', 'install_mode', 'strip_directory'},
                    'jar': build.known_jar_kwargs,
                    'project': {'version', 'meson_version', 'default_options', 'license', 'subproject_dir'},
                    'run_command': {'check', 'capture', 'env'},
                    'run_target': {'command', 'depends'},
                    'shared_library': build.known_shlib_kwargs,
                    'shared_module': build.known_shmod_kwargs,
                    'static_library': build.known_stlib_kwargs,
                    'both_libraries': known_library_kwargs,
                    'library': known_library_kwargs,
                    'subdir': {'if_found'},
                    'subproject': {'version', 'default_options', 'required'},
                    'test': set.union(_base_test_args, {'is_parallel'}),
                    'vcs_tag': {'input', 'output', 'fallback', 'command', 'replace_string'},
                    }
class Interpreter(InterpreterBase):
    def __init__(self, build, backend=None, subproject='', subdir='', subproject_dir='subprojects',
                 modules = None, default_project_options=None, mock=False, ast=None):
        """Set up interpreter state and, unless *mock* is True, parse the project.

        :param build: the Build object being populated.
        :param backend: the backend object, if one has already been chosen.
        :param subproject: name of the subproject being configured ('' for the
            top-level project).
        :param subdir: subdirectory (relative to the source root) holding the
            build file to interpret.
        :param subproject_dir: directory where subprojects live.
        :param modules: pre-populated module mapping, or None for a fresh dict.
        :param default_project_options: option overrides passed from the
            outside; only used in subprojects (see below).
        :param mock: if True, skip loading and parsing any build file.
        :param ast: pre-parsed AST to use instead of loading from disk.
        """
        super().__init__(build.environment.get_source_dir(), subdir, subproject)
        self.an_unpicklable_object = mesonlib.an_unpicklable_object
        self.build = build
        self.environment = build.environment
        self.coredata = self.environment.get_coredata()
        self.backend = backend
        self.summary = {}
        if modules is None:
            self.modules = {}
        else:
            self.modules = modules
        # Subproject directory is usually the name of the subproject, but can
        # be different for dependencies provided by wrap files.
        self.subproject_directory_name = subdir.split(os.path.sep)[-1]
        self.subproject_dir = subproject_dir
        self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt')
        # With mock=True nothing is loaded; an explicit ast bypasses reading
        # the build file from disk but is still sanity-checked.
        if not mock and ast is None:
            self.load_root_meson_file()
            self.sanity_check_ast()
        elif ast is not None:
            self.ast = ast
            self.sanity_check_ast()
        self.builtin.update({'meson': MesonMain(build, self)})
        self.generators = []
        self.visited_subdirs = {}
        self.project_args_frozen = False
        self.global_args_frozen = False # implies self.project_args_frozen
        self.subprojects = {}
        self.subproject_stack = []
        self.configure_file_outputs = {}
        # Passed from the outside, only used in subprojects.
        if default_project_options:
            self.default_project_options = default_project_options.copy()
        else:
            self.default_project_options = {}
        self.project_default_options = {}
        self.build_func_dict()
        # build_def_files needs to be defined before parse_project is called
        self.build_def_files = [os.path.join(self.subdir, environment.build_filename)]
        if not mock:
            self.parse_project()
        self._redetect_machines()
    def _redetect_machines(self):
        """Refresh build/host/target machine info after compilers change.

        Also re-creates the *_machine builtin objects exposed to build files
        so they reflect the newly detected information.
        """
        # Re-initialize machine descriptions. We can do a better job now because we
        # have the compilers needed to gain more knowledge, so wipe out old
        # inference and start over.
        machines = self.build.environment.machines.miss_defaulting()
        machines.build = environment.detect_machine_info(self.coredata.compilers.build)
        self.build.environment.machines = machines.default_missing()
        assert self.build.environment.machines.build.cpu is not None
        assert self.build.environment.machines.host.cpu is not None
        assert self.build.environment.machines.target.cpu is not None
        self.builtin['build_machine'] = \
            MachineHolder(self.build.environment.machines.build)
        self.builtin['host_machine'] = \
            MachineHolder(self.build.environment.machines.host)
        self.builtin['target_machine'] = \
            MachineHolder(self.build.environment.machines.target)
    def get_non_matching_default_options(self):
        """Yield (name, default_value, current_option) for every project
        default option whose validated default differs from the value
        currently stored in coredata."""
        env = self.environment
        for def_opt_name, def_opt_value in self.project_default_options.items():
            for opts in env.coredata.get_all_options():
                cur_opt_value = opts.get(def_opt_name)
                if cur_opt_value is not None:
                    # Run the default through the option's validator so the
                    # comparison is against a normalized value.
                    def_opt_value = env.coredata.validate_option_value(def_opt_name, def_opt_value)
                    if def_opt_value != cur_opt_value.value:
                        yield (def_opt_name, def_opt_value, cur_opt_value)
    def build_func_dict(self):
        """Populate self.funcs with every function callable from meson.build.

        Keys are the names exactly as they appear in build files; values are
        the bound implementation methods on this interpreter.
        """
        self.funcs.update({'add_global_arguments': self.func_add_global_arguments,
                           'add_project_arguments': self.func_add_project_arguments,
                           'add_global_link_arguments': self.func_add_global_link_arguments,
                           'add_project_link_arguments': self.func_add_project_link_arguments,
                           'add_test_setup': self.func_add_test_setup,
                           'add_languages': self.func_add_languages,
                           'alias_target': self.func_alias_target,
                           'assert': self.func_assert,
                           'benchmark': self.func_benchmark,
                           'build_target': self.func_build_target,
                           'configuration_data': self.func_configuration_data,
                           'configure_file': self.func_configure_file,
                           'custom_target': self.func_custom_target,
                           'declare_dependency': self.func_declare_dependency,
                           'dependency': self.func_dependency,
                           'disabler': self.func_disabler,
                           'environment': self.func_environment,
                           'error': self.func_error,
                           'executable': self.func_executable,
                           'generator': self.func_generator,
                           'gettext': self.func_gettext,
                           'get_option': self.func_get_option,
                           'get_variable': self.func_get_variable,
                           'files': self.func_files,
                           'find_library': self.func_find_library,
                           'find_program': self.func_find_program,
                           'include_directories': self.func_include_directories,
                           'import': self.func_import,
                           'install_data': self.func_install_data,
                           'install_headers': self.func_install_headers,
                           'install_man': self.func_install_man,
                           'install_subdir': self.func_install_subdir,
                           'is_disabler': self.func_is_disabler,
                           'is_variable': self.func_is_variable,
                           'jar': self.func_jar,
                           'join_paths': self.func_join_paths,
                           'library': self.func_library,
                           'message': self.func_message,
                           'warning': self.func_warning,
                           'option': self.func_option,
                           'project': self.func_project,
                           'run_target': self.func_run_target,
                           'run_command': self.func_run_command,
                           'set_variable': self.func_set_variable,
                           'subdir': self.func_subdir,
                           'subdir_done': self.func_subdir_done,
                           'subproject': self.func_subproject,
                           'summary': self.func_summary,
                           'shared_library': self.func_shared_lib,
                           'shared_module': self.func_shared_module,
                           'static_library': self.func_static_lib,
                           'both_libraries': self.func_both_lib,
                           'test': self.func_test,
                           'vcs_tag': self.func_vcs_tag
                           })
        # exception() is an internal crash-testing hook, only exposed when
        # running under the Meson unit test suite.
        if 'MESON_UNIT_TEST' in os.environ:
            self.funcs.update({'exception': self.func_exception})
    def holderify(self, item):
        """Wrap a raw value returned by a module into its interpreter holder.

        Lists and dicts are wrapped recursively; plain scalars and None pass
        through unchanged; build/dependency objects get their dedicated
        holder class. Run targets and run scripts must not be returned by
        modules and raise. Unknown types raise InterpreterException.
        """
        if isinstance(item, list):
            return [self.holderify(x) for x in item]
        if isinstance(item, dict):
            return {k: self.holderify(v) for k, v in item.items()}
        if isinstance(item, build.CustomTarget):
            return CustomTargetHolder(item, self)
        elif isinstance(item, (int, str, bool, Disabler)) or item is None:
            return item
        elif isinstance(item, build.Executable):
            return ExecutableHolder(item, self)
        elif isinstance(item, build.GeneratedList):
            return GeneratedListHolder(item)
        elif isinstance(item, build.RunTarget):
            raise RuntimeError('This is not a pipe.')
        elif isinstance(item, build.RunScript):
            raise RuntimeError('Do not do this.')
        elif isinstance(item, build.Data):
            return DataHolder(item)
        elif isinstance(item, dependencies.Dependency):
            return DependencyHolder(item, self.subproject)
        elif isinstance(item, dependencies.ExternalProgram):
            return ExternalProgramHolder(item, self.subproject)
        elif hasattr(item, 'held_object'):
            # Already a holder object; pass through unchanged.
            return item
        else:
            raise InterpreterException('Module returned a value of unknown type.')
def process_new_values(self, invalues):
invalues = listify(invalues)
for v in invalues:
if isinstance(v, (RunTargetHolder, CustomTargetHolder, BuildTargetHolder)):
v = v.held_object
if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)):
self.add_target(v.name, v)
elif isinstance(v, list):
self.module_method_callback(v)
elif isinstance(v, build.GeneratedList):
pass
elif isinstance(v, build.RunScript):
self.build.install_scripts.append(v)
elif isinstance(v, build.Data):
self.build.data.append(v)
elif isinstance(v, dependencies.ExternalProgram):
return ExternalProgramHolder(v, self.subproject)
elif isinstance(v, dependencies.InternalDependency):
# FIXME: This is special cased and not ideal:
# The first source is our new VapiTarget, the rest are deps
self.process_new_values(v.sources[0])
elif hasattr(v, 'held_object'):
pass
elif isinstance(v, (int, str, bool, Disabler)):
pass
else:
raise InterpreterException('Module returned a value of unknown type.')
def module_method_callback(self, return_object):
if not isinstance(return_object, ModuleReturnValue):
raise InterpreterException('Bug in module, it returned an invalid object')
invalues = return_object.new_objects
self.process_new_values(invalues)
return self.holderify(return_object.return_value)
    def get_build_def_files(self):
        # List of files that must trigger a reconfigure when they change
        # (build files, option files, files used by run_command, ...).
        return self.build_def_files
def add_build_def_file(self, f):
# Use relative path for files within source directory, and absolute path
# for system files. Skip files within build directory. Also skip not regular
# files (e.g. /dev/stdout) Normalize the path to avoid duplicates, this
# is especially important to convert '/' to '\' on Windows.
if isinstance(f, mesonlib.File):
if f.is_built:
return
f = os.path.normpath(f.relative_name())
elif os.path.isfile(f) and not f.startswith('/dev'):
srcdir = Path(self.environment.get_source_dir())
builddir = Path(self.environment.get_build_dir())
f = Path(f).resolve()
if builddir in f.parents:
return
if srcdir in f.parents:
f = f.relative_to(srcdir)
f = str(f)
else:
return
if f not in self.build_def_files:
self.build_def_files.append(f)
    def get_variables(self):
        # Mapping of all variables currently assigned in the build file(s).
        return self.variables
    def check_stdlibs(self):
        """Process per-language stdlib overrides declared in machine files.

        For every configured compiler, look up a '<lang>_stdlib' property;
        when present it must be a [subproject, dependency-variable] pair,
        which is resolved by configuring that subproject. Languages without
        an override (KeyError) or with unusable values are skipped silently.
        """
        for for_machine in MachineChoice:
            props = self.build.environment.properties[for_machine]
            for l in self.coredata.compilers[for_machine].keys():
                try:
                    di = mesonlib.stringlistify(props.get_stdlib(l))
                    if len(di) != 2:
                        raise InterpreterException('Stdlib definition for %s should have exactly two elements.'
                                                   % l)
                    projname, depname = di
                    subproj = self.do_subproject(projname, 'meson', {})
                    # NOTE(review): the result is stored on stdlibs.host even
                    # when for_machine is BUILD/TARGET — confirm intended.
                    self.build.stdlibs.host[l] = subproj.get_variable_method([depname], {})
                except KeyError:
                    pass
                except InvalidArguments:
                    pass
@stringArgs
@noKwargs
def func_import(self, node, args, kwargs):
if len(args) != 1:
raise InvalidCode('Import takes one argument.')
modname = args[0]
if modname.startswith('unstable-'):
plainname = modname.split('-', 1)[1]
mlog.warning('Module %s has no backwards or forwards compatibility and might not exist in future releases.' % modname, location=node)
modname = 'unstable_' + plainname
if modname not in self.modules:
try:
module = importlib.import_module('mesonbuild.modules.' + modname)
except ImportError:
raise InvalidArguments('Module "%s" does not exist' % (modname, ))
self.modules[modname] = module.initialize(self)
return ModuleHolder(modname, self.modules[modname], self)
@stringArgs
@noKwargs
def func_files(self, node, args, kwargs):
return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args]
    @FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole'])
    @FeatureNewKwargs('declare_dependency', '0.54.0', ['variables'])
    @permittedKwargs(permitted_kwargs['declare_dependency'])
    @noPosargs
    def func_declare_dependency(self, node, args, kwargs):
        """Implement declare_dependency(): bundle include dirs, libraries,
        sources, flags and variables into an InternalDependency.

        Raises InterpreterException on type errors, on non-dependency
        entries in 'dependencies', and on dependency objects placed in
        'link_with'.
        """
        version = kwargs.get('version', self.project_version)
        if not isinstance(version, str):
            raise InterpreterException('Version must be a string.')
        incs = self.extract_incdirs(kwargs)
        libs = unholder(extract_as_list(kwargs, 'link_with'))
        libs_whole = unholder(extract_as_list(kwargs, 'link_whole'))
        sources = extract_as_list(kwargs, 'sources')
        sources = unholder(listify(self.source_strings_to_files(sources)))
        deps = unholder(extract_as_list(kwargs, 'dependencies'))
        compile_args = mesonlib.stringlistify(kwargs.get('compile_args', []))
        link_args = mesonlib.stringlistify(kwargs.get('link_args', []))
        variables = kwargs.get('variables', {})
        if not isinstance(variables, dict):
            raise InterpreterException('variables must be a dict.')
        if not all(isinstance(v, str) for v in variables.values()):
            # Because that is how they will come from pkg-config and cmake
            # NOTE(review): message below has a typo ("values be strings") —
            # left unchanged here since it is runtime-visible text.
            raise InterpreterException('variables values be strings.')
        final_deps = []
        for d in deps:
            try:
                # Unwrap holder objects; plain dependency objects pass through.
                d = d.held_object
            except Exception:
                pass
            if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)):
                raise InterpreterException('Dependencies must be external deps')
            final_deps.append(d)
        for l in libs:
            if isinstance(l, dependencies.Dependency):
                raise InterpreterException('''Entries in "link_with" may only be self-built targets,
external dependencies (including libraries) must go to "dependencies".''')
        dep = dependencies.InternalDependency(version, incs, compile_args,
                                              link_args, libs, libs_whole, sources, final_deps,
                                              variables)
        return DependencyHolder(dep, self.subproject)
@noKwargs
def func_assert(self, node, args, kwargs):
if len(args) == 1:
FeatureNew('assert function without message argument', '0.53.0').use(self.subproject)
value = args[0]
message = None
elif len(args) == 2:
value, message = args
if not isinstance(message, str):
raise InterpreterException('Assert message not a string.')
else:
raise InterpreterException('Assert takes between one and two arguments')
if not isinstance(value, bool):
raise InterpreterException('Assert value not bool.')
if not value:
if message is None:
from .ast import AstPrinter
printer = AstPrinter()
node.args.arguments[0].accept(printer)
message = printer.result
raise InterpreterException('Assert failed: ' + message)
def validate_arguments(self, args, argcount, arg_types):
if argcount is not None:
if argcount != len(args):
raise InvalidArguments('Expected %d arguments, got %d.' %
(argcount, len(args)))
for actual, wanted in zip(args, arg_types):
if wanted is not None:
if not isinstance(actual, wanted):
raise InvalidArguments('Incorrect argument type.')
    @FeatureNewKwargs('run_command', '0.50.0', ['env'])
    @FeatureNewKwargs('run_command', '0.47.0', ['check', 'capture'])
    @permittedKwargs(permitted_kwargs['run_command'])
    def func_run_command(self, node, args, kwargs):
        # Thin wrapper: the actual logic lives in run_command_impl() so it
        # can be reused with different in_builddir settings.
        return self.run_command_impl(node, args, kwargs)
    def run_command_impl(self, node, args, kwargs, in_builddir=False):
        """Run an external command at configure time and return a RunProcess.

        The first argument may be a found program, a compiler object, a
        File, or a plain string looked up on the system; remaining
        arguments must be strings, Files or found programs. Every file
        involved is registered as a build-definition file so changing it
        triggers a reconfigure. Honors the 'check', 'capture' and 'env'
        keyword arguments.
        """
        if len(args) < 1:
            raise InterpreterException('Not enough arguments')
        cmd, *cargs = args
        capture = kwargs.get('capture', True)
        srcdir = self.environment.get_source_dir()
        builddir = self.environment.get_build_dir()
        check = kwargs.get('check', False)
        if not isinstance(check, bool):
            raise InterpreterException('Check must be boolean.')
        env = self.unpack_env_kwarg(kwargs)
        # Shared error-message template for invalid command/argument types.
        m = 'must be a string, or the output of find_program(), files() '\
            'or configure_file(), or a compiler object; not {!r}'
        expanded_args = []
        if isinstance(cmd, ExternalProgramHolder):
            cmd = cmd.held_object
            if isinstance(cmd, build.Executable):
                # A find_program() result overridden with a compiled target
                # cannot run yet at configure time.
                progname = node.args.arguments[0].value
                msg = 'Program {!r} was overridden with the compiled executable {!r}'\
                      ' and therefore cannot be used during configuration'
                raise InterpreterException(msg.format(progname, cmd.description()))
            if not cmd.found():
                raise InterpreterException('command {!r} not found or not executable'.format(cmd.get_name()))
        elif isinstance(cmd, CompilerHolder):
            # Run the compiler binary itself; its extra exelist entries
            # become leading arguments.
            exelist = cmd.compiler.get_exelist()
            cmd = exelist[0]
            prog = ExternalProgram(cmd, silent=True)
            if not prog.found():
                raise InterpreterException('Program {!r} not found '
                                           'or not executable'.format(cmd))
            cmd = prog
            expanded_args = exelist[1:]
        else:
            if isinstance(cmd, mesonlib.File):
                cmd = cmd.absolute_path(srcdir, builddir)
            elif not isinstance(cmd, str):
                raise InterpreterException('First argument ' + m.format(cmd))
            # Prefer scripts in the current source directory
            search_dir = os.path.join(srcdir, self.subdir)
            prog = ExternalProgram(cmd, silent=True, search_dir=search_dir)
            if not prog.found():
                raise InterpreterException('Program or command {!r} not found '
                                           'or not executable'.format(cmd))
            cmd = prog
        for a in listify(cargs):
            if isinstance(a, str):
                expanded_args.append(a)
            elif isinstance(a, mesonlib.File):
                expanded_args.append(a.absolute_path(srcdir, builddir))
            elif isinstance(a, ExternalProgramHolder):
                expanded_args.append(a.held_object.get_path())
            else:
                raise InterpreterException('Arguments ' + m.format(a))
        # If any file that was used as an argument to the command
        # changes, we must re-run the configuration step.
        self.add_build_def_file(cmd.get_path())
        for a in expanded_args:
            if not os.path.isabs(a):
                a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a)
            self.add_build_def_file(a)
        return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir,
                          self.environment.get_build_command() + ['introspect'],
                          in_builddir=in_builddir, check=check, capture=capture)
@stringArgs
def func_gettext(self, nodes, args, kwargs):
raise InterpreterException('Gettext() function has been moved to module i18n. Import it and use i18n.gettext() instead')
def func_option(self, nodes, args, kwargs):
raise InterpreterException('Tried to call option() in build description file. All options must be in the option file.')
@FeatureNewKwargs('subproject', '0.38.0', ['default_options'])
@permittedKwargs(permitted_kwargs['subproject'])
@stringArgs
def func_subproject(self, nodes, args, kwargs):
if len(args) != 1:
raise InterpreterException('Subproject takes exactly one argument')
dirname = args[0]
return self.do_subproject(dirname, 'meson', kwargs)
def disabled_subproject(self, dirname, disabled_feature=None, exception=None):
sub = SubprojectHolder(None, self.subproject_dir, dirname,
disabled_feature=disabled_feature, exception=exception)
self.subprojects[dirname] = sub
return sub
    def do_subproject(self, dirname: str, method: str, kwargs):
        """Configure subproject *dirname* with *method* ('meson' or 'cmake').

        Handles the required/disabled feature logic, name validation,
        recursion detection, reuse of already-configured subprojects and
        wrap resolution. Returns a SubprojectHolder; failures of optional
        subprojects yield a disabled holder instead of raising.
        """
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            mlog.log('Subproject', mlog.bold(dirname), ':', 'skipped: feature', mlog.bold(feature), 'disabled')
            return self.disabled_subproject(dirname, disabled_feature=feature)
        default_options = mesonlib.stringlistify(kwargs.get('default_options', []))
        default_options = coredata.create_options_dict(default_options)
        # Validate the name: it must be a plain directory name relative to
        # the subproject dir, with no traversal tricks.
        if dirname == '':
            raise InterpreterException('Subproject dir name must not be empty.')
        if dirname[0] == '.':
            raise InterpreterException('Subproject dir name must not start with a period.')
        if '..' in dirname:
            raise InterpreterException('Subproject name must not contain a ".." path segment.')
        if os.path.isabs(dirname):
            raise InterpreterException('Subproject name must not be an absolute path.')
        if has_path_sep(dirname):
            mlog.warning('Subproject name has a path separator. This may cause unexpected behaviour.',
                         location=self.current_node)
        if dirname in self.subproject_stack:
            fullstack = self.subproject_stack + [dirname]
            incpath = ' => '.join(fullstack)
            raise InvalidCode('Recursive include of subprojects: %s.' % incpath)
        # Reuse a subproject that has already been configured (or failed).
        if dirname in self.subprojects:
            subproject = self.subprojects[dirname]
            if required and not subproject.found():
                raise InterpreterException('Subproject "%s/%s" required but not found.' % (
                                           self.subproject_dir, dirname))
            return subproject
        subproject_dir_abs = os.path.join(self.environment.get_source_dir(), self.subproject_dir)
        r = wrap.Resolver(subproject_dir_abs, self.coredata.get_builtin_option('wrap_mode'))
        try:
            resolved = r.resolve(dirname, method)
        except wrap.WrapException as e:
            subprojdir = os.path.join(self.subproject_dir, r.directory)
            if isinstance(e, wrap.WrapNotFoundException):
                # if the reason subproject execution failed was because
                # the directory doesn't exist, try to give some helpful
                # promotion...
                self.print_nested_info(dirname)
            if not required:
                mlog.log(e)
                mlog.log('Subproject ', mlog.bold(subprojdir), 'is buildable:', mlog.red('NO'), '(disabling)')
                return self.disabled_subproject(dirname, exception=e)
            raise e
        subdir = os.path.join(self.subproject_dir, resolved)
        subdir_abs = os.path.join(subproject_dir_abs, resolved)
        os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True)
        # Once any subproject is configured, global argument changes would no
        # longer reach it, so freeze them.
        self.global_args_frozen = True
        mlog.log()
        with mlog.nested():
            mlog.log('Executing subproject', mlog.bold(dirname), 'method', mlog.bold(method), '\n')
        try:
            if method == 'meson':
                return self._do_subproject_meson(dirname, subdir, default_options, kwargs)
            elif method == 'cmake':
                return self._do_subproject_cmake(dirname, subdir, subdir_abs, default_options, kwargs)
            else:
                raise InterpreterException('The method {} is invalid for the subproject {}'.format(method, dirname))
        # Invalid code is always an error
        except InvalidCode:
            raise
        except Exception as e:
            if not required:
                with mlog.nested():
                    # Suppress the 'ERROR:' prefix because this exception is not
                    # fatal and VS CI treat any logs with "ERROR:" as fatal.
                    mlog.exception(e, prefix=mlog.yellow('Exception:'))
                mlog.log('\nSubproject', mlog.bold(dirname), 'is buildable:', mlog.red('NO'), '(disabling)')
                return self.disabled_subproject(dirname, exception=e)
            raise e
    def _do_subproject_meson(self, dirname, subdir, default_options, kwargs, ast=None, build_def_files=None):
        """Run a nested Interpreter over a meson subproject and merge its
        results (build state, subprojects, summary) into this one.

        *ast*/*build_def_files* are supplied when the meson code was
        generated (e.g. from CMake) instead of read from disk. Raises if a
        'version' kwarg constraint is not satisfied.
        """
        with mlog.nested():
            new_build = self.build.copy()
            subi = Interpreter(new_build, self.backend, dirname, subdir, self.subproject_dir,
                               self.modules, default_options, ast=ast)
            subi.subprojects = self.subprojects
            subi.subproject_stack = self.subproject_stack + [dirname]
            current_active = self.active_projectname
            # Count warnings emitted by the subproject separately.
            current_warnings_counter = mlog.log_warnings_counter
            mlog.log_warnings_counter = 0
            subi.run()
            subi_warnings = mlog.log_warnings_counter
            mlog.log_warnings_counter = current_warnings_counter
            mlog.log('Subproject', mlog.bold(dirname), 'finished.')
        mlog.log()
        if 'version' in kwargs:
            pv = subi.project_version
            wanted = kwargs['version']
            if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
                raise InterpreterException('Subproject %s version is %s but %s required.' % (dirname, pv, wanted))
        self.active_projectname = current_active
        self.subprojects.update(subi.subprojects)
        self.subprojects[dirname] = SubprojectHolder(subi, self.subproject_dir, dirname,
                                                     warnings=subi_warnings)
        # Duplicates are possible when subproject uses files from project root
        if build_def_files:
            self.build_def_files = list(set(self.build_def_files + build_def_files))
        else:
            self.build_def_files = list(set(self.build_def_files + subi.build_def_files))
        self.build.merge(subi.build)
        self.build.subprojects[dirname] = subi.project_version
        self.summary.update(subi.summary)
        return self.subprojects[dirname]
    def _do_subproject_cmake(self, dirname, subdir, subdir_abs, default_options, kwargs):
        """Configure a CMake subproject by translating it into a meson AST
        and feeding that AST through _do_subproject_meson().

        The generated meson.build is also written into the build dir for
        debugging. Returns the resulting SubprojectHolder with its
        cm_interpreter attached.
        """
        with mlog.nested():
            new_build = self.build.copy()
            prefix = self.coredata.builtins['prefix'].value
            cmake_options = mesonlib.stringlistify(kwargs.get('cmake_options', []))
            cm_int = CMakeInterpreter(new_build, subdir, subdir_abs, prefix, new_build.environment, self.backend)
            cm_int.initialise(cmake_options)
            cm_int.analyse()
            # Generate a meson ast and execute it with the normal do_subproject_meson
            ast = cm_int.pretend_to_be_meson()
            mlog.log()
            with mlog.nested():
                mlog.log('Processing generated meson AST')
                # Debug print the generated meson file
                from .ast import AstIndentationGenerator, AstPrinter
                printer = AstPrinter()
                ast.accept(AstIndentationGenerator())
                ast.accept(printer)
                printer.post_process()
                meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build')
                with open(meson_filename, "w") as f:
                    f.write(printer.result)
                mlog.log('Build file:', meson_filename)
                mlog.cmd_ci_include(meson_filename)
                mlog.log()
            result = self._do_subproject_meson(dirname, subdir, default_options, kwargs, ast, cm_int.bs_files)
            result.cm_interpreter = cm_int
        mlog.log()
        return result
    def get_option_internal(self, optname):
        """Look up an option object for this (sub)project.

        Subproject lookups first try the 'subproject:name' form, falling
        back to the plain name. A yielding subproject user option of the
        same type as the parent's option returns the parent's option
        instead. Raises InterpreterException for unknown options.
        """
        raw_optname = optname
        if self.is_subproject():
            optname = self.subproject + ':' + optname
        # Builtin / base / per-machine compiler options first.
        for opts in [
                self.coredata.base_options, compilers.base_options, self.coredata.builtins,
                dict(self.coredata.get_prefixed_options_per_machine(self.coredata.builtins_per_machine)),
                dict(self.coredata.flatten_lang_iterator(
                    self.coredata.get_prefixed_options_per_machine(self.coredata.compiler_options))),
        ]:
            v = opts.get(optname)
            if v is None or v.yielding:
                v = opts.get(raw_optname)
            if v is not None:
                return v
        try:
            opt = self.coredata.user_options[optname]
            if opt.yielding and ':' in optname and raw_optname in self.coredata.user_options:
                popt = self.coredata.user_options[raw_optname]
                if type(opt) is type(popt):
                    opt = popt
                else:
                    # Get class name, then option type as a string
                    opt_type = opt.__class__.__name__[4:][:-6].lower()
                    popt_type = popt.__class__.__name__[4:][:-6].lower()
                    # This is not a hard error to avoid dependency hell, the workaround
                    # when this happens is to simply set the subproject's option directly.
                    mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield '
                                 'to parent option of type {3!r}, ignoring parent value. '
                                 'Use -D{2}:{0}=value to set the value for this option manually'
                                 '.'.format(raw_optname, opt_type, self.subproject, popt_type),
                                 location=self.current_node)
            return opt
        except KeyError:
            pass
        raise InterpreterException('Tried to access unknown option "%s".' % optname)
@stringArgs
@noKwargs
def func_get_option(self, nodes, args, kwargs):
if len(args) != 1:
raise InterpreterException('Argument required for get_option.')
optname = args[0]
if ':' in optname:
raise InterpreterException('Having a colon in option name is forbidden, '
'projects are not allowed to directly access '
'options of other subprojects.')
opt = self.get_option_internal(optname)
if isinstance(opt, coredata.UserFeatureOption):
return FeatureOptionHolder(self.environment, optname, opt)
elif isinstance(opt, coredata.UserOption):
return opt.value
return opt
@noKwargs
def func_configuration_data(self, node, args, kwargs):
if len(args) > 1:
raise InterpreterException('configuration_data takes only one optional positional arguments')
elif len(args) == 1:
FeatureNew('configuration_data dictionary', '0.49.0').use(self.subproject)
initial_values = args[0]
if not isinstance(initial_values, dict):
raise InterpreterException('configuration_data first argument must be a dictionary')
else:
initial_values = {}
return ConfigurationDataHolder(self.subproject, initial_values)
    def set_backend(self):
        """Instantiate the build backend from the 'backend' builtin option.

        No-op when a backend was already supplied. 'vs' may auto-resolve to
        a concrete Visual Studio version, in which case the option is
        updated to match. Also applies any 'backend_*' command line options
        on first invocation.
        """
        if self.backend is not None:
            return
        backend = self.coredata.get_builtin_option('backend')
        from .backend import backends
        self.backend = backends.get_backend_from_name(backend, self.build, self)
        if self.backend is None:
            raise InterpreterException('Unknown backend "%s".' % backend)
        if backend != self.backend.name:
            if self.backend.name.startswith('vs'):
                mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name))
            self.coredata.set_builtin_option('backend', self.backend.name)
        if self.environment.first_invocation:
            self.coredata.init_backend_options(backend)
        options = {k: v for k, v in self.environment.cmd_line_options.items() if k.startswith('backend_')}
        self.coredata.set_options(options)
    @stringArgs
    @permittedKwargs(permitted_kwargs['project'])
    def func_project(self, node, args, kwargs):
        """Implement project(): set project name/version/license, process
        the option file, apply default options, add the project languages
        and select the backend.

        Must be the first function called in a build file and only once per
        (sub)project; a second call raises InvalidCode.
        """
        if len(args) < 1:
            raise InvalidArguments('Not enough arguments to project(). Needs at least the project name.')
        proj_name, *proj_langs = args
        if ':' in proj_name:
            raise InvalidArguments("Project name {!r} must not contain ':'".format(proj_name))
        if 'meson_version' in kwargs:
            cv = coredata.version
            pv = kwargs['meson_version']
            if not mesonlib.version_compare(cv, pv):
                raise InterpreterException('Meson version is %s but project requires %s' % (cv, pv))
        if os.path.exists(self.option_file):
            oi = optinterpreter.OptionInterpreter(self.subproject)
            oi.process(self.option_file)
            self.coredata.merge_user_options(oi.options)
            self.add_build_def_file(self.option_file)
        # Do not set default_options on reconfigure otherwise it would override
        # values previously set from the command line. That means that changing
        # default_options in a project will trigger a reconfigure but won't
        # have any effect.
        self.project_default_options = mesonlib.stringlistify(kwargs.get('default_options', []))
        self.project_default_options = coredata.create_options_dict(self.project_default_options)
        if self.environment.first_invocation:
            default_options = self.project_default_options
            default_options.update(self.default_project_options)
            self.coredata.init_builtins(self.subproject)
        else:
            default_options = {}
        self.coredata.set_default_options(default_options, self.subproject, self.environment)
        if not self.is_subproject():
            self.build.project_name = proj_name
        self.active_projectname = proj_name
        self.project_version = kwargs.get('version', 'undefined')
        if self.build.project_version is None:
            self.build.project_version = self.project_version
        proj_license = mesonlib.stringlistify(kwargs.get('license', 'unknown'))
        self.build.dep_manifest[proj_name] = {'version': self.project_version,
                                              'license': proj_license}
        if self.subproject in self.build.projects:
            raise InvalidCode('Second call to project().')
        # 'subproject_dir' may only be customized by the top-level project.
        if not self.is_subproject() and 'subproject_dir' in kwargs:
            spdirname = kwargs['subproject_dir']
            if not isinstance(spdirname, str):
                raise InterpreterException('Subproject_dir must be a string')
            if os.path.isabs(spdirname):
                raise InterpreterException('Subproject_dir must not be an absolute path.')
            if spdirname.startswith('.'):
                raise InterpreterException('Subproject_dir must not begin with a period.')
            if '..' in spdirname:
                raise InterpreterException('Subproject_dir must not contain a ".." segment.')
            self.subproject_dir = spdirname
        self.build.subproject_dir = self.subproject_dir
        mesonlib.project_meson_versions[self.subproject] = ''
        if 'meson_version' in kwargs:
            mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version']
        self.build.projects[self.subproject] = proj_name
        mlog.log('Project name:', mlog.bold(proj_name))
        mlog.log('Project version:', mlog.bold(self.project_version))
        self.add_languages(proj_langs, True, MachineChoice.BUILD)
        self.add_languages(proj_langs, True, MachineChoice.HOST)
        self.set_backend()
        if not self.is_subproject():
            self.check_stdlibs()
    @FeatureNewKwargs('add_languages', '0.54.0', ['native'])
    @permittedKwargs(permitted_kwargs['add_languages'])
    @stringArgs
    def func_add_languages(self, node, args, kwargs):
        """Implement add_languages(): add compilers for the given languages.

        Returns True when all requested languages were added. With a
        'native:' kwarg only that machine is handled; otherwise both build
        and host machines are processed (with a warning), the build-machine
        pass being non-required.
        """
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            for lang in sorted(args, key=compilers.sort_clink):
                mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled')
            return False
        if 'native' in kwargs:
            return self.add_languages(args, required, self.machine_from_native_kwarg(kwargs))
        else:
            # absent 'native' means 'both' for backwards compatibility
            mlog.warning('add_languages is missing native:, assuming languages are wanted for both host and build.',
                         location=self.current_node)
            success = self.add_languages(args, False, MachineChoice.BUILD)
            success &= self.add_languages(args, required, MachineChoice.HOST)
            return success
def get_message_string_arg(self, arg):
if isinstance(arg, list):
argstr = stringifyUserArguments(arg)
elif isinstance(arg, dict):
argstr = stringifyUserArguments(arg)
elif isinstance(arg, str):
argstr = arg
elif isinstance(arg, int):
argstr = str(arg)
else:
raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.')
return argstr
@noArgsFlattening
@noKwargs
def func_message(self, node, args, kwargs):
if len(args) > 1:
FeatureNew('message with more than one argument', '0.54.0').use(self.subproject)
args_str = [self.get_message_string_arg(i) for i in args]
self.message_impl(args_str)
    def message_impl(self, args):
        # Render a message() call to the log with a bold 'Message:' prefix.
        mlog.log(mlog.bold('Message:'), *args)
@noArgsFlattening
@FeatureNewKwargs('summary', '0.54.0', ['list_sep'])
@permittedKwargs({'section', 'bool_yn', 'list_sep'})
@FeatureNew('summary', '0.53.0')
def func_summary(self, node, args, kwargs):
if len(args) == 1:
if not isinstance(args[0], dict):
raise InterpreterException('Summary first argument must be dictionary.')
values = args[0]
elif len(args) == 2:
if not isinstance(args[0], str):
raise InterpreterException('Summary first argument must be string.')
values = {args[0]: args[1]}
else:
raise InterpreterException('Summary accepts at most 2 arguments.')
section = kwargs.get('section', '')
if not isinstance(section, str):
raise InterpreterException('Summary\'s section keyword argument must be string.')
self.summary_impl(section, values, kwargs)
def summary_impl(self, section, values, kwargs):
if self.subproject not in self.summary:
self.summary[self.subproject] = Summary(self.active_projectname, self.project_version)
self.summary[self.subproject].add_section(section, values, kwargs)
    def _print_summary(self):
        """Print the collected configuration summaries at the end of setup.

        A synthetic 'Subprojects' section (found/disabled/failed, warning
        counts) is added to the main project's summary, subproject
        summaries are printed first and the main project's summary last.
        """
        all_subprojects = collections.OrderedDict()
        for name, subp in sorted(self.subprojects.items()):
            value = subp.found()
            if subp.disabled_feature:
                value = [value, 'Feature {!r} disabled'.format(subp.disabled_feature)]
            elif subp.exception:
                value = [value, str(subp.exception)]
            elif subp.warnings > 0:
                value = [value, '{} warnings'.format(subp.warnings)]
            all_subprojects[name] = value
        if all_subprojects:
            self.summary_impl('Subprojects', all_subprojects,
                              {'bool_yn': True,
                               'list_sep': ' ',
                               })
        mlog.log('')  # newline
        # The '' key holds the main project's summary; print it last.
        main_summary = self.summary.pop('', None)
        for _, summary in sorted(self.summary.items()):
            summary.dump()
        if main_summary:
            main_summary.dump()
@noArgsFlattening
@FeatureNew('warning', '0.44.0')
@noKwargs
def func_warning(self, node, args, kwargs):
if len(args) > 1:
FeatureNew('warning with more than one argument', '0.54.0').use(self.subproject)
args_str = [self.get_message_string_arg(i) for i in args]
mlog.warning(*args_str, location=node)
@noKwargs
def func_error(self, node, args, kwargs):
self.validate_arguments(args, 1, [str])
raise InterpreterException('Problem encountered: ' + args[0])
    @noKwargs
    def func_exception(self, node, args, kwargs):
        # Unit-test-only hook (see build_func_dict): raises a plain Exception
        # so the test suite can exercise Meson's crash handling.
        self.validate_arguments(args, 0, [])
        raise Exception()
def add_languages(self, args: T.Sequence[str], required: bool, for_machine: MachineChoice) -> bool:
success = self.add_languages_for(args, required, for_machine)
if not self.coredata.is_cross_build():
self.coredata.copy_build_options_from_regular_ones()
self._redetect_machines()
return success
def should_skip_sanity_check(self, for_machine: MachineChoice) -> bool:
if for_machine != MachineChoice.HOST:
return False
if not self.environment.is_cross_build():
return False
should = self.environment.properties.host.get('skip_sanity_check', False)
if not isinstance(should, bool):
raise InterpreterException('Option skip_sanity_check must be a boolean.')
return should
    def add_languages_for(self, args, required, for_machine: MachineChoice):
        """Detect and sanity-check compilers for each language in *args* on
        *for_machine*.

        Returns True when every language was added. When *required* is
        False, detection failures only log and flip the result to False;
        otherwise they re-raise.
        """
        langs = set(self.coredata.compilers[for_machine].keys())
        langs.update(args)
        if 'vala' in langs:
            if 'c' not in langs:
                raise InterpreterException('Compiling Vala requires C. Add C to your project languages and rerun Meson.')
        success = True
        for lang in sorted(args, key=compilers.sort_clink):
            lang = lang.lower()
            clist = self.coredata.compilers[for_machine]
            machine_name = for_machine.get_lower_case_name()
            if lang in clist:
                # Compiler already configured (e.g. reconfigure).
                comp = clist[lang]
            else:
                try:
                    comp = self.environment.detect_compiler_for(lang, for_machine)
                    if comp is None:
                        raise InvalidArguments('Tried to use unknown language "%s".' % lang)
                    if self.should_skip_sanity_check(for_machine):
                        mlog.log_once('Cross compiler sanity tests disabled via the cross file.')
                    else:
                        comp.sanity_check(self.environment.get_scratch_dir(), self.environment)
                except Exception:
                    if not required:
                        mlog.log('Compiler for language',
                                 mlog.bold(lang), 'for the', machine_name,
                                 'machine not found.')
                        success = False
                        continue
                    else:
                        raise
            # Only log build-machine compilers prominently for cross builds.
            if for_machine == MachineChoice.HOST or self.environment.is_cross_build():
                logger_fun = mlog.log
            else:
                logger_fun = mlog.debug
            logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:',
                       mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string())
            if comp.linker is not None:
                logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:',
                           mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version)
            self.build.ensure_static_linker(comp)
        return success
def program_from_file_for(self, for_machine, prognames, silent):
    """Look up each candidate program name in the machine file's [binaries]
    section and return a holder for the first entry that is found.

    File objects are skipped (they cannot come from a machine file); returns
    None when no candidate matches.
    """
    for candidate in unholder(prognames):
        if isinstance(candidate, mesonlib.File):
            # In-tree files are never overridable via machine files.
            continue
        if not isinstance(candidate, str):
            raise InterpreterException('Executable name must be a string')
        program = ExternalProgram.from_bin_list(self.environment, for_machine, candidate)
        if not program.found():
            continue
        return ExternalProgramHolder(program, self.subproject)
    return None
def program_from_system(self, args, search_dirs, silent=False):
    """Search the system (PATH plus *search_dirs*) for the first findable
    program among *args*, which may be strings or File objects.

    Returns an ExternalProgramHolder when a program is found, None otherwise.
    """
    source_dir = os.path.join(self.environment.get_source_dir(), self.subdir)
    for candidate in args:
        extra_dirs = []
        if isinstance(candidate, mesonlib.File):
            # Files are resolved relative to where they live: the build tree
            # for generated files, the source tree otherwise.
            if candidate.is_built:
                search_dir = os.path.join(self.environment.get_build_dir(),
                                          candidate.subdir)
            else:
                search_dir = os.path.join(self.environment.get_source_dir(),
                                          candidate.subdir)
            candidate = candidate.fname
        elif isinstance(candidate, str):
            search_dir = source_dir
            extra_dirs = search_dirs
        else:
            raise InvalidArguments('find_program only accepts strings and '
                                   'files, not {!r}'.format(candidate))
        extprog = dependencies.ExternalProgram(candidate, search_dir=search_dir,
                                               extra_search_dirs=extra_dirs,
                                               silent=silent)
        progobj = ExternalProgramHolder(extprog, self.subproject)
        if progobj.found():
            return progobj
def program_from_overrides(self, command_names, silent=False):
    """Return the meson.override_find_program() replacement registered for
    the first overridden name in *command_names*, or None if none is."""
    for name in command_names:
        if not isinstance(name, str):
            continue
        if name not in self.build.find_overrides:
            continue
        override = self.build.find_overrides[name]
        if not silent:
            mlog.log('Program', mlog.bold(name), 'found:', mlog.green('YES'),
                     '(overridden: %s)' % override.description())
        return ExternalProgramHolder(override, self.subproject, self.backend)
    return None
def store_name_lookups(self, command_names):
    """Record every string name that has been searched via find_program(),
    so later attempts to override one of them can be rejected."""
    self.build.searched_programs.update(
        name for name in command_names if isinstance(name, str))
def add_find_program_override(self, name, exe):
    """Register *exe* as the result of all future find_program(name) calls.

    Raises InterpreterException when the name has already been looked up
    (the override would be inconsistent) or already overridden.
    """
    if name in self.build.searched_programs:
        raise InterpreterException('Tried to override finding of executable "%s" which has already been found.'
                                   % name)
    if name in self.build.find_overrides:
        raise InterpreterException('Tried to override executable "%s" which has already been overridden.'
                                   % name)
    self.build.find_overrides[name] = exe
def find_program_impl(self, args, for_machine: MachineChoice = MachineChoice.HOST,
                      required=True, silent=True, wanted='', search_dirs=None):
    """Core program lookup shared by find_program() and internal callers.

    Search order: meson.override_find_program() overrides, machine-file
    [binaries] entries, then the system PATH plus *search_dirs*. *wanted* is
    an optional list of version constraints. Returns an ExternalProgramHolder
    (wrapping a non-existing program when not required and not found); raises
    InvalidArguments when required and missing or version-mismatched.
    """
    if not isinstance(args, list):
        args = [args]
    progobj = self.program_from_overrides(args, silent=silent)
    if progobj is None:
        progobj = self.program_from_file_for(for_machine, args, silent=silent)
    if progobj is None:
        progobj = self.program_from_system(args, search_dirs, silent=silent)
    if progobj is None and args[0].endswith('python3'):
        # Special case: fall back to the Python interpreter running Meson.
        prog = dependencies.ExternalProgram('python3', mesonlib.python_command, silent=True)
        progobj = ExternalProgramHolder(prog, self.subproject)
    if required and (progobj is None or not progobj.found()):
        raise InvalidArguments('Program(s) {!r} not found or not executable'.format(args))
    if progobj is None:
        # Not required and not found: return a well-typed "missing" holder.
        return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject)
    # Remember the lookup so later override attempts can be rejected.
    self.store_name_lookups(args)
    if wanted:
        version = progobj.get_version(self)
        is_found, not_found, found = mesonlib.version_compare_many(version, wanted)
        if not is_found:
            mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'),
                     'found {!r} but need:'.format(version),
                     ', '.join(["'{}'".format(e) for e in not_found]))
            if required:
                m = 'Invalid version of program, need {!r} {!r} found {!r}.'
                raise InvalidArguments(m.format(progobj.get_name(), not_found, version))
            return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject)
    return progobj
@FeatureNewKwargs('find_program', '0.53.0', ['dirs'])
@FeatureNewKwargs('find_program', '0.52.0', ['version'])
@FeatureNewKwargs('find_program', '0.49.0', ['disabler'])
@disablerIfNotFound
@permittedKwargs(permitted_kwargs['find_program'])
def func_find_program(self, node, args, kwargs):
    """Implementation of the find_program() interpreter function."""
    if not args:
        raise InterpreterException('No program name specified.')
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        # Lookup turned off entirely by a disabled feature option.
        mlog.log('Program', mlog.bold(' '.join(args)), 'skipped: feature', mlog.bold(feature), 'disabled')
        return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject)
    dirs = extract_search_dirs(kwargs)
    version_req = mesonlib.stringlistify(kwargs.get('version', []))
    machine = self.machine_from_native_kwarg(kwargs)
    return self.find_program_impl(args, machine, required=required,
                                  silent=False, wanted=version_req,
                                  search_dirs=dirs)
def func_find_library(self, node, args, kwargs):
    """Implementation of the removed find_library() function: always raises
    with a pointer to the compiler-object replacement."""
    raise InvalidCode('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n'
                      'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n'
                      'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n'
                      )
def _find_cached_dep(self, name, display_name, kwargs):
    """Look up a dependency in the override table and the per-machine cache.

    Returns (identifier, dep) where dep is None on a cache miss, a fresh
    NotFoundDependency when an explicit override fails the version check,
    or the cached/overridden dependency object otherwise.
    """
    for_machine = self.machine_from_native_kwarg(kwargs)
    identifier = dependencies.get_dep_identifier(name, kwargs)
    wanted_vers = mesonlib.stringlistify(kwargs.get('version', []))
    override = self.build.dependency_overrides[for_machine].get(identifier)
    if override:
        info = [mlog.blue('(overridden)' if override.explicit else '(cached)')]
        cached_dep = override.dep
        # Not-found deps are never recorded implicitly, but the user may
        # have explicitly called meson.override_dependency() with a not-found
        # dep.
        if not cached_dep.found():
            mlog.log('Dependency', mlog.bold(display_name),
                     'found:', mlog.red('NO'), *info)
            return identifier, cached_dep
        found_vers = cached_dep.get_version()
        if not self.check_version(wanted_vers, found_vers):
            # An override with the wrong version is a definitive miss.
            mlog.log('Dependency', mlog.bold(name),
                     'found:', mlog.red('NO'),
                     'found', mlog.normal_cyan(found_vers), 'but need:',
                     mlog.bold(', '.join(["'{}'".format(e) for e in wanted_vers])),
                     *info)
            return identifier, NotFoundDependency(self.environment)
    else:
        info = [mlog.blue('(cached)')]
        cached_dep = self.coredata.deps[for_machine].get(identifier)
        if cached_dep:
            found_vers = cached_dep.get_version()
            if not self.check_version(wanted_vers, found_vers):
                # Wrong cached version: report a miss so the caller re-searches.
                return identifier, None
    if cached_dep:
        if found_vers:
            info = [mlog.normal_cyan(found_vers), *info]
        mlog.log('Dependency', mlog.bold(display_name),
                 'found:', mlog.green('YES'), *info)
        return identifier, cached_dep
    return identifier, None
@staticmethod
def check_version(wanted, found):
    """Return True when version string *found* satisfies every constraint in
    *wanted* (an empty constraint list always passes; an 'undefined' found
    version never does)."""
    if not wanted:
        return True
    if found == 'undefined':
        return False
    return mesonlib.version_compare_many(found, wanted)[0]
def notfound_dependency(self):
    """Return a holder wrapping a not-found dependency placeholder."""
    missing = NotFoundDependency(self.environment)
    return DependencyHolder(missing, self.subproject)
def verify_fallback_consistency(self, dirname, varname, cached_dep):
    """Check that subproject variable *varname* still holds the dependency
    that was cached/overridden earlier; raise DependencyException otherwise.

    A no-op when any of the pieces (cached dep, variable name, configured
    subproject) is missing or the cached dep was not found.
    """
    subproject = self.subprojects.get(dirname)
    if not cached_dep or not varname or not subproject or not cached_dep.found():
        return
    current = subproject.get_variable_method([varname], {})
    if current.held_object != cached_dep:
        m = 'Inconsistency: Subproject has overridden the dependency with another variable than {!r}'
        raise DependencyException(m.format(varname))
def get_subproject_dep(self, name, display_name, dirname, varname, kwargs):
    """Fetch dependency *name* from the already-configured subproject *dirname*.

    *varname* names the subproject variable holding the dependency; when it
    is None the subproject must have overridden the dependency via
    meson.override_dependency(). Honors the 'required' and 'version' kwargs
    and returns a DependencyHolder (a not-found one when allowed to fail).
    """
    required = kwargs.get('required', True)
    wanted = mesonlib.stringlistify(kwargs.get('version', []))
    subproj_path = os.path.join(self.subproject_dir, dirname)
    dep = self.notfound_dependency()
    try:
        subproject = self.subprojects[dirname]
        _, cached_dep = self._find_cached_dep(name, display_name, kwargs)
        if varname is None:
            # Assuming the subproject overridden the dependency we want
            if cached_dep:
                if required and not cached_dep.found():
                    m = 'Dependency {!r} is not satisfied'
                    raise DependencyException(m.format(display_name))
                return DependencyHolder(cached_dep, self.subproject)
            else:
                m = 'Subproject {} did not override dependency {}'
                raise DependencyException(m.format(subproj_path, display_name))
        if subproject.found():
            # Make sure the variable agrees with any cached/overridden dep.
            self.verify_fallback_consistency(dirname, varname, cached_dep)
            dep = self.subprojects[dirname].get_variable_method([varname], {})
    except InvalidArguments:
        # Variable missing in the subproject: fall through with the
        # not-found placeholder assigned above.
        pass
    if not isinstance(dep, DependencyHolder):
        raise InvalidCode('Fetched variable {!r} in the subproject {!r} is '
                          'not a dependency object.'.format(varname, dirname))
    if not dep.found():
        if required:
            raise DependencyException('Could not find dependency {} in subproject {}'
                                      ''.format(varname, dirname))
        # If the dependency is not required, don't raise an exception
        mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                 mlog.bold(subproj_path), 'found:', mlog.red('NO'))
        return dep
    found = dep.held_object.get_version()
    if not self.check_version(wanted, found):
        if required:
            raise DependencyException('Version {} of subproject dependency {} already '
                                      'cached, requested incompatible version {} for '
                                      'dep {}'.format(found, dirname, wanted, display_name))
        mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                 mlog.bold(subproj_path), 'found:', mlog.red('NO'),
                 'found', mlog.normal_cyan(found), 'but need:',
                 mlog.bold(', '.join(["'{}'".format(e) for e in wanted])))
        return self.notfound_dependency()
    found = mlog.normal_cyan(found) if found else None
    mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
             mlog.bold(subproj_path), 'found:', mlog.green('YES'), found)
    return dep
def _handle_featurenew_dependencies(self, name):
    """Emit a FeatureNew notice for dependency names whose native support
    was introduced in a later Meson release."""
    introduced = {
        'mpi': ('MPI Dependency', '0.42.0'),
        'pcap': ('Pcap Dependency', '0.42.0'),
        'vulkan': ('Vulkan Dependency', '0.42.0'),
        'libwmf': ('LibWMF Dependency', '0.44.0'),
        'openmp': ('OpenMP Dependency', '0.46.0'),
    }
    if name in introduced:
        feature, version = introduced[name]
        FeatureNew(feature, version).use(self.subproject)
@FeatureNewKwargs('dependency', '0.54.0', ['components'])
@FeatureNewKwargs('dependency', '0.52.0', ['include_type'])
@FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args'])
@FeatureNewKwargs('dependency', '0.49.0', ['disabler'])
@FeatureNewKwargs('dependency', '0.40.0', ['method'])
@FeatureNewKwargs('dependency', '0.38.0', ['default_options'])
@disablerIfNotFound
@permittedKwargs(permitted_kwargs['dependency'])
def func_dependency(self, node, args, kwargs):
    """Implementation of the dependency() interpreter function.

    Resolves the named external dependency (possibly via a subproject
    fallback), prints the optional 'not_found_message' when the lookup
    fails, and records an implicit dependency override so subsequent
    lookups (e.g. from subprojects) return the same object.
    """
    self.validate_arguments(args, 1, [str])
    name = args[0]
    display_name = name if name else '(anonymous)'
    mods = extract_as_list(kwargs, 'modules')
    if mods:
        display_name += ' (modules: {})'.format(', '.join(str(i) for i in mods))
    not_found_message = kwargs.get('not_found_message', '')
    if not isinstance(not_found_message, str):
        raise InvalidArguments('The not_found_message must be a string.')
    try:
        d = self.dependency_impl(name, display_name, kwargs)
    except Exception:
        # Show the user-provided hint even when the lookup itself raises.
        if not_found_message:
            self.message_impl([not_found_message])
        raise
    # Only emit the hint once, and only when the dependency was not found.
    # (Previously the message was also printed a second time unconditionally,
    # spamming an empty message even for found dependencies.)
    if not d.found() and not_found_message:
        self.message_impl([not_found_message])
    if name and d.found():
        for_machine = self.machine_from_native_kwarg(kwargs)
        identifier = dependencies.get_dep_identifier(name, kwargs)
        if identifier not in self.build.dependency_overrides[for_machine]:
            self.build.dependency_overrides[for_machine][identifier] = \
                build.DependencyOverride(d.held_object, node, explicit=False)
    return d
def dependency_impl(self, name, display_name, kwargs):
    """Resolve a dependency lookup: feature-option gating, cache/override
    check, external search, and finally the subproject fallback.

    Returns a DependencyHolder; raises when the lookup fails and the
    dependency is required.
    """
    disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
    if disabled:
        mlog.log('Dependency', mlog.bold(display_name), 'skipped: feature', mlog.bold(feature), 'disabled')
        return self.notfound_dependency()
    has_fallback = 'fallback' in kwargs
    if 'default_options' in kwargs and not has_fallback:
        # Fixed typo in the warning text ("keyworg" -> "keyword").
        mlog.warning('The "default_options" keyword argument does nothing without a "fallback" keyword argument.',
                     location=self.current_node)
    if name == '' and required and not has_fallback:
        raise InvalidArguments('Dependency is both required and not-found')
    if '<' in name or '>' in name or '=' in name:
        # Fixed garbled message: the literals used to join into
        # "To specifyversion\n requirements".
        raise InvalidArguments('Characters <, > and = are forbidden in dependency names. To specify'
                               '\nversion requirements use the \'version\' keyword argument instead.')
    identifier, cached_dep = self._find_cached_dep(name, display_name, kwargs)
    if cached_dep:
        if has_fallback:
            dirname, varname = self.get_subproject_infos(kwargs)
            self.verify_fallback_consistency(dirname, varname, cached_dep)
        if required and not cached_dep.found():
            m = 'Dependency {!r} was already checked and was not found'
            raise DependencyException(m.format(display_name))
        return DependencyHolder(cached_dep, self.subproject)
    if has_fallback:
        # If the fallback subproject is already configured, fetch from it.
        dirname, varname = self.get_subproject_infos(kwargs)
        if dirname in self.subprojects:
            return self.get_subproject_dep(name, display_name, dirname, varname, kwargs)
    wrap_mode = self.coredata.get_builtin_option('wrap_mode')
    forcefallback = wrap_mode == WrapMode.forcefallback and has_fallback
    if name != '' and not forcefallback:
        self._handle_featurenew_dependencies(name)
        # When a fallback exists the external search is only best-effort.
        kwargs['required'] = required and not has_fallback
        dep = dependencies.find_external_dependency(name, self.environment, kwargs)
        kwargs['required'] = required
        if dep.found():
            for_machine = self.machine_from_native_kwarg(kwargs)
            self.coredata.deps[for_machine].put(identifier, dep)
            return DependencyHolder(dep, self.subproject)
    if has_fallback:
        return self.dependency_fallback(name, display_name, kwargs)
    return self.notfound_dependency()
@FeatureNew('disabler', '0.44.0')
@noKwargs
@noPosargs
def func_disabler(self, node, args, kwargs):
    """Implementation of disabler(): return a fresh Disabler object."""
    return Disabler()
def print_nested_info(self, dependency_name):
    """Warn that *dependency_name* exists in a sub-subproject and print the
    'meson wrap promote' command(s) that would make it usable here."""
    message = ['Dependency', mlog.bold(dependency_name), 'not found but it is available in a sub-subproject.\n' +
               'To use it in the current project, promote it by going in the project source\n'
               'root and issuing']
    sprojs = mesonlib.detect_subprojects('subprojects', self.source_root)
    if dependency_name not in sprojs:
        return
    candidates = sprojs[dependency_name]
    if len(candidates) > 1:
        message.append('one of the following commands:')
    else:
        message.append('the following command:')
    command_templ = '\nmeson wrap promote {}'
    prefix_len = len(self.source_root) + 1
    message.extend(mlog.bold(command_templ.format(path[prefix_len:]))
                   for path in candidates)
    mlog.warning(*message, location=self.current_node)
def get_subproject_infos(self, kwargs):
    """Parse the 'fallback' keyword into a (subproject_dir, variable_name)
    pair; variable_name is None for the one-element form (0.53.0+)."""
    fbinfo = mesonlib.stringlistify(kwargs['fallback'])
    if len(fbinfo) == 2:
        return fbinfo
    if len(fbinfo) == 1:
        FeatureNew('Fallback without variable name', '0.53.0').use(self.subproject)
        return fbinfo[0], None
    raise InterpreterException('Fallback info must have one or two items.')
def dependency_fallback(self, name, display_name, kwargs):
    """Configure the fallback subproject for *name* (unless fallbacks are
    disabled via wrap_mode) and fetch the dependency from it."""
    required = kwargs.get('required', True)
    wrap_mode = self.coredata.get_builtin_option('wrap_mode')
    if wrap_mode == WrapMode.nofallback:
        mlog.log('Not looking for a fallback subproject for the dependency',
                 mlog.bold(display_name), 'because:\nUse of fallback '
                 'dependencies is disabled.')
        if required:
            m = 'Dependency {!r} not found and fallback is disabled'
            raise DependencyException(m.format(display_name))
        return self.notfound_dependency()
    if wrap_mode == WrapMode.forcefallback:
        mlog.log('Looking for a fallback subproject for the dependency',
                 mlog.bold(display_name), 'because:\nUse of fallback dependencies is forced.')
    else:
        mlog.log('Looking for a fallback subproject for the dependency',
                 mlog.bold(display_name))
    dirname, varname = self.get_subproject_infos(kwargs)
    self.do_subproject(dirname, 'meson', {
        'default_options': kwargs.get('default_options', []),
        'required': required,
    })
    return self.get_subproject_dep(name, display_name, dirname, varname, kwargs)
@FeatureNewKwargs('executable', '0.42.0', ['implib'])
@permittedKwargs(permitted_kwargs['executable'])
def func_executable(self, node, args, kwargs):
    """Implementation of the executable() function."""
    target = self.build_target(node, args, kwargs, ExecutableHolder)
    return target
@permittedKwargs(permitted_kwargs['static_library'])
def func_static_lib(self, node, args, kwargs):
    """Implementation of the static_library() function."""
    target = self.build_target(node, args, kwargs, StaticLibraryHolder)
    return target
@permittedKwargs(permitted_kwargs['shared_library'])
def func_shared_lib(self, node, args, kwargs):
    """Implementation of the shared_library() function."""
    holder = self.build_target(node, args, kwargs, SharedLibraryHolder)
    # Record that this target was explicitly declared shared-only.
    holder.held_object.shared_library_only = True
    return holder
@permittedKwargs(permitted_kwargs['both_libraries'])
def func_both_lib(self, node, args, kwargs):
    """Implementation of the both_libraries() function."""
    result = self.build_both_libraries(node, args, kwargs)
    return result
@FeatureNew('shared_module', '0.37.0')
@permittedKwargs(permitted_kwargs['shared_module'])
def func_shared_module(self, node, args, kwargs):
    """Implementation of the shared_module() function."""
    module = self.build_target(node, args, kwargs, SharedModuleHolder)
    return module
@permittedKwargs(permitted_kwargs['library'])
def func_library(self, node, args, kwargs):
    """Implementation of the library() function."""
    lib = self.build_library(node, args, kwargs)
    return lib
@permittedKwargs(permitted_kwargs['jar'])
def func_jar(self, node, args, kwargs):
    """Implementation of the jar() function."""
    jar = self.build_target(node, args, kwargs, JarHolder)
    return jar
@FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options'])
@permittedKwargs(permitted_kwargs['build_target'])
def func_build_target(self, node, args, kwargs):
    """Implementation of build_target(): dispatch to the concrete target
    constructor according to the mandatory 'target_type' keyword."""
    if 'target_type' not in kwargs:
        raise InterpreterException('Missing target_type keyword argument')
    target_type = kwargs.pop('target_type')
    if target_type == 'shared_module':
        FeatureNew('build_target(target_type: \'shared_module\')',
                   '0.51.0').use(self.subproject)
    holder_for = {
        'executable': ExecutableHolder,
        'shared_library': SharedLibraryHolder,
        'shared_module': SharedModuleHolder,
        'static_library': StaticLibraryHolder,
        'jar': JarHolder,
    }
    if target_type in holder_for:
        return self.build_target(node, args, kwargs, holder_for[target_type])
    if target_type == 'both_libraries':
        return self.build_both_libraries(node, args, kwargs)
    if target_type == 'library':
        return self.build_library(node, args, kwargs)
    raise InterpreterException('Unknown target_type.')
@permittedKwargs(permitted_kwargs['vcs_tag'])
def func_vcs_tag(self, node, args, kwargs):
    """Implementation of vcs_tag(): create an always-stale custom target
    that runs the internal 'vcstagger' helper to substitute the current
    VCS revision (or *fallback*) for *replace_string* in the input file.
    """
    if 'input' not in kwargs or 'output' not in kwargs:
        raise InterpreterException('Keyword arguments input and output must exist')
    if 'fallback' not in kwargs:
        FeatureNew('Optional fallback in vcs_tag', '0.41.0').use(self.subproject)
    # Without an explicit fallback, the project version is substituted when
    # no VCS revision can be determined.
    fallback = kwargs.pop('fallback', self.project_version)
    if not isinstance(fallback, str):
        raise InterpreterException('Keyword argument fallback must be a string.')
    replace_string = kwargs.pop('replace_string', '@VCS_TAG@')
    regex_selector = '(.*)' # default regex selector for custom command: use complete output
    vcs_cmd = kwargs.get('command', None)
    if vcs_cmd and not isinstance(vcs_cmd, list):
        vcs_cmd = [vcs_cmd]
    source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir))
    if vcs_cmd:
        # Is the command an executable in path or maybe a script in the source tree?
        vcs_cmd[0] = shutil.which(vcs_cmd[0]) or os.path.join(source_dir, vcs_cmd[0])
    else:
        # No explicit command: auto-detect the VCS and use its revision command.
        vcs = mesonlib.detect_vcs(source_dir)
        if vcs:
            mlog.log('Found %s repository at %s' % (vcs['name'], vcs['wc_dir']))
            vcs_cmd = vcs['get_rev'].split()
            regex_selector = vcs['rev_regex']
        else:
            vcs_cmd = [' '] # executing this cmd will fail in vcstagger.py and force to use the fallback string
    # vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command...
    kwargs['command'] = self.environment.get_build_command() + \
        ['--internal',
         'vcstagger',
         '@INPUT0@',
         '@OUTPUT0@',
         fallback,
         source_dir,
         replace_string,
         regex_selector] + vcs_cmd
    # The tag can change on every build, so the target must always re-run.
    kwargs.setdefault('build_by_default', True)
    kwargs.setdefault('build_always_stale', True)
    return self._func_custom_target_impl(node, [kwargs['output']], kwargs)
@FeatureNew('subdir_done', '0.46.0')
@stringArgs
def func_subdir_done(self, node, args, kwargs):
    """Implementation of subdir_done(): abort evaluation of the current
    build file by raising SubdirDoneRequest (caught by func_subdir)."""
    if kwargs:
        raise InterpreterException('exit does not take named arguments')
    if args:
        raise InterpreterException('exit does not take any arguments')
    raise SubdirDoneRequest()
@stringArgs
@FeatureNewKwargs('custom_target', '0.48.0', ['console'])
@FeatureNewKwargs('custom_target', '0.47.0', ['install_mode', 'build_always_stale'])
@FeatureNewKwargs('custom_target', '0.40.0', ['build_by_default'])
@permittedKwargs(permitted_kwargs['custom_target'])
def func_custom_target(self, node, args, kwargs):
    """Implementation of custom_target(): validate arguments and delegate
    to _func_custom_target_impl."""
    if len(args) != 1:
        raise InterpreterException('custom_target: Only one positional argument is allowed, and it must be a string name')
    depfile = kwargs.get('depfile')
    if 'depfile' in kwargs and ('@BASENAME@' in depfile or '@PLAINNAME@' in depfile):
        FeatureNew('substitutions in custom_target depfile', '0.47.0').use(self.subproject)
    return self._func_custom_target_impl(node, args, kwargs)
def _func_custom_target_impl(self, node, args, kwargs):
    """Shared implementation behind custom_target() and vcs_tag()."""
    name = args[0]
    kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
    if 'input' in kwargs:
        try:
            kwargs['input'] = self.source_strings_to_files(extract_as_list(kwargs, 'input'))
        except mesonlib.MesonException:
            # Best-effort conversion only, for backwards compatibility.
            mlog.warning('''Custom target input \'%s\' can\'t be converted to File object(s).
This will become a hard error in the future.''' % kwargs['input'], location=self.current_node)
    target = build.CustomTarget(name, self.subdir, self.subproject, kwargs, backend=self.backend)
    holder = CustomTargetHolder(target, self)
    self.add_target(name, holder.held_object)
    return holder
@permittedKwargs(permitted_kwargs['run_target'])
def func_run_target(self, node, args, kwargs):
    """Implementation of run_target(): register a target that runs the
    given command when built; it produces no output artifacts."""
    if len(args) > 1:
        raise InvalidCode('Run_target takes only one positional argument: the target name.')
    elif len(args) == 1:
        if 'command' not in kwargs:
            raise InterpreterException('Missing "command" keyword argument')
        all_args = extract_as_list(kwargs, 'command')
        deps = unholder(extract_as_list(kwargs, 'depends'))
    else:
        raise InterpreterException('Run_target needs at least one positional argument.')
    # Validate every command element; programs must actually exist.
    cleaned_args = []
    for i in unholder(listify(all_args)):
        if not isinstance(i, (str, build.BuildTarget, build.CustomTarget, dependencies.ExternalProgram, mesonlib.File)):
            mlog.debug('Wrong type:', str(i))
            raise InterpreterException('Invalid argument to run_target.')
        if isinstance(i, dependencies.ExternalProgram) and not i.found():
            raise InterpreterException('Tried to use non-existing executable {!r}'.format(i.name))
        cleaned_args.append(i)
    name = args[0]
    if not isinstance(name, str):
        raise InterpreterException('First argument must be a string.')
    cleaned_deps = []
    for d in deps:
        if not isinstance(d, (build.BuildTarget, build.CustomTarget)):
            raise InterpreterException('Depends items must be build targets.')
        cleaned_deps.append(d)
    # First command element is the program; the rest are its arguments.
    command, *cmd_args = cleaned_args
    tg = RunTargetHolder(build.RunTarget(name, command, cmd_args, cleaned_deps, self.subdir, self.subproject), self)
    self.add_target(name, tg.held_object)
    full_name = (self.subproject, name)
    assert(full_name not in self.build.run_target_names)
    self.build.run_target_names.add(full_name)
    return tg
@FeatureNew('alias_target', '0.52.0')
@noKwargs
def func_alias_target(self, node, args, kwargs):
    """Implementation of alias_target(): a pseudo target that only builds
    its dependency targets."""
    if len(args) < 2:
        raise InvalidCode('alias_target takes at least 2 arguments.')
    name, *dep_args = args
    if not isinstance(name, str):
        raise InterpreterException('First argument must be a string.')
    deps = unholder(listify(dep_args))
    for dep in deps:
        if not isinstance(dep, (build.BuildTarget, build.CustomTarget)):
            raise InterpreterException('Depends items must be build targets.')
    holder = RunTargetHolder(build.AliasTarget(name, deps, self.subdir, self.subproject), self)
    self.add_target(name, holder.held_object)
    return holder
@permittedKwargs(permitted_kwargs['generator'])
def func_generator(self, node, args, kwargs):
    """Implementation of generator(): create and register a generator."""
    holder = GeneratorHolder(self, args, kwargs)
    self.generators.append(holder)
    return holder
@FeatureNewKwargs('benchmark', '0.46.0', ['depends'])
@FeatureNewKwargs('benchmark', '0.52.0', ['priority'])
@permittedKwargs(permitted_kwargs['benchmark'])
def func_benchmark(self, node, args, kwargs):
    """Implementation of benchmark()."""
    # 'is_parallel' is accepted but has no effect for benchmarks.
    kwargs.pop('is_parallel', None)
    self.add_test(node, args, kwargs, False)
@FeatureNewKwargs('test', '0.46.0', ['depends'])
@FeatureNewKwargs('test', '0.52.0', ['priority'])
@permittedKwargs(permitted_kwargs['test'])
def func_test(self, node, args, kwargs):
    """Implementation of test()."""
    protocol = kwargs.get('protocol')
    if protocol == 'gtest':
        FeatureNew('"gtest" protocol for tests', '0.55.0').use(self.subproject)
    self.add_test(node, args, kwargs, True)
def unpack_env_kwarg(self, kwargs) -> build.EnvironmentVariables:
    """Normalize the 'env' keyword (holder object, dict, or string list)
    into a build.EnvironmentVariables object."""
    raw = kwargs.get('env', EnvironmentVariablesHolder())
    if isinstance(raw, EnvironmentVariablesHolder):
        return raw.held_object
    if isinstance(raw, dict):
        FeatureNew('environment dictionary', '0.52.0').use(self.subproject)
        return EnvironmentVariablesHolder(raw).held_object
    return EnvironmentVariablesHolder(listify(raw)).held_object
def add_test(self, node, args, kwargs, is_base_test):
    """Shared implementation of test() and benchmark().

    Validates all keyword arguments, builds a Test object and appends it
    to build.tests (when *is_base_test*) or build.benchmarks.
    """
    if len(args) != 2:
        raise InterpreterException('test expects 2 arguments, {} given'.format(len(args)))
    if not isinstance(args[0], str):
        raise InterpreterException('First argument of test must be a string.')
    exe = args[1]
    if not isinstance(exe, (ExecutableHolder, JarHolder, ExternalProgramHolder)):
        if isinstance(exe, mesonlib.File):
            # A File is resolved through the normal program lookup.
            exe = self.func_find_program(node, args[1], {})
        else:
            raise InterpreterException('Second argument must be executable.')
    par = kwargs.get('is_parallel', True)
    if not isinstance(par, bool):
        raise InterpreterException('Keyword argument is_parallel must be a boolean.')
    cmd_args = unholder(extract_as_list(kwargs, 'args'))
    for i in cmd_args:
        if not isinstance(i, (str, mesonlib.File, build.Target)):
            raise InterpreterException('Command line arguments must be strings, files or targets.')
    env = self.unpack_env_kwarg(kwargs)
    should_fail = kwargs.get('should_fail', False)
    if not isinstance(should_fail, bool):
        raise InterpreterException('Keyword argument should_fail must be a boolean.')
    # Default timeout is 30 seconds.
    timeout = kwargs.get('timeout', 30)
    if 'workdir' in kwargs:
        workdir = kwargs['workdir']
        if not isinstance(workdir, str):
            raise InterpreterException('Workdir keyword argument must be a string.')
        if not os.path.isabs(workdir):
            raise InterpreterException('Workdir keyword argument must be an absolute path.')
    else:
        workdir = None
    if not isinstance(timeout, int):
        raise InterpreterException('Timeout must be an integer.')
    protocol = kwargs.get('protocol', 'exitcode')
    if protocol not in {'exitcode', 'tap', 'gtest'}:
        raise InterpreterException('Protocol must be "exitcode", "tap", or "gtest".')
    # Suite names are prefixed with a sanitized project name.
    suite = []
    prj = self.subproject if self.is_subproject() else self.build.project_name
    for s in mesonlib.stringlistify(kwargs.get('suite', '')):
        if len(s) > 0:
            s = ':' + s
        suite.append(prj.replace(' ', '_').replace(':', '_') + s)
    depends = unholder(extract_as_list(kwargs, 'depends'))
    for dep in depends:
        if not isinstance(dep, (build.CustomTarget, build.BuildTarget)):
            raise InterpreterException('Depends items must be build targets.')
    priority = kwargs.get('priority', 0)
    if not isinstance(priority, int):
        raise InterpreterException('Keyword argument priority must be an integer.')
    t = Test(args[0], prj, suite, exe.held_object, depends, par, cmd_args,
             env, should_fail, timeout, workdir, protocol, priority)
    if is_base_test:
        self.build.tests.append(t)
        mlog.debug('Adding test', mlog.bold(args[0], True))
    else:
        self.build.benchmarks.append(t)
        mlog.debug('Adding benchmark', mlog.bold(args[0], True))
@FeatureNewKwargs('install_headers', '0.47.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_headers'])
def func_install_headers(self, node, args, kwargs):
    """Implementation of install_headers()."""
    sources = self.source_strings_to_files(args)
    kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
    headers = Headers(sources, kwargs)
    self.build.headers.append(headers)
    return headers
@FeatureNewKwargs('install_man', '0.47.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_man'])
def func_install_man(self, node, args, kwargs):
    """Implementation of install_man()."""
    sources = self.source_strings_to_files(args)
    kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
    manpages = Man(sources, kwargs)
    self.build.man.append(manpages)
    return manpages
@FeatureNewKwargs('subdir', '0.44.0', ['if_found'])
@permittedKwargs(permitted_kwargs['subdir'])
def func_subdir(self, node, args, kwargs):
    """Implementation of subdir(): descend into a subdirectory and evaluate
    its meson.build, restoring self.subdir afterwards.

    The optional 'if_found' kwarg skips the subdir when any listed object
    reports not found; revisiting a directory is an error.
    """
    self.validate_arguments(args, 1, [str])
    mesonlib.check_direntry_issues(args)
    if '..' in args[0]:
        raise InvalidArguments('Subdir contains ..')
    if self.subdir == '' and args[0] == self.subproject_dir:
        raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.')
    if self.subdir == '' and args[0].startswith('meson-'):
        raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().')
    for i in mesonlib.extract_as_list(kwargs, 'if_found'):
        if not hasattr(i, 'found_method'):
            raise InterpreterException('Object used in if_found does not have a found method.')
        if not i.found_method([], {}):
            # Skip the subdir entirely when a dependency/program is missing.
            return
    prev_subdir = self.subdir
    subdir = os.path.join(prev_subdir, args[0])
    if os.path.isabs(subdir):
        raise InvalidArguments('Subdir argument must be a relative path.')
    absdir = os.path.join(self.environment.get_source_dir(), subdir)
    # Resolve symlinks so the same directory cannot be visited twice under
    # different paths.
    symlinkless_dir = os.path.realpath(absdir)
    if symlinkless_dir in self.visited_subdirs:
        raise InvalidArguments('Tried to enter directory "%s", which has already been visited.'
                               % subdir)
    self.visited_subdirs[symlinkless_dir] = True
    self.subdir = subdir
    os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True)
    buildfilename = os.path.join(self.subdir, environment.build_filename)
    self.build_def_files.append(buildfilename)
    absname = os.path.join(self.environment.get_source_dir(), buildfilename)
    if not os.path.isfile(absname):
        # Restore state before erroring out.
        self.subdir = prev_subdir
        raise InterpreterException("Non-existent build file '{!s}'".format(buildfilename))
    with open(absname, encoding='utf8') as f:
        code = f.read()
    assert(isinstance(code, str))
    try:
        codeblock = mparser.Parser(code, absname).parse()
    except mesonlib.MesonException as me:
        me.file = absname
        raise me
    try:
        self.evaluate_codeblock(codeblock)
    except SubdirDoneRequest:
        # subdir_done() raised inside the file: stop evaluating it early.
        pass
    self.subdir = prev_subdir
def _get_kwarg_install_mode(self, kwargs):
    """Convert the 'install_mode' keyword into a FileMode, or None when the
    keyword is absent. A literal `false` entry means "keep the default"
    for that position."""
    if kwargs.get('install_mode', None) is None:
        return None
    raw = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int))
    install_mode = [None if item is False else item for item in raw]
    if len(install_mode) > 3:
        raise InvalidArguments('Keyword argument install_mode takes at '
                               'most 3 arguments.')
    if install_mode and install_mode[0] is not None and \
            not isinstance(install_mode[0], str):
        raise InvalidArguments('Keyword argument install_mode requires the '
                               'permissions arg to be a string or false')
    return FileMode(*install_mode)
@FeatureNewKwargs('install_data', '0.46.0', ['rename'])
@FeatureNewKwargs('install_data', '0.38.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_data'])
def func_install_data(self, node, args, kwargs):
    """Implementation of install_data()."""
    kwsource = mesonlib.stringlistify(kwargs.get('sources', []))
    raw_sources = args + kwsource
    # Split sources into File objects and strings; anything else is invalid.
    file_sources = []
    string_sources = []
    for item in raw_sources:
        if isinstance(item, mesonlib.File):
            file_sources.append(item)
        elif isinstance(item, str):
            string_sources.append(item)
        else:
            raise InvalidArguments('Argument must be string or file.')
    sources = file_sources + self.source_strings_to_files(string_sources)
    install_dir = kwargs.get('install_dir', None)
    if not isinstance(install_dir, (str, type(None))):
        raise InvalidArguments('Keyword argument install_dir not a string.')
    install_mode = self._get_kwarg_install_mode(kwargs)
    rename = kwargs.get('rename', None)
    data = DataHolder(build.Data(sources, install_dir, install_mode, rename))
    self.build.data.append(data.held_object)
    return data
@FeatureNewKwargs('install_subdir', '0.42.0', ['exclude_files', 'exclude_directories'])
@FeatureNewKwargs('install_subdir', '0.38.0', ['install_mode'])
@permittedKwargs(permitted_kwargs['install_subdir'])
@stringArgs
def func_install_subdir(self, node, args, kwargs):
    """Implementation of install_subdir(): install an entire source
    directory, optionally excluding files/directories and/or stripping
    the top-level directory name.

    The duplicated exclude_files / exclude_directories validation loops
    are factored into one nested helper.
    """
    if len(args) != 1:
        raise InvalidArguments('Install_subdir requires exactly one argument.')
    subdir = args[0]
    if 'install_dir' not in kwargs:
        raise InvalidArguments('Missing keyword argument install_dir')
    install_dir = kwargs['install_dir']
    if not isinstance(install_dir, str):
        raise InvalidArguments('Keyword argument install_dir not a string.')
    if 'strip_directory' in kwargs:
        if not isinstance(kwargs['strip_directory'], bool):
            raise InterpreterException('"strip_directory" keyword must be a boolean.')
        strip_directory = kwargs['strip_directory']
    else:
        strip_directory = False

    def validated_excludes(key):
        # Shared validation for exclude_files / exclude_directories:
        # every entry must be a relative path string.
        if key not in kwargs:
            return set()
        entries = extract_as_list(kwargs, key)
        for e in entries:
            if not isinstance(e, str):
                raise InvalidArguments('Exclude argument not a string.')
            elif os.path.isabs(e):
                raise InvalidArguments('Exclude argument cannot be absolute.')
        return set(entries)

    exclude_files = validated_excludes('exclude_files')
    exclude_directories = validated_excludes('exclude_directories')
    exclude = (exclude_files, exclude_directories)
    install_mode = self._get_kwarg_install_mode(kwargs)
    idir = InstallDir(self.subdir, subdir, install_dir, install_mode, exclude, strip_directory)
    self.build.install_dirs.append(idir)
    return idir
    @FeatureNewKwargs('configure_file', '0.47.0', ['copy', 'output_format', 'install_mode', 'encoding'])
    @FeatureNewKwargs('configure_file', '0.46.0', ['format'])
    @FeatureNewKwargs('configure_file', '0.41.0', ['capture'])
    @FeatureNewKwargs('configure_file', '0.50.0', ['install'])
    @FeatureNewKwargs('configure_file', '0.52.0', ['depfile'])
    @permittedKwargs(permitted_kwargs['configure_file'])
    def func_configure_file(self, node, args, kwargs):
        """Implement the configure_file() function.

        Exactly one of the mutually exclusive actions 'configuration',
        'command' or 'copy' must be supplied, together with a mandatory
        'output' file name.  Generates the output in the build dir,
        optionally schedules it for installation, and returns a File
        object referring to the generated file.
        """
        # --- validate the action / kwarg combination ---------------------
        if len(args) > 0:
            raise InterpreterException("configure_file takes only keyword arguments.")
        if 'output' not in kwargs:
            raise InterpreterException('Required keyword argument "output" not defined.')
        actions = set(['configuration', 'command', 'copy']).intersection(kwargs.keys())
        # One action falls through; zero, two or three raise.
        if len(actions) == 0:
            raise InterpreterException('Must specify an action with one of these '
                                       'keyword arguments: \'configuration\', '
                                       '\'command\', or \'copy\'.')
        elif len(actions) == 2:
            raise InterpreterException('Must not specify both {!r} and {!r} '
                                       'keyword arguments since they are '
                                       'mutually exclusive.'.format(*actions))
        elif len(actions) == 3:
            raise InterpreterException('Must specify one of {!r}, {!r}, and '
                                       '{!r} keyword arguments since they are '
                                       'mutually exclusive.'.format(*actions))
        # 'capture' only makes sense together with 'command'.
        if 'capture' in kwargs:
            if not isinstance(kwargs['capture'], bool):
                raise InterpreterException('"capture" keyword must be a boolean.')
            if 'command' not in kwargs:
                raise InterpreterException('"capture" keyword requires "command" keyword.')
        # Template variable syntax of the input file.
        if 'format' in kwargs:
            fmt = kwargs['format']
            if not isinstance(fmt, str):
                raise InterpreterException('"format" keyword must be a string.')
        else:
            fmt = 'meson'
        if fmt not in ('meson', 'cmake', 'cmake@'):
            raise InterpreterException('"format" possible values are "meson", "cmake" or "cmake@".')
        # Header syntax used when dumping a configuration with no input.
        if 'output_format' in kwargs:
            output_format = kwargs['output_format']
            if not isinstance(output_format, str):
                raise InterpreterException('"output_format" keyword must be a string.')
        else:
            output_format = 'c'
        if output_format not in ('c', 'nasm'):
            raise InterpreterException('"format" possible values are "c" or "nasm".')
        if 'depfile' in kwargs:
            depfile = kwargs['depfile']
            if not isinstance(depfile, str):
                raise InterpreterException('depfile file name must be a string')
        else:
            depfile = None
        # --- resolve inputs to absolute paths ----------------------------
        inputs = self.source_strings_to_files(extract_as_list(kwargs, 'input'))
        inputs_abs = []
        for f in inputs:
            if isinstance(f, mesonlib.File):
                inputs_abs.append(f.absolute_path(self.environment.source_dir,
                                                  self.environment.build_dir))
                self.add_build_def_file(f)
            else:
                raise InterpreterException('Inputs can only be strings or file objects')
        # --- resolve the output name (may use @PLAINNAME@ etc.) ----------
        output = kwargs['output']
        if not isinstance(output, str):
            raise InterpreterException('Output file name must be a string')
        if inputs_abs:
            values = mesonlib.get_filenames_templates_dict(inputs_abs, None)
            outputs = mesonlib.substitute_values([output], values)
            output = outputs[0]
            if depfile:
                depfile = mesonlib.substitute_values([depfile], values)[0]
        # Warn when two configure_file() calls write the same output.
        ofile_rpath = os.path.join(self.subdir, output)
        if ofile_rpath in self.configure_file_outputs:
            mesonbuildfile = os.path.join(self.subdir, 'meson.build')
            current_call = "{}:{}".format(mesonbuildfile, self.current_lineno)
            first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath])
            mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call)
        else:
            self.configure_file_outputs[ofile_rpath] = self.current_lineno
        if os.path.dirname(output) != '':
            raise InterpreterException('Output file name must not contain a subdirectory.')
        (ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output))
        ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname)
        # --- 'configuration' action: substitute variables ----------------
        if 'configuration' in kwargs:
            conf = kwargs['configuration']
            if isinstance(conf, dict):
                FeatureNew('configure_file.configuration dictionary', '0.49.0').use(self.subproject)
                conf = ConfigurationDataHolder(self.subproject, conf)
            elif not isinstance(conf, ConfigurationDataHolder):
                raise InterpreterException('Argument "configuration" is not of type configuration_data')
            mlog.log('Configuring', mlog.bold(output), 'using configuration')
            if len(inputs) > 1:
                raise InterpreterException('At most one input file can given in configuration mode')
            if inputs:
                os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
                # NOTE: setdefault mutates kwargs so a later 'capture'
                # branch would see the same default encoding.
                file_encoding = kwargs.setdefault('encoding', 'utf-8')
                missing_variables, confdata_useless = \
                    mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf.held_object,
                                          fmt, file_encoding)
                if missing_variables:
                    var_list = ", ".join(map(repr, sorted(missing_variables)))
                    mlog.warning(
                        "The variable(s) %s in the input file '%s' are not "
                        "present in the given configuration data." % (
                            var_list, inputs[0]), location=node)
                if confdata_useless:
                    ifbase = os.path.basename(inputs_abs[0])
                    mlog.warning('Got an empty configuration_data() object and found no '
                                 'substitutions in the input file {!r}. If you want to '
                                 'copy a file to the build dir, use the \'copy:\' keyword '
                                 'argument added in 0.47.0'.format(ifbase), location=node)
            else:
                # No input file: dump the configuration data as a header.
                mesonlib.dump_conf_header(ofile_abs, conf.held_object, output_format)
            conf.mark_used()
        # --- 'command' action: run an external command -------------------
        elif 'command' in kwargs:
            if len(inputs) > 1:
                FeatureNew('multiple inputs in configure_file()', '0.52.0').use(self.subproject)
            values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs])
            if depfile:
                depfile = os.path.join(self.environment.get_scratch_dir(), depfile)
                values['@DEPFILE@'] = depfile
            # Substitute @INPUT@, @OUTPUT@, etc here.
            cmd = mesonlib.substitute_values(kwargs['command'], values)
            mlog.log('Configuring', mlog.bold(output), 'with command')
            res = self.run_command_impl(node, cmd, {}, True)
            if res.returncode != 0:
                raise InterpreterException('Running configure command failed.\n%s\n%s' %
                                           (res.stdout, res.stderr))
            if 'capture' in kwargs and kwargs['capture']:
                # Capture stdout into the output file, preserving the
                # input's permission bits, replacing only on change.
                dst_tmp = ofile_abs + '~'
                file_encoding = kwargs.setdefault('encoding', 'utf-8')
                with open(dst_tmp, 'w', encoding=file_encoding) as f:
                    f.writelines(res.stdout)
                if inputs_abs:
                    shutil.copymode(inputs_abs[0], dst_tmp)
                mesonlib.replace_if_different(ofile_abs, dst_tmp)
            if depfile:
                # Register every dependency from the command's depfile as
                # a build-definition file.
                mlog.log('Reading depfile:', mlog.bold(depfile))
                with open(depfile, 'r') as f:
                    df = DepFile(f.readlines())
                    deps = df.get_all_dependencies(ofile_fname)
                    for dep in deps:
                        self.add_build_def_file(dep)
        # --- 'copy' action: verbatim copy --------------------------------
        elif 'copy' in kwargs:
            if len(inputs_abs) != 1:
                raise InterpreterException('Exactly one input file must be given in copy mode')
            os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
            shutil.copyfile(inputs_abs[0], ofile_abs)
            shutil.copystat(inputs_abs[0], ofile_abs)
        else:
            # Not reachable
            raise AssertionError
        # Install file if requested, we check for the empty string
        # for backwards compatibility. That was the behaviour before
        # 0.45.0 so preserve it.
        idir = kwargs.get('install_dir', '')
        if idir is False:
            idir = ''
            mlog.deprecation('Please use the new `install:` kwarg instead of passing '
                             '`false` to `install_dir:`', location=node)
        if not isinstance(idir, str):
            if isinstance(idir, list) and len(idir) == 0:
                mlog.deprecation('install_dir: kwarg must be a string and not an empty array. '
                                 'Please use the install: kwarg to enable or disable installation. '
                                 'This will be a hard error in the next release.')
            else:
                raise InterpreterException('"install_dir" must be a string')
        # An explicit 'install' kwarg wins; otherwise a non-empty
        # install_dir implies installation.
        install = kwargs.get('install', idir != '')
        if not isinstance(install, bool):
            raise InterpreterException('"install" must be a boolean')
        if install:
            if not idir:
                raise InterpreterException('"install_dir" must be specified '
                                           'when "install" in a configure_file '
                                           'is true')
            cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname)
            install_mode = self._get_kwarg_install_mode(kwargs)
            self.build.data.append(build.Data([cfile], idir, install_mode))
        return mesonlib.File.from_built_file(self.subdir, output)
def extract_incdirs(self, kwargs):
prospectives = unholder(extract_as_list(kwargs, 'include_directories'))
result = []
for p in prospectives:
if isinstance(p, build.IncludeDirs):
result.append(p)
elif isinstance(p, str):
result.append(self.build_incdir_object([p]).held_object)
else:
raise InterpreterException('Include directory objects can only be created from strings or include directories.')
return result
@permittedKwargs(permitted_kwargs['include_directories'])
@stringArgs
def func_include_directories(self, node, args, kwargs):
return self.build_incdir_object(args, kwargs.get('is_system', False))
    def build_incdir_object(self, incdir_strings, is_system=False):
        """Create an IncludeDirsHolder for the given directory names.

        Each name must be relative to the current subdir and exist in
        either the source or the build tree; absolute paths into the
        source dir are rejected with an explanatory message.
        """
        if not isinstance(is_system, bool):
            raise InvalidArguments('Is_system must be boolean.')
        src_root = self.environment.get_source_dir()
        build_root = self.environment.get_build_dir()
        absbase_src = os.path.join(src_root, self.subdir)
        absbase_build = os.path.join(build_root, self.subdir)
        for a in incdir_strings:
            # Reject absolute paths that point into the source tree.
            if a.startswith(src_root):
                raise InvalidArguments('Tried to form an absolute path to a source dir. '
                                       'You should not do that but use relative paths instead.'
                                       '''
To get include path to any directory relative to the current dir do
incdir = include_directories(dirname)
After this incdir will contain both the current source dir as well as the
corresponding build dir. It can then be used in any subdirectory and
Meson will take care of all the busywork to make paths work.
Dirname can even be '.' to mark the current directory. Though you should
remember that the current source and build directories are always
put in the include directories by default so you only need to do
include_directories('.') if you intend to use the result in a
different subdirectory.
''')
            # The dir may exist only in the source tree or only in the
            # build tree (e.g. generated headers); either is acceptable.
            absdir_src = os.path.join(absbase_src, a)
            absdir_build = os.path.join(absbase_build, a)
            if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build):
                raise InvalidArguments('Include dir %s does not exist.' % a)
        i = IncludeDirsHolder(build.IncludeDirs(self.subdir, incdir_strings, is_system))
        return i
    @permittedKwargs(permitted_kwargs['add_test_setup'])
    @stringArgs
    def func_add_test_setup(self, node, args, kwargs):
        """Implement the add_test_setup() function.

        Registers a named test setup (exe wrapper, gdb, timeout
        multiplier, environment).  An unqualified name is prefixed with
        the current (sub)project name; at most one setup may be marked
        as the default.
        """
        if len(args) != 1:
            raise InterpreterException('Add_test_setup needs one argument for the setup name.')
        setup_name = args[0]
        # Optionally "project:" qualified identifier.
        if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None:
            raise InterpreterException('Setup name may only contain alphanumeric characters.')
        if ":" not in setup_name:
            setup_name = (self.subproject if self.subproject else self.build.project_name) + ":" + setup_name
        try:
            # Flatten the exe_wrapper entries into a plain command list.
            inp = unholder(extract_as_list(kwargs, 'exe_wrapper'))
            exe_wrapper = []
            for i in inp:
                if isinstance(i, str):
                    exe_wrapper.append(i)
                elif isinstance(i, dependencies.ExternalProgram):
                    if not i.found():
                        raise InterpreterException('Tried to use non-found executable.')
                    exe_wrapper += i.get_command()
                else:
                    raise InterpreterException('Exe wrapper can only contain strings or external binaries.')
        except KeyError:
            # NOTE(review): presumably signals that 'exe_wrapper' was not
            # given at all — confirm against extract_as_list's behaviour.
            exe_wrapper = None
        gdb = kwargs.get('gdb', False)
        if not isinstance(gdb, bool):
            raise InterpreterException('Gdb option must be a boolean')
        timeout_multiplier = kwargs.get('timeout_multiplier', 1)
        if not isinstance(timeout_multiplier, int):
            raise InterpreterException('Timeout multiplier must be a number.')
        is_default = kwargs.get('is_default', False)
        if not isinstance(is_default, bool):
            raise InterpreterException('is_default option must be a boolean')
        if is_default:
            # Only one setup may be the default.
            if self.build.test_setup_default_name is not None:
                raise InterpreterException('\'%s\' is already set as default. '
                                           'is_default can be set to true only once' % self.build.test_setup_default_name)
            self.build.test_setup_default_name = setup_name
        env = self.unpack_env_kwarg(kwargs)
        self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, gdb, timeout_multiplier, env)
@permittedKwargs(permitted_kwargs['add_global_arguments'])
@stringArgs
def func_add_global_arguments(self, node, args, kwargs):
for_machine = self.machine_from_native_kwarg(kwargs)
self.add_global_arguments(node, self.build.global_args[for_machine], args, kwargs)
@permittedKwargs(permitted_kwargs['add_global_link_arguments'])
@stringArgs
def func_add_global_link_arguments(self, node, args, kwargs):
for_machine = self.machine_from_native_kwarg(kwargs)
self.add_global_arguments(node, self.build.global_link_args[for_machine], args, kwargs)
@permittedKwargs(permitted_kwargs['add_project_arguments'])
@stringArgs
def func_add_project_arguments(self, node, args, kwargs):
for_machine = self.machine_from_native_kwarg(kwargs)
self.add_project_arguments(node, self.build.projects_args[for_machine], args, kwargs)
@permittedKwargs(permitted_kwargs['add_project_link_arguments'])
@stringArgs
def func_add_project_link_arguments(self, node, args, kwargs):
for_machine = self.machine_from_native_kwarg(kwargs)
self.add_project_arguments(node, self.build.projects_link_args[for_machine], args, kwargs)
def warn_about_builtin_args(self, args):
warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra', '-Wpedantic')
optargs = ('-O0', '-O2', '-O3', '-Os', '/O1', '/O2', '/Os')
for arg in args:
if arg in warnargs:
mlog.warning('Consider using the built-in warning_level option instead of using "{}".'.format(arg),
location=self.current_node)
elif arg in optargs:
mlog.warning('Consider using the built-in optimization level instead of using "{}".'.format(arg),
location=self.current_node)
elif arg == '-g':
mlog.warning('Consider using the built-in debug option instead of using "{}".'.format(arg),
location=self.current_node)
elif arg == '-pipe':
mlog.warning("You don't need to add -pipe, Meson will use it automatically when it is available.",
location=self.current_node)
elif arg.startswith('-fsanitize'):
mlog.warning('Consider using the built-in option for sanitizers instead of using "{}".'.format(arg),
location=self.current_node)
elif arg.startswith('-std=') or arg.startswith('/std:'):
mlog.warning('Consider using the built-in option for language standard version instead of using "{}".'.format(arg),
location=self.current_node)
def add_global_arguments(self, node, argsdict, args, kwargs):
if self.is_subproject():
msg = 'Function \'{}\' cannot be used in subprojects because ' \
'there is no way to make that reliable.\nPlease only call ' \
'this if is_subproject() returns false. Alternatively, ' \
'define a variable that\ncontains your language-specific ' \
'arguments and add it to the appropriate *_args kwarg ' \
'in each target.'.format(node.func_name)
raise InvalidCode(msg)
frozen = self.project_args_frozen or self.global_args_frozen
self.add_arguments(node, argsdict, frozen, args, kwargs)
def add_project_arguments(self, node, argsdict, args, kwargs):
if self.subproject not in argsdict:
argsdict[self.subproject] = {}
self.add_arguments(node, argsdict[self.subproject],
self.project_args_frozen, args, kwargs)
def add_arguments(self, node, argsdict, args_frozen, args, kwargs):
if args_frozen:
msg = 'Tried to use \'{}\' after a build target has been declared.\n' \
'This is not permitted. Please declare all ' \
'arguments before your targets.'.format(node.func_name)
raise InvalidCode(msg)
if 'language' not in kwargs:
raise InvalidCode('Missing language definition in {}'.format(node.func_name))
self.warn_about_builtin_args(args)
for lang in mesonlib.stringlistify(kwargs['language']):
lang = lang.lower()
argsdict[lang] = argsdict.get(lang, []) + args
@noKwargs
@noArgsFlattening
def func_environment(self, node, args, kwargs):
if len(args) > 1:
raise InterpreterException('environment takes only one optional positional arguments')
elif len(args) == 1:
FeatureNew('environment positional arguments', '0.52.0').use(self.subproject)
initial_values = args[0]
if not isinstance(initial_values, dict) and not isinstance(initial_values, list):
raise InterpreterException('environment first argument must be a dictionary or a list')
else:
initial_values = {}
return EnvironmentVariablesHolder(initial_values)
    @stringArgs
    @noKwargs
    def func_join_paths(self, node, args, kwargs):
        """Implement join_paths(): join the string arguments into one path."""
        return self.join_path_strings(args)
def run(self):
super().run()
mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets))))
FeatureNew.report(self.subproject)
FeatureDeprecated.report(self.subproject)
if not self.is_subproject():
self.print_extra_warnings()
if self.subproject == '':
self._print_summary()
def print_extra_warnings(self):
for c in self.coredata.compilers.host.values():
if c.get_id() == 'clang':
self.check_clang_asan_lundef()
break
def check_clang_asan_lundef(self):
if 'b_lundef' not in self.coredata.base_options:
return
if 'b_sanitize' not in self.coredata.base_options:
return
if (self.coredata.base_options['b_lundef'].value and
self.coredata.base_options['b_sanitize'].value != 'none'):
mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef.
This will probably not work.
Try setting b_lundef to false instead.'''.format(self.coredata.base_options['b_sanitize'].value),
location=self.current_node)
def evaluate_subproject_info(self, path_from_source_root, subproject_dirname):
depth = 0
subproj_name = ''
segs = PurePath(path_from_source_root).parts
segs_spd = PurePath(subproject_dirname).parts
while segs and segs[0] == segs_spd[0]:
if len(segs_spd) == 1:
subproj_name = segs[1]
segs = segs[2:]
depth += 1
else:
segs_spd = segs_spd[1:]
segs = segs[1:]
return (depth, subproj_name)
    def validate_within_subproject(self, subdir, fname):
        """Sandbox check: refuse grabbing files across the main-project
        / subproject boundary or from a nested subproject.

        Absolute paths that point outside the source tree are allowed
        through (they are not part of this sandbox).
        """
        norm = os.path.normpath(os.path.join(subdir, fname))
        if os.path.isabs(norm):
            if not norm.startswith(self.environment.source_dir):
                # Outside the source tree entirely: not our concern.
                return
            # Rebase into a source-tree-relative path for the checks below.
            norm = os.path.relpath(norm, self.environment.source_dir)
            assert(not os.path.isabs(norm))
        (num_sps, sproj_name) = self.evaluate_subproject_info(norm, self.subproject_dir)
        plain_filename = os.path.basename(norm)
        if num_sps == 0:
            # File lives in the main project: only fine when we are the
            # main project ourselves.
            if not self.is_subproject():
                return
            raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename)
        if num_sps > 1:
            raise InterpreterException('Sandbox violation: Tried to grab file %s from a nested subproject.' % plain_filename)
        if sproj_name != self.subproject_directory_name:
            raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename)
def source_strings_to_files(self, sources):
results = []
mesonlib.check_direntry_issues(sources)
if not isinstance(sources, list):
sources = [sources]
for s in sources:
if isinstance(s, (mesonlib.File, GeneratedListHolder,
TargetHolder, CustomTargetIndexHolder,
GeneratedObjectsHolder)):
pass
elif isinstance(s, str):
self.validate_within_subproject(self.subdir, s)
s = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s)
else:
raise InterpreterException('Source item is {!r} instead of '
'string or File-type object'.format(s))
results.append(s)
return results
def add_target(self, name, tobj):
if name == '':
raise InterpreterException('Target name must not be empty.')
if name.strip() == '':
raise InterpreterException('Target name must not consist only of whitespace.')
if name.startswith('meson-'):
raise InvalidArguments("Target names starting with 'meson-' are reserved "
"for Meson's internal use. Please rename.")
if name in coredata.forbidden_target_names:
raise InvalidArguments("Target name '%s' is reserved for Meson's "
"internal use. Please rename." % name)
idname = tobj.get_id()
if idname in self.build.targets:
raise InvalidCode('Tried to create target "%s", but a target of that name already exists.' % name)
self.build.targets[idname] = tobj
if idname not in self.coredata.target_guids:
self.coredata.target_guids[idname] = str(uuid.uuid4()).upper()
@FeatureNew('both_libraries', '0.46.0')
def build_both_libraries(self, node, args, kwargs):
shared_holder = self.build_target(node, args, kwargs, SharedLibraryHolder)
pic = True
if 'pic' in kwargs:
pic = kwargs['pic']
elif 'b_staticpic' in self.environment.coredata.base_options:
pic = self.environment.coredata.base_options['b_staticpic'].value
if pic:
static_args = [args[0]]
static_kwargs = kwargs.copy()
static_kwargs['sources'] = []
static_kwargs['objects'] = shared_holder.held_object.extract_all_objects()
else:
static_args = args
static_kwargs = kwargs
static_holder = self.build_target(node, static_args, static_kwargs, StaticLibraryHolder)
return BothLibrariesHolder(shared_holder, static_holder, self)
def build_library(self, node, args, kwargs):
default_library = self.coredata.get_builtin_option('default_library', self.subproject)
if default_library == 'shared':
return self.build_target(node, args, kwargs, SharedLibraryHolder)
elif default_library == 'static':
return self.build_target(node, args, kwargs, StaticLibraryHolder)
elif default_library == 'both':
return self.build_both_libraries(node, args, kwargs)
else:
raise InterpreterException('Unknown default_library value: %s.', default_library)
    def build_target(self, node, args, kwargs, targetholder):
        """Shared implementation behind executable(), library() etc.

        *targetholder* selects the holder class and thereby the build
        target class to construct.  Registers the new target and
        returns its holder.  Also freezes project args, because args
        added after a target exists would not apply to it.
        """
        # Dummy function whose decorators emit the FeatureNewKwargs
        # version warnings for the given kwargs; only called for that
        # side effect.
        @FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories'])
        @FeatureNewKwargs('build target', '0.41.0', ['rust_args'])
        @FeatureNewKwargs('build target', '0.40.0', ['build_by_default'])
        @FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility'])
        def build_target_decorator_caller(self, node, args, kwargs):
            return True
        build_target_decorator_caller(self, node, args, kwargs)
        if not args:
            raise InterpreterException('Target does not have a name.')
        # First positional arg is the name; the rest are sources.
        name, *sources = args
        for_machine = self.machine_from_native_kwarg(kwargs)
        if 'sources' in kwargs:
            sources += listify(kwargs['sources'])
        sources = self.source_strings_to_files(sources)
        objs = extract_as_list(kwargs, 'objects')
        kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies')
        kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
        if 'extra_files' in kwargs:
            ef = extract_as_list(kwargs, 'extra_files')
            kwargs['extra_files'] = self.source_strings_to_files(ef)
        self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources)
        # Map the holder class to the concrete build target class.
        if targetholder == ExecutableHolder:
            targetclass = build.Executable
        elif targetholder == SharedLibraryHolder:
            targetclass = build.SharedLibrary
        elif targetholder == SharedModuleHolder:
            targetclass = build.SharedModule
        elif targetholder == StaticLibraryHolder:
            targetclass = build.StaticLibrary
        elif targetholder == JarHolder:
            targetclass = build.Jar
        else:
            mlog.debug('Unknown target type:', str(targetholder))
            raise RuntimeError('Unreachable code')
        self.kwarg_strings_to_includedirs(kwargs)
        # Filter out kwargs the target class does not understand.
        kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs}
        kwargs['include_directories'] = self.extract_incdirs(kwargs)
        target = targetclass(name, self.subdir, self.subproject, for_machine, sources, objs, self.environment, kwargs)
        target.project_version = self.project_version
        if not self.environment.machines.matches_build_machine(for_machine):
            # Cross targets may need the per-language cross stdlib deps.
            self.add_cross_stdlib_info(target)
        l = targetholder(target, self)
        self.add_target(name, l.held_object)
        # After the first target, project args may no longer change.
        self.project_args_frozen = True
        return l
def kwarg_strings_to_includedirs(self, kwargs):
if 'd_import_dirs' in kwargs:
items = mesonlib.extract_as_list(kwargs, 'd_import_dirs')
cleaned_items = []
for i in items:
if isinstance(i, str):
if os.path.normpath(i).startswith(self.environment.get_source_dir()):
mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead.
This will become a hard error in the future.''', location=self.current_node)
i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir))
i = self.build_incdir_object([i])
cleaned_items.append(i)
kwargs['d_import_dirs'] = cleaned_items
def get_used_languages(self, target):
result = {}
for i in target.sources:
for lang, c in self.coredata.compilers.host.items():
if c.can_compile(i):
result[lang] = True
break
return result
def add_cross_stdlib_info(self, target):
if target.for_machine != MachineChoice.HOST:
return
for l in self.get_used_languages(target):
props = self.environment.properties.host
if props.has_stdlib(l) \
and self.subproject != props.get_stdlib(l)[0]:
target.add_deps(self.build.stdlibs.host[l])
def check_sources_exist(self, subdir, sources):
for s in sources:
if not isinstance(s, str):
continue
fname = os.path.join(subdir, s)
if not os.path.isfile(fname):
raise InterpreterException('Tried to add non-existing source file %s.' % s)
def validate_extraction(self, buildtarget: InterpreterObject) -> None:
if not self.subdir.startswith(self.subproject_dir):
if buildtarget.subdir.startswith(self.subproject_dir):
raise InterpreterException('Tried to extract objects from a subproject target.')
else:
if not buildtarget.subdir.startswith(self.subproject_dir):
raise InterpreterException('Tried to extract objects from the main project from a subproject.')
if self.subdir.split('/')[1] != buildtarget.subdir.split('/')[1]:
raise InterpreterException('Tried to extract objects from a different subproject.')
    def is_subproject(self):
        # The top-level project uses the empty string as its subproject id.
        return self.subproject != ''
@noKwargs
@noArgsFlattening
def func_set_variable(self, node, args, kwargs):
if len(args) != 2:
raise InvalidCode('Set_variable takes two arguments.')
varname, value = args
self.set_variable(varname, value)
@noKwargs
@noArgsFlattening
def func_get_variable(self, node, args, kwargs):
if len(args) < 1 or len(args) > 2:
raise InvalidCode('Get_variable takes one or two arguments.')
varname = args[0]
if isinstance(varname, Disabler):
return varname
if not isinstance(varname, str):
raise InterpreterException('First argument must be a string.')
try:
return self.variables[varname]
except KeyError:
pass
if len(args) == 2:
return args[1]
raise InterpreterException('Tried to get unknown variable "%s".' % varname)
@stringArgs
@noKwargs
def func_is_variable(self, node, args, kwargs):
if len(args) != 1:
raise InvalidCode('Is_variable takes two arguments.')
varname = args[0]
return varname in self.variables
@staticmethod
def machine_from_native_kwarg(kwargs: T.Dict[str, T.Any]) -> MachineChoice:
native = kwargs.get('native', False)
if not isinstance(native, bool):
raise InvalidArguments('Argument to "native" must be a boolean.')
return MachineChoice.BUILD if native else MachineChoice.HOST
@FeatureNew('is_disabler', '0.52.0')
@noKwargs
def func_is_disabler(self, node, args, kwargs):
if len(args) != 1:
raise InvalidCode('Is_disabler takes one argument.')
varname = args[0]
return isinstance(varname, Disabler)
| true | true |
7901e5ad83ca521c9c99a63ba0f973da0c734978 | 2,072 | py | Python | bumblebee/connections/urls.py | sthasam2/bumblebee-backend | 22057399f34cdc1edb0ef04e622c97df46532de3 | [
"Linux-OpenIB"
] | null | null | null | bumblebee/connections/urls.py | sthasam2/bumblebee-backend | 22057399f34cdc1edb0ef04e622c97df46532de3 | [
"Linux-OpenIB"
] | null | null | null | bumblebee/connections/urls.py | sthasam2/bumblebee-backend | 22057399f34cdc1edb0ef04e622c97df46532de3 | [
"Linux-OpenIB"
] | null | null | null | """
comment-crud
comment-list
comment-detail
"""
from django.urls import path
from bumblebee.connections.api.views.connection_views import (
AcceptFollowRequestView,
BlockUnblockView,
DeleteFollowerView,
DeleteFollowRequestView,
FollowUnfollowRequestUnrequestView,
MuteUnmuteView,
RetrieveBlockedIDListView,
RetrieveConnectionListView,
RetrieveFollowerListView,
RetrieveFollowingListView,
RetrieveMutedIDListView,
RetrieveUserConnectionListView,
)
# URL routes for the connections app. Retrieval endpoints come first,
# then state-changing (create) endpoints, then delete endpoints.
urlpatterns = [
    # --- retrieval ---------------------------------------------------
    path(
        "list",
        RetrieveConnectionListView.as_view(),
        name="connection-list",
    ),
    path(
        "user/username=<str:username>/list",
        RetrieveUserConnectionListView.as_view(),
        name="user-connection-list",
    ),
    path(
        "user/username=<str:username>/follower/detail",
        RetrieveFollowerListView.as_view(),
        name="follower-detail",
    ),
    path(
        "user/username=<str:username>/following/detail",
        RetrieveFollowingListView.as_view(),
        name="following-detail",
    ),
    # Muted/blocked lists are for the authenticated user only, hence no
    # username in the route.
    path(
        "user/muted/detail",
        RetrieveMutedIDListView.as_view(),
        name="muted-detail",
    ),
    path(
        "user/blocked/detail",
        RetrieveBlockedIDListView.as_view(),
        name="blocked-detail",
    ),
    # --- create / toggle ---------------------------------------------
    path(
        "user/follower_request/accept",
        AcceptFollowRequestView.as_view(),
        name="accept-follow",
    ),
    path(
        "user/follow",
        FollowUnfollowRequestUnrequestView.as_view(),
        name="follow-user",
    ),
    path(
        "user/block",
        BlockUnblockView.as_view(),
        name="block-user",
    ),
    path(
        "user/mute",
        MuteUnmuteView.as_view(),
        name="mute-user",
    ),
    # --- delete ------------------------------------------------------
    path(
        "user/follower_request/delete",
        DeleteFollowRequestView.as_view(),
        name="delete-follower-request",
    ),
    path(
        "user/follower/delete",
        DeleteFollowerView.as_view(),
        name="delete-follower",
    ),
]
| 23.280899 | 62 | 0.617761 |
from django.urls import path
from bumblebee.connections.api.views.connection_views import (
AcceptFollowRequestView,
BlockUnblockView,
DeleteFollowerView,
DeleteFollowRequestView,
FollowUnfollowRequestUnrequestView,
MuteUnmuteView,
RetrieveBlockedIDListView,
RetrieveConnectionListView,
RetrieveFollowerListView,
RetrieveFollowingListView,
RetrieveMutedIDListView,
RetrieveUserConnectionListView,
)
urlpatterns = [
path(
"list",
RetrieveConnectionListView.as_view(),
name="connection-list",
),
path(
"user/username=<str:username>/list",
RetrieveUserConnectionListView.as_view(),
name="user-connection-list",
),
path(
"user/username=<str:username>/follower/detail",
RetrieveFollowerListView.as_view(),
name="follower-detail",
),
path(
"user/username=<str:username>/following/detail",
RetrieveFollowingListView.as_view(),
name="following-detail",
),
path(
"user/muted/detail",
RetrieveMutedIDListView.as_view(),
name="muted-detail",
),
path(
"user/blocked/detail",
RetrieveBlockedIDListView.as_view(),
name="blocked-detail",
),
path(
"user/follower_request/accept",
AcceptFollowRequestView.as_view(),
name="accept-follow",
),
path(
"user/follow",
FollowUnfollowRequestUnrequestView.as_view(),
name="follow-user",
),
path(
"user/block",
BlockUnblockView.as_view(),
name="block-user",
),
path(
"user/mute",
MuteUnmuteView.as_view(),
name="mute-user",
),
path(
"user/follower_request/delete",
DeleteFollowRequestView.as_view(),
name="delete-follower-request",
),
path(
"user/follower/delete",
DeleteFollowerView.as_view(),
name="delete-follower",
),
]
| true | true |
7901e623cc885f2b3b8f8a9feec81785c4fda428 | 3,975 | py | Python | src/secondaires/navigation/equipage/volontes/tenir_gouvernail.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/secondaires/navigation/equipage/volontes/tenir_gouvernail.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | src/secondaires/navigation/equipage/volontes/tenir_gouvernail.py | stormi/tsunami | bdc853229834b52b2ee8ed54a3161a1a3133d926 | [
"BSD-3-Clause"
] | null | null | null | # -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la volonté TenirGouvernail"""
import re
from secondaires.navigation.equipage.ordres.tenir_gouvernail import \
TenirGouvernail as OrdreTenirGouvernail
from secondaires.navigation.equipage.ordres.long_deplacer import LongDeplacer
from secondaires.navigation.equipage.volonte import Volonte
class TenirGouvernail(Volonte):
    """Volition: pick a free sailor to hold the ship's helm.

    (Docstrings translated from French.)  This volition chooses a
    sailor to hold the helm (gouvernail) of the ship (navire).
    """
    cle = "tenir_gouvernail"
    ordre_court = re.compile(r"^tg$", re.I)
    ordre_long = re.compile(r"^tenir\s+gouvernail?$", re.I)
    def choisir_matelots(self, exception=None):
        """Return the sailor best placed to carry out this volition.

        Returns a ``(sailor, path, helm)`` triple, or None when the helm
        is missing or already held, or when no free sailor can reach it.
        *path* is a list of exits to traverse (empty if the sailor is
        already in the helm's room).
        """
        proches = []
        matelots = self.navire.equipage.get_matelots_libres(exception)
        graph = self.navire.graph
        gouvernail = self.navire.gouvernail
        if gouvernail is None or gouvernail.tenu is not None:
            return None
        for matelot in matelots:
            origine = matelot.salle.mnemonic
            destination = gouvernail.parent.mnemonic
            if origine == destination:
                proches.append((matelot, [], gouvernail))
            else:
                chemin = graph.get((origine, destination))
                if chemin:
                    proches.append((matelot, chemin, gouvernail))
        # Prefer the sailor with the shortest path to the helm.
        proches = sorted([couple for couple in proches],
                key=lambda couple: len(couple[1]))
        if proches:
            return proches[0]
        return None
    def executer(self, sequence):
        """Execute the volition for the (sailor, exits, helm) *sequence*.

        A None *sequence* (no candidate found) terminates the volition.
        """
        if sequence is None:
            self.terminer()
            return
        matelot, sorties, gouvernail = sequence
        navire = self.navire
        ordres = []
        if sorties:
            # Walk to the helm's room first, then grab the helm.
            aller = LongDeplacer(matelot, navire, *sorties)
            ordres.append(aller)
        tenir = OrdreTenirGouvernail(matelot, navire)
        ordres.append(tenir)
        self.ajouter_ordres(matelot, ordres)
    def crier_ordres(self, personnage):
        """Have *personnage* shout the order aloud on the ship."""
        msg = "{} s'écrie : un homme à la barre !".format(
            personnage.distinction_audible)
        self.navire.envoyer(msg)
    @classmethod
    def extraire_arguments(cls, navire):
        """Extract the arguments of the volition (none for this one)."""
        return ()
| 37.857143 | 79 | 0.686289 |
import re
from secondaires.navigation.equipage.ordres.tenir_gouvernail import \
TenirGouvernail as OrdreTenirGouvernail
from secondaires.navigation.equipage.ordres.long_deplacer import LongDeplacer
from secondaires.navigation.equipage.volonte import Volonte
class TenirGouvernail(Volonte):
cle = "tenir_gouvernail"
ordre_court = re.compile(r"^tg$", re.I)
ordre_long = re.compile(r"^tenir\s+gouvernail?$", re.I)
def choisir_matelots(self, exception=None):
proches = []
matelots = self.navire.equipage.get_matelots_libres(exception)
graph = self.navire.graph
gouvernail = self.navire.gouvernail
if gouvernail is None or gouvernail.tenu is not None:
return None
for matelot in matelots:
origine = matelot.salle.mnemonic
destination = gouvernail.parent.mnemonic
if origine == destination:
proches.append((matelot, [], gouvernail))
else:
chemin = graph.get((origine, destination))
if chemin:
proches.append((matelot, chemin, gouvernail))
proches = sorted([couple for couple in proches],
key=lambda couple: len(couple[1]))
if proches:
return proches[0]
return None
def executer(self, sequence):
if sequence is None:
self.terminer()
return
matelot, sorties, gouvernail = sequence
navire = self.navire
ordres = []
if sorties:
aller = LongDeplacer(matelot, navire, *sorties)
ordres.append(aller)
tenir = OrdreTenirGouvernail(matelot, navire)
ordres.append(tenir)
self.ajouter_ordres(matelot, ordres)
def crier_ordres(self, personnage):
msg = "{} s'écrie : un homme à la barre !".format(
personnage.distinction_audible)
self.navire.envoyer(msg)
@classmethod
def extraire_arguments(cls, navire):
return ()
| true | true |
7901e656363d5b25cd254636df77374db1493797 | 1,826 | py | Python | Aula18/rev3.py | marcelabbc07/TrabalhosPython | 91734d13110e4dee12a532dfd7091e36394a6449 | [
"MIT"
] | null | null | null | Aula18/rev3.py | marcelabbc07/TrabalhosPython | 91734d13110e4dee12a532dfd7091e36394a6449 | [
"MIT"
] | null | null | null | Aula18/rev3.py | marcelabbc07/TrabalhosPython | 91734d13110e4dee12a532dfd7091e36394a6449 | [
"MIT"
] | null | null | null | # A lista a seguir possui mais uma lista interna, a lista de preços.
# A lista de preços possui 3 sublistas dentro dela com os preços dos produtos.
# para exemplificar, o preço do mamão é de 10.00 - alface crespa é de 2.99 e o feijão 9.0
# Será solicitado o preço de alguns produtos. para imprimir deve ser por f-string refrenciando o nome com o preço
# da seguinte forma: "O preço do {} é R$ {}"
# print('1: imprima o valor do abacaxi')
# print('2: imprima o valor da rucula')
# print('3: imprima o valor da laranja')
# print('4: imprima o valor do repolho')
# print('5: imprima o valor do feijão')
# print('6: imprima o valor do feijão branco')
# print('7: imprima o valor da vergamota')
# print('8: imprima o valor da alface lisa')
# print('9: imprima o valor do mamão')
# print('10: imprima o valor da soja')
# print('11: imprima o valor da lentilha')
# print('12: imprima o valor da uva')
# print('13: imprima o valor da vagem')
# print('14: imprima o valor do almeirão')
# print('15: imprima o valor da ervilha')
# print('16: imprima o valor da maçã')
# Price sublists live inside lista[4]:
#   lista[4][0] -> fruit prices, lista[4][1] -> greens, lista[4][2] -> legumes,
# each aligned by position with the name lists lista[1..3].
# (The original code indexed lista[5]/lista[6], which raises IndexError:
# lista only has 5 elements.)
lista = [['frutas','verduras','legumes','preço'],
         ['mamão','abacaxi','laranja','uva','pera','maçã','vergamota'],
         ['alface crespa', 'alface lisa','rucula','almerão','repolho','salsinha',],
         ['feijão', 'erviha', 'lentilha','vagem','feijão branco','gão de bico','soja'],
         [ [10.00, 2.56, 5.25, 9.5, 10.05, 15, 5.75], [2.99, 2.95, 3.5, 3.25, 5.89, 2.9, 2.5],
           [9.0, 5.0, 7.5, 1.75, 10.9, 5.99, 3.55]
         ]
        ]

def preco(nome):
    """Return the price of product *nome*, looked up by position in the
    matching (names, prices) pair. Raises KeyError if unknown."""
    pares = ((lista[1], lista[4][0]),
             (lista[2], lista[4][1]),
             (lista[3], lista[4][2]))
    for nomes, precos in pares:
        if nome in nomes:
            return precos[nomes.index(nome)]
    raise KeyError(nome)

# Products requested by the exercise, in the original print order
# (spellings follow the data lists, e.g. 'almerão' / 'erviha').
pedidos = ['abacaxi', 'rucula', 'laranja', 'repolho', 'feijão',
           'feijão branco', 'vergamota', 'alface lisa', 'mamão', 'soja',
           'lentilha', 'uva', 'vagem', 'almerão', 'erviha', 'maçã']
for produto in pedidos:
    # Output format mandated by the exercise statement.
    print(f"O preço do {produto} é R$ {preco(produto)}")
lista = [['frutas','verduras','legumes','preço'],
['mamão','abacaxi','laranja','uva','pera','maçã','vergamota'],
['alface crespa', 'alface lisa','rucula','almerão','repolho','salsinha',],
['feijão', 'erviha', 'lentilha','vagem','feijão branco','gão de bico','soja'],
[ [10.00, 2.56, 5.25, 9.5, 10.05, 15, 5.75], [2.99, 2.95, 3.5, 3.25, 5.89, 2.9, 2.5],
[9.0, 5.0, 7.5, 1.75, 10.9, 5.99, 3.55]
]
]
print(lista[4][1])
print(lista[5][2])
print(lista[4][2])
print(lista[5][4])
print(lista[6][0])
print(lista[6][4])
print(lista[4][-1])
print(lista[5][1])
print(lista[4][0])
print(lista[6][-1])
print(lista[6][2])
print(lista[4][3])
print(lista[6][3])
print(lista[5][3])
print(lista[6][1])
print(lista[4][5]) | true | true |
7901e6ac29a993642ab10b26b7f8d1f1b51240a1 | 263 | py | Python | trader/joins/admin.py | volkandkaya/trader | e5013e97a29528e7c0280eeac632a65e4fe0a8af | [
"MIT"
] | null | null | null | trader/joins/admin.py | volkandkaya/trader | e5013e97a29528e7c0280eeac632a65e4fe0a8af | [
"MIT"
] | null | null | null | trader/joins/admin.py | volkandkaya/trader | e5013e97a29528e7c0280eeac632a65e4fe0a8af | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Join
class JoinAdmin(admin.ModelAdmin):
    """Admin configuration for the Join model."""
    # Columns shown on the Join changelist page in the Django admin.
    list_display = ['email', 'friend', 'timestamp', 'updated']
    class Meta:
        model = Join
# Expose the Join model through the admin site with the config above.
admin.site.register(Join, JoinAdmin)
| 20.230769 | 62 | 0.707224 | from django.contrib import admin
from .models import Join
class JoinAdmin(admin.ModelAdmin):
list_display = ['email', 'friend', 'timestamp', 'updated']
class Meta:
model = Join
admin.site.register(Join, JoinAdmin)
| true | true |
7901e761d9f518c04e6a5296199f880c8a1b263f | 66,327 | py | Python | numba/core/pythonapi.py | blair1306/numba | 3b9647d17d653abac15363da604eeb804dbdd15a | [
"BSD-2-Clause"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | numba/core/pythonapi.py | blair1306/numba | 3b9647d17d653abac15363da604eeb804dbdd15a | [
"BSD-2-Clause"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | numba/core/pythonapi.py | blair1306/numba | 3b9647d17d653abac15363da604eeb804dbdd15a | [
"BSD-2-Clause"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
    """
    A registry mapping Numba type classes to handler functions
    (used below for boxing / unboxing / reflection implementations).
    """
    def __init__(self):
        # {type class -> handler function}
        self.functions = {}
    def register(self, typeclass):
        """
        Return a decorator registering the decorated function as the
        handler for *typeclass*.  Registering the same type class twice
        raises KeyError.
        """
        assert issubclass(typeclass, types.Type)
        def decorator(func):
            if typeclass in self.functions:
                raise KeyError("duplicate registration for %s" % (typeclass,))
            self.functions[typeclass] = func
            return func
        return decorator
    def lookup(self, typeclass, default=None):
        """
        Return the handler registered for *typeclass* or, walking its MRO,
        for the closest registered base class; *default* if none is found.
        """
        assert issubclass(typeclass, types.Type)
        for cls in typeclass.__mro__:
            func = self.functions.get(cls)
            if func is not None:
                return func
        return default
# Registries of boxing / unboxing implementations
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
class _BoxContext(namedtuple("_BoxContext",
                             ("context", "builder", "pyapi", "env_manager"))):
    """
    The facilities required by boxing implementations.
    """
    __slots__ = ()
    def box(self, typ, val):
        """Convert native value *val* of Numba type *typ* to a Python object."""
        return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
                               ("context", "builder", "pyapi"))):
    """
    The facilities required by unboxing implementations.
    """
    __slots__ = ()
    def unbox(self, typ, obj):
        """Convert Python object *obj* to a native value of Numba type *typ*."""
        return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
                                 ("context", "builder", "pyapi", "env_manager",
                                  "is_error"))):
    """
    The facilities required by reflection implementations.
    """
    __slots__ = ()
    # XXX the error bit is currently unused by consumers (e.g. PyCallWrapper)
    def set_error(self):
        """Flag that an error occurred during reflection."""
        self.builder.store(self.is_error, cgutils.true_bit)
    def box(self, typ, val):
        """Convert native value *val* of Numba type *typ* to a Python object."""
        return self.pyapi.from_native_value(typ, val, self.env_manager)
    def reflect(self, typ, val):
        """Reflect mutations of native value *val* back to its Python object."""
        return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
    """
    The outcome of unboxing a Python object into a native value: the value
    itself, an error flag, and an optional cleanup callback to run once the
    native value is no longer needed.
    """
    def __init__(self, value, is_error=None, cleanup=None):
        self.value = value
        # Default to the "no error" bit when the caller gives no flag.
        if is_error is None:
            is_error = cgutils.false_bit
        self.is_error = is_error
        self.cleanup = cleanup
class EnvironmentManager(object):
    """
    Helper managing the constant pool of a ``lowering.Environment``:
    the Python-level list of constants and its LLVM-level counterpart.
    """
    def __init__(self, pyapi, env, env_body, env_ptr):
        assert isinstance(env, lowering.Environment)
        self.pyapi = pyapi
        self.env = env
        self.env_body = env_body
        self.env_ptr = env_ptr
    def add_const(self, const):
        """
        Add a constant to the environment, return its index.
        """
        # All constants are frozen inside the environment
        if isinstance(const, str):
            const = utils.intern(const)
        # Deduplicate by identity (not equality): equal-but-distinct
        # constants (e.g. 0 vs 0.0) must keep separate slots.
        for index, val in enumerate(self.env.consts):
            if val is const:
                break
        else:
            index = len(self.env.consts)
            self.env.consts.append(const)
        return index
    def read_const(self, index):
        """
        Look up constant number *index* inside the environment body.
        A borrowed reference is returned.
        The returned LLVM value may have NULL value at runtime which indicates
        an error at runtime.
        """
        assert index < len(self.env.consts)
        builder = self.pyapi.builder
        consts = self.env_body.consts
        # Pre-zeroed slot so the NULL branch yields a NULL pyobject.
        ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
        with builder.if_else(cgutils.is_not_null(builder, consts)) as \
                (br_not_null, br_null):
            with br_not_null:
                getitem = self.pyapi.list_getitem(consts, index)
                builder.store(getitem, ret)
            with br_null:
                # This can happen when the Environment is accidentally released
                # and has subsequently been garbage collected.
                self.pyapi.err_set_string(
                    "PyExc_RuntimeError",
                    "`env.consts` is NULL in `read_const`",
                )
        return builder.load(ret)
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
"""
Code generation facilities to call into the CPython C API (and related
helpers).
"""
    def __init__(self, context, builder):
        """
        Bind the API helper to *context* (a Numba target context) and
        *builder* (an llvmlite IR builder).

        Note: Maybe called multiple times when lowering a function
        """
        from numba.core import boxing
        self.context = context
        self.builder = builder
        self.module = builder.basic_block.function.module
        # A unique mapping of serialized objects in this module
        try:
            self.module.__serialized
        except AttributeError:
            self.module.__serialized = {}
        # Initialize types
        self.pyobj = self.context.get_argument_type(types.pyobject)
        self.pyobjptr = self.pyobj.as_pointer()
        self.voidptr = Type.pointer(Type.int(8))
        # C integer widths are platform-dependent; mirror them via ctypes.
        self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
        self.ulong = self.long
        self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
        # NOTE: signed/unsigned aliases share one LLVM type; signedness is
        # only tracked by which C-API function is called.
        self.ulonglong = self.longlong
        self.double = Type.double()
        self.py_ssize_t = self.context.get_value_type(types.intp)
        self.cstring = Type.pointer(Type.int(8))
        # Opaque CPython struct sizes probed at build time via _helperlib.
        self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
        self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
        self.py_hash_t = self.py_ssize_t
        self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
        self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
        self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
        self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
    def get_env_manager(self, env, env_body, env_ptr):
        """Return an EnvironmentManager for *env* and its LLVM body/pointer."""
        return EnvironmentManager(self, env, env_body, env_ptr)
    def emit_environment_sentry(self, envptr, return_pyobject=False,
                                debug_msg=''):
        """Emits LLVM code to ensure the `envptr` is not NULL

        If it is NULL, a RuntimeError is raised: either through the CPython
        error indicator plus a NULL object return (*return_pyobject* true,
        for functions with the CPython calling convention), or through
        Numba's own calling convention otherwise.  *debug_msg* is embedded
        in the error message to identify the call site.
        """
        is_null = cgutils.is_null(self.builder, envptr)
        with cgutils.if_unlikely(self.builder, is_null):
            if return_pyobject:
                # Sanity-check that the enclosing function indeed returns
                # a PyObject* before emitting the CPython-style error path.
                fnty = self.builder.function.type.pointee
                assert fnty.return_type == self.pyobj
                self.err_set_string(
                    "PyExc_RuntimeError", f"missing Environment: {debug_msg}",
                )
                self.builder.ret(self.get_null_object())
            else:
                self.context.call_conv.return_user_exc(
                    self.builder, RuntimeError,
                    (f"missing Environment: {debug_msg}",),
                )
# ------ Python API -----
#
# Basic object API
#
    def incref(self, obj):
        """Emit a call to ``Py_IncRef`` on *obj* (a PyObject* value)."""
        fnty = Type.function(Type.void(), [self.pyobj])
        fn = self._get_function(fnty, name="Py_IncRef")
        self.builder.call(fn, [obj])
    def decref(self, obj):
        """Emit a call to ``Py_DecRef`` on *obj* (a PyObject* value)."""
        fnty = Type.function(Type.void(), [self.pyobj])
        fn = self._get_function(fnty, name="Py_DecRef")
        self.builder.call(fn, [obj])
def get_type(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="numba_py_type")
return self.builder.call(fn, [obj])
#
# Argument unpacking
#
def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
charptr = Type.pointer(Type.int(8))
charptrary = Type.pointer(charptr)
argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))
def parse_tuple(self, args, fmt, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_ParseTuple")
return self.builder.call(fn, [args, fmt] + list(objs))
def unpack_tuple(self, args, name, n_min, n_max, *objs):
charptr = Type.pointer(Type.int(8))
argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
fnty = Type.function(Type.int(), argtypes, var_arg=True)
fn = self._get_function(fnty, name="PyArg_UnpackTuple")
n_min = Constant.int(self.py_ssize_t, n_min)
n_max = Constant.int(self.py_ssize_t, n_max)
if isinstance(name, str):
name = self.context.insert_const_string(self.builder.module, name)
return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
#
# Exception and errors
#
def err_occurred(self):
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyErr_Occurred")
return self.builder.call(fn, ())
def err_clear(self):
fnty = Type.function(Type.void(), ())
fn = self._get_function(fnty, name="PyErr_Clear")
return self.builder.call(fn, ())
def err_set_string(self, exctype, msg):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyErr_SetString")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg))
def err_format(self, exctype, msg, *format_args):
fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PyErr_Format")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
if isinstance(msg, str):
msg = self.context.insert_const_string(self.module, msg)
return self.builder.call(fn, (exctype, msg) + tuple(format_args))
def raise_object(self, exc=None):
"""
Raise an arbitrary exception (type or value or (type, args)
or None - if reraising). A reference to the argument is consumed.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_do_raise")
if exc is None:
exc = self.make_none()
return self.builder.call(fn, (exc,))
def err_set_object(self, exctype, excval):
fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetObject")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype, excval))
def err_set_none(self, exctype):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_SetNone")
if isinstance(exctype, str):
exctype = self.get_c_object(exctype)
return self.builder.call(fn, (exctype,))
def err_write_unraisable(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
return self.builder.call(fn, (obj,))
def err_fetch(self, pty, pval, ptb):
fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
fn = self._get_function(fnty, name="PyErr_Fetch")
return self.builder.call(fn, (pty, pval, ptb))
def err_restore(self, ty, val, tb):
fnty = Type.function(Type.void(), [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyErr_Restore")
return self.builder.call(fn, (ty, val, tb))
    @contextlib.contextmanager
    def err_push(self, keep_new=False):
        """
        Temporarily push the current error indicator while the code
        block is executed. If *keep_new* is True and the code block
        raises a new error, the new error is kept, otherwise the old
        error indicator is restored at the end of the block.
        """
        # PyErr_Fetch transfers ownership of the three objects to us
        # and clears the error indicator.
        pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
                          for i in range(3)]
        self.err_fetch(pty, pval, ptb)
        yield
        ty = self.builder.load(pty)
        val = self.builder.load(pval)
        tb = self.builder.load(ptb)
        if keep_new:
            new_error = cgutils.is_not_null(self.builder, self.err_occurred())
            with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
                with if_error:
                    # Code block raised an error, keep it
                    # (drop our references to the saved old error).
                    self.decref(ty)
                    self.decref(val)
                    self.decref(tb)
                with if_ok:
                    # Restore previous error
                    # (PyErr_Restore steals our references).
                    self.err_restore(ty, val, tb)
        else:
            self.err_restore(ty, val, tb)
def get_c_object(self, name):
"""
Get a Python object through its C-accessible *name*
(e.g. "PyExc_ValueError"). The underlying variable must be
a `PyObject *`, and the value of that pointer is returned.
"""
# A LLVM global variable is implicitly a pointer to the declared
# type, so fix up by using pyobj.pointee.
return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
dllimport=True)
def raise_missing_global_error(self, name):
msg = "global name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def raise_missing_name_error(self, name):
msg = "name '%s' is not defined" % name
cstr = self.context.insert_const_string(self.module, msg)
self.err_set_string("PyExc_NameError", cstr)
def fatal_error(self, msg):
fnty = Type.function(Type.void(), [self.cstring])
fn = self._get_function(fnty, name="Py_FatalError")
fn.attributes.add("noreturn")
cstr = self.context.insert_const_string(self.module, msg)
self.builder.call(fn, (cstr,))
#
# Concrete dict API
#
def dict_getitem_string(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyDict_GetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, [dic, cstr])
def dict_getitem(self, dic, name):
"""Lookup name inside dict
Returns a borrowed reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyDict_GetItem")
return self.builder.call(fn, [dic, name])
def dict_new(self, presize=0):
if presize == 0:
fnty = Type.function(self.pyobj, ())
fn = self._get_function(fnty, name="PyDict_New")
return self.builder.call(fn, ())
else:
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="_PyDict_NewPresized")
return self.builder.call(fn,
[Constant.int(self.py_ssize_t, presize)])
def dict_setitem(self, dictobj, nameobj, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItem")
return self.builder.call(fn, (dictobj, nameobj, valobj))
def dict_setitem_string(self, dictobj, name, valobj):
fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
self.pyobj))
fn = self._get_function(fnty, name="PyDict_SetItemString")
cstr = self.context.insert_const_string(self.module, name)
return self.builder.call(fn, (dictobj, cstr, valobj))
def dict_pack(self, keyvalues):
"""
Args
-----
keyvalues: iterable of (str, llvm.Value of PyObject*)
"""
dictobj = self.dict_new()
with self.if_object_ok(dictobj):
for k, v in keyvalues:
self.dict_setitem_string(dictobj, k, v)
return dictobj
#
# Concrete number APIs
#
def float_from_double(self, fval):
fnty = Type.function(self.pyobj, [self.double])
fn = self._get_function(fnty, name="PyFloat_FromDouble")
return self.builder.call(fn, [fval])
def number_as_ssize_t(self, numobj):
fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
# We don't want any clipping, so pass OverflowError as the 2nd arg
exc_class = self.get_c_object("PyExc_OverflowError")
return self.builder.call(fn, [numobj, exc_class])
def number_long(self, numobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Long")
return self.builder.call(fn, [numobj])
def long_as_ulonglong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
def long_as_voidptr(self, numobj):
"""
Convert the given Python integer to a void*. This is recommended
over number_as_ssize_t as it isn't affected by signedness.
"""
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
fnty = Type.function(self.pyobj, [native_int_type])
fn = self._get_function(fnty, name=func_name)
resptr = cgutils.alloca_once(self.builder, self.pyobj)
fn = self._get_function(fnty, name=func_name)
self.builder.store(self.builder.call(fn, [ival]), resptr)
return self.builder.load(resptr)
def long_from_long(self, ival):
func_name = "PyLong_FromLong"
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name=func_name)
return self.builder.call(fn, [ival])
def long_from_ulong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
self.long, signed=False)
def long_from_ssize_t(self, ival):
return self._long_from_native_int(ival, "PyLong_FromSsize_t",
self.py_ssize_t, signed=True)
def long_from_longlong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromLongLong",
self.longlong, signed=True)
def long_from_ulonglong(self, ival):
return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
self.ulonglong, signed=False)
def long_from_signed_int(self, ival):
"""
Return a Python integer from any native integer value.
"""
bits = ival.type.width
if bits <= self.long.width:
return self.long_from_long(self.builder.sext(ival, self.long))
elif bits <= self.longlong.width:
return self.long_from_longlong(self.builder.sext(ival, self.longlong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def long_from_unsigned_int(self, ival):
"""
Same as long_from_signed_int, but for unsigned values.
"""
bits = ival.type.width
if bits <= self.ulong.width:
return self.long_from_ulong(self.builder.zext(ival, self.ulong))
elif bits <= self.ulonglong.width:
return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
else:
raise OverflowError("integer too big (%d bits)" % (bits))
def _get_number_operator(self, name):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyNumber_%s" % name)
return fn
def _call_number_operator(self, name, lhs, rhs, inplace=False):
if inplace:
name = "InPlace" + name
fn = self._get_number_operator(name)
return self.builder.call(fn, [lhs, rhs])
def number_add(self, lhs, rhs, inplace=False):
return self._call_number_operator("Add", lhs, rhs, inplace=inplace)
def number_subtract(self, lhs, rhs, inplace=False):
return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)
def number_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)
def number_truedivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)
def number_floordivide(self, lhs, rhs, inplace=False):
return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)
def number_remainder(self, lhs, rhs, inplace=False):
return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)
def number_matrix_multiply(self, lhs, rhs, inplace=False):
return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)
def number_lshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)
def number_rshift(self, lhs, rhs, inplace=False):
return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)
def number_and(self, lhs, rhs, inplace=False):
return self._call_number_operator("And", lhs, rhs, inplace=inplace)
def number_or(self, lhs, rhs, inplace=False):
return self._call_number_operator("Or", lhs, rhs, inplace=inplace)
def number_xor(self, lhs, rhs, inplace=False):
return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)
def number_power(self, lhs, rhs, inplace=False):
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
fn = self._get_function(fnty, fname)
return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
def number_negative(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Negative")
return self.builder.call(fn, (obj,))
def number_positive(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Positive")
return self.builder.call(fn, (obj,))
def number_float(self, val):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Float")
return self.builder.call(fn, [val])
def number_invert(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyNumber_Invert")
return self.builder.call(fn, (obj,))
def float_as_double(self, fobj):
fnty = Type.function(self.double, [self.pyobj])
fn = self._get_function(fnty, name="PyFloat_AsDouble")
return self.builder.call(fn, [fobj])
def bool_from_bool(self, bval):
"""
Get a Python bool from a LLVM boolean.
"""
longval = self.builder.zext(bval, self.long)
return self.bool_from_long(longval)
def bool_from_long(self, ival):
fnty = Type.function(self.pyobj, [self.long])
fn = self._get_function(fnty, name="PyBool_FromLong")
return self.builder.call(fn, [ival])
def complex_from_doubles(self, realval, imagval):
fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
fn = self._get_function(fnty, name="PyComplex_FromDoubles")
return self.builder.call(fn, [realval, imagval])
def complex_real_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
return self.builder.call(fn, [cobj])
def complex_imag_as_double(self, cobj):
fnty = Type.function(Type.double(), [self.pyobj])
fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
    def slice_as_ints(self, obj):
        """
        Read the members of a slice of integers.
        Returns a (ok, start, stop, step) tuple where ok is a boolean and
        the following members are pointer-sized ints.
        """
        # Output slots filled by the numba_unpack_slice C helper.
        pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
        pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
        pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)
        fnty = Type.function(Type.int(),
                             [self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
        fn = self._get_function(fnty, name="numba_unpack_slice")
        res = self.builder.call(fn, (obj, pstart, pstop, pstep))
        start = self.builder.load(pstart)
        stop = self.builder.load(pstop)
        step = self.builder.load(pstep)
        # ok is true when the helper returned 0 — presumably its success
        # code; NOTE(review): confirm against numba_unpack_slice in _helperlib.
        return cgutils.is_null(self.builder, res), start, stop, step
#
# List and sequence APIs
#
def sequence_getslice(self, obj, start, stop):
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
self.py_ssize_t])
fn = self._get_function(fnty, name="PySequence_GetSlice")
return self.builder.call(fn, (obj, start, stop))
def sequence_tuple(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySequence_Tuple")
return self.builder.call(fn, [obj])
def list_new(self, szval):
fnty = Type.function(self.pyobj, [self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_New")
return self.builder.call(fn, [szval])
def list_size(self, lst):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyList_Size")
return self.builder.call(fn, [lst])
def list_append(self, lst, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyList_Append")
return self.builder.call(fn, [lst, val])
def list_setitem(self, lst, idx, val):
"""
Warning: Steals reference to ``val``
"""
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.pyobj])
fn = self._get_function(fnty, name="PyList_SetItem")
return self.builder.call(fn, [lst, idx, val])
def list_getitem(self, lst, idx):
"""
Returns a borrowed reference.
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyList_GetItem")
if isinstance(idx, int):
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [lst, idx])
def list_setslice(self, lst, start, stop, obj):
if obj is None:
obj = self.get_null_object()
fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
self.py_ssize_t, self.pyobj])
fn = self._get_function(fnty, name="PyList_SetSlice")
return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
def tuple_getitem(self, tup, idx):
"""
Borrow reference
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
fn = self._get_function(fnty, name="PyTuple_GetItem")
idx = self.context.get_constant(types.intp, idx)
return self.builder.call(fn, [tup, idx])
def tuple_pack(self, items):
fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
fn = self._get_function(fnty, name="PyTuple_Pack")
n = self.context.get_constant(types.intp, len(items))
args = [n]
args.extend(items)
return self.builder.call(fn, args)
def tuple_size(self, tup):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PyTuple_Size")
return self.builder.call(fn, [tup])
def tuple_new(self, count):
fnty = Type.function(self.pyobj, [Type.int()])
fn = self._get_function(fnty, name='PyTuple_New')
return self.builder.call(fn, [self.context.get_constant(types.int32,
count)])
def tuple_setitem(self, tuple_val, index, item):
"""
Steals a reference to `item`.
"""
fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
index = self.context.get_constant(types.int32, index)
self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
def set_new(self, iterable=None):
if iterable is None:
iterable = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PySet_New")
return self.builder.call(fn, [iterable])
def set_add(self, set, value):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PySet_Add")
return self.builder.call(fn, [set, value])
def set_clear(self, set):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PySet_Clear")
return self.builder.call(fn, [set])
def set_size(self, set):
fnty = Type.function(self.py_ssize_t, [self.pyobj])
fn = self._get_function(fnty, name="PySet_Size")
return self.builder.call(fn, [set])
def set_update(self, set, iterable):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="_PySet_Update")
return self.builder.call(fn, [set, iterable])
def set_next_entry(self, set, posptr, keyptr, hashptr):
fnty = Type.function(Type.int(),
[self.pyobj, self.py_ssize_t.as_pointer(),
self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
fn = self._get_function(fnty, name="_PySet_NextEntry")
return self.builder.call(fn, (set, posptr, keyptr, hashptr))
    @contextlib.contextmanager
    def set_iterate(self, set):
        """
        Context manager emitting a loop over the items of *set* using
        ``_PySet_NextEntry``.  Yields an ``_IteratorLoop`` whose
        ``value`` is the current key (loaded from a stack slot) and
        whose ``do_break`` callback branches out of the loop early.
        """
        builder = self.builder
        # Stack slots written by _PySet_NextEntry on each iteration.
        hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
        keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
        # Iteration position; must start at 0.
        posptr = cgutils.alloca_once_value(builder,
                                           ir.Constant(self.py_ssize_t, 0),
                                           name="posptr")

        bb_body = builder.append_basic_block("bb_body")
        bb_end = builder.append_basic_block("bb_end")

        builder.branch(bb_body)

        def do_break():
            builder.branch(bb_end)

        with builder.goto_block(bb_body):
            r = self.set_next_entry(set, posptr, keyptr, hashptr)
            # A zero return means the iteration is exhausted.
            finished = cgutils.is_null(builder, r)
            with builder.if_then(finished, likely=False):
                builder.branch(bb_end)
            # The caller's body is emitted here, between the exhaustion
            # check and the back-edge.
            yield _IteratorLoop(builder.load(keyptr), do_break)
            builder.branch(bb_body)

        builder.position_at_end(bb_end)
#
# GIL APIs
#
    def gil_ensure(self):
        """
        Ensure the GIL is acquired.
        The returned value must be consumed by gil_release().
        """
        gilptrty = Type.pointer(self.gil_state)
        fnty = Type.function(Type.void(), [gilptrty])
        fn = self._get_function(fnty, "numba_gil_ensure")
        # Stack slot receiving the opaque GIL state value; passed back
        # to gil_release() later.
        gilptr = cgutils.alloca_once(self.builder, self.gil_state)
        self.builder.call(fn, [gilptr])
        return gilptr
def gil_release(self, gil):
"""
Release the acquired GIL by gil_ensure().
Must be paired with a gil_ensure().
"""
gilptrty = Type.pointer(self.gil_state)
fnty = Type.function(Type.void(), [gilptrty])
fn = self._get_function(fnty, "numba_gil_release")
return self.builder.call(fn, [gil])
def save_thread(self):
"""
Release the GIL and return the former thread state
(an opaque non-NULL pointer).
"""
fnty = Type.function(self.voidptr, [])
fn = self._get_function(fnty, name="PyEval_SaveThread")
return self.builder.call(fn, [])
def restore_thread(self, thread_state):
"""
Restore the given thread state by reacquiring the GIL.
"""
fnty = Type.function(Type.void(), [self.voidptr])
fn = self._get_function(fnty, name="PyEval_RestoreThread")
self.builder.call(fn, [thread_state])
#
# Generic object private data (a way of associating an arbitrary void *
# pointer to an arbitrary Python object).
#
def object_get_private_data(self, obj):
fnty = Type.function(self.voidptr, [self.pyobj])
fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
return self.builder.call(fn, (obj,))
def object_set_private_data(self, obj, ptr):
fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
return self.builder.call(fn, (obj, ptr))
def object_reset_private_data(self, obj):
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
def import_module_noblock(self, modname):
fnty = Type.function(self.pyobj, [self.cstring])
fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
return self.builder.call(fn, [modname])
def call_function_objargs(self, callee, objargs):
fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
args = [callee] + list(objargs)
args.append(self.context.get_constant_null(types.pyobject))
return self.builder.call(fn, args)
    def call_method(self, callee, method, objargs=()):
        """
        Emit ``callee.method(*objargs)`` through ``PyObject_CallMethod``.
        *method* is a host Python string; *objargs* is a sequence of
        LLVM pyobject values.
        """
        cname = self.context.insert_const_string(self.module, method)
        fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
                             var_arg=True)
        fn = self._get_function(fnty, name="PyObject_CallMethod")
        # One 'O' format code per object argument.
        fmt = 'O' * len(objargs)
        cfmt = self.context.insert_const_string(self.module, fmt)
        args = [callee, cname, cfmt]
        if objargs:
            args.extend(objargs)
        # NULL-terminate the varargs list.
        args.append(self.context.get_constant_null(types.pyobject))
        return self.builder.call(fn, args)
def call(self, callee, args=None, kws=None):
if args is None:
args = self.get_null_object()
if kws is None:
kws = self.get_null_object()
fnty = Type.function(self.pyobj, [self.pyobj] * 3)
fn = self._get_function(fnty, name="PyObject_Call")
return self.builder.call(fn, (callee, args, kws))
def object_istrue(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_IsTrue")
return self.builder.call(fn, [obj])
def object_not(self, obj):
fnty = Type.function(Type.int(), [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Not")
return self.builder.call(fn, [obj])
    def object_richcompare(self, lhs, rhs, opstr):
        """
        Emit a rich comparison ``lhs <opstr> rhs`` between two pyobjects
        and return the resulting pyobject.

        Refer to Python source Include/object.h for macros definition
        of the opid.
        """
        # The index in this list matches CPython's Py_LT..Py_GE opids.
        ops = ['<', '<=', '==', '!=', '>', '>=']
        if opstr in ops:
            opid = ops.index(opstr)
            fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
            fn = self._get_function(fnty, name="PyObject_RichCompare")
            lopid = self.context.get_constant(types.int32, opid)
            return self.builder.call(fn, (lhs, rhs, lopid))
        elif opstr == 'is':
            # Identity: plain pointer comparison, no C-API call needed.
            bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
            return self.bool_from_bool(bitflag)
        elif opstr == 'is not':
            bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
            return self.bool_from_bool(bitflag)
        elif opstr in ('in', 'not in'):
            fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
            fn = self._get_function(fnty, name="PySequence_Contains")
            # Note the container (*rhs*) comes first in the C call.
            status = self.builder.call(fn, (rhs, lhs))
            negone = self.context.get_constant(types.int32, -1)
            is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
            # Stack allocate output and initialize to Null
            outptr = cgutils.alloca_once_value(self.builder,
                                               Constant.null(self.pyobj))
            # If PySequence_Contains returns non-error value
            with cgutils.if_likely(self.builder, is_good):
                if opstr == 'not in':
                    status = self.builder.not_(status)
                # Store the status as a boolean object
                truncated = self.builder.trunc(status, Type.int(1))
                self.builder.store(self.bool_from_bool(truncated),
                                   outptr)
            # NULL on error (caller must check c_api_error()).
            return self.builder.load(outptr)
        else:
            raise NotImplementedError("Unknown operator {op!r}".format(
                op=opstr))
def iter_next(self, iterobj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyIter_Next")
return self.builder.call(fn, [iterobj])
def object_getiter(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetIter")
return self.builder.call(fn, [obj])
def object_getattr_string(self, obj, attr):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
fn = self._get_function(fnty, name="PyObject_GetAttrString")
return self.builder.call(fn, [obj, cstr])
def object_getattr(self, obj, attr):
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetAttr")
return self.builder.call(fn, [obj, attr])
def object_setattr_string(self, obj, attr, val):
cstr = self.context.insert_const_string(self.module, attr)
fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttrString")
return self.builder.call(fn, [obj, cstr, val])
def object_setattr(self, obj, attr, val):
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetAttr")
return self.builder.call(fn, [obj, attr, val])
def object_delattr_string(self, obj, attr):
# PyObject_DelAttrString() is actually a C macro calling
# PyObject_SetAttrString() with value == NULL.
return self.object_setattr_string(obj, attr, self.get_null_object())
def object_delattr(self, obj, attr):
# PyObject_DelAttr() is actually a C macro calling
# PyObject_SetAttr() with value == NULL.
return self.object_setattr(obj, attr, self.get_null_object())
def object_getitem(self, obj, key):
"""
Return obj[key]
"""
fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_GetItem")
return self.builder.call(fn, (obj, key))
def object_setitem(self, obj, key, val):
"""
obj[key] = val
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_SetItem")
return self.builder.call(fn, (obj, key, val))
def object_delitem(self, obj, key):
"""
del obj[key]
"""
fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
fn = self._get_function(fnty, name="PyObject_DelItem")
return self.builder.call(fn, (obj, key))
def string_as_string(self, strobj):
fnty = Type.function(self.cstring, [self.pyobj])
fname = "PyUnicode_AsUTF8"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [strobj])
def string_as_string_and_size(self, strobj):
"""
Returns a tuple of ``(ok, buffer, length)``.
The ``ok`` is i1 value that is set if ok.
The ``buffer`` is a i8* of the output buffer.
The ``length`` is a i32/i64 (py_ssize_t) of the length of the buffer.
"""
p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
fnty = Type.function(self.cstring, [self.pyobj,
self.py_ssize_t.as_pointer()])
fname = "PyUnicode_AsUTF8AndSize"
fn = self._get_function(fnty, name=fname)
buffer = self.builder.call(fn, [strobj, p_length])
ok = self.builder.icmp_unsigned('!=',
ir.Constant(buffer.type, None),
buffer)
return (ok, buffer, self.builder.load(p_length))
    def string_as_string_size_and_kind(self, strobj):
        """
        Returns a tuple of ``(ok, buffer, length, kind, is_ascii, hash)``.
        The ``ok`` is i1 value that is set if ok.
        The ``buffer`` is a i8* of the output buffer.
        The ``length`` is a i32/i64 (py_ssize_t) of the length of the buffer.
        The ``kind`` is a i32 (int32) of the Unicode kind constant
        The ``is_ascii`` is a i32 (int32) — presumably nonzero for
        ASCII-only data; confirm against numba_extract_unicode.
        The ``hash`` is a long/uint64_t (py_hash_t) of the Unicode constant hash
        """
        # Output slots filled by numba_extract_unicode.
        p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
        p_kind = cgutils.alloca_once(self.builder, Type.int())
        p_ascii = cgutils.alloca_once(self.builder, Type.int())
        p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
        fnty = Type.function(self.cstring, [self.pyobj,
                                            self.py_ssize_t.as_pointer(),
                                            Type.int().as_pointer(),
                                            Type.int().as_pointer(),
                                            self.py_hash_t.as_pointer()])
        fname = "numba_extract_unicode"
        fn = self._get_function(fnty, name=fname)

        buffer = self.builder.call(
            fn, [strobj, p_length, p_kind, p_ascii, p_hash])
        # A NULL buffer indicates failure.
        ok = self.builder.icmp_unsigned('!=',
                                        ir.Constant(buffer.type, None),
                                        buffer)
        return (ok, buffer, self.builder.load(p_length),
                self.builder.load(p_kind), self.builder.load(p_ascii),
                self.builder.load(p_hash))
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def string_from_string(self, string):
fnty = Type.function(self.pyobj, [self.cstring])
fname = "PyUnicode_FromString"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string])
def string_from_kind_and_data(self, kind, string, size):
fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
fname = "PyUnicode_FromKindAndData"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [kind, string, size])
def bytes_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyBytes_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
def object_hash(self, obj):
fnty = Type.function(self.py_hash_t, [self.pyobj,])
fname = "PyObject_Hash"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [obj,])
def object_str(self, obj):
fnty = Type.function(self.pyobj, [self.pyobj])
fn = self._get_function(fnty, name="PyObject_Str")
return self.builder.call(fn, [obj])
    def make_none(self):
        """
        Return a *new* reference to Py_None (increfs the borrowed
        singleton before returning it).
        """
        obj = self.borrow_none()
        self.incref(obj)
        return obj
def borrow_none(self):
return self.get_c_object("_Py_NoneStruct")
def sys_write_stdout(self, fmt, *args):
fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
fn = self._get_function(fnty, name="PySys_FormatStdout")
return self.builder.call(fn, (fmt,) + args)
def object_dump(self, obj):
"""
Dump a Python object on C stderr. For debugging purposes.
"""
fnty = Type.function(Type.void(), [self.pyobj])
fn = self._get_function(fnty, name="_PyObject_Dump")
return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
def nrt_meminfo_new_from_pyobject(self, data, pyobj):
"""
Allocate a new MemInfo with data payload borrowed from a python
object.
"""
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[cgutils.voidptr_t, cgutils.voidptr_t],
)
fn = mod.get_or_insert_function(
fnty,
name="NRT_meminfo_new_from_pyobject",
)
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [data, pyobj])
def nrt_meminfo_as_pyobject(self, miptr):
mod = self.builder.module
fnty = ir.FunctionType(
self.pyobj,
[cgutils.voidptr_t]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_as_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miptr])
def nrt_meminfo_from_pyobject(self, miobj):
mod = self.builder.module
fnty = ir.FunctionType(
cgutils.voidptr_t,
[self.pyobj]
)
fn = mod.get_or_insert_function(
fnty,
name='NRT_meminfo_from_pyobject',
)
fn.return_value.add_attribute("noalias")
return self.builder.call(fn, [miobj])
def nrt_adapt_ndarray_from_python(self, ary, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def nrt_adapt_buffer_from_python(self, buf, ptr):
assert self.context.enable_nrt
fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
self.voidptr])
fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
"""
Return a pointer to a stack-allocated, zero-initialized Py_buffer.
"""
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
    def unserialize(self, structptr):
        """
        Unserialize some data. *structptr* should be a pointer to
        a {i8* data, i32 length, i8* hashbuf} structure, as produced
        by serialize_object() / serialize_uncached().
        """
        fnty = Type.function(self.pyobj,
                             (self.voidptr, ir.IntType(32), self.voidptr))
        fn = self._get_function(fnty, name="numba_unpickle")
        # Unpack the three struct fields and hand them to numba_unpickle.
        ptr = self.builder.extract_value(self.builder.load(structptr), 0)
        n = self.builder.extract_value(self.builder.load(structptr), 1)
        hashed = self.builder.extract_value(self.builder.load(structptr), 2)
        return self.builder.call(fn, (ptr, n, hashed))
def serialize_uncached(self, obj):
"""
Same as serialize_object(), but don't create a global variable,
simply return a literal {i8* data, i32 length, i8* hashbuf} structure.
"""
# First make the array constant
data = serialize.dumps(obj)
assert len(data) < 2**31
name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
bdata = cgutils.make_bytearray(data)
# Make SHA1 hash on the pickled content
# NOTE: update buffer size in numba_unpickle() when changing the
# hash algorithm.
hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
arr = self.context.insert_unique_const(self.module, name, bdata)
hasharr = self.context.insert_unique_const(
self.module, f"{name}.sha1", hashed,
)
# Then populate the structure constant
struct = ir.Constant.literal_struct([
arr.bitcast(self.voidptr),
ir.Constant(ir.IntType(32), arr.type.pointee.count),
hasharr.bitcast(self.voidptr),
])
return struct
    def serialize_object(self, obj):
        """
        Serialize the given object in the bitcode, and return it
        as a pointer to a {i8* data, i32 length, i8* hashbuf} structure
        constant (suitable for passing to unserialize()).
        """
        try:
            # Per-module memoization keyed on object identity.
            gv = self.module.__serialized[obj]
        except KeyError:
            struct = self.serialize_uncached(obj)
            name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
            gv = self.context.insert_unique_const(self.module, name, struct)
            # Make the id() (and hence the name) unique while populating the module.
            self.module.__serialized[obj] = gv
        return gv
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
"""
Unbox the Python object as the given Numba type.
A NativeValue instance is returned.
"""
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
def from_native_value(self, typ, val, env_manager=None):
"""
Box the native value of the given Numba type. A Python object
pointer is returned (NULL if an error occurred).
This method steals any native (NRT) reference embedded in *val*.
"""
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
def reflect_native_value(self, typ, val, env_manager=None):
"""
Reflect the native value onto its Python original, if any.
An error bit (as an LLVM value) is returned.
"""
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
"""
Extract the generator structure pointer from a generator *obj*
(a _dynfunc.Generator instance).
"""
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
    def from_native_generator(self, val, typ, env=None):
        """
        Make a Numba generator (a _dynfunc.Generator instance) from a
        generator structure pointer *val*.
        *env* is an optional _dynfunc.Environment instance to be wrapped
        in the generator.
        """
        llty = self.context.get_data_type(typ)
        assert not llty.is_pointer
        gen_struct_size = self.context.get_abi_sizeof(llty)

        gendesc = self.context.get_generator_desc(typ)

        # This is the PyCFunctionWithKeywords generated by PyCallWrapper
        genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
        genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)

        # This is the raw finalizer generated by _lower_generator_finalize_func()
        finalizerty = Type.function(Type.void(), [self.voidptr])
        if typ.has_finalizer:
            finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
        else:
            # No finalizer: pass a NULL function pointer.
            finalizer = Constant.null(Type.pointer(finalizerty))

        # PyObject *numba_make_generator(state_size, initial_state, nextfunc, finalizer, env)
        fnty = Type.function(self.pyobj, [self.py_ssize_t,
                                          self.voidptr,
                                          Type.pointer(genfnty),
                                          Type.pointer(finalizerty),
                                          self.voidptr])
        fn = self._get_function(fnty, name="numba_make_generator")

        state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
        initial_state = self.builder.bitcast(val, self.voidptr)
        if env is None:
            env = self.get_null_object()
        env = self.builder.bitcast(env, self.voidptr)

        return self.builder.call(fn,
                                 (state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
def call_jit_code(self, func, sig, args):
"""Calls into Numba jitted code and propagate error using the Python
calling convention.
Parameters
----------
func : function
The Python function to be compiled. This function is compiled
in nopython-mode.
sig : numba.typing.Signature
The function signature for *func*.
args : Sequence[llvmlite.binding.Value]
LLVM values to use as arguments.
Returns
-------
(is_error, res) : 2-tuple of llvmlite.binding.Value.
is_error : true iff *func* raised an exception.
res : Returned value from *func* iff *is_error* is false.
If *is_error* is true, this method will adapt the nopython exception
into a Python exception. Caller should return NULL to Python to
indicate an error.
"""
# Compile *func*
builder = self.builder
cres = self.context.compile_subroutine(builder, func, sig)
got_retty = cres.signature.return_type
retty = sig.return_type
if got_retty != retty:
# This error indicates an error in *func* or the caller of this
# method.
raise errors.LoweringError(
f'mismatching signature {got_retty} != {retty}.\n'
)
# Call into *func*
status, res = self.context.call_internal_no_propagate(
builder, cres.fndesc, sig, args,
)
# Post-call handling for *func*
is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
res_type = self.context.get_value_type(sig.return_type)
res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
# Handle error and adapt the nopython exception into cpython exception
with builder.if_else(status.is_error) as (has_err, no_err):
with has_err:
builder.store(status.is_error, is_error_ptr)
# Set error state in the Python interpreter
self.context.call_conv.raise_error(builder, self, status)
with no_err:
# Handle returned value
res = imputils.fix_returning_optional(
self.context, builder, sig, status, res,
)
builder.store(res, res_ptr)
is_error = builder.load(is_error_ptr)
res = builder.load(res_ptr)
return is_error, res
class ObjModeUtils:
    """Internal utils for calling objmode dispatcher from within NPM code.
    """
    def __init__(self, pyapi):
        # The PythonAPI instance used to emit all C-API calls.
        self.pyapi = pyapi

    def load_dispatcher(self, fnty, argtypes):
        """
        Return (as an LLVM value) the objmode dispatcher entry point for
        *fnty* specialized to *argtypes*, caching it in an internal
        module-level global so the lookup/compilation happens only once.
        """
        builder = self.pyapi.builder
        tyctx = self.pyapi.context
        m = builder.module

        # Add a global variable to cache the objmode dispatcher
        gv = ir.GlobalVariable(
            m, self.pyapi.pyobj,
            name=m.get_unique_name("cached_objmode_dispatcher"),
        )
        gv.initializer = gv.type.pointee(None)
        gv.linkage = 'internal'

        # Only fill the cache if it is still NULL (first execution).
        cached = builder.load(gv)
        with builder.if_then(cgutils.is_null(builder, cached)):
            # NOTE: `is_serialiable` (sic) is the actual numba API name.
            if serialize.is_serialiable(fnty.dispatcher):
                # Serializable dispatcher: embed a pickled
                # (dispatcher, argtypes) pair and compile lazily at run
                # time through _call_objmode_dispatcher.
                cls = type(self)
                compiler = self.pyapi.unserialize(
                    self.pyapi.serialize_object(cls._call_objmode_dispatcher)
                )
                serialized_dispatcher = self.pyapi.serialize_object(
                    (fnty.dispatcher, tuple(argtypes)),
                )
                compile_args = self.pyapi.unserialize(serialized_dispatcher)
                callee = self.pyapi.call_function_objargs(
                    compiler, [compile_args],
                )
                # Clean up
                self.pyapi.decref(compiler)
                self.pyapi.decref(compile_args)
            else:
                # Not serializable: compile now and bake the entry
                # point's address into the generated code.
                entry_pt = fnty.dispatcher.compile(tuple(argtypes))
                callee = tyctx.add_dynamic_addr(
                    builder, id(entry_pt), info="with_objectmode",
                )
            # Incref the dispatcher and cache it
            self.pyapi.incref(callee)
            builder.store(callee, gv)

        callee = builder.load(gv)
        return callee

    @staticmethod
    def _call_objmode_dispatcher(compile_args):
        # Runs at *run time*: unpickled as a plain function and called
        # to obtain the compiled dispatcher entry point.
        dispatcher, argtypes = compile_args
        entrypt = dispatcher.compile(argtypes)
        return entrypt
from collections import namedtuple
import contextlib
import pickle
import hashlib
from llvmlite import ir
from llvmlite.llvmpy.core import Type, Constant
import llvmlite.llvmpy.core as lc
import ctypes
from numba import _helperlib
from numba.core import (
types, utils, config, lowering, cgutils, imputils, serialize,
)
PY_UNICODE_1BYTE_KIND = _helperlib.py_unicode_1byte_kind
PY_UNICODE_2BYTE_KIND = _helperlib.py_unicode_2byte_kind
PY_UNICODE_4BYTE_KIND = _helperlib.py_unicode_4byte_kind
PY_UNICODE_WCHAR_KIND = _helperlib.py_unicode_wchar_kind
class _Registry(object):
    """
    Map Numba type classes to handler functions, resolved through the
    type class's MRO.
    """

    def __init__(self):
        self.functions = {}

    def register(self, typeclass):
        """
        Decorator factory registering a handler for *typeclass*.
        Each type class may only be registered once.
        """
        assert issubclass(typeclass, types.Type)
        def decorator(func):
            if typeclass in self.functions:
                raise KeyError("duplicate registration for %s" % (typeclass,))
            self.functions[typeclass] = func
            return func
        return decorator

    def lookup(self, typeclass, default=None):
        """
        Return the handler registered for *typeclass* or for the
        nearest base class in its MRO; *default* when none matches.
        """
        assert issubclass(typeclass, types.Type)
        registered = self.functions
        for klass in typeclass.__mro__:
            handler = registered.get(klass)
            if handler is not None:
                return handler
        return default
_boxers = _Registry()
_unboxers = _Registry()
_reflectors = _Registry()
box = _boxers.register
unbox = _unboxers.register
reflect = _reflectors.register
class _BoxContext(namedtuple("_BoxContext",
                             ("context", "builder", "pyapi", "env_manager"))):
    # Lightweight bundle of lowering state handed to @box implementations.
    __slots__ = ()

    def box(self, typ, val):
        # Convert native *val* of Numba type *typ* into a pyobject.
        return self.pyapi.from_native_value(typ, val, self.env_manager)
class _UnboxContext(namedtuple("_UnboxContext",
                               ("context", "builder", "pyapi"))):
    # Lightweight bundle of lowering state handed to @unbox implementations.
    __slots__ = ()

    def unbox(self, typ, obj):
        # Convert pyobject *obj* into a NativeValue of Numba type *typ*.
        return self.pyapi.to_native_value(typ, obj)
class _ReflectContext(namedtuple("_ReflectContext",
                                 ("context", "builder", "pyapi", "env_manager",
                                  "is_error"))):
    """
    Bundle of lowering state handed to @reflect implementations.
    *is_error* is a pointer to a stack-allocated i1 error flag.
    """
    __slots__ = ()

    def set_error(self):
        # Mark the reflection as failed by storing True into the flag.
        # llvmlite's builder.store(value, ptr) takes the value FIRST;
        # the previous argument order stored the flag pointer into a
        # constant, which is invalid IR.
        self.builder.store(cgutils.true_bit, self.is_error)

    def box(self, typ, val):
        # Convert native *val* of Numba type *typ* into a pyobject.
        return self.pyapi.from_native_value(typ, val, self.env_manager)

    def reflect(self, typ, val):
        # Propagate native mutations of *val* back to its Python original.
        return self.pyapi.reflect_native_value(typ, val, self.env_manager)
class NativeValue(object):
    """
    Wrapper around an unboxed native value, its error bit, and an
    optional cleanup callback.
    """

    def __init__(self, value, is_error=None, cleanup=None):
        self.value = value
        # Default to the constant "no error" bit when not supplied.
        if is_error is None:
            is_error = cgutils.false_bit
        self.is_error = is_error
        self.cleanup = cleanup
class EnvironmentManager(object):
    """
    Manage the constants pool of a lowering Environment together with
    its runtime (LLVM) representation.
    """

    def __init__(self, pyapi, env, env_body, env_ptr):
        assert isinstance(env, lowering.Environment)
        self.pyapi = pyapi
        self.env = env
        # LLVM struct/pointer views of the environment.
        self.env_body = env_body
        self.env_ptr = env_ptr

    def add_const(self, const):
        """
        Add *const* to the environment's constants pool and return its
        index, reusing the slot of an already-present identical object.
        """
        # Intern strings so equal strings share one identity.
        if isinstance(const, str):
            const = utils.intern(const)
        # Identity (`is`) comparison on purpose: distinct but equal
        # objects must keep distinct slots.
        for index, val in enumerate(self.env.consts):
            if val is const:
                break
        else:
            index = len(self.env.consts)
            self.env.consts.append(const)
        return index

    def read_const(self, index):
        """
        Emit code loading constant number *index* from the environment
        body; the result is a borrowed reference (via PyList_GetItem).
        Sets a RuntimeError and yields NULL if the consts list is NULL.
        """
        assert index < len(self.env.consts)

        builder = self.pyapi.builder
        consts = self.env_body.consts
        ret = cgutils.alloca_once(builder, self.pyapi.pyobj, zfill=True)
        with builder.if_else(cgutils.is_not_null(builder, consts)) as \
                (br_not_null, br_null):
            with br_not_null:
                getitem = self.pyapi.list_getitem(consts, index)
                builder.store(getitem, ret)
            with br_null:
                # Defensive: corrupted/NULL environment.
                self.pyapi.err_set_string(
                    "PyExc_RuntimeError",
                    "`env.consts` is NULL in `read_const`",
                )
        return builder.load(ret)
# (value, do_break) pair yielded by iteration helpers (see
# PythonAPI.set_iterate); calling ``do_break()`` exits the loop early.
_IteratorLoop = namedtuple('_IteratorLoop', ('value', 'do_break'))
class PythonAPI(object):
    def __init__(self, context, builder):
        """Bind a CPython C-API helper to the given target *context* and
        llvmlite IR *builder*, caching the LLVM types used by the wrappers.
        """
        # Imported for its import-time side effects — presumably registers
        # the box/unbox implementations; confirm before removing.
        from numba.core import boxing
        self.context = context
        self.builder = builder

        self.module = builder.basic_block.function.module
        # Lazily create a per-module cache dict (class-name-mangled
        # attribute, so it is private to PythonAPI).
        try:
            self.module.__serialized
        except AttributeError:
            self.module.__serialized = {}

        # Frequently used LLVM types, sized from the C ABI via ctypes.
        self.pyobj = self.context.get_argument_type(types.pyobject)
        self.pyobjptr = self.pyobj.as_pointer()
        self.voidptr = Type.pointer(Type.int(8))
        self.long = Type.int(ctypes.sizeof(ctypes.c_long) * 8)
        self.ulong = self.long
        self.longlong = Type.int(ctypes.sizeof(ctypes.c_ulonglong) * 8)
        self.ulonglong = self.longlong
        self.double = Type.double()
        self.py_ssize_t = self.context.get_value_type(types.intp)
        self.cstring = Type.pointer(Type.int(8))
        # Opaque blobs whose sizes are exported by Numba's C helper library.
        self.gil_state = Type.int(_helperlib.py_gil_state_size * 8)
        self.py_buffer_t = ir.ArrayType(ir.IntType(8), _helperlib.py_buffer_size)
        self.py_hash_t = self.py_ssize_t
        # PyUnicode representation "kind" constants mirrored from CPython.
        self.py_unicode_1byte_kind = _helperlib.py_unicode_1byte_kind
        self.py_unicode_2byte_kind = _helperlib.py_unicode_2byte_kind
        self.py_unicode_4byte_kind = _helperlib.py_unicode_4byte_kind
        self.py_unicode_wchar_kind = _helperlib.py_unicode_wchar_kind
    def get_env_manager(self, env, env_body, env_ptr):
        # Construct an EnvironmentManager bound to this PythonAPI.
        return EnvironmentManager(self, env, env_body, env_ptr)

    def emit_environment_sentry(self, envptr, return_pyobject=False,
                                debug_msg=''):
        """Emit a guard that fails cleanly when *envptr* is NULL.

        With *return_pyobject*, the enclosing function must return a
        PyObject*: a RuntimeError is set and NULL returned.  Otherwise the
        native calling convention's user-exception path is used.
        """
        is_null = cgutils.is_null(self.builder, envptr)
        with cgutils.if_unlikely(self.builder, is_null):
            if return_pyobject:
                fnty = self.builder.function.type.pointee
                assert fnty.return_type == self.pyobj
                self.err_set_string(
                    "PyExc_RuntimeError", f"missing Environment: {debug_msg}",
                )
                self.builder.ret(self.get_null_object())
            else:
                self.context.call_conv.return_user_exc(
                    self.builder, RuntimeError,
                    (f"missing Environment: {debug_msg}",),
                )
    # Emit Py_IncRef(obj).
    def incref(self, obj):
        fnty = Type.function(Type.void(), [self.pyobj])
        fn = self._get_function(fnty, name="Py_IncRef")
        self.builder.call(fn, [obj])

    # Emit Py_DecRef(obj).
    def decref(self, obj):
        fnty = Type.function(Type.void(), [self.pyobj])
        fn = self._get_function(fnty, name="Py_DecRef")
        self.builder.call(fn, [obj])

    # Emit a call to Numba's C helper numba_py_type(obj).
    def get_type(self, obj):
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="numba_py_type")
        return self.builder.call(fn, [obj])
    # Emit PyArg_ParseTupleAndKeywords(args, kws, fmt, keywords, ...).
    def parse_tuple_and_keywords(self, args, kws, fmt, keywords, *objs):
        charptr = Type.pointer(Type.int(8))
        charptrary = Type.pointer(charptr)
        argtypes = [self.pyobj, self.pyobj, charptr, charptrary]
        fnty = Type.function(Type.int(), argtypes, var_arg=True)
        fn = self._get_function(fnty, name="PyArg_ParseTupleAndKeywords")
        return self.builder.call(fn, [args, kws, fmt, keywords] + list(objs))

    # Emit PyArg_ParseTuple(args, fmt, ...).
    def parse_tuple(self, args, fmt, *objs):
        charptr = Type.pointer(Type.int(8))
        argtypes = [self.pyobj, charptr]
        fnty = Type.function(Type.int(), argtypes, var_arg=True)
        fn = self._get_function(fnty, name="PyArg_ParseTuple")
        return self.builder.call(fn, [args, fmt] + list(objs))

    # Emit PyArg_UnpackTuple(args, name, n_min, n_max, ...); *name* may be
    # a Python str, in which case it is interned as a module constant.
    def unpack_tuple(self, args, name, n_min, n_max, *objs):
        charptr = Type.pointer(Type.int(8))
        argtypes = [self.pyobj, charptr, self.py_ssize_t, self.py_ssize_t]
        fnty = Type.function(Type.int(), argtypes, var_arg=True)
        fn = self._get_function(fnty, name="PyArg_UnpackTuple")
        n_min = Constant.int(self.py_ssize_t, n_min)
        n_max = Constant.int(self.py_ssize_t, n_max)
        if isinstance(name, str):
            name = self.context.insert_const_string(self.builder.module, name)
        return self.builder.call(fn, [args, name, n_min, n_max] + list(objs))
    # Emit PyErr_Occurred() -> borrowed exception type or NULL.
    def err_occurred(self):
        fnty = Type.function(self.pyobj, ())
        fn = self._get_function(fnty, name="PyErr_Occurred")
        return self.builder.call(fn, ())

    # Emit PyErr_Clear().
    def err_clear(self):
        fnty = Type.function(Type.void(), ())
        fn = self._get_function(fnty, name="PyErr_Clear")
        return self.builder.call(fn, ())

    # Emit PyErr_SetString(exctype, msg).  Both arguments may be given as
    # Python strs: *exctype* names a C-level object (e.g. "PyExc_ValueError")
    # and *msg* is interned as a module constant.
    def err_set_string(self, exctype, msg):
        fnty = Type.function(Type.void(), [self.pyobj, self.cstring])
        fn = self._get_function(fnty, name="PyErr_SetString")
        if isinstance(exctype, str):
            exctype = self.get_c_object(exctype)
        if isinstance(msg, str):
            msg = self.context.insert_const_string(self.module, msg)
        return self.builder.call(fn, (exctype, msg))

    # Emit PyErr_Format(exctype, msg, ...) with printf-style varargs.
    def err_format(self, exctype, msg, *format_args):
        fnty = Type.function(Type.void(), [self.pyobj, self.cstring], var_arg=True)
        fn = self._get_function(fnty, name="PyErr_Format")
        if isinstance(exctype, str):
            exctype = self.get_c_object(exctype)
        if isinstance(msg, str):
            msg = self.context.insert_const_string(self.module, msg)
        return self.builder.call(fn, (exctype, msg) + tuple(format_args))

    # Emit a call to Numba's numba_do_raise helper; *exc* defaults to None
    # (a bare re-raise marker).
    def raise_object(self, exc=None):
        fnty = Type.function(Type.void(), [self.pyobj])
        fn = self._get_function(fnty, name="numba_do_raise")
        if exc is None:
            exc = self.make_none()
        return self.builder.call(fn, (exc,))

    # Emit PyErr_SetObject(exctype, excval).
    def err_set_object(self, exctype, excval):
        fnty = Type.function(Type.void(), [self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PyErr_SetObject")
        if isinstance(exctype, str):
            exctype = self.get_c_object(exctype)
        return self.builder.call(fn, (exctype, excval))

    # Emit PyErr_SetNone(exctype).
    def err_set_none(self, exctype):
        fnty = Type.function(Type.void(), [self.pyobj])
        fn = self._get_function(fnty, name="PyErr_SetNone")
        if isinstance(exctype, str):
            exctype = self.get_c_object(exctype)
        return self.builder.call(fn, (exctype,))

    # Emit PyErr_WriteUnraisable(obj).
    def err_write_unraisable(self, obj):
        fnty = Type.function(Type.void(), [self.pyobj])
        fn = self._get_function(fnty, name="PyErr_WriteUnraisable")
        return self.builder.call(fn, (obj,))

    # Emit PyErr_Fetch(pty, pval, ptb) into the given pointer slots.
    def err_fetch(self, pty, pval, ptb):
        fnty = Type.function(Type.void(), [self.pyobjptr] * 3)
        fn = self._get_function(fnty, name="PyErr_Fetch")
        return self.builder.call(fn, (pty, pval, ptb))

    # Emit PyErr_Restore(ty, val, tb).
    def err_restore(self, ty, val, tb):
        fnty = Type.function(Type.void(), [self.pyobj] * 3)
        fn = self._get_function(fnty, name="PyErr_Restore")
        return self.builder.call(fn, (ty, val, tb))
    @contextlib.contextmanager
    def err_push(self, keep_new=False):
        """Emit IR saving the current error state around the context body
        and restoring it afterwards.

        With *keep_new*, an error raised by the body takes precedence:
        the saved state is decref'd instead of restored.
        """
        pty, pval, ptb = [cgutils.alloca_once(self.builder, self.pyobj)
                          for i in range(3)]
        self.err_fetch(pty, pval, ptb)
        yield
        ty = self.builder.load(pty)
        val = self.builder.load(pval)
        tb = self.builder.load(ptb)
        if keep_new:
            new_error = cgutils.is_not_null(self.builder, self.err_occurred())
            with self.builder.if_else(new_error, likely=False) as (if_error, if_ok):
                with if_error:
                    # Code block raised an error, keep it and drop the
                    # saved state.
                    self.decref(ty)
                    self.decref(val)
                    self.decref(tb)
                with if_ok:
                    # Restore the saved error state.
                    self.err_restore(ty, val, tb)
        else:
            self.err_restore(ty, val, tb)
    def get_c_object(self, name):
        """Get a Python object through its C-accessible *name*
        (e.g. "PyExc_ValueError"), dll-imported from the interpreter."""
        # A LLVM global variable is implicitly a pointer to the declared
        # type, so we use pyobj.pointee here.
        return self.context.get_c_value(self.builder, self.pyobj.pointee, name,
                                        dllimport=True)

    # Emit a NameError for a missing global name.
    def raise_missing_global_error(self, name):
        msg = "global name '%s' is not defined" % name
        cstr = self.context.insert_const_string(self.module, msg)
        self.err_set_string("PyExc_NameError", cstr)

    # Emit a NameError for a missing (local) name.
    def raise_missing_name_error(self, name):
        msg = "name '%s' is not defined" % name
        cstr = self.context.insert_const_string(self.module, msg)
        self.err_set_string("PyExc_NameError", cstr)

    # Emit a non-returning call to Py_FatalError(msg).
    def fatal_error(self, msg):
        fnty = Type.function(Type.void(), [self.cstring])
        fn = self._get_function(fnty, name="Py_FatalError")
        fn.attributes.add("noreturn")
        cstr = self.context.insert_const_string(self.module, msg)
        self.builder.call(fn, (cstr,))
    # Emit PyDict_GetItemString(dic, name); *name* is a Python str interned
    # as a module constant.  Returns a borrowed reference per CPython docs.
    def dict_getitem_string(self, dic, name):
        fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
        fn = self._get_function(fnty, name="PyDict_GetItemString")
        cstr = self.context.insert_const_string(self.module, name)
        return self.builder.call(fn, [dic, cstr])

    # Emit PyDict_GetItem(dic, name).
    def dict_getitem(self, dic, name):
        fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PyDict_GetItem")
        return self.builder.call(fn, [dic, name])

    # Emit PyDict_New(), or _PyDict_NewPresized(presize) when a non-zero
    # size hint is given at compile time.
    def dict_new(self, presize=0):
        if presize == 0:
            fnty = Type.function(self.pyobj, ())
            fn = self._get_function(fnty, name="PyDict_New")
            return self.builder.call(fn, ())
        else:
            fnty = Type.function(self.pyobj, [self.py_ssize_t])
            fn = self._get_function(fnty, name="_PyDict_NewPresized")
            return self.builder.call(fn,
                                     [Constant.int(self.py_ssize_t, presize)])

    # Emit PyDict_SetItem(dictobj, nameobj, valobj).
    def dict_setitem(self, dictobj, nameobj, valobj):
        fnty = Type.function(Type.int(), (self.pyobj, self.pyobj,
                                          self.pyobj))
        fn = self._get_function(fnty, name="PyDict_SetItem")
        return self.builder.call(fn, (dictobj, nameobj, valobj))

    # Emit PyDict_SetItemString(dictobj, name, valobj); *name* is a Python
    # str interned as a module constant.
    def dict_setitem_string(self, dictobj, name, valobj):
        fnty = Type.function(Type.int(), (self.pyobj, self.cstring,
                                          self.pyobj))
        fn = self._get_function(fnty, name="PyDict_SetItemString")
        cstr = self.context.insert_const_string(self.module, name)
        return self.builder.call(fn, (dictobj, cstr, valobj))

    # Build a new dict from (compile-time str key, IR value) pairs; items
    # are only inserted if the dict allocation succeeded.
    def dict_pack(self, keyvalues):
        dictobj = self.dict_new()
        with self.if_object_ok(dictobj):
            for k, v in keyvalues:
                self.dict_setitem_string(dictobj, k, v)
        return dictobj
    # Emit PyFloat_FromDouble(fval).
    def float_from_double(self, fval):
        fnty = Type.function(self.pyobj, [self.double])
        fn = self._get_function(fnty, name="PyFloat_FromDouble")
        return self.builder.call(fn, [fval])

    # Emit PyNumber_AsSsize_t(numobj, PyExc_OverflowError) so out-of-range
    # values raise OverflowError rather than being clipped.
    def number_as_ssize_t(self, numobj):
        fnty = Type.function(self.py_ssize_t, [self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PyNumber_AsSsize_t")
        # Deliberately raise OverflowError, as Python 2 will raise TypeError
        # when the argument is not a Python int.
        exc_class = self.get_c_object("PyExc_OverflowError")
        return self.builder.call(fn, [numobj, exc_class])

    # Emit PyNumber_Long(numobj).
    def number_long(self, numobj):
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="PyNumber_Long")
        return self.builder.call(fn, [numobj])

    # Emit PyLong_AsUnsignedLongLong(numobj).
    def long_as_ulonglong(self, numobj):
        fnty = Type.function(self.ulonglong, [self.pyobj])
        fn = self._get_function(fnty, name="PyLong_AsUnsignedLongLong")
        return self.builder.call(fn, [numobj])
def long_as_longlong(self, numobj):
fnty = Type.function(self.ulonglong, [self.pyobj])
fn = self._get_function(fnty, name="PyLong_AsLongLong")
return self.builder.call(fn, [numobj])
    def long_as_voidptr(self, numobj):
        """Emit PyLong_AsVoidPtr(numobj): convert an int object to a raw
        void* value."""
        fnty = Type.function(self.voidptr, [self.pyobj])
        fn = self._get_function(fnty, name="PyLong_AsVoidPtr")
        return self.builder.call(fn, [numobj])
def _long_from_native_int(self, ival, func_name, native_int_type,
signed):
fnty = Type.function(self.pyobj, [native_int_type])
fn = self._get_function(fnty, name=func_name)
resptr = cgutils.alloca_once(self.builder, self.pyobj)
fn = self._get_function(fnty, name=func_name)
self.builder.store(self.builder.call(fn, [ival]), resptr)
return self.builder.load(resptr)
    # Emit PyLong_FromLong(ival).
    def long_from_long(self, ival):
        func_name = "PyLong_FromLong"
        fnty = Type.function(self.pyobj, [self.long])
        fn = self._get_function(fnty, name=func_name)
        return self.builder.call(fn, [ival])

    # Emit PyLong_FromUnsignedLong(ival).
    def long_from_ulong(self, ival):
        return self._long_from_native_int(ival, "PyLong_FromUnsignedLong",
                                          self.long, signed=False)

    # Emit PyLong_FromSsize_t(ival).
    def long_from_ssize_t(self, ival):
        return self._long_from_native_int(ival, "PyLong_FromSsize_t",
                                          self.py_ssize_t, signed=True)

    # Emit PyLong_FromLongLong(ival).
    def long_from_longlong(self, ival):
        return self._long_from_native_int(ival, "PyLong_FromLongLong",
                                          self.longlong, signed=True)

    # Emit PyLong_FromUnsignedLongLong(ival).
    def long_from_ulonglong(self, ival):
        return self._long_from_native_int(ival, "PyLong_FromUnsignedLongLong",
                                          self.ulonglong, signed=False)

    def long_from_signed_int(self, ival):
        """Return a Python integer from any native signed integer value,
        sign-extending to the smallest fitting C long/long long."""
        bits = ival.type.width
        if bits <= self.long.width:
            return self.long_from_long(self.builder.sext(ival, self.long))
        elif bits <= self.longlong.width:
            return self.long_from_longlong(self.builder.sext(ival, self.longlong))
        else:
            raise OverflowError("integer too big (%d bits)" % (bits))

    def long_from_unsigned_int(self, ival):
        """Same as long_from_signed_int, but for unsigned values
        (zero-extension)."""
        bits = ival.type.width
        if bits <= self.ulong.width:
            return self.long_from_ulong(self.builder.zext(ival, self.ulong))
        elif bits <= self.ulonglong.width:
            return self.long_from_ulonglong(self.builder.zext(ival, self.ulonglong))
        else:
            raise OverflowError("integer too big (%d bits)" % (bits))
    # Declare the binary C function PyNumber_<name>(lhs, rhs).
    def _get_number_operator(self, name):
        fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PyNumber_%s" % name)
        return fn

    # Emit PyNumber_<name> or PyNumber_InPlace<name> on (lhs, rhs).
    def _call_number_operator(self, name, lhs, rhs, inplace=False):
        if inplace:
            name = "InPlace" + name
        fn = self._get_number_operator(name)
        return self.builder.call(fn, [lhs, rhs])

    def number_add(self, lhs, rhs, inplace=False):
        return self._call_number_operator("Add", lhs, rhs, inplace=inplace)

    def number_subtract(self, lhs, rhs, inplace=False):
        return self._call_number_operator("Subtract", lhs, rhs, inplace=inplace)

    def number_multiply(self, lhs, rhs, inplace=False):
        return self._call_number_operator("Multiply", lhs, rhs, inplace=inplace)

    def number_truedivide(self, lhs, rhs, inplace=False):
        return self._call_number_operator("TrueDivide", lhs, rhs, inplace=inplace)

    def number_floordivide(self, lhs, rhs, inplace=False):
        return self._call_number_operator("FloorDivide", lhs, rhs, inplace=inplace)

    def number_remainder(self, lhs, rhs, inplace=False):
        return self._call_number_operator("Remainder", lhs, rhs, inplace=inplace)

    def number_matrix_multiply(self, lhs, rhs, inplace=False):
        return self._call_number_operator("MatrixMultiply", lhs, rhs, inplace=inplace)

    def number_lshift(self, lhs, rhs, inplace=False):
        return self._call_number_operator("Lshift", lhs, rhs, inplace=inplace)

    def number_rshift(self, lhs, rhs, inplace=False):
        return self._call_number_operator("Rshift", lhs, rhs, inplace=inplace)

    def number_and(self, lhs, rhs, inplace=False):
        return self._call_number_operator("And", lhs, rhs, inplace=inplace)

    def number_or(self, lhs, rhs, inplace=False):
        return self._call_number_operator("Or", lhs, rhs, inplace=inplace)

    def number_xor(self, lhs, rhs, inplace=False):
        return self._call_number_operator("Xor", lhs, rhs, inplace=inplace)

    # Emit PyNumber_[InPlace]Power(lhs, rhs, None) — the ternary power's
    # third (modulus) argument is always None here.
    def number_power(self, lhs, rhs, inplace=False):
        fnty = Type.function(self.pyobj, [self.pyobj] * 3)
        fname = "PyNumber_InPlacePower" if inplace else "PyNumber_Power"
        fn = self._get_function(fnty, fname)
        return self.builder.call(fn, [lhs, rhs, self.borrow_none()])
    # Emit PyNumber_Negative(obj).
    def number_negative(self, obj):
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="PyNumber_Negative")
        return self.builder.call(fn, (obj,))

    # Emit PyNumber_Positive(obj).
    def number_positive(self, obj):
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="PyNumber_Positive")
        return self.builder.call(fn, (obj,))

    # Emit PyNumber_Float(val).
    def number_float(self, val):
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="PyNumber_Float")
        return self.builder.call(fn, [val])

    # Emit PyNumber_Invert(obj).
    def number_invert(self, obj):
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="PyNumber_Invert")
        return self.builder.call(fn, (obj,))

    # Emit PyFloat_AsDouble(fobj).
    def float_as_double(self, fobj):
        fnty = Type.function(self.double, [self.pyobj])
        fn = self._get_function(fnty, name="PyFloat_AsDouble")
        return self.builder.call(fn, [fobj])

    # Build a bool object from an i1 value by widening to C long first.
    def bool_from_bool(self, bval):
        longval = self.builder.zext(bval, self.long)
        return self.bool_from_long(longval)

    # Emit PyBool_FromLong(ival).
    def bool_from_long(self, ival):
        fnty = Type.function(self.pyobj, [self.long])
        fn = self._get_function(fnty, name="PyBool_FromLong")
        return self.builder.call(fn, [ival])
    # Emit PyComplex_FromDoubles(realval, imagval).
    def complex_from_doubles(self, realval, imagval):
        fnty = Type.function(self.pyobj, [Type.double(), Type.double()])
        fn = self._get_function(fnty, name="PyComplex_FromDoubles")
        return self.builder.call(fn, [realval, imagval])

    # Emit PyComplex_RealAsDouble(cobj).
    def complex_real_as_double(self, cobj):
        fnty = Type.function(Type.double(), [self.pyobj])
        fn = self._get_function(fnty, name="PyComplex_RealAsDouble")
        return self.builder.call(fn, [cobj])

    # Emit PyComplex_ImagAsDouble(cobj).
    def complex_imag_as_double(self, cobj):
        fnty = Type.function(Type.double(), [self.pyobj])
        fn = self._get_function(fnty, name="PyComplex_ImagAsDouble")
        return self.builder.call(fn, [cobj])
#
# Concrete slice API
#
    def slice_as_ints(self, obj):
        """Emit a call to numba_unpack_slice(obj, &start, &stop, &step).

        Returns (ok, start, stop, step): *ok* is an IR boolean that is true
        when the helper returned 0 (success).
        """
        pstart = cgutils.alloca_once(self.builder, self.py_ssize_t)
        pstop = cgutils.alloca_once(self.builder, self.py_ssize_t)
        pstep = cgutils.alloca_once(self.builder, self.py_ssize_t)

        fnty = Type.function(Type.int(),
                             [self.pyobj] + [self.py_ssize_t.as_pointer()] * 3)
        fn = self._get_function(fnty, name="numba_unpack_slice")
        res = self.builder.call(fn, (obj, pstart, pstop, pstep))

        start = self.builder.load(pstart)
        stop = self.builder.load(pstop)
        step = self.builder.load(pstep)
        return cgutils.is_null(self.builder, res), start, stop, step
#
# List and sequence APIs
#
    # Emit PySequence_GetSlice(obj, start, stop).
    def sequence_getslice(self, obj, start, stop):
        fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t,
                                          self.py_ssize_t])
        fn = self._get_function(fnty, name="PySequence_GetSlice")
        return self.builder.call(fn, (obj, start, stop))

    # Emit PySequence_Tuple(obj).
    def sequence_tuple(self, obj):
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="PySequence_Tuple")
        return self.builder.call(fn, [obj])

    # Emit PyList_New(szval).
    def list_new(self, szval):
        fnty = Type.function(self.pyobj, [self.py_ssize_t])
        fn = self._get_function(fnty, name="PyList_New")
        return self.builder.call(fn, [szval])

    # Emit PyList_Size(lst).
    def list_size(self, lst):
        fnty = Type.function(self.py_ssize_t, [self.pyobj])
        fn = self._get_function(fnty, name="PyList_Size")
        return self.builder.call(fn, [lst])

    # Emit PyList_Append(lst, val).
    def list_append(self, lst, val):
        fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PyList_Append")
        return self.builder.call(fn, [lst, val])

    # Emit PyList_SetItem(lst, idx, val).  NOTE: per the CPython API this
    # steals a reference to *val*.
    def list_setitem(self, lst, idx, val):
        fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
                                          self.pyobj])
        fn = self._get_function(fnty, name="PyList_SetItem")
        return self.builder.call(fn, [lst, idx, val])

    # Emit PyList_GetItem(lst, idx); a Python int *idx* is converted to an
    # intp constant.
    def list_getitem(self, lst, idx):
        fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
        fn = self._get_function(fnty, name="PyList_GetItem")
        if isinstance(idx, int):
            idx = self.context.get_constant(types.intp, idx)
        return self.builder.call(fn, [lst, idx])

    # Emit PyList_SetSlice(lst, start, stop, obj); obj=None deletes the
    # slice (NULL is passed).
    def list_setslice(self, lst, start, stop, obj):
        if obj is None:
            obj = self.get_null_object()
        fnty = Type.function(Type.int(), [self.pyobj, self.py_ssize_t,
                                          self.py_ssize_t, self.pyobj])
        fn = self._get_function(fnty, name="PyList_SetSlice")
        return self.builder.call(fn, (lst, start, stop, obj))
#
# Concrete tuple API
#
    # Emit PyTuple_GetItem(tup, idx) for a compile-time integer *idx*.
    def tuple_getitem(self, tup, idx):
        fnty = Type.function(self.pyobj, [self.pyobj, self.py_ssize_t])
        fn = self._get_function(fnty, name="PyTuple_GetItem")
        idx = self.context.get_constant(types.intp, idx)
        return self.builder.call(fn, [tup, idx])

    # Emit PyTuple_Pack(len(items), *items).
    def tuple_pack(self, items):
        fnty = Type.function(self.pyobj, [self.py_ssize_t], var_arg=True)
        fn = self._get_function(fnty, name="PyTuple_Pack")
        n = self.context.get_constant(types.intp, len(items))
        args = [n]
        args.extend(items)
        return self.builder.call(fn, args)

    # Emit PyTuple_Size(tup).
    def tuple_size(self, tup):
        fnty = Type.function(self.py_ssize_t, [self.pyobj])
        fn = self._get_function(fnty, name="PyTuple_Size")
        return self.builder.call(fn, [tup])

    # Emit PyTuple_New(count).
    # NOTE(review): PyTuple_New takes Py_ssize_t but a 32-bit int is
    # declared/passed here — confirm this matches the target ABI.
    def tuple_new(self, count):
        fnty = Type.function(self.pyobj, [Type.int()])
        fn = self._get_function(fnty, name='PyTuple_New')
        return self.builder.call(fn, [self.context.get_constant(types.int32,
                                                                count)])

    # Emit PyTuple_SetItem(tuple_val, index, item).  NOTE: per the CPython
    # API this steals a reference to *item*.  No value is returned.
    def tuple_setitem(self, tuple_val, index, item):
        fnty = Type.function(Type.int(), [self.pyobj, Type.int(), self.pyobj])
        setitem_fn = self._get_function(fnty, name='PyTuple_SetItem')
        index = self.context.get_constant(types.int32, index)
        self.builder.call(setitem_fn, [tuple_val, index, item])
#
# Concrete set API
#
    # Emit PySet_New(iterable); iterable=None builds an empty set (NULL).
    def set_new(self, iterable=None):
        if iterable is None:
            iterable = self.get_null_object()
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="PySet_New")
        return self.builder.call(fn, [iterable])

    # Emit PySet_Add(set, value).
    def set_add(self, set, value):
        fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PySet_Add")
        return self.builder.call(fn, [set, value])

    # Emit PySet_Clear(set).
    def set_clear(self, set):
        fnty = Type.function(Type.int(), [self.pyobj])
        fn = self._get_function(fnty, name="PySet_Clear")
        return self.builder.call(fn, [set])

    # Emit PySet_Size(set).
    def set_size(self, set):
        fnty = Type.function(self.py_ssize_t, [self.pyobj])
        fn = self._get_function(fnty, name="PySet_Size")
        return self.builder.call(fn, [set])

    # Emit _PySet_Update(set, iterable).
    def set_update(self, set, iterable):
        fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="_PySet_Update")
        return self.builder.call(fn, [set, iterable])

    # Emit _PySet_NextEntry(set, &pos, &key, &hash) — low-level iteration
    # primitive used by set_iterate().
    def set_next_entry(self, set, posptr, keyptr, hashptr):
        fnty = Type.function(Type.int(),
                             [self.pyobj, self.py_ssize_t.as_pointer(),
                              self.pyobj.as_pointer(), self.py_hash_t.as_pointer()])
        fn = self._get_function(fnty, name="_PySet_NextEntry")
        return self.builder.call(fn, (set, posptr, keyptr, hashptr))
    @contextlib.contextmanager
    def set_iterate(self, set):
        """Emit a loop over the entries of *set*, yielding an
        _IteratorLoop(value, do_break) for the body to use.

        The loop terminates when _PySet_NextEntry returns 0; the body may
        call ``do_break()`` to exit early.
        """
        builder = self.builder

        hashptr = cgutils.alloca_once(builder, self.py_hash_t, name="hashptr")
        keyptr = cgutils.alloca_once(builder, self.pyobj, name="keyptr")
        posptr = cgutils.alloca_once_value(builder,
                                           ir.Constant(self.py_ssize_t, 0),
                                           name="posptr")

        bb_body = builder.append_basic_block("bb_body")
        bb_end = builder.append_basic_block("bb_end")

        builder.branch(bb_body)

        def do_break():
            builder.branch(bb_end)

        with builder.goto_block(bb_body):
            r = self.set_next_entry(set, posptr, keyptr, hashptr)
            finished = cgutils.is_null(builder, r)
            with builder.if_then(finished, likely=False):
                builder.branch(bb_end)
            yield _IteratorLoop(builder.load(keyptr), do_break)
            builder.branch(bb_body)

        builder.position_at_end(bb_end)
#
# GIL APIs
#
    def gil_ensure(self):
        """Emit numba_gil_ensure into a fresh stack slot; returns the
        pointer, to be handed back to gil_release()."""
        gilptrty = Type.pointer(self.gil_state)
        fnty = Type.function(Type.void(), [gilptrty])
        fn = self._get_function(fnty, "numba_gil_ensure")
        gilptr = cgutils.alloca_once(self.builder, self.gil_state)
        self.builder.call(fn, [gilptr])
        return gilptr

    # Emit numba_gil_release(gil) for a pointer returned by gil_ensure().
    def gil_release(self, gil):
        gilptrty = Type.pointer(self.gil_state)
        fnty = Type.function(Type.void(), [gilptrty])
        fn = self._get_function(fnty, "numba_gil_release")
        return self.builder.call(fn, [gil])

    # Emit PyEval_SaveThread() -> opaque thread-state pointer.
    def save_thread(self):
        fnty = Type.function(self.voidptr, [])
        fn = self._get_function(fnty, name="PyEval_SaveThread")
        return self.builder.call(fn, [])

    # Emit PyEval_RestoreThread(thread_state).
    def restore_thread(self, thread_state):
        fnty = Type.function(Type.void(), [self.voidptr])
        fn = self._get_function(fnty, name="PyEval_RestoreThread")
        self.builder.call(fn, [thread_state])
#
# Generic object private data (a way of associating an arbitrary void *
# pointer to an arbitrary Python object).
#
    # Emit numba_get_pyobject_private_data(obj) -> void*.
    def object_get_private_data(self, obj):
        fnty = Type.function(self.voidptr, [self.pyobj])
        fn = self._get_function(fnty, name="numba_get_pyobject_private_data")
        return self.builder.call(fn, (obj,))

    # Emit numba_set_pyobject_private_data(obj, ptr).
    def object_set_private_data(self, obj, ptr):
        fnty = Type.function(Type.void(), [self.pyobj, self.voidptr])
        fn = self._get_function(fnty, name="numba_set_pyobject_private_data")
        return self.builder.call(fn, (obj, ptr))

    # Emit numba_reset_pyobject_private_data(obj).
    def object_reset_private_data(self, obj):
        fnty = Type.function(Type.void(), [self.pyobj])
        fn = self._get_function(fnty, name="numba_reset_pyobject_private_data")
        return self.builder.call(fn, (obj,))
#
# Other APIs (organize them better!)
#
    # Emit PyImport_ImportModuleNoBlock(modname).
    def import_module_noblock(self, modname):
        fnty = Type.function(self.pyobj, [self.cstring])
        fn = self._get_function(fnty, name="PyImport_ImportModuleNoBlock")
        return self.builder.call(fn, [modname])

    # Emit PyObject_CallFunctionObjArgs(callee, *objargs, NULL).
    def call_function_objargs(self, callee, objargs):
        fnty = Type.function(self.pyobj, [self.pyobj], var_arg=True)
        fn = self._get_function(fnty, name="PyObject_CallFunctionObjArgs")
        args = [callee] + list(objargs)
        # NULL sentinel terminates the vararg list.
        args.append(self.context.get_constant_null(types.pyobject))
        return self.builder.call(fn, args)

    # Emit PyObject_CallMethod(callee, method, "OO...", *objargs, NULL)
    # with one "O" format code per argument.
    def call_method(self, callee, method, objargs=()):
        cname = self.context.insert_const_string(self.module, method)
        fnty = Type.function(self.pyobj, [self.pyobj, self.cstring, self.cstring],
                             var_arg=True)
        fn = self._get_function(fnty, name="PyObject_CallMethod")
        fmt = 'O' * len(objargs)
        cfmt = self.context.insert_const_string(self.module, fmt)
        args = [callee, cname, cfmt]
        if objargs:
            args.extend(objargs)
        args.append(self.context.get_constant_null(types.pyobject))
        return self.builder.call(fn, args)

    # Emit PyObject_Call(callee, args, kws); missing args/kws become NULL.
    def call(self, callee, args=None, kws=None):
        if args is None:
            args = self.get_null_object()
        if kws is None:
            kws = self.get_null_object()
        fnty = Type.function(self.pyobj, [self.pyobj] * 3)
        fn = self._get_function(fnty, name="PyObject_Call")
        return self.builder.call(fn, (callee, args, kws))
    # Emit PyObject_IsTrue(obj).
    def object_istrue(self, obj):
        fnty = Type.function(Type.int(), [self.pyobj])
        fn = self._get_function(fnty, name="PyObject_IsTrue")
        return self.builder.call(fn, [obj])

    # Emit PyObject_Not(obj).
    def object_not(self, obj):
        fnty = Type.function(Type.int(), [self.pyobj])
        fn = self._get_function(fnty, name="PyObject_Not")
        return self.builder.call(fn, [obj])

    def object_richcompare(self, lhs, rhs, opstr):
        """Emit a rich comparison of *lhs* and *rhs* for operator *opstr*.

        Ordering/equality operators map to PyObject_RichCompare;
        'is'/'is not' compare pointers directly; 'in'/'not in' use
        PySequence_Contains (returning NULL on error).
        """
        ops = ['<', '<=', '==', '!=', '>', '>=']
        if opstr in ops:
            # Py_LT..Py_GE opids follow the same order as *ops*.
            opid = ops.index(opstr)
            fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, Type.int()])
            fn = self._get_function(fnty, name="PyObject_RichCompare")
            lopid = self.context.get_constant(types.int32, opid)
            return self.builder.call(fn, (lhs, rhs, lopid))
        elif opstr == 'is':
            bitflag = self.builder.icmp(lc.ICMP_EQ, lhs, rhs)
            return self.bool_from_bool(bitflag)
        elif opstr == 'is not':
            bitflag = self.builder.icmp(lc.ICMP_NE, lhs, rhs)
            return self.bool_from_bool(bitflag)
        elif opstr in ('in', 'not in'):
            fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
            fn = self._get_function(fnty, name="PySequence_Contains")
            # PySequence_Contains(container, value): container is rhs.
            status = self.builder.call(fn, (rhs, lhs))
            negone = self.context.get_constant(types.int32, -1)
            is_good = self.builder.icmp(lc.ICMP_NE, status, negone)
            # Stack allocate output and initialize to Null
            outptr = cgutils.alloca_once_value(self.builder,
                                               Constant.null(self.pyobj))
            # If PySequence_Contains returns non-error value
            with cgutils.if_likely(self.builder, is_good):
                if opstr == 'not in':
                    status = self.builder.not_(status)
                # Store the status as a boolean object
                truncated = self.builder.trunc(status, Type.int(1))
                self.builder.store(self.bool_from_bool(truncated),
                                   outptr)
            return self.builder.load(outptr)
        else:
            raise NotImplementedError("Unknown operator {op!r}".format(
                op=opstr))
    # Emit PyIter_Next(iterobj).
    def iter_next(self, iterobj):
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="PyIter_Next")
        return self.builder.call(fn, [iterobj])

    # Emit PyObject_GetIter(obj).
    def object_getiter(self, obj):
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="PyObject_GetIter")
        return self.builder.call(fn, [obj])

    # Emit PyObject_GetAttrString(obj, attr); *attr* is a Python str.
    def object_getattr_string(self, obj, attr):
        cstr = self.context.insert_const_string(self.module, attr)
        fnty = Type.function(self.pyobj, [self.pyobj, self.cstring])
        fn = self._get_function(fnty, name="PyObject_GetAttrString")
        return self.builder.call(fn, [obj, cstr])

    # Emit PyObject_GetAttr(obj, attr).
    def object_getattr(self, obj, attr):
        fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PyObject_GetAttr")
        return self.builder.call(fn, [obj, attr])

    # Emit PyObject_SetAttrString(obj, attr, val); *attr* is a Python str.
    def object_setattr_string(self, obj, attr, val):
        cstr = self.context.insert_const_string(self.module, attr)
        fnty = Type.function(Type.int(), [self.pyobj, self.cstring, self.pyobj])
        fn = self._get_function(fnty, name="PyObject_SetAttrString")
        return self.builder.call(fn, [obj, cstr, val])

    # Emit PyObject_SetAttr(obj, attr, val).
    def object_setattr(self, obj, attr, val):
        fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PyObject_SetAttr")
        return self.builder.call(fn, [obj, attr, val])

    def object_delattr_string(self, obj, attr):
        # PyObject_DelAttrString() is actually a C macro calling
        # PyObject_SetAttrString() with value == NULL.
        return self.object_setattr_string(obj, attr, self.get_null_object())

    def object_delattr(self, obj, attr):
        # PyObject_DelAttr() is actually a C macro calling
        # PyObject_SetAttr() with value == NULL.
        return self.object_setattr(obj, attr, self.get_null_object())

    # Emit PyObject_GetItem(obj, key); a new reference or NULL on error.
    def object_getitem(self, obj, key):
        fnty = Type.function(self.pyobj, [self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PyObject_GetItem")
        return self.builder.call(fn, (obj, key))

    # Emit PyObject_SetItem(obj, key, val); 0 on success, -1 on error.
    def object_setitem(self, obj, key, val):
        fnty = Type.function(Type.int(), [self.pyobj, self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PyObject_SetItem")
        return self.builder.call(fn, (obj, key, val))

    # Emit PyObject_DelItem(obj, key).
    def object_delitem(self, obj, key):
        fnty = Type.function(Type.int(), [self.pyobj, self.pyobj])
        fn = self._get_function(fnty, name="PyObject_DelItem")
        return self.builder.call(fn, (obj, key))
    # Emit PyUnicode_AsUTF8(strobj) -> const char* (NULL on error).
    def string_as_string(self, strobj):
        fnty = Type.function(self.cstring, [self.pyobj])
        fname = "PyUnicode_AsUTF8"
        fn = self._get_function(fnty, name=fname)
        return self.builder.call(fn, [strobj])

    def string_as_string_and_size(self, strobj):
        """Emit PyUnicode_AsUTF8AndSize(strobj, &length).

        Returns (ok, buffer, length): *ok* is an IR boolean that is true
        when the returned buffer is non-NULL.
        """
        p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
        fnty = Type.function(self.cstring, [self.pyobj,
                                            self.py_ssize_t.as_pointer()])
        fname = "PyUnicode_AsUTF8AndSize"
        fn = self._get_function(fnty, name=fname)

        buffer = self.builder.call(fn, [strobj, p_length])
        ok = self.builder.icmp_unsigned('!=',
                                        ir.Constant(buffer.type, None),
                                        buffer)
        return (ok, buffer, self.builder.load(p_length))

    def string_as_string_size_and_kind(self, strobj):
        """Emit numba_extract_unicode(strobj, &len, &kind, &ascii, &hash).

        Returns (ok, buffer, length, kind, is_ascii, hash); *ok* is true
        when the returned buffer is non-NULL.
        """
        p_length = cgutils.alloca_once(self.builder, self.py_ssize_t)
        p_kind = cgutils.alloca_once(self.builder, Type.int())
        p_ascii = cgutils.alloca_once(self.builder, Type.int())
        p_hash = cgutils.alloca_once(self.builder, self.py_hash_t)
        fnty = Type.function(self.cstring, [self.pyobj,
                                            self.py_ssize_t.as_pointer(),
                                            Type.int().as_pointer(),
                                            Type.int().as_pointer(),
                                            self.py_hash_t.as_pointer()])
        fname = "numba_extract_unicode"
        fn = self._get_function(fnty, name=fname)

        buffer = self.builder.call(
            fn, [strobj, p_length, p_kind, p_ascii, p_hash])
        ok = self.builder.icmp_unsigned('!=',
                                        ir.Constant(buffer.type, None),
                                        buffer)
        return (ok, buffer, self.builder.load(p_length),
                self.builder.load(p_kind), self.builder.load(p_ascii),
                self.builder.load(p_hash))
def string_from_string_and_size(self, string, size):
fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
fname = "PyString_FromStringAndSize"
fn = self._get_function(fnty, name=fname)
return self.builder.call(fn, [string, size])
    # Emit PyUnicode_FromString(string) for a NUL-terminated UTF-8 buffer.
    def string_from_string(self, string):
        fnty = Type.function(self.pyobj, [self.cstring])
        fname = "PyUnicode_FromString"
        fn = self._get_function(fnty, name=fname)
        return self.builder.call(fn, [string])

    # Emit PyUnicode_FromKindAndData(kind, string, size).
    def string_from_kind_and_data(self, kind, string, size):
        fnty = Type.function(self.pyobj, [Type.int(), self.cstring, self.py_ssize_t])
        fname = "PyUnicode_FromKindAndData"
        fn = self._get_function(fnty, name=fname)
        return self.builder.call(fn, [kind, string, size])

    # Emit PyBytes_FromStringAndSize(string, size).
    def bytes_from_string_and_size(self, string, size):
        fnty = Type.function(self.pyobj, [self.cstring, self.py_ssize_t])
        fname = "PyBytes_FromStringAndSize"
        fn = self._get_function(fnty, name=fname)
        return self.builder.call(fn, [string, size])

    # Emit PyObject_Hash(obj) -> Py_hash_t.
    def object_hash(self, obj):
        fnty = Type.function(self.py_hash_t, [self.pyobj,])
        fname = "PyObject_Hash"
        fn = self._get_function(fnty, name=fname)
        return self.builder.call(fn, [obj,])

    # Emit PyObject_Str(obj).
    def object_str(self, obj):
        fnty = Type.function(self.pyobj, [self.pyobj])
        fn = self._get_function(fnty, name="PyObject_Str")
        return self.builder.call(fn, [obj])
    # Return None with an extra incref (an owned reference).
    def make_none(self):
        obj = self.borrow_none()
        self.incref(obj)
        return obj

    # Return the C-level None singleton without increfing (borrowed).
    def borrow_none(self):
        return self.get_c_object("_Py_NoneStruct")

    # Emit PySys_FormatStdout(fmt, ...).
    def sys_write_stdout(self, fmt, *args):
        fnty = Type.function(Type.void(), [self.cstring], var_arg=True)
        fn = self._get_function(fnty, name="PySys_FormatStdout")
        return self.builder.call(fn, (fmt,) + args)

    # Emit _PyObject_Dump(obj) — low-level interpreter debugging helper.
    def object_dump(self, obj):
        fnty = Type.function(Type.void(), [self.pyobj])
        fn = self._get_function(fnty, name="_PyObject_Dump")
        return self.builder.call(fn, (obj,))
#
# NRT (Numba runtime) APIs
#
def nrt_adapt_ndarray_to_python(self, aryty, ary, dtypeptr):
assert self.context.enable_nrt, "NRT required"
intty = ir.IntType(32)
fnty = Type.function(self.pyobj,
[self.voidptr, intty, intty, self.pyobj])
fn = self._get_function(fnty, name="NRT_adapt_ndarray_to_python")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
ndim = self.context.get_constant(types.int32, aryty.ndim)
writable = self.context.get_constant(types.int32, int(aryty.mutable))
aryptr = cgutils.alloca_once_value(self.builder, ary)
return self.builder.call(fn, [self.builder.bitcast(aryptr,
self.voidptr),
ndim, writable, dtypeptr])
    def nrt_meminfo_new_from_pyobject(self, data, pyobj):
        """Emit a call to NRT_meminfo_new_from_pyobject(data, pyobj).

        Both arguments are void* at the C level; the result is a void*
        NRT meminfo.  The arguments are marked ``nocapture`` and the
        return value ``noalias`` to help LLVM optimization.
        """
        mod = self.builder.module
        fnty = ir.FunctionType(
            cgutils.voidptr_t,
            [cgutils.voidptr_t, cgutils.voidptr_t],
        )
        fn = mod.get_or_insert_function(
            fnty,
            name="NRT_meminfo_new_from_pyobject",
        )
        fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
        fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
        fn.return_value.add_attribute("noalias")
        return self.builder.call(fn, [data, pyobj])
    def nrt_meminfo_as_pyobject(self, miptr):
        """Emit a call to NRT_meminfo_as_pyobject(miptr).

        Converts a void* NRT meminfo pointer into a PyObject*.  The
        return value is marked ``noalias``.
        """
        mod = self.builder.module
        fnty = ir.FunctionType(
            self.pyobj,
            [cgutils.voidptr_t]
        )
        fn = mod.get_or_insert_function(
            fnty,
            name='NRT_meminfo_as_pyobject',
        )
        fn.return_value.add_attribute("noalias")
        return self.builder.call(fn, [miptr])
    def nrt_meminfo_from_pyobject(self, miobj):
        """Emit a call to NRT_meminfo_from_pyobject(miobj).

        Converts a PyObject* back into a void* NRT meminfo pointer
        (the inverse direction of nrt_meminfo_as_pyobject).  The return
        value is marked ``noalias``.
        """
        mod = self.builder.module
        fnty = ir.FunctionType(
            cgutils.voidptr_t,
            [self.pyobj]
        )
        fn = mod.get_or_insert_function(
            fnty,
            name='NRT_meminfo_from_pyobject',
        )
        fn.return_value.add_attribute("noalias")
        return self.builder.call(fn, [miobj])
    def nrt_adapt_ndarray_from_python(self, ary, ptr):
        """Emit a call to NRT_adapt_ndarray_from_python(ary, ptr).

        *ary* is the source PyObject*, *ptr* a void* destination for the
        native array struct.  Returns the C int status.  Requires NRT.
        Both arguments are marked ``nocapture``.
        """
        assert self.context.enable_nrt
        fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
        fn = self._get_function(fnty, name="NRT_adapt_ndarray_from_python")
        fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
        fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
        return self.builder.call(fn, (ary, ptr))
    def nrt_adapt_buffer_from_python(self, buf, ptr):
        """Emit a call to NRT_adapt_buffer_from_python(buf, ptr).

        *buf* is a Py_buffer*, *ptr* a void* destination.  Requires NRT.
        Both arguments are marked ``nocapture``.
        """
        assert self.context.enable_nrt
        fnty = Type.function(Type.void(), [Type.pointer(self.py_buffer_t),
                                           self.voidptr])
        fn = self._get_function(fnty, name="NRT_adapt_buffer_from_python")
        fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
        fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
        return self.builder.call(fn, (buf, ptr))
# ------ utils -----
def _get_function(self, fnty, name):
return self.module.get_or_insert_function(fnty, name=name)
def alloca_obj(self):
return self.builder.alloca(self.pyobj)
def alloca_buffer(self):
# Treat the buffer as an opaque array of bytes
ptr = cgutils.alloca_once_value(self.builder,
lc.Constant.null(self.py_buffer_t))
return ptr
@contextlib.contextmanager
def if_object_ok(self, obj):
with cgutils.if_likely(self.builder,
cgutils.is_not_null(self.builder, obj)):
yield
def print_object(self, obj):
strobj = self.object_str(obj)
cstr = self.string_as_string(strobj)
fmt = self.context.insert_const_string(self.module, "%s")
self.sys_write_stdout(fmt, cstr)
self.decref(strobj)
def print_string(self, text):
fmt = self.context.insert_const_string(self.module, text)
self.sys_write_stdout(fmt)
def get_null_object(self):
return Constant.null(self.pyobj)
def return_none(self):
none = self.make_none()
self.builder.ret(none)
def list_pack(self, items):
n = len(items)
seq = self.list_new(self.context.get_constant(types.intp, n))
with self.if_object_ok(seq):
for i in range(n):
idx = self.context.get_constant(types.intp, i)
self.incref(items[i])
self.list_setitem(seq, idx, items[i])
return seq
    def unserialize(self, structptr):
        """Emit a call to numba_unpickle() on the struct at *structptr*.

        The struct layout is (data void*, i32 length, sha1 void*), as
        produced by serialize_uncached()/serialize_object().  Returns
        the unpickled PyObject*.
        """
        fnty = Type.function(self.pyobj,
                             (self.voidptr, ir.IntType(32), self.voidptr))
        fn = self._get_function(fnty, name="numba_unpickle")
        ptr = self.builder.extract_value(self.builder.load(structptr), 0)
        n = self.builder.extract_value(self.builder.load(structptr), 1)
        hashed = self.builder.extract_value(self.builder.load(structptr), 2)
        return self.builder.call(fn, (ptr, n, hashed))
    def serialize_uncached(self, obj):
        """Pickle *obj* into module-level constants (no caching).

        Returns an LLVM struct constant of
        (void* pickled data, i32 length, void* sha1 digest), the format
        consumed by unserialize().  A new constant is emitted on every
        call; see serialize_object() for the memoizing variant.
        """
        # First make the array constant
        data = serialize.dumps(obj)
        assert len(data) < 2**31
        # DIFF_IR mode uses a fixed name so IR diffs are reproducible
        name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
        bdata = cgutils.make_bytearray(data)
        # Make SHA1 hash on the pickled content
        # NOTE: update buffer size in numba_unpickle() when changing the
        # hash algorithm.
        hashed = cgutils.make_bytearray(hashlib.sha1(data).digest())
        arr = self.context.insert_unique_const(self.module, name, bdata)
        hasharr = self.context.insert_unique_const(
            self.module, f"{name}.sha1", hashed,
        )
        # Then populate the structure constant
        struct = ir.Constant.literal_struct([
            arr.bitcast(self.voidptr),
            ir.Constant(ir.IntType(32), arr.type.pointee.count),
            hasharr.bitcast(self.voidptr),
        ])
        return struct
    def serialize_object(self, obj):
        """Return a global variable holding the pickled form of *obj*.

        The struct layout is that of serialize_uncached().  Results are
        memoized per-module in the ``module.__serialized`` dict so each
        object is pickled and emitted at most once.
        """
        try:
            gv = self.module.__serialized[obj]
        except KeyError:
            struct = self.serialize_uncached(obj)
            name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
            gv = self.context.insert_unique_const(self.module, name, struct)
            # Make the id() (and hence the name) unique while populating the module.
            self.module.__serialized[obj] = gv
        return gv
def c_api_error(self):
return cgutils.is_not_null(self.builder, self.err_occurred())
def to_native_value(self, typ, obj):
from numba.core.boxing import unbox_unsupported
impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
c = _UnboxContext(self.context, self.builder, self)
return impl(typ, obj, c)
def from_native_return(self, typ, val, env_manager):
assert not isinstance(typ, types.Optional), "callconv should have " \
"prevented the return of " \
"optional value"
out = self.from_native_value(typ, val, env_manager)
return out
def from_native_value(self, typ, val, env_manager=None):
from numba.core.boxing import box_unsupported
impl = _boxers.lookup(typ.__class__, box_unsupported)
c = _BoxContext(self.context, self.builder, self, env_manager)
return impl(typ, val, c)
def reflect_native_value(self, typ, val, env_manager=None):
impl = _reflectors.lookup(typ.__class__)
if impl is None:
# Reflection isn't needed for most types
return cgutils.false_bit
is_error = cgutils.alloca_once_value(self.builder, cgutils.false_bit)
c = _ReflectContext(self.context, self.builder, self, env_manager,
is_error)
impl(typ, val, c)
return self.builder.load(c.is_error)
def to_native_generator(self, obj, typ):
gen_ptr_ty = Type.pointer(self.context.get_data_type(typ))
value = self.context.get_generator_state(self.builder, obj, gen_ptr_ty)
return NativeValue(value)
    def from_native_generator(self, val, typ, env=None):
        """Box the native generator state *val* of generator type *typ*.

        Emits a call to numba_make_generator() with the ABI size of the
        state struct, a pointer to the state, the generator's CPython
        wrapper function, a finalizer (NULL when *typ* has none) and the
        environment object (NULL PyObject* when *env* is None).
        """
        llty = self.context.get_data_type(typ)
        assert not llty.is_pointer
        gen_struct_size = self.context.get_abi_sizeof(llty)
        gendesc = self.context.get_generator_desc(typ)
        # CPython-callable wrapper: (PyObject*, PyObject*, PyObject*) -> PyObject*
        genfnty = Type.function(self.pyobj, [self.pyobj, self.pyobj, self.pyobj])
        genfn = self._get_function(genfnty, name=gendesc.llvm_cpython_wrapper_name)
        # Finalizer takes the state pointer as void*
        finalizerty = Type.function(Type.void(), [self.voidptr])
        if typ.has_finalizer:
            finalizer = self._get_function(finalizerty, name=gendesc.llvm_finalizer_name)
        else:
            finalizer = Constant.null(Type.pointer(finalizerty))
        fnty = Type.function(self.pyobj, [self.py_ssize_t,
                                          self.voidptr,
                                          Type.pointer(genfnty),
                                          Type.pointer(finalizerty),
                                          self.voidptr])
        fn = self._get_function(fnty, name="numba_make_generator")
        state_size = ir.Constant(self.py_ssize_t, gen_struct_size)
        initial_state = self.builder.bitcast(val, self.voidptr)
        if env is None:
            env = self.get_null_object()
        env = self.builder.bitcast(env, self.voidptr)
        return self.builder.call(fn,
                                 (state_size, initial_state, genfn, finalizer, env))
def numba_array_adaptor(self, ary, ptr):
assert not self.context.enable_nrt
fnty = Type.function(Type.int(), [self.pyobj, self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_ndarray")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (ary, ptr))
def numba_buffer_adaptor(self, buf, ptr):
fnty = Type.function(Type.void(),
[ir.PointerType(self.py_buffer_t), self.voidptr])
fn = self._get_function(fnty, name="numba_adapt_buffer")
fn.args[0].add_attribute(lc.ATTR_NO_CAPTURE)
fn.args[1].add_attribute(lc.ATTR_NO_CAPTURE)
return self.builder.call(fn, (buf, ptr))
def complex_adaptor(self, cobj, cmplx):
fnty = Type.function(Type.int(), [self.pyobj, cmplx.type])
fn = self._get_function(fnty, name="numba_complex_adaptor")
return self.builder.call(fn, [cobj, cmplx])
def extract_record_data(self, obj, pbuf):
fnty = Type.function(self.voidptr,
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_extract_record_data")
return self.builder.call(fn, [obj, pbuf])
def get_buffer(self, obj, pbuf):
fnty = Type.function(Type.int(),
[self.pyobj, ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_get_buffer")
return self.builder.call(fn, [obj, pbuf])
def release_buffer(self, pbuf):
fnty = Type.function(Type.void(), [ir.PointerType(self.py_buffer_t)])
fn = self._get_function(fnty, name="numba_release_buffer")
return self.builder.call(fn, [pbuf])
def extract_np_datetime(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_datetime")
return self.builder.call(fn, [obj])
def extract_np_timedelta(self, obj):
fnty = Type.function(Type.int(64), [self.pyobj])
fn = self._get_function(fnty, name="numba_extract_np_timedelta")
return self.builder.call(fn, [obj])
def create_np_datetime(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_datetime")
return self.builder.call(fn, [val, unit_code])
def create_np_timedelta(self, val, unit_code):
unit_code = Constant.int(Type.int(), unit_code)
fnty = Type.function(self.pyobj, [Type.int(64), Type.int()])
fn = self._get_function(fnty, name="numba_create_np_timedelta")
return self.builder.call(fn, [val, unit_code])
def recreate_record(self, pdata, size, dtype, env_manager):
fnty = Type.function(self.pyobj, [Type.pointer(Type.int(8)),
Type.int(), self.pyobj])
fn = self._get_function(fnty, name="numba_recreate_record")
dtypeaddr = env_manager.read_const(env_manager.add_const(dtype))
return self.builder.call(fn, [pdata, size, dtypeaddr])
def string_from_constant_string(self, string):
cstr = self.context.insert_const_string(self.module, string)
sz = self.context.get_constant(types.intp, len(string))
return self.string_from_string_and_size(cstr, sz)
    def call_jit_code(self, func, sig, args):
        """Compile *func* as a subroutine under signature *sig* and call it.

        Returns ``(is_error, res)``:
          * *is_error* -- an i1 that is true when the callee signalled an
            error (the Python error is raised through the call convention
            in that case);
          * *res* -- the return value (zero-filled when an error occurred).

        Raises LoweringError when the compiled subroutine's inferred
        return type differs from ``sig.return_type``.
        """
        builder = self.builder
        cres = self.context.compile_subroutine(builder, func, sig)
        got_retty = cres.signature.return_type
        retty = sig.return_type
        if got_retty != retty:
            raise errors.LoweringError(
                f'mismatching signature {got_retty} != {retty}.\n'
            )
        # Call without automatic error propagation; status is inspected below
        status, res = self.context.call_internal_no_propagate(
            builder, cres.fndesc, sig, args,
        )
        # Stack slots so both branches can feed a single load afterwards
        is_error_ptr = cgutils.alloca_once(builder, cgutils.bool_t, zfill=True)
        res_type = self.context.get_value_type(sig.return_type)
        res_ptr = cgutils.alloca_once(builder, res_type, zfill=True)
        with builder.if_else(status.is_error) as (has_err, no_err):
            with has_err:
                builder.store(status.is_error, is_error_ptr)
                self.context.call_conv.raise_error(builder, self, status)
            with no_err:
                # Wrap the value when the declared return type is Optional
                res = imputils.fix_returning_optional(
                    self.context, builder, sig, status, res,
                )
                builder.store(res, res_ptr)
        is_error = builder.load(is_error_ptr)
        res = builder.load(res_ptr)
        return is_error, res
class ObjModeUtils:
    """Helpers for invoking an object-mode dispatcher from compiled code."""
    def __init__(self, pyapi):
        # pyapi: the PythonAPI instance used to emit CPython C-API calls
        self.pyapi = pyapi
    def load_dispatcher(self, fnty, argtypes):
        """Return the (PyObject*) dispatcher entry point for *fnty*.

        The entry point is cached in an internal module-level global and
        created lazily on first use.  When the dispatcher is serializable
        it is compiled at runtime via _call_objmode_dispatcher(); otherwise
        it is compiled now and its address embedded in the module.
        """
        builder = self.pyapi.builder
        tyctx = self.pyapi.context
        m = builder.module
        # Module-level cache slot, initialized to NULL
        gv = ir.GlobalVariable(
            m, self.pyapi.pyobj,
            name=m.get_unique_name("cached_objmode_dispatcher"),
        )
        gv.initializer = gv.type.pointee(None)
        gv.linkage = 'internal'
        # Make sure the cache is initialized on the first runtime use
        cached = builder.load(gv)
        with builder.if_then(cgutils.is_null(builder, cached)):
            if serialize.is_serialiable(fnty.dispatcher):  # (sic) numba API name
                cls = type(self)
                compiler = self.pyapi.unserialize(
                    self.pyapi.serialize_object(cls._call_objmode_dispatcher)
                )
                serialized_dispatcher = self.pyapi.serialize_object(
                    (fnty.dispatcher, tuple(argtypes)),
                )
                compile_args = self.pyapi.unserialize(serialized_dispatcher)
                callee = self.pyapi.call_function_objargs(
                    compiler, [compile_args],
                )
                # Clean up
                self.pyapi.decref(compiler)
                self.pyapi.decref(compile_args)
            else:
                # Not serializable: compile eagerly and embed the address
                entry_pt = fnty.dispatcher.compile(tuple(argtypes))
                callee = tyctx.add_dynamic_addr(
                    builder, id(entry_pt), info="with_objectmode",
                )
            # Incref the dispatcher and cache it
            self.pyapi.incref(callee)
            builder.store(callee, gv)
        callee = builder.load(gv)
        return callee
    @staticmethod
    def _call_objmode_dispatcher(compile_args):
        # Runtime helper (itself unpickled at runtime): compile the
        # dispatcher for the given argtypes and return its entry point.
        dispatcher, argtypes = compile_args
        entrypt = dispatcher.compile(argtypes)
        return entrypt
| true | true |
7901e8af03dd49d8f1599fc5c8694eb98d8de135 | 123,288 | py | Python | arelle/plugin/loadFromExcel.py | CapoeiraShaolin1/Arelle | 260fde1cfc99be690ce9ea6853e05884dcfd2594 | [
"Apache-2.0"
] | 1 | 2021-07-01T17:52:12.000Z | 2021-07-01T17:52:12.000Z | arelle/plugin/loadFromExcel.py | themrinalsinha/Arelle | f4a3c55846253c02c25db2280cf27292f1cf96c1 | [
"Apache-2.0"
] | 3 | 2021-01-07T23:36:40.000Z | 2021-12-13T20:43:27.000Z | arelle/plugin/loadFromExcel.py | themrinalsinha/Arelle | f4a3c55846253c02c25db2280cf27292f1cf96c1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
loadFromExcel.py is an example of a plug-in that will load an extension taxonomy from Excel
input and optionally save an (extension) DTS.
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
import os, io, sys, time, re, traceback, json, posixpath
from fnmatch import fnmatch
from collections import defaultdict, OrderedDict
from arelle import PythonUtil, XbrlConst, ModelDocument, UrlUtil
from arelle.PythonUtil import OrderedDefaultDict, OrderedSet
from arelle.ModelDocument import Type, create as createModelDocument
from arelle.ModelValue import qname, QName
from arelle.XbrlConst import (qnLinkLabel, standardLabelRoles, qnLinkReference, standardReferenceRoles,
qnLinkPart, gen, link, defaultLinkRole,
conceptLabel, elementLabel, conceptReference, summationItem
)
# QName for the xbrldt:closed attribute used on hypercube arcs
qnXbrldtClosed = qname("{http://xbrl.org/2005/xbrldt}xbrldt:closed")
# custom column-header text -> list of standard header names (filled from the DTS sheet)
importColHeaderMap = defaultdict(list)
# parses column headers like "label, role (lang)", "references, role (part)"
# or "relationship to, ..." into (resource type, role, lang-or-part)
resourceParsePattern = re.compile(r"(label[s]?|reference[s]?|relationship to),?\s*([\w][\w\s#+-:/]+[\w#+-/])(\s*[(]([^)]+)[)])?$")
# splits "name#number" role references into (name, number)
roleNumberPattern = re.compile(r"(.*)[#]([0-9][0-9A-Za-z]*)")
# Excel's _xHHHH_ escape for characters not representable in XLSX XML
xlUnicodePattern = re.compile("_x([0-9A-F]{4})_")
# module-level generation switches (may be overridden by the loader)
excludeDesignatedEnumerations = False
annotateEnumerationsDocumentation = False
annotateElementDocumentation = False
saveXmlLang = None
NULLENTRY = ({},)
# sort key prefixes to emit XML Schema facets in canonical order
facetSortOrder = {
    "fractionDigits" : "_00",
    "length": "_01",
    "minInclusive": "_02",
    "maxInclusive": "_03",
    "minExclusive": "_04",
    "maxExclusive": "_05",
    "minLength": "_06",
    "maxLength": "_07",
    "pattern": "_08",
    "totalDigits": "_09",
    "whiteSpace": "_10",
    "enumeration": "_11"}
def loadFromExcel(cntlr, modelXbrl, excelFile, mappedUri):
from openpyxl import load_workbook
from arelle import ModelDocument, ModelXbrl, XmlUtil
from arelle.ModelDocument import ModelDocumentReference
from arelle.ModelValue import qname
def xlUnicodeChar(match):
return chr(int(match.group(1), 16))
def xlValue(cell): # excel values may have encoded unicode, such as _0000D_
v = cell.value
if isinstance(v, str):
return xlUnicodePattern.sub(xlUnicodeChar, v).replace('\r\n','\n').replace('\r','\n')
return v
defaultLabelLang = saveXmlLang or "en"
importColumnHeaders = {
"名前空間プレフィックス": "prefix",
"prefix": "prefix",
"要素名": "name",
"name": "name",
"type": "type",
"typePrefix": "typePrefix", # usually part of type but optionally separate column
"substitutionGroup": "substitutionGroup",
"periodType": "periodType",
"balance": "balance",
"abstract": "abstract", # contains true if abstract
"nillable": "nillable",
"depth": "depth",
"minLength": "minLength",
"maxLength": "maxLength",
"minInclusive": "minInclusive",
"maxInclusive": "maxInclusive",
"length": "length",
"fixed": "fixed",
"pattern": "pattern",
"enumeration": "enumeration",
"excludedEnumeration": "excludedEnumeration",
"preferred label": "preferredLabel",
"preferredLabel": "preferredLabel",
"presentation parent": "presentationParent", # qname -- instead of label hierarchy and depth
"calculation parent": "calculationParent", # qname
"calculation weight": "calculationWeight",
# label col heading: ("label", role, lang [indented]),
"標準ラベル(日本語)": ("label", XbrlConst.standardLabel, "ja", "indented"),
"冗長ラベル(日本語)": ("label", XbrlConst.verboseLabel, "ja"),
"標準ラベル(英語)": ("label", XbrlConst.standardLabel, "en"),
"冗長ラベル(英語)": ("label", XbrlConst.verboseLabel, "en"),
"用途区分、財務諸表区分及び業種区分のラベル(日本語)": ("labels", XbrlConst.standardLabel, "ja"),
"用途区分、財務諸表区分及び業種区分のラベル(英語)": ("labels", XbrlConst.standardLabel, "en"),
# label [, role [(lang)]] : ("label", http resource role, lang [indented|overridePreferred])
"label": ("label", XbrlConst.standardLabel, defaultLabelLang, "indented"),
"label, standard": ("label", XbrlConst.standardLabel, defaultLabelLang, "overridePreferred"),
"label, terse": ("label", XbrlConst.terseLabel, defaultLabelLang),
"label, verbose": ("label", XbrlConst.verboseLabel, defaultLabelLang),
"label, documentation": ("label", XbrlConst.documentationLabel, defaultLabelLang),
"group": "linkrole",
"linkrole": "linkrole",
"ELR": "linkrole",
"dimension default": "dimensionDefault"
# reference ("reference", reference http resource role, reference part QName)
# reference, required": ("reference", "http://treasury.gov/dataact/role/taxonomyImplementationNote", qname("{http://treasury.gov/dataact/parts-2015-12-31}dataact-part:Required"))
# attribute, qname (attribute on element in xsd)
}
fatalLoadingErrors = []
startedAt = time.time()
if os.path.isabs(excelFile):
# allow relative filenames to loading directory
priorCWD = os.getcwd()
os.chdir(os.path.dirname(excelFile))
else:
priorCWD = None
importExcelBook = load_workbook(excelFile, data_only=True)
sheetNames = importExcelBook.get_sheet_names()
dtsSheet = None
if "XBRL DTS" in sheetNames:
dtsSheet = "XBRL DTS"
elif "DTS" in sheetNames:
dtsSheet = "DTS"
elif "Sheet2" in sheetNames:
dtsSheet = "Sheet2"
if dtsSheet:
dtsWs = importExcelBook[dtsSheet]
else:
dtsWs = None
imports = {"xbrli": ( ("namespace", XbrlConst.xbrli),
("schemaLocation", "http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd") )} # xml of imports
importXmlns = {}
hasPreLB = hasCalLB = hasDefLB = hasRefLB = hasGenLB = False
# xxxLB structure [ (elr1, def1, "_ELR_", [roots]), (elr2, def2, "_ELR_", [rootw]) ...]
# roots = (rootHref, None, "_root_", [children])
# children = (childPrefix, childName, arcrole, [grandChildren])
preLB = []
defLB = []
calLB = []
refLB = []
genLB = []
def lbDepthList(lbStruct, depth, parentList=None):
if len(lbStruct) > 0:
if depth == topDepth or not hasDepthColumn:
return lbStruct[-1].childStruct
return lbDepthList(lbStruct[-1].childStruct, depth-1, list)
else:
if hasDepthColumn:
cntlr.addToLog("Depth error, Excel sheet: {excelSheet} row: {excelRow}"
.format(excelSheet=importSheetName, excelRow=iRow),
messageCode="importExcel:depth")
return None
splitString = None # to split repeating groups (order, depth)
importFileName = None # for alternate import file
importSheetNames = []
skipRows = [] # [(from,to),(from,to)] row number starting at 1
genDocs = {} # generated documents (schema + referenced linkbases)
genElementsDoc = None
def newDoc(name):
genDocs[name] = PythonUtil.attrdict(
name = name,
initialComment = None,
schemaDocumentation = None,
extensionSchemaPrefix = "",
extensionSchemaFilename = "",
extensionSchemaRelDirname = None, # only non-null for relative directory path
extensionSchemaNamespaceURI = "",
extensionSchemaVersion = None, # <schema @version>
extensionRoles = {}, # key is roleURI, value is role definition
extensionRoleLabels= defaultdict(set), # key is roleURI, value is set( (lang, label) )
extensionElements = {},
extensionTypes = {}, # attrs are name, base. has facets in separate dict same as elements
extensionLabels = {}, # key = (prefix, name, lang, role), value = label text
extensionReferences = OrderedDefaultDict(OrderedSet), # key = (prefix, name, role) values = (partQn, text)
hasEnumerationDocumentation = False,
imports = {"xbrli": ( ("namespace", XbrlConst.xbrli),
("schemaLocation", "http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd") )}, # xml of imports
includes = [], # just schemaLocation
importXmlns = {},
importFilenames = {}, # file names relative to base
childGenDocs = [],
linkbaseRefs = [],
labelLinkbases = [],
referenceLinkbases = [],
hasPreLB = False,
hasCalLB = False,
hasDefLB = False,
hasRefLB = False,
hasGenLB = False,
generated = False
)
return genDocs[name]
thisDoc = newDoc(None)
excelDir = os.path.dirname(excelFile) + os.path.sep
def docRelpath(filename, baseDir=None):
if baseDir is None:
baseDir = thisDoc.extensionSchemaRelDirname
if (baseDir is not None and
not (UrlUtil.isAbsolute(filename) or os.path.isabs(filename))):
return posixpath.relpath(filename, baseDir)
return filename
isUSGAAP = False
isGenerateAndImport = True
extensionPrefixForCoreLabels = None
dtsActionColIndex = 0
dtsFiletypeColIndex = 1
dtsPrefixColIndex = 2
dtsFilenameColIndex = 3
dtsNamespaceURIColIndex = 4
for iRow, row in enumerate(dtsWs.rows if dtsWs else ()):
try:
if (len(row) < 1): # skip if col 1 is non-existent
continue
_col0 = row[0].value
if isinstance(_col0, str) and _col0.startswith("#"): # empty or "#"
continue
if iRow == 0:
# title row may have columns differently laid out
for i, col in enumerate(row):
v = xlValue(col)
if isinstance(v, str):
if v == "specification": dtsActionColIndex = i
if v.startswith("file type"): dtsFiletypeColIndex = i
if v.startswith("prefix"): dtsPrefixColIndex = i
if v.startswith("file, href or role definition"): dtsFilenameColIndex = i
if v.startswith("namespace URI"): dtsNamespaceURIColIndex = i
continue
action = filetype = prefix = filename = namespaceURI = None
if len(row) > dtsActionColIndex: action = xlValue(row[dtsActionColIndex])
if len(row) > dtsFiletypeColIndex: filetype = xlValue(row[dtsFiletypeColIndex])
if len(row) > dtsPrefixColIndex: prefix = xlValue(row[dtsPrefixColIndex])
if len(row) > dtsFilenameColIndex: filename = xlValue(row[dtsFilenameColIndex])
if len(row) > dtsNamespaceURIColIndex: namespaceURI = xlValue(row[dtsNamespaceURIColIndex])
lbType = lang = None
if action == "import":
if filetype in ("role", "arcrole"):
continue
elif filetype == "schema":
thisDoc.imports[prefix] = ( ("namespace", namespaceURI), ("schemaLocation", docRelpath(filename)) )
thisDoc.importXmlns[prefix] = namespaceURI
thisDoc.importFilenames[prefix] = filename
if re.match(r"http://[^/]+/us-gaap/", namespaceURI):
isUSGAAP = True
elif filetype == "linkbase":
typeLang = prefix.split()
if len(typeLang) > 0:
lbType = typeLang[0]
else:
lbType = "unknown"
thisDoc.linkbaseRefs.append( (lbType, filename, False) )
elif action == "include" and filename:
thisDoc.includes.append(docRelpath(filename))
elif action == "xmlns" and prefix and namespaceURI:
thisDoc.importXmlns[prefix] = namespaceURI
elif action in ("extension", "generate"):
if filetype == "schema":
if prefix:
# starts new document.
if not thisDoc.name:
del genDocs[thisDoc.name] # remove anonymous doc
thisDoc = newDoc(prefix) # new doc with prefix as its name
thisDoc.extensionSchemaPrefix = prefix
thisDoc.extensionSchemaFilename = filename
thisDoc.extensionSchemaNamespaceURI = namespaceURI
if not UrlUtil.isAbsolute(filename) and not os.path.isabs(filename):
thisDoc.extensionSchemaRelDirname = posixpath.dirname(filename)
else:
thisDoc.extensionSchemaRelDirname = None
elif filetype == "linkbase":
typeLang = prefix.split()
if len(typeLang) > 0:
lbType = typeLang[0]
else:
lbType = "unknown"
if len(typeLang) > 1:
lang = referenceRole = typeLang[1]
else:
lang = None
referenceRole = XbrlConst.standardReference
if lbType in ("label", "generic-label"):
# lang, if provided, is a regex pattern
thisDoc.labelLinkbases.append((lbType, lang, filename))
if action == "extension" and not extensionPrefixForCoreLabels:
extensionPrefixForCoreLabels = thisDoc.extensionSchemaPrefix
elif lbType in ("reference", "generic-reference"):
hasRefLB = True
thisDoc.referenceLinkbases.append((lbType, referenceRole, filename))
elif lbType == "presentation":
thisDoc.hasPreLB = hasPreLB = True
elif lbType == "definition":
thisDoc.hasDefLB = hasDefLB = True
elif lbType == "calculation":
thisDoc.hasCalLB = hasCalLB = True
elif lbType == "generic":
thisDoc.hasGenLB = hasGenLB = True
thisDoc.linkbaseRefs.append( (lbType, filename, True) )
elif filetype == "initialComment" and prefix:
thisDoc.initialComment = prefix
elif filetype == "schemaDocumentation" and prefix:
thisDoc.schemaDocumentation = prefix
elif filetype == "enumerationDocumentation":
thisDoc.hasEnumerationDocumentation = True
elif filetype == "role" and namespaceURI: # filename is definition, prefix is optional used-on QNames
thisDoc.extensionRoles[namespaceURI] = (filename, prefix)
elif filetype == "role label" and namespaceURI and prefix: # filename is label, prefix is language
thisDoc.extensionRoleLabels[namespaceURI].add( (filename, prefix) )
elif filetype == "schema-version" and filename:
thisDoc.extensionSchemaVersion = filename
elif filetype == "table-style" and filename == "xbrl-us":
isUSGAAP = True
elif filetype == "elements":
genElementsDoc = thisDoc
elif action == "meta" and filetype == "table-style" and filename == "xbrl-us":
isUSGAAP = True
elif action == "meta" and filetype == "generate-style" and filename == "import-separately":
isGenerateAndImport = False
elif action == "workbook" and filename:
importFileName = filename
elif action == "worksheet" and filename:
importSheetNames.append(filename)
elif action == "colheader" and filename and namespaceURI:
if namespaceURI == "split":
splitString = filename
else:
importColHeaderMap[filename].append(namespaceURI)
if namespaceURI not in importColumnHeaders:
fatalLoadingErrors.append("colheader {} definition {} not recognized.".format(filename, namespaceURI))
elif action == "skip rows" and filename:
fromRow, _sep, toRow = filename.partition("-")
try:
skipRows.append((int(fromRow), int(toRow) if toRow else int(fromRow)))
except (ValueError, TypeError):
fatalLoadingErrors.append("Exception (at skip rows): {error}, Excel sheet: {excelSheet} row: {excelRow}"
.format(error=err, excelSheet=dtsSheet, excelRow=iRow))
except Exception as err:
fatalLoadingErrors.append("Exception: {error}, Excel sheet: {excelSheet} row: {excelRow}, Traceback: {traceback}"
.format(error=err, excelSheet=dtsSheet, excelRow=iRow, traceback=traceback.format_tb(sys.exc_info()[2])))
# remove any imported linkbaseRefs that are also generated
for thisDoc in genDocs.values():
linkbaseRefsToRemove = [i
for i, (lbType, filename, generate) in enumerate(thisDoc.linkbaseRefs)
if not generate and (lbType, filename, True) in thisDoc.linkbaseRefs]
while len(linkbaseRefsToRemove):
i = linkbaseRefsToRemove.pop()
thisDoc.linkbaseRefs.pop(i)
dtsWs = None # dereference
genOrder = []
for name, doc in genDocs.items():
insertPos = len(genOrder)
for i, otherDoc in enumerate(genOrder):
if doc.name in otherDoc.imports:
insertPos = i # put this doc before any firstr doc that imports it
break
genOrder.insert(insertPos, doc)
if importFileName: # alternative workbook
importExcelBook = load_workbook(importFileName, read_only=True, data_only=True)
sheetNames = importExcelBook.get_sheet_names()
if importSheetNames:
for importSheetName in importSheetNames:
if importSheetName not in sheetNames:
fatalLoadingErrors.append("Worksheet {} specified for Excel importing, but not present in workbook.".format(importSheetName))
else:
for s in sheetNames:
if s.endswith("Concepts"):
importSheetNames.append(s)
if not importSheetNames:
for s in sheetNames:
if "xbrl" in s.lower() and "dts" not in s:
importSheetNames.append(s)
if not importSheetNames:
fatalLoadingErrors.append("Worksheet {} specified for Excel importing, but not present in workbook.".format(importSheetName))
if not isUSGAAP and genOrder: # need extra namespace declaration
genOrder[0].importXmlns["iod"] = "http://disclosure.edinet-fsa.go.jp/taxonomy/common/2013-03-31/iod"
# find column headers row
headerCols = OrderedDict()
headerColsAllElrs = set()
hasLinkroleSeparateRow = True
hasPreferredLabelTextColumn = False
hasConceptAttributeColumn = False
hasDepthColumn = False
hasPresentationParentColumn = False
hasRelationshipToCol = False
hasrelationshipAttributeColumn = False
headerRows = set()
topDepth = 999999
for importSheetName in importSheetNames:
if importSheetName not in sheetNames:
continue
headerCols.clear()
headerRows.clear()
hasConceptAttributeColumn = False
hasDepthColumn = False
hasPresentationParentColumn = False
hasRelationshipToCol = False
hasrelationshipAttributeColumn = False
conceptsWs = importExcelBook[importSheetName]
def setHeaderCols(row):
headerCols.clear()
for iCol, colCell in enumerate(row):
v = xlValue(colCell)
if isinstance(v,str):
v = v.strip()
if v in importColHeaderMap:
for hdr in importColHeaderMap[v]:
if hdr in importColumnHeaders:
headerCols[importColumnHeaders[hdr]] = iCol
elif v in importColumnHeaders:
headerCols[importColumnHeaders[v]] = iCol
elif isinstance(v,str):
if any(v.startswith(r) for r in ("label,", "labels,", "reference,", "references,", "relationship to,")):
# custom/extension label/reference
m = resourceParsePattern.match(v)
if m:
_resourceType = m.group(1)
_resourceRole = "/" + m.group(2) # last path seg of role
_resourceLangOrPart = m.group(4) # lang or part
headerCols[(_resourceType, _resourceRole, _resourceLangOrPart)] = iCol
else:
# custom/extension non-label/reference value column
headerCols[v] = iCol
# find out which rows are header rows
for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
if any(fromRow <= iRow+1 <= toRow for fromRow,toRow in skipRows):
continue
#for iCol, colCell in enumerate(row):
setHeaderCols(row)
# must have some of these to be a header col
if (sum(1 for h in headerCols if h in ("name", "type", "depth", "periodType")) >= 3 or
sum(1 for h in headerCols if h == "name" or (isinstance(h, tuple) and h[0] == "relationship to")) >= 2):
# it's a header col
headerRows.add(iRow+1)
if 'linkrole' in headerCols:
hasLinkroleSeparateRow = False
if 'preferredLabel' in headerCols and any(isinstance(h, tuple) and h[0] == 'label' and h[1] == '/preferredLabel'
for h in headerCols):
hasPreferredLabelTextColumn = True
if 'depth' in headerCols:
hasDepthColumn = True
if 'presentationParent' in headerCols:
hasPresentationParentColumn = True
if not hasDepthColumn and hasPresentationParentColumn:
topDepth = 0
hasRelationshipToCol = any(h[0] == "relationship to" for h in headerCols if isinstance(h, tuple))
headerCols.clear()
def cellHasValue(row, header, _type):
if header in headerCols:
iCol = headerCols[header]
return iCol < len(row) and isinstance(row[iCol].value, _type)
return False
def cellValue(row, header, strip=False, nameChars=False, default=None):
if header in headerCols:
iCol = headerCols[header]
if iCol < len(row):
v = xlValue(row[iCol])
if strip and isinstance(v, str):
v = v.strip()
if nameChars and isinstance(v, str):
v = ''.join(c for c in v if c.isalnum() or c in ('.', '_', '-'))
if v is None:
return default
return v
return default
def valueNameChars(v):
return ''.join(c for c in v if c.isalnum() or c in ('.', '_', '-'))
def rowPrefixNameValues(row):
prefix = cellValue(row, 'prefix', nameChars=True)
if cellHasValue(row, 'name', str):
if not prefix: # maybe name is a qname
prefix, _sep, _name = cellValue(row, 'name').partition(":")
if not _sep: # no prefix at all, whole string is name
prefix = ""
name = cellValue(row, 'name', nameChars=True)[len(prefix):]
else:
name = cellValue(row, 'name', nameChars=True)
else:
name = None
if not prefix and "prefix" not in headerCols and genElementsDoc is not None:
prefix = genElementsDoc.extensionSchemaPrefix
return prefix, name
def checkImport(thisDoc, qname):
prefix, sep, localName = qname.partition(":")
if sep:
if prefix not in thisDoc.imports:
if prefix == "xbrldt":
thisDoc.imports["xbrldt"] = ("namespace", XbrlConst.xbrldt), ("schemaLocation", "http://www.xbrl.org/2005/xbrldt-2005.xsd")
elif prefix == "nonnum":
thisDoc.imports["nonnum"] = ("namespace", "http://www.xbrl.org/dtr/type/non-numeric"), ("schemaLocation", "http://www.xbrl.org/dtr/type/nonNumeric-2009-12-16.xsd")
elif prefix != thisDoc.extensionSchemaPrefix and prefix != "xs":
cntlr.addToLog("Warning: prefix schema file is not imported for: {qname}"
.format(qname=qname),
messageCode="importExcel:warning", file=thisDoc.extensionSchemaFilename)
    # find top depth: scan every row for the minimum 'depth' value, refreshing
    # the header column map (and custom-attribute flags) at each header row
    for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
        if (iRow + 1) in headerRows:
            setHeaderCols(row)
            # "attribute, prefix:name" / "relationship attribute, prefix:name"
            # headers carry custom attributes for concepts and relationships
            hasConceptAttributeColumn = any(v.startswith("attribute, ") for v in headerCols if isinstance(v,str))
            hasRelationshipAttributeColumn = any(v.startswith("relationship attribute, ") for v in headerCols if isinstance(v,str))
        elif not (hasLinkroleSeparateRow and (iRow + 1) in headerRows) and 'depth' in headerCols:
            depth = cellValue(row, 'depth')
            if isinstance(depth, int) and depth < topDepth:
                topDepth = depth
    # process the concept rows: header rows reset the column map, ELR rows (or
    # linkrole cells) open a new extended link role, other rows declare
    # elements and linkbase relationships
    currentELR = currentELRdefinition = None
    for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
        useLabels = False
        eltEnumRefsParts = None
        if any(fromRow <= iRow+1 <= toRow for fromRow,toRow in skipRows):
            continue
        if (all(col.value is None for col in row) or
            all(isinstance(row[i].value, str) and row[i].value.strip() == "n/a"
               for i in (headerCols.get("name"), headerCols.get("type"), headerCols.get("value"))
               if i is not None)):
            continue # skip blank row
        try:
            isHeaderRow = (iRow + 1) in headerRows
            # when linkroles have their own title row, it is the row
            # immediately preceding a header row
            isELRrow = hasLinkroleSeparateRow and (iRow + 2) in headerRows
            if isHeaderRow:
                setHeaderCols(row)
                headerColsAllElrs |= _DICT_SET(headerCols.keys()) # accumulate all header cols for role checks
            elif isELRrow:
                currentELR = currentELRdefinition = None
                for colCell in row:
                    v = str(xlValue(colCell) or '')
                    if v.startswith("http://"):
                        currentELR = v
                    elif not currentELRdefinition and v.endswith(" 科目一覧"):
                        # strip Japanese "list of accounts" suffix from the definition
                        currentELRdefinition = v[0:-5]
                    elif not currentELRdefinition:
                        currentELRdefinition = v
                if currentELR or currentELRdefinition:
                    # open a new ELR root entry in each generated linkbase
                    if hasPreLB:
                        preLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
                        if hasPresentationParentColumn:
                            preRels = set()
                    if hasDefLB:
                        defLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
                    if hasCalLB:
                        calLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
                        calRels = set() # prevent duplications when same rel in different parts of tree
                    if hasGenLB:
                        genLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
            elif headerCols:
                if "linkrole" in headerCols and cellHasValue(row, 'linkrole', str):
                    v = cellValue(row, 'linkrole', strip=True)
                    _trialELR = _trialELRdefinition = None
                    if v.startswith("http://"):
                        _trialELR = v
                    elif v.endswith(" 科目一覧"):
                        _trialELRdefinition = v[0:-5]
                    else:
                        _trialELRdefinition = v
                    if (_trialELR and _trialELR != currentELR) or (_trialELRdefinition and _trialELRdefinition != currentELRdefinition):
                        # the linkrole cell changed: start a new ELR in each linkbase
                        currentELR = _trialELR
                        currentELRdefinition = _trialELRdefinition
                        if currentELR or currentELRdefinition:
                            if hasPreLB:
                                preLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
                            if hasDefLB:
                                defLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
                            if hasCalLB:
                                calLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
                                calRels = set() # prevent duplications when same rel in different parts of tree
                            if hasGenLB:
                                genLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
                prefix, name = rowPrefixNameValues(row)
                if cellHasValue(row, 'depth', int):
                    depth = cellValue(row, 'depth')
                elif hasDepthColumn:
                    depth = None # non-ELR section, no depth
                else: # depth provided by parent reference
                    depth = 0
                subsGrp = cellValue(row, 'substitutionGroup')
                # only these substitution groups participate in linkbase
                # relationship building
                isConcept = subsGrp in ("xbrli:item", "xbrli:tuple",
                                        "xbrldt:hypercubeItem", "xbrldt:dimensionItem")
                if (prefix in genDocs) and name not in genDocs[prefix].extensionElements and name:
                    thisDoc = genDocs[prefix]
                    # elements row: declare a new extension element in thisDoc
                    eltType = cellValue(row, 'type')
                    eltTypePrefix = cellValue(row, 'typePrefix')
                    if not eltType:
                        eltType = 'xbrli:stringItemType'
                    elif eltTypePrefix and ':' not in eltType:
                        eltType = eltTypePrefix + ':' + eltType
                    elif ':' not in eltType and eltType.endswith("ItemType"):
                        # unprefixed *ItemType defaults to the xbrli namespace
                        eltType = 'xbrli:' + eltType
                    abstract = cellValue(row, 'abstract')
                    nillable = cellValue(row, 'nillable')
                    balance = cellValue(row, 'balance')
                    periodType = cellValue(row, 'periodType')
                    eltAttrs = {"name": name, "id": (prefix or "") + "_" + name}
                    if eltType:
                        eltAttrs["type"] = eltType
                        checkImport(thisDoc, eltType)
                    if subsGrp:
                        eltAttrs["substitutionGroup"] = subsGrp
                        checkImport(thisDoc, subsGrp)
                    # hypercube and dimension items are implicitly abstract
                    if abstract or subsGrp in ("xbrldt:hypercubeItem", "xbrldt:dimensionItem"):
                        eltAttrs["abstract"] = abstract or "true"
                    if nillable:
                        eltAttrs["nillable"] = nillable
                    if balance:
                        eltAttrs["{http://www.xbrl.org/2003/instance}balance"] = balance
                    if periodType:
                        eltAttrs["{http://www.xbrl.org/2003/instance}periodType"] = periodType
                    if hasConceptAttributeColumn:
                        # custom attributes (attribute, prefix:localName in header)
                        for header in headerCols:
                            if isinstance(header, str) and header.startswith("attribute, "):
                                value = cellValue(row, header)
                                if value not in (None, ""):
                                    eltAttrs[header[11:]] = value # fix QName later after schemaElt exists
                    eltFacets = None
                    eltEnumRefParts = None
                    # these simple item types take no facets
                    if eltType not in ("nonnum:domainItemType", "xbrli:booleanItemType", "xbrli:positiveIntegerItemType", "xbrli:dateItemType",
                                       "xbrli:gYearItemType"):
                        for facet in ("minLength", "maxLength", "minInclusive", "maxInclusive",
                                      "length", "fixed", "pattern", "enumeration", "excludedEnumeration"):
                            v = cellValue(row, facet)
                            if v is not None:
                                if facet == "enumeration" and v.startswith("See tab "): # check for local or tab-contained enumeration
                                    # "See tab NAME" or "See tab NAME!FROM:TO" points at another worksheet
                                    _match = re.match(r"See tab ([^!]+)([!]([0-9]+):([0-9]+))?", v)
                                    if _match:
                                        _tab, _dummy, _rowFrom, _rowTo = _match.groups()
                                        if _tab in sheetNames:
                                            enumWs = importExcelBook[_tab]
                                            if _rowFrom and _rowTo:
                                                # take cols named "enumeration" and "reference parts"
                                                colHdrs = [enumWs.cell(row=1,column=i).value for i in range(1,enumWs.max_column+1)]
                                                eltEnumValues = []
                                                eltEnumRefsParts = []
                                                for i in range(int(_rowFrom), int(_rowTo)+1):
                                                    _parts = []
                                                    eltEnumRefsParts.append(_parts)
                                                    for j, h in enumerate(colHdrs):
                                                        c = enumWs.cell(row=i,column=j+1).value
                                                        if c is not None:
                                                            if h == "enumeration":
                                                                eltEnumValues.append(str(c))
                                                            else:
                                                                m = resourceParsePattern.match(h)
                                                                if m:
                                                                    _resourceType = m.group(1)
                                                                    _resourceRole = "/" + m.group(2) # last path seg of role
                                                                    _resourceLangOrPart = m.group(4) # lang or part
                                                                    _parts.append(((_resourceType, _resourceRole, _resourceLangOrPart), c))
                                                v = "\n".join(eltEnumValues) if eltEnumValues else None
                                            else: # cols 1 and 2 are enum and labels
                                                v = "\n".join(" = ".join(xlValue(col) for col in row if xlValue(col))
                                                              for i, row in enumerate(enumWs.rows)
                                                              if i > 0) # skip heading row
                                if v is not None:
                                    if eltFacets is None: eltFacets = {}
                                    eltFacets[facet] = v
                    # if extension type is this schema, add extensionType for facets
                    if eltType and ':' in eltType:
                        _typePrefix, _sep, _typeName = eltType.rpartition(":")
                        baseType = cellValue(row, 'baseType')
                        baseTypePrefix = cellValue(row, 'baseTypePrefix')
                        if baseType and baseTypePrefix:
                            _baseType = "{}:{}".format(baseTypePrefix, baseType)
                        elif baseType:
                            _baseType = baseType
                        elif _typeName.endswith("ItemType"):
                            _baseType = "xbrli:tokenItemType" # should be a column??
                        else:
                            _baseType = "xs:token"
                        if _typePrefix in genDocs:
                            # type declared in a generated schema: facets attach to
                            # the generated type instead of the element
                            _typeDoc = genDocs[_typePrefix]
                            if _typeName not in _typeDoc.extensionTypes:
                                _typeDoc.extensionTypes[_typeName] = ({"name":_typeName, "base":_baseType},eltFacets)
                            thisDoc.extensionElements[name] = (eltAttrs, None)
                        else: # not declarable
                            thisDoc.extensionElements[name] = (eltAttrs, eltFacets)
                    else:
                        thisDoc.extensionElements[name] = (eltAttrs, eltFacets)
                    thisDoc = None # deref for debugging
                    useLabels = True
                if depth is not None or hasPresentationParentColumn:
                    # relationship rows: accumulate presentation, definition and
                    # calculation linkbase entries for this concept
                    if name is None:
                        _label = None
                        for colCell in row:
                            if colCell.value is not None:
                                _label = xlValue(colCell)
                                break
                        print ("Sheet {} row {} has relationships and no \"name\" field, label: {}".format(importSheetName, iRow+1, _label))
                    if hasPreLB:
                        preferredLabel = cellValue(row, 'preferredLabel')
                        if hasDepthColumn:
                            # depth-based nesting: attach under the entry at depth-1
                            entryList = lbDepthList(preLB, depth)
                            if entryList is not None and isConcept:
                                if not name or not prefix:
                                    _name = "none"
                                if depth == topDepth:
                                    entryList.append( LBentry(prefix=prefix, name=name, isRoot=True) )
                                else:
                                    entryList.append( LBentry(prefix=prefix, name=name, arcrole=XbrlConst.parentChild,
                                                              role=preferredLabel) )
                        elif hasPresentationParentColumn:
                            preParent = cellValue(row, 'presentationParent', default='') # only one top parent makes sense
                            if preParent:
                                preParentPrefix, _sep, preParentName = preParent.rpartition(":")
                                preParentName = valueNameChars(preParentName)
                                entryList = lbDepthList(preLB, topDepth)
                                if entryList is not None:
                                    # de-duplicate identical parent/child/ELR rows
                                    preRel = (preParentPrefix, preParentName, prefix, name, currentELR or currentELRdefinition)
                                    if preRel not in preRels:
                                        entryList.append( LBentry(prefix=preParentPrefix, name=preParentName, isRoot=True, childStruct=
                                                                  [LBentry(prefix=prefix, name=name, arcrole=XbrlConst.parentChild,
                                                                           preferredLabel=preferredLabel )]) )
                                        preRels.add(preRel)
                                    else:
                                        pass
                    if hasDefLB and topDepth != 999999:
                        entryList = lbDepthList(defLB, depth)
                        if entryList is not None:
                            if depth == topDepth:
                                if isConcept:
                                    entryList.append( LBentry(prefix=prefix, name=name, isRoot=True) )
                            else:
                                if (not preferredLabel or # prevent start/end labels from causing duplicate dim-mem relationships
                                    not any(lbEntry.prefix == prefix and lbEntry.name == name
                                            for lbEntry in entryList)):
                                    # check if entry is a typed dimension
                                    eltAttrs = {}
                                    parentLBentry = lbDepthList(defLB, depth - 1)[-1]
                                    parentName = parentLBentry.name
                                    parentEltAttrs = {}
                                    for doc in genDocs.values():
                                        if name in doc.extensionElements:
                                            eltAttrs = doc.extensionElements.get(name, NULLENTRY)[0]
                                        if parentName in doc.extensionElements:
                                            parentEltAttrs = doc.extensionElements.get(parentName, NULLENTRY)[0]
                                    if (isUSGAAP and # check for typed dimensions
                                        parentEltAttrs.get("substitutionGroup") == "xbrldt:dimensionItem"
                                        and eltAttrs.get("type") != "nonnum:domainItemType"):
                                        # typed dimension, no LBentry
                                        typedDomainRef = "#" + eltAttrs.get("id", "")
                                        parentEltAttrs["{http://xbrl.org/2005/xbrldt}typedDomainRef"] = typedDomainRef
                                    elif isConcept:
                                        # explicit dimension
                                        role = None # default for a default dimension
                                        if "dimensionDefault" in headerCols and cellHasValue(row, 'dimensionDefault', (str,bool)):
                                            v = cellValue(row, 'dimensionDefault', strip=True)
                                            if v:
                                                role = "_dimensionDefault_"
                                        entryList.append( LBentry(prefix=prefix, name=name, arcrole="_dimensions_", role=role) )
                    if hasCalLB:
                        calcParents = cellValue(row, 'calculationParent', default='').split()
                        calcWeights = str(cellValue(row, 'calculationWeight', default='')).split() # may be float or string
                        if calcParents and calcWeights:
                            # may be multiple parents split by whitespace
                            for i, calcParent in enumerate(calcParents):
                                # when fewer weights than parents, the last weight repeats
                                calcWeight = calcWeights[i] if i < len(calcWeights) else calcWeights[-1]
                                calcParentPrefix, _sep, calcParentName = calcParent.rpartition(":")
                                calcParentName = valueNameChars(calcParentName)
                                entryList = lbDepthList(calLB, topDepth)
                                if entryList is not None:
                                    calRel = (calcParentPrefix, calcParentName, prefix, name)
                                    if calRel not in calRels:
                                        entryList.append( LBentry(prefix=calcParentPrefix, name=calcParentName, isRoot=True, childStruct=
                                                                  [LBentry(prefix=prefix, name=name, arcrole=XbrlConst.summationItem, weight=calcWeight )]) )
                                        calRels.add(calRel)
                                    else:
                                        pass
                hasRelationshipToCol = any(h[0] == "relationship to" for h in headerCols if isinstance(h, tuple))
                # accumulate extension labels and any reference parts
                if useLabels or hasRelationshipToCol:
                    prefix, name = rowPrefixNameValues(row)
                    if name is not None and (prefix in genDocs or extensionPrefixForCoreLabels or hasRelationshipToCol):
                        thisDoc = genDocs.get(extensionPrefixForCoreLabels or prefix) # None for relationshipTo a imported concept
                        preferredLabel = cellValue(row, 'preferredLabel')
                        for colItem, iCol in headerCols.items():
                            # resource columns are tuples: (type, role, lang-or-part, ...)
                            if isinstance(colItem, tuple):
                                colItemType = colItem[0]
                                role = colItem[1]
                                lang = part = colItem[2] # lang for label, part for reference
                                cell = row[iCol]
                                v = xlValue(cell)
                                if v is None or (isinstance(v, str) and not v):
                                    values = ()
                                else:
                                    v = str(v) # may be an int or float instead of str
                                    if colItemType in ("label", "reference", "relationship to"):
                                        values = (v,)
                                    elif colItemType in ("labels", "references"):
                                        # plural columns hold one resource per line
                                        values = v.split('\n')
                                if preferredLabel and "indented" in colItem and not hasPreferredLabelTextColumn:  # indented column sets preferredLabel if any
                                    role = preferredLabel
                                for i, value in enumerate(values):
                                    if colItemType == "relationship to": # doesn't require thisDoc
                                        entryList = lbDepthList(genLB, topDepth)
                                        if entryList is not None:
                                            toName = value
                                            if ":" in toName:
                                                toPrefix, _sep, toName = value.partition(":")
                                            else:
                                                toPrefix = prefix
                                            if hasRelationshipAttributeColumn:
                                                # custom attributes (attribute, prefix:localName in header)
                                                relAttrs = None
                                                for header in headerCols:
                                                    if isinstance(header, str) and header.startswith("relationship attribute, "):
                                                        attrValue = cellValue(row, header)
                                                        if attrValue not in (None, ""):
                                                            if relAttrs is None: relAttrs = {}
                                                            relAttrs[header[24:]] = attrValue # fix QName later after schemaElt exists
                                            # NOTE(review): if hasRelationshipAttributeColumn is False,
                                            # relAttrs appears never to be initialized before this use —
                                            # possible NameError; verify against full file context
                                            entryList.append( LBentry(prefix=prefix, name=name, isRoot=True, childStruct=
                                                                      [LBentry(prefix=toPrefix, name=toName, arcrole=role, relAttrs=relAttrs)]) )
                                    elif thisDoc is None:
                                        pass
                                    # following options only apply to linkbases of generated taxonomies
                                    elif colItemType in ("label", "labels"):
                                        if isConcept:
                                            if hasPreferredLabelTextColumn and role == "/preferredLabel":
                                                role = preferredLabel
                                        else:
                                            # non-concept resources get generic label roles
                                            if role == XbrlConst.standardLabel:
                                                role = XbrlConst.genStandardLabel # must go in generic labels LB
                                            elif role == XbrlConst.documentationLabel:
                                                role = XbrlConst.genDocumentationLabel
                                            else:
                                                continue
                                        thisDoc.extensionLabels[prefix, name, lang, role] = value.strip()
                                    elif hasRefLB and colItemType == "reference":
                                        if isConcept:
                                            # keep parts in order and not duplicated
                                            thisDoc.extensionReferences[prefix, name, role].add((part, value.strip()))
                                    elif hasRefLB and colItemType == "references":
                                        if isConcept:
                                            # role ending in # is appended with the value ordinal
                                            if role.endswith("#"):
                                                _role = "{}{:05.0f}".format(role, i)
                                            else:
                                                _role = role
                                            _value = value.strip().replace("\\n", "\n")
                                            if part is None: # part space value
                                                _part, _sep, _value = _value.partition(" ")
                                            else:
                                                _part = part
                                            # keep parts in order and not duplicated
                                            thisDoc.extensionReferences[prefix, name, _role].add((_part, _value))
                        if isConcept and eltEnumRefsParts and thisDoc is not None:
                            # references captured from an enumeration tab, one role per value ordinal
                            for i, _enumRefParts in enumerate(eltEnumRefsParts):
                                for (colItemType, role, part), value in _enumRefParts:
                                    if colItemType == "reference":
                                        _role = "{}#{:05.0f}".format(role, i+1)
                                        thisDoc.extensionReferences[prefix, name, _role].add((part, value.strip()))
                        thisDoc = None # deref for debugging
        except Exception as err:
            # row-level failures are collected and reported together below
            fatalLoadingErrors.append("Excel sheet: {excelSheet}, row: {excelRow}, error: {error}, Traceback: {traceback}"
                                      .format(error=err, excelSheet=importSheetName, excelRow=iRow, traceback=traceback.format_tb(sys.exc_info()[2]))) # uncomment to debug raise
    # abort with all accumulated errors if nothing usable was found
    if not headerCols:
        if not conceptsWs:
            fatalLoadingErrors.append("Neither control worksheet (XBRL DTS tab) nor standard columns found, no DTS imported.")
        elif not currentELR:
            fatalLoadingErrors.append("Extended link role not found, no DTS imported.")
    if fatalLoadingErrors:
        raise Exception(",\n ".join(fatalLoadingErrors))
    if isUSGAAP and hasDefLB:
        # move line items above table
        def fixUsggapTableDims(lvl1Struct, level=0):
            # Rearrange US-GAAP definition-linkbase structure in place: where a
            # *Table/*Cube entry nests a *LineItems entry, hoist the LineItems
            # above the Table, drop heading-only parents, and (at the top
            # level) remove links that contain no table at all.
            # Returns True when a Table was found at or below this level.
            foundTable = False
            emptyLinks = []
            foundHeadingItems = []
            foundLineItems = []
            for lvl1Entry in lvl1Struct:
                for lvl2Entry in lvl1Entry.childStruct:
                    if any(lvl2Entry.name.endswith(suffix) for suffix in ("Table", "_table", "Cube", "_cube")):
                        for lvl3Entry in lvl2Entry.childStruct:
                            if any(lvl3Entry.name.endswith(suffix) for suffix in ("LineItems", "_line_items")):
                                foundLineItems.append((lvl1Entry, lvl2Entry, lvl3Entry))
                                foundTable = True
                                break
                    else:
                        foundHeadingItems.append((lvl1Entry, lvl2Entry))
                if not foundLineItems:
                    # no direct hit: recurse, and note top-level links with no table
                    foundNestedTable = fixUsggapTableDims(lvl1Entry.childStruct, level+1)
                    if level == 0 and not foundNestedTable:
                        emptyLinks.append(lvl1Entry)
                    foundTable |= foundNestedTable
                    del foundHeadingItems[:]
            #if foundLineItems or foundHeadingItems:
            #    print("lvlentry {}\n headingITems {}\n emptyLinks {}\n\n".format(foundLineItems, foundHeadingItems, emptyLinks))
            # swap: LineItems is inserted before the Table, Table becomes its child
            for lvl1Entry, lvl2Entry, lvl3Entry in foundLineItems:
                i1 = lvl1Entry.childStruct.index(lvl2Entry)
                lvl1Entry.childStruct.insert(i1, lvl3Entry) # must keep lvl1Rel if it is __root__
                lvl3Entry.childStruct.insert(0, lvl2Entry)
                if any(lvl1Entry.name.endswith(suffix)
                       for suffix in ("Abstract", "_abstract", "Root", "_root", "_package", "_heading")):
                    lvl1Entry.childStruct.remove(lvl2Entry)
                lvl2Entry.childStruct.remove(lvl3Entry)
            for lvl1Entry, lvl2Entry in foundHeadingItems:
                lvl1Entry.childStruct.remove(lvl2Entry)
            for emptyLink in emptyLinks:
                lvl1Struct.remove(emptyLink)
            return foundTable
fixUsggapTableDims(defLB)
modelDocuments = []
modelXbrl.blockDpmDBrecursion = True
def generateDoc(thisDoc, parentDoc, visitedDocNames):
if thisDoc.name in visitedDocNames:
modelXbrl.error("loadFromExcel:circularDependency",
"Generation order dependency is circular: %(circularDependency)s",
modelXbrl=modelXbrl, circularDependency=",".join(visitedDocNames) + ", " + thisDoc.name)
return
visitedDocNames.append(thisDoc.name)
if XbrlConst.xsd not in thisDoc.importXmlns.values():
eltName = 'schema xmlns="{}"'.format(XbrlConst.xsd)
else:
for k,v in thisDoc.importXmlns.items():
if v == XbrlConst.xsd:
eltName = "{}:schema".format(k)
break
doc = createModelDocument(
modelXbrl,
Type.SCHEMA,
thisDoc.extensionSchemaFilename,
isEntry=(parentDoc is None),
# initialComment="extracted from OIM {}".format(mappedUri),
documentEncoding="utf-8",
base='', # block pathname from becomming absolute
initialXml='''
<{eltName}
targetNamespace="{targetNamespace}"
attributeFormDefault="unqualified"
elementFormDefault="qualified"
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:{extensionPrefix}="{targetNamespace}"
{importXmlns}
xmlns:nonnum="http://www.xbrl.org/dtr/type/non-numeric"
xmlns:link="http://www.xbrl.org/2003/linkbase"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrldt="http://xbrl.org/2005/xbrldt"
{schemaVersion}{xmlLang} />
'''.format(eltName=eltName,
targetNamespace=thisDoc.extensionSchemaNamespaceURI,
extensionPrefix=thisDoc.extensionSchemaPrefix,
importXmlns=''.join('xmlns:{0}="{1}"\n'.format(prefix, namespaceURI)
for prefix, namespaceURI in thisDoc.importXmlns.items()),
schemaVersion='version="{}" '.format(thisDoc.extensionSchemaVersion) if thisDoc.extensionSchemaVersion else '',
xmlLang='\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
),
initialComment=thisDoc.initialComment
)
if parentDoc is None:
modelXbrl.modelDocument = doc
thisDoc.generated = True # prevent recursion
doc.loadedFromExcel = True # signal to save generated taoxnomy in saveToFile below
doc.inDTS = True # entry document always in DTS
doc.targetNamespace = thisDoc.extensionSchemaNamespaceURI # not set until schemaDiscover too late otherwise
schemaElt = doc.xmlRootElement
#foreach linkbase
annotationElt = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "annotation")
if thisDoc.schemaDocumentation:
XmlUtil.addChild(annotationElt, XbrlConst.xsd, "documentation", text=thisDoc.schemaDocumentation)
appinfoElt = XmlUtil.addChild(annotationElt, XbrlConst.xsd, "appinfo")
# add linkbaseRefs
appinfoElt = XmlUtil.descendant(schemaElt, XbrlConst.xsd, "appinfo")
# don't yet add linkbase refs, want to process imports first to get roleType definitions
# add includes
for filename in thisDoc.includes:
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "include", attributes=( ("schemaLocation", filename), ) )
# add imports
for importPrefix, importAttributes in sorted(thisDoc.imports.items(),
key=lambda item:item[1]):
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "import", attributes=importAttributes)
# is the import an xsd which we have to generate
if importPrefix in genDocs and not genDocs[importPrefix].generated:
generateDoc(genDocs[importPrefix], doc, visitedDocNames) # generate document
# add imports for gen LB if any role definitions (for discovery) and generic labels
if any(roleURI in thisDoc.extensionRoleLabels for roleURI in thisDoc.extensionRoles.keys()):
for importAttributes in ((("namespace", XbrlConst.gen), ("schemaLocation", "http://www.xbrl.org/2008/generic-link.xsd")),
(("namespace", XbrlConst.genLabel), ("schemaLocation", "http://www.xbrl.org/2008/generic-label.xsd"))):
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "import", attributes=importAttributes )
_enumNum = [1] # must be inside an object to be referenced in a nested procedure
        def addFacets(thisDoc, restrElt, facets):
            # Emit facet children (length, pattern, enumeration, ...) onto the
            # xs:restriction element restrElt.  Enumeration facet values may be
            # "value = label" lines; labels become generic standard labels
            # and/or xs:documentation annotations, and a JSON-dict label
            # ({"en": ..., "ja": ...}) is treated as multi-lingual.
            if facets:
                excludedEnumeration = facets.get("excludedEnumeration")
                if ((annotateEnumerationsDocumentation and excludedEnumeration == "X")
                    or excludedEnumeration == "D"):
                    # if generateEnumerationsDocumentationOnly annotation must be first child element
                    for facet, facetValue in facets.items():
                        if facet == "enumeration":
                            enumerationsDocumentation = []
                            for valLbl in facetValue.split("\n"):
                                val, _sep, _label = valLbl.partition("=")
                                val = val.strip()
                                if len(val):
                                    if val == "(empty)":
                                        # "(empty)" sentinel means the empty-string value
                                        val = ""
                                    _label = _label.strip()
                                    enumerationsDocumentation.append("{}: {}".format(val, _label) if _label else val)
                            XmlUtil.addChild(XmlUtil.addChild(restrElt, XbrlConst.xsd, "annotation"),
                                             XbrlConst.xsd, "documentation", text=
                                             " \n".join(enumerationsDocumentation))
                for facet, facetValue in sorted(facets.items(), key=lambda i:facetSortOrder.get(i[0],i[0])):
                    if facet == "enumeration":
                        if not annotateEnumerationsDocumentation and not excludedEnumeration:
                            for valLbl in facetValue.split("\n"):
                                val, _sep, _label = valLbl.partition("=")
                                val = val.strip()
                                _label = _label.strip()
                                if len(val):
                                    if val == "(empty)":
                                        val = ""
                                    _attributes = {"value":val}
                                    if _label:
                                        _labelsByLang = None
                                        if _label.startswith("{") and _label.endswith("}"):
                                            try:
                                                # multi-lingual labels are json dict
                                                _labelsByLang = json.loads(_label)
                                            except json.decoder.JSONDecodeError:
                                                _labelsByLang = None
                                        # each labeled enumeration value gets a generated, schema-unique id
                                        _name = "enum{}".format(_enumNum[0])
                                        _attributes["id"] = thisDoc.extensionSchemaPrefix + "_" + _name
                                        _enumNum[0] += 1
                                        if _labelsByLang: #multilingual
                                            for _lang, _langLabel in _labelsByLang.items():
                                                thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, _lang, XbrlConst.genStandardLabel] = _langLabel
                                        else: # non-multi-lingual labels
                                            thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, defaultLabelLang, XbrlConst.genStandardLabel] = _label
                                    enumElt = XmlUtil.addChild(restrElt, XbrlConst.xsd, facet, attributes=_attributes)
                                    if thisDoc.hasEnumerationDocumentation and _label:
                                        if _labelsByLang: #multilingual
                                            annotationElt = XmlUtil.addChild(enumElt, XbrlConst.xsd, "annotation")
                                            for _lang, _langLabel in _labelsByLang.items():
                                                # NOTE(review): this label was already recorded above;
                                                # the duplicate assignment appears redundant — verify
                                                thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, _lang, XbrlConst.genStandardLabel] = _langLabel
                                                XmlUtil.addChild(annotationElt, XbrlConst.xsd, "documentation", text=_langLabel,
                                                                 attributes={"{http://www.w3.org/XML/1998/namespace}lang": _lang})
                                        else: # non-multi-lingual labels
                                            XmlUtil.addChild(XmlUtil.addChild(enumElt, XbrlConst.xsd, "annotation"),
                                                             XbrlConst.xsd, "documentation", text=_label)
                    elif facet != "excludedEnumeration":
                        XmlUtil.addChild(restrElt, XbrlConst.xsd, facet, attributes={"value":str(facetValue)})
# add elements
for eltName, eltDef in sorted(thisDoc.extensionElements.items(), key=lambda item: item[0]):
eltAttrs, eltFacets = eltDef
if eltFacets and "type" in eltAttrs:
eltType = eltAttrs["type"]
del eltAttrs["type"]
if any(':' in attrname for attrname in eltAttrs.keys()): # fix up any prefixed attr names to be clark notation
for attrname, attrvalue in eltAttrs.copy().items():
if not attrname.startswith('{') and ':' in attrname:
del eltAttrs[attrname]
eltAttrs[schemaElt.prefixedNameQname(attrname).clarkNotation] = attrvalue
isConcept = eltAttrs.get('substitutionGroup') in (
"xbrli:item", "xbrli:tuple", "xbrldt:hypercubeItem", "xbrldt:dimensionItem")
elt = XmlUtil.addChild(schemaElt,
XbrlConst.xsd, "element",
attributes=eltAttrs)
if annotateElementDocumentation:
for labelRole in (XbrlConst.documentationLabel, XbrlConst.genDocumentationLabel):
labelKey = (thisDoc.extensionSchemaPrefix, eltAttrs["name"], defaultLabelLang, labelRole)
if labelKey in thisDoc.extensionLabels:
XmlUtil.addChild(XmlUtil.addChild(elt, XbrlConst.xsd, "annotation"),
XbrlConst.xsd, "documentation", text=thisDoc.extensionLabels[labelKey])
break # if std doc label found, don't continue to look for generic doc labe
if elt is not None and eltFacets and isConcept:
cmplxType = XmlUtil.addChild(elt, XbrlConst.xsd, "complexType")
cmplxCont = XmlUtil.addChild(cmplxType, XbrlConst.xsd, "simpleContent")
restrElt = XmlUtil.addChild(cmplxCont, XbrlConst.xsd, "restriction", attributes={"base": eltType})
addFacets(thisDoc, restrElt, eltFacets)
del eltType
for roleURI, (roleDefinition, usedOnRoles) in sorted(thisDoc.extensionRoles.items(), key=lambda rd: rd[1]):
roleElt = XmlUtil.addChild(appinfoElt, XbrlConst.link, "roleType",
attributes=(("roleURI", roleURI),
("id", "roleType_" + roleURI.rpartition("/")[2])))
if roleDefinition:
XmlUtil.addChild(roleElt, XbrlConst.link, "definition", text=roleDefinition)
if usedOnRoles:
for usedOnRole in usedOnRoles.split():
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text=usedOnRole)
else:
if hasPreLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in preLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:presentationLink")
if hasDefLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in defLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:definitionLink")
if hasCalLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in calLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:calculationLink")
if hasGenLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in genLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text=qname("{http://xbrl.org/2008/generic}genlink:link"))
# add role definitions (for discovery) and generic labels
if any(roleURI in thisDoc.extensionRoleLabels for roleURI in thisDoc.extensionRoles.keys()):
# add appinfo generic linkbase for gen labels
genLabLB = XmlUtil.addChild(appinfoElt, XbrlConst.link, "linkbase")
XmlUtil.addChild(genLabLB, XbrlConst.link, "roleRef",
attributes=(("roleURI", XbrlConst.genStandardLabel),
("{http://www.w3.org/1999/xlink}href", "http://www.xbrl.org/2008/generic-label.xsd#standard-label"),
("{http://www.w3.org/1999/xlink}type", "simple")))
XmlUtil.addChild(genLabLB, XbrlConst.link, "arcroleRef",
attributes=(("arcroleURI", elementLabel),
("{http://www.w3.org/1999/xlink}href", "http://www.xbrl.org/2008/generic-label.xsd#element-label"),
("{http://www.w3.org/1999/xlink}type", "simple")))
linkElt = XmlUtil.addChild(genLabLB, qname("{http://xbrl.org/2008/generic}genlink:link"),
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
for roleURI, _defLabel in sorted(thisDoc.extensionRoles.items(), key=lambda rd: rd[0]):
if roleURI in thisDoc.extensionRoleLabels:
xlLabel = roleURI.rpartition("/")[2]
XmlUtil.addChild(linkElt, XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", "#roleType_" + xlLabel),
("{http://www.w3.org/1999/xlink}label", "loc_" + xlLabel)))
XmlUtil.addChild(linkElt, XbrlConst.qnGenArc,
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", elementLabel),
("{http://www.w3.org/1999/xlink}from", "loc_" + xlLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + xlLabel)))
for (text, lang) in thisDoc.extensionRoleLabels[roleURI]:
XmlUtil.addChild(linkElt, qname("{http://xbrl.org/2008/label}genlabel:label"),
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + xlLabel),
("{http://www.w3.org/1999/xlink}role", XbrlConst.genStandardLabel),
("{http://www.w3.org/XML/1998/namespace}lang", lang)),
text=text)
def addLinkbaseRef(lbType, lbFilename, lbDoc):
role = "http://www.xbrl.org/2003/role/{0}LinkbaseRef".format(lbType)
lbRefElt = XmlUtil.addChild(appinfoElt, XbrlConst.link, "linkbaseRef",
attributes=(("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href",
docRelpath(lbFilename, thisDoc.extensionSchemaRelDirname)),
("{http://www.w3.org/1999/xlink}arcrole", "http://www.w3.org/1999/xlink/properties/linkbase"),
# generic label ref has no role
) + (() if lbType.startswith("generic") else
(("{http://www.w3.org/1999/xlink}role", role),))
)
if lbDoc: # provided for generated linbase refs
doc.referencesDocument[lbDoc] = ModelDocumentReference("href", lbRefElt)
# add referenced (not generated) linkbases
for lbRefType, filename, generate in thisDoc.linkbaseRefs:
if not generate:
# if linkbase is generated by another doc which isn't generated yet, generate it
for otherGenDoc in genDocs.values():
if not otherGenDoc.generated and any(
_otherLbRefType == lbRefType and _otherFilename == filename and _otherGenerate
for _otherLbRefType, _otherFilename, _otherGenerate in otherGenDoc.linkbaseRefs):
generateDoc(otherGenDoc, doc, visitedDocNames) # generate document
addLinkbaseRef(lbRefType, filename, None)
doc.schemaDiscover(schemaElt, False, thisDoc.extensionSchemaNamespaceURI)
# add types after include and import are discovered
# block creating any type which was previously provided by an include of the same namespace
for typeName, typeDef in sorted(thisDoc.extensionTypes.items(), key=lambda item: item[0]):
if qname(thisDoc.extensionSchemaNamespaceURI, typeName) in modelXbrl.qnameTypes:
continue # type already exists, don't duplicate
typeAttrs, typeFacets = typeDef
if typeName.endswith("ItemType") or typeAttrs.get("base", "").endswith("ItemType"):
cmplxType = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "complexType", attributes={"name": typeAttrs["name"]})
contElt = XmlUtil.addChild(cmplxType, XbrlConst.xsd, "simpleContent")
else:
contElt = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "simpleType", attributes={"name": typeAttrs["name"]})
restrElt = XmlUtil.addChild(contElt, XbrlConst.xsd, "restriction", attributes={"base": typeAttrs["base"]})
# remove duplicitous facets already in base type
baseQn = qname(schemaElt, typeAttrs.get("base"))
if typeFacets:
if baseQn and baseQn.namespaceURI not in (XbrlConst.xsd, XbrlConst.xbrli) and baseQn in modelXbrl.qnameTypes:
# remove duplicated facets of underlying type
baseTypeFacets = modelXbrl.qnameTypes[baseQn].facets or () # allow iteration if None
typeFacets = dict((facet, value)
for facet, value in typeFacets.items()
if facet not in baseTypeFacets or str(baseTypeFacets[facet]) != value)
addFacets(thisDoc, restrElt, typeFacets)
# find extension label roles, reference roles and parts
extLabelRoles = {}
extReferenceRoles = {}
extReferenceParts = {}
extReferenceSchemaDocs = {}
extUnrecognizedRoles = set()
relationshipArcroles = {}
relationshipArcqnames = {}
def setExtRefPart(partLocalName):
if partLocalName not in extReferenceParts:
for partConcept in modelXbrl.nameConcepts.get(partLocalName, ()):
if partConcept is not None and partConcept.subGroupHeadQname == qnLinkPart:
extReferenceParts[partLocalName] = partConcept.qname
extReferenceSchemaDocs[partConcept.qname.namespaceURI] = (
partConcept.modelDocument.uri if partConcept.modelDocument.uri.startswith("http://") else
partConcept.modelDocument.basename)
break
for _headerColKey in headerColsAllElrs:
if isinstance(_headerColKey, tuple) and len(_headerColKey) >= 3 and not _headerColKey[1].startswith("http://"):
_resourceType = _headerColKey[0]
_resourceRole = _headerColKey[1]
_resourceLangOrPart = _headerColKey[2]
elif isinstance(_headerColKey, str) and "!reference" in _headerColKey:
m = resourceParsePattern.match(_headerColKey.partition("!")[2])
_resourceType = m.group(1)
_resourceRole = "/" + m.group(2)
_resourceLangOrPart = m.group(4)
else:
continue
_resourceQName, _standardRoles = {
"label": (qnLinkLabel, standardLabelRoles),
"labels": (qnLinkLabel, standardLabelRoles),
"reference": (qnLinkReference, standardReferenceRoles),
"references": (qnLinkReference, standardReferenceRoles)
}.get(_resourceType, (None,()))
_resourceRoleURI = None
# find resource role
for _roleURI in _standardRoles:
if _roleURI.endswith(_resourceRole):
_resourceRoleURI = _roleURI
_resourceRoleMatchPart = _resourceRole
break
if _resourceRoleURI is None: # try custom roles
_resourceRoleMatchPart = _resourceRole.partition("#")[0] # remove # part
for _roleURI in modelXbrl.roleTypes:
if _roleURI.endswith(_resourceRoleMatchPart):
for _roleType in modelXbrl.roleTypes[_roleURI]:
if _resourceQName in _roleType.usedOns:
_resourceRoleURI = _roleURI
break
if _resourceType in ("label", "labels"):
if _resourceRoleURI:
extLabelRoles[_resourceRoleMatchPart] = _resourceRoleURI
elif any(_resourceRoleMatchPart == k[2] for k in thisDoc.extensionLabels.keys()):
modelXbrl.error("loadFromExcel:labelResourceRole",
"Label resource role not found: %(role)s",
modelXbrl=modelXbrl, role=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
elif _resourceType in ("reference", "references"):
if _resourceRoleURI:
extReferenceRoles[_resourceRoleMatchPart] = _resourceRoleURI
# find part QName
setExtRefPart(_resourceLangOrPart)
elif any(_resourceRoleMatchPart == k[2] for k in thisDoc.extensionReferences.keys()):
modelXbrl.error("loadFromExcel:referenceResourceRole",
"Reference resource role not found: %(role)s",
modelXbrl=modelXbrl, role=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
elif _resourceType == "relationship to":
for _arcroleURI in modelXbrl.arcroleTypes:
if _arcroleURI.endswith(_resourceRoleMatchPart):
for _arcroleType in modelXbrl.arcroleTypes[_arcroleURI]:
for _resourceQName in _arcroleType.usedOns:
break
break
if _resourceQName is None:
modelXbrl.error("loadFromExcel:relationshipArcrole",
"Relationship arcrole not found: %(arcrole)s",
modelXbrl=modelXbrl, arcrole=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
else:
relationshipArcroles[_resourceRoleMatchPart] = _arcroleURI
relationshipArcqnames[_arcroleURI] = _resourceQName
# label linkbase
for lbType, lang, filename in thisDoc.labelLinkbases:
thisDoc.thisLBdir = posixpath.dirname(filename)
langPattern = re.compile(lang or ".*")
_isGeneric = lbType.startswith("generic")
if _isGeneric and "http://xbrl.org/2008/label" not in modelXbrl.namespaceDocs:
# must pre-load generic linkbases in order to create properly typed elements (before discovery because we're creating elements by lxml)
ModelDocument.load(modelXbrl, "http://www.xbrl.org/2008/generic-link.xsd", isDiscovered=True)
ModelDocument.load(modelXbrl, "http://www.xbrl.org/2008/generic-label.xsd", isDiscovered=True)
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base="", initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}"
{}>{}</linkbase>
""".format("""
xmlns:genlink="http://xbrl.org/2008/generic"
xmlns:genlabel="http://xbrl.org/2008/label"
""" if _isGeneric else "",
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
http://xbrl.org/2008/label http://www.xbrl.org/2008/generic-label.xsd
""" if _isGeneric else "",
'\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
"""
<arcroleRef arcroleURI="http://xbrl.org/arcrole/2008/element-label" xlink:href="http://www.xbrl.org/2008/generic-label.xsd#element-label" xlink:type="simple"/>
""" if _isGeneric else ""),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
if isGenerateAndImport:
addLinkbaseRef(lbType, filename, lbDoc) # must be explicitly imported
lbElt = lbDoc.xmlRootElement
linkElt = XmlUtil.addChild(lbElt,
gen if _isGeneric else link,
"link" if _isGeneric else "labelLink",
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
firstLinkElt = linkElt
locs = set()
roleRefs = set()
for labelKey, text in thisDoc.extensionLabels.items():
prefix, name, labelLang, role = labelKey
labelLang = labelLang or defaultLabelLang
role = role.partition("#")[0] # remove # part
role = extLabelRoles.get(role, role) # get custom role, if any
if langPattern.match(labelLang) and _isGeneric == (role in (XbrlConst.genStandardLabel, XbrlConst.genDocumentationLabel)):
locLabel = prefix + "_" + name
if locLabel not in locs:
locs.add(locLabel)
XmlUtil.addChild(linkElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", LBHref(thisDoc, prefix, name)),
("{http://www.w3.org/1999/xlink}label", locLabel)))
XmlUtil.addChild(linkElt,
gen if _isGeneric else link,
"arc" if _isGeneric else "labelArc",
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", elementLabel if _isGeneric else conceptLabel),
("{http://www.w3.org/1999/xlink}from", locLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + locLabel),
("order", 1.0)))
XmlUtil.addChild(linkElt,
XbrlConst.genLabel if _isGeneric else XbrlConst.link,
"label",
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + locLabel),
("{http://www.w3.org/1999/xlink}role", role)) + (
(("{http://www.w3.org/XML/1998/namespace}lang", labelLang),)
if True or lang != saveXmlLang else ()),
text=text)
if role:
if role in XbrlConst.standardLabelRoles:
pass # no roleRef
elif role in modelXbrl.roleTypes:
roleType = modelXbrl.roleTypes[role][0]
roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
elif role.startswith("http://www.xbrl.org/2009/role/negated"):
roleRefs.add(("roleRef", role, "http://www.xbrl.org/lrr/role/negated-2009-12-16.xsd#" + role.rpartition("/")[2]))
else:
extUnrecognizedRoles.add(role)
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
XbrlConst.link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href", href)),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedLabelRole",
"Label roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
# reference linkbase
for lbType, referenceRole, filename in thisDoc.referenceLinkbases:
thisDoc.thisLBdir = posixpath.dirname(filename)
_isGeneric = lbType.startswith("generic")
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base="", initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}{}"
{}>{}</linkbase>
""".format("""
xmlns:genlink="http://xbrl.org/2008/generic"
xmlns:genreference="http://xbrl.org/2008/rerference"
""" if _isGeneric else "",
"".join([" {} {}".format(_ns, _uri) for _ns, _uri in extReferenceSchemaDocs.items()]),
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
http://xbrl.org/2008/reference http://www.xbrl.org/2008/generic-reference.xsd
""" if _isGeneric else "",
'\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
"""
<roleRef roleURI="http://www.xbrl.org/2008/role/label" xlink:href="http://www.xbrl.org/2008/generic-label.xsd#standard-label" xlink:type="simple"/>
<arcroleRef arcroleURI="http://xbrl.org/arcrole/2008/element-reference" xlink:href="http://xbrl.org/2008/generic-reference.xsd#element-reference" xlink:type="simple"/>
""" if _isGeneric else ""),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
if isGenerateAndImport:
addLinkbaseRef(lbType, filename, lbDoc) # must be explicitly imported
lbElt = lbDoc.xmlRootElement
linkElt = XmlUtil.addChild(lbElt,
XbrlConst.gen if _isGeneric else XbrlConst.link,
"link" if _isGeneric else "referenceLink",
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
firstLinkElt = linkElt
locs = set()
roleRefs = set()
undefinedReferenceParts = set()
for referenceKey, references in thisDoc.extensionReferences.items():
prefix, name, role = referenceKey
role = role.partition("#")[0] # remove # part
role = extReferenceRoles.get(role, role) # get custom role, if any
if fnmatch(role, referenceRole):
locLabel = prefix + "_" + name
# must use separate arcs with order to force Altova to display parts in order
if locLabel not in locs:
locs.add(locLabel)
order = 1
else:
for order in range(2,1000):
_locLabel = "{}_{}".format(locLabel, order)
if _locLabel not in locs:
locLabel = _locLabel
locs.add(locLabel)
break
if order > 999:
print("resource order de-duplicate failure, too many reference parts")
XmlUtil.addChild(linkElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", LBHref(thisDoc, prefix, name)),
("{http://www.w3.org/1999/xlink}label", locLabel)))
XmlUtil.addChild(linkElt,
XbrlConst.link, "referenceArc",
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", conceptReference),
("{http://www.w3.org/1999/xlink}from", locLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + locLabel),
("order", order)))
referenceResource = XmlUtil.addChild(linkElt,
XbrlConst.genReference if _isGeneric else XbrlConst.link,
"reference",
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + locLabel),
("{http://www.w3.org/1999/xlink}role", role)))
for part, text in references: # list to preserve desired order
setExtRefPart(part)
if part in extReferenceParts:
partQn = extReferenceParts.get(part, part) # get part QName if any
XmlUtil.addChild(referenceResource, partQn, text=text)
else:
undefinedReferenceParts.add(part)
if role:
if role in XbrlConst.standardLabelRoles:
pass # no roleRef
elif role in modelXbrl.roleTypes:
roleType = modelXbrl.roleTypes[role][0]
roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
elif role.startswith("http://www.xbrl.org/2009/role/negated"):
roleRefs.add(("roleRef", role, "http://www.xbrl.org/lrr/role/negated-2009-12-16.xsd#" + role.rpartition("/")[2]))
else:
extUnrecognizedRoles.add(role)
for part in sorted(undefinedReferenceParts):
print("reference part not defined: {}".format(part))
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
XbrlConst.link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href", href)),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedReferenceRole",
"Reference roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
prefixedNamespaces = modelXbrl.prefixedNamespaces
def hrefConcept(prefix, name):
qn = qname(prefixedNamespaces[prefix], name)
if qn in modelXbrl.qnameConcepts:
return modelXbrl.qnameConcepts[qn]
elif name in modelXbrl.nameConcepts: # prefix may be null or ambiguous to multiple documents, try concept local name
return modelXbrl.nameConcepts[name][0]
if prefix not in prefixedNamespaces:
modelXbrl.error("loadFromExcel:undefinedRelationshipElementPrefix",
"Prefix not defined: %(prefix)s",
modelXbrl=modelXbrl, prefix=prefix)
return None
modelXbrl.error("loadFromExcel:undefinedRelationshipElement",
"QName not defined: %(prefix)s:%(localName)s",
modelXbrl=modelXbrl, prefix=prefix, localName=name)
return None
def prefixedNameQName(prefixedName):
if ":" not in prefixedName:
return prefixedName
prefix, _sep, name = prefixedName.rpartition(":")
if prefix not in prefixedNamespaces:
modelXbrl.error("loadFromExcel:undefinedRelationshipAttributePrefix",
"Prefix not defined: %(prefix)s",
modelXbrl=modelXbrl, prefix=prefix)
return prefixedName
return QName(prefix, prefixedNamespaces[prefix], name)
    def lbTreeWalk(lbType, parentElt, lbStruct, roleRefs, dimDef=False, locs=None, arcsFromTo=None, fromPrefix=None, fromName=None):
        """Recursively emit linkbase XML (extended links, locs, arcs) for a parsed link tree.

        lbType: "presentation", "calculation", "definition", or "generic".
        parentElt: element the generated children are appended to.
        lbStruct: list of LBentry nodes (ELR entries contain child trees).
        roleRefs: accumulates ("roleRef", roleURI, href) tuples, prepended to the linkbase later.
        dimDef: True on the second pass that emits only dimension-default arcs.
        locs/arcsFromTo: per-extended-link de-duplication state for locators and from/to pairs.
        fromPrefix/fromName: the parent concept of arcs generated at this level.
        """
        order = 1.0
        for lbEntry in lbStruct:
            if lbEntry.isELR:
                if not lbEntry.childStruct: # skip empty ELRs
                    continue
                role = "unspecified"
                if lbEntry.role and lbEntry.role.startswith("http://"): # have a role specified
                    role = lbEntry.role
                elif lbEntry.name: #may be a definition
                    # resolve the ELR's definition text back to its role URI
                    for linkroleUri, modelRoleTypes in modelXbrl.roleTypes.items():
                        definition = modelRoleTypes[0].definition
                        if lbEntry.name == definition and linkroleUri in thisDoc.extensionRoles:
                            role = linkroleUri
                            break
                if role == "unspecified":
                    # don't generate for roles not for this schema
                    continue
                #
                #modelXbrl.error("loadFromExcel:linkRoleDefinition",
                #    "Link role has no definition: %(role)s",
                #    modelXbrl=modelXbrl, role=lbEntry.name, filename=thisDoc.extensionSchemaNamespaceURI)
                if role not in thisDoc.extensionRoles:
                    # don't generate for roles not for this schema
                    continue
                if role == XbrlConst.defaultLinkRole:
                    pass
                elif role in thisDoc.extensionRoles:
                    # role declared by this extension schema; doc is the generated schema document
                    roleRefs.add(("roleRef", role, doc.uri + "#roleType_" + role.rpartition("/")[2]))
                elif role in modelXbrl.roleTypes: # add roleRef
                    # NOTE(review): modelRoleTypes here is the leftover loop variable from the
                    # definition-matching loop above and may be unbound/stale when this branch
                    # is reached -- confirm (modelXbrl.roleTypes[role][0] may be intended)
                    roleType = modelRoleTypes[0]
                    roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
                else:
                    extUnrecognizedRoles.add(role)
                linkElt = XmlUtil.addChild(parentElt,
                                           XbrlConst.gen if lbType == "generic" else XbrlConst.link,
                                           "link" if lbType == "generic" else lbType + "Link",
                                           attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
                                                       ("{http://www.w3.org/1999/xlink}role", role)))
                # each extended link gets fresh locator / arc de-duplication state
                locs = set()
                arcsFromTo = set()
                lbTreeWalk(lbType, linkElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo)
            else:
                toPrefix = lbEntry.prefix
                toName = lbEntry.name
                toHref = LBHref(thisDoc, toPrefix, toName)
                if toHref is None:
                    modelXbrl.error("loadFromExcel:invalidQName",
                                    "%(linkbase)s relationship element with prefix '%(prefix)s' localName '%(localName)s' not found",
                                    modelXbrl=modelXbrl, linkbase=lbType, prefix=lbEntry.prefix, localName=lbEntry.name)
                    continue
                if not toPrefix and toName in modelXbrl.nameConcepts:
                    toPrefix = modelXbrl.nameConcepts[toName][0].qname.prefix
                toLabel = "{}_{}".format(toPrefix, toName)
                toLabelAlt = None
                if not lbEntry.isRoot:
                    if not fromPrefix and fromName in modelXbrl.nameConcepts:
                        fromPrefix = modelXbrl.nameConcepts[fromName][0].qname.prefix
                    fromLabel = "{}_{}".format(fromPrefix, fromName)
                    if (fromLabel, toLabel) in arcsFromTo:
                        # need extra loc to prevent arc from/to duplication in ELR
                        for i in range(1, 1000):
                            toLabelAlt = "{}_{}".format(toLabel, i)
                            if (fromLabel, toLabelAlt) not in arcsFromTo:
                                toLabel = toLabelAlt
                                break
                if (toHref not in locs or toLabelAlt) and not dimDef:
                    XmlUtil.addChild(parentElt,
                                     XbrlConst.link, "loc",
                                     attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
                                                 ("{http://www.w3.org/1999/xlink}href", toHref),
                                                 ("{http://www.w3.org/1999/xlink}label", toLabel)))
                    locs.add(toHref)
                if not lbEntry.isRoot:
                    arcsFromTo.add( (fromLabel, toLabel) )
                    # choose arc attributes by linkbase type
                    if lbType == "calculation" and lbEntry.weight is not None:
                        otherAttrs = ( ("weight", lbEntry.weight), )
                    elif lbType == "presentation" and lbEntry.role:
                        if not lbEntry.role.startswith("http://"):
                            # check if any defined labels for this role
                            _labelRoleMatchPart = "/" + lbEntry.role
                            for _roleURI in modelXbrl.roleTypes:
                                if _roleURI.endswith(_labelRoleMatchPart):
                                    for _roleType in modelXbrl.roleTypes[_roleURI]:
                                        if XbrlConst.qnLinkLabel in _roleType.usedOns:
                                            lbEntry.role = _roleURI
                                            break
                        if not lbEntry.role.startswith("http://"):
                            # default to built in label roles
                            lbEntry.role = "http://www.xbrl.org/2003/role/" + lbEntry.role
                        otherAttrs = ( ("preferredLabel", lbEntry.role), )
                        if lbEntry.role and lbEntry.role not in XbrlConst.standardLabelRoles:
                            if lbEntry.role in modelXbrl.roleTypes:
                                roleType = modelXbrl.roleTypes[lbEntry.role][0]
                                roleRefs.add(("roleRef", lbEntry.role, roleType.modelDocument.uri + "#" + roleType.id))
                            else:
                                extUnrecognizedRoles.add(lbEntry.role)
                    elif lbType == "generic" and lbEntry.arcrole:
                        if not lbEntry.arcrole.startswith("http://"):
                            # check if any defined labels for this role
                            for _arcroleURI in modelXbrl.arcroleTypes:
                                if _arcroleURI.endswith(lbEntry.arcrole):
                                    lbEntry.arcrole = _arcroleURI
                                    break
                        otherAttrs = tuple( (prefixedNameQName(_key), _value) # may need to process qname in key into clark name
                                            for _key, _value in (lbEntry.relAttrs.items() if lbEntry.relAttrs is not None else ()))
                    else:
                        otherAttrs = ( )
                    if lbEntry.arcrole == "_dimensions_": # pick proper consecutive arcrole
                        fromConcept = hrefConcept(fromPrefix, fromName)
                        toConcept = hrefConcept(toPrefix, toName)
                        if dimDef: # special case for default dimension
                            if lbEntry.role != "_dimensionDefault_" and not lbTreeHasDimDefault(lbEntry.childStruct):
                                continue # forget subtree, no default
                            if toConcept is not None and (toConcept.isDimensionItem or lbEntry.role == "_dimensionDefault_"):
                                if (toHref not in locs or toLabelAlt):
                                    XmlUtil.addChild(parentElt,
                                                     XbrlConst.link, "loc",
                                                     attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
                                                                 ("{http://www.w3.org/1999/xlink}href", toHref),
                                                                 ("{http://www.w3.org/1999/xlink}label", toLabel)))
                                    locs.add(toHref)
                                if lbEntry.role != "_dimensionDefault_":
                                    lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, toPrefix, toName)
                                else:
                                    XmlUtil.addChild(parentElt, XbrlConst.link, "definitionArc",
                                                     attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
                                                                 ("{http://www.w3.org/1999/xlink}arcrole", XbrlConst.dimensionDefault),
                                                                 ("{http://www.w3.org/1999/xlink}from", fromLabel),
                                                                 ("{http://www.w3.org/1999/xlink}to", toLabel),
                                                                 ("order", order)) + otherAttrs )
                                    order += 1.0
                            else:
                                lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, fromPrefix, fromName)
                            continue
                        elif toConcept is not None and toConcept.isHypercubeItem:
                            arcrole = XbrlConst.all
                            otherAttrs += ( (XbrlConst.qnXbrldtContextElement, "segment"),
                                            (qnXbrldtClosed, "true") )
                        elif toConcept is not None and toConcept.isDimensionItem:
                            arcrole = XbrlConst.hypercubeDimension
                        elif fromConcept is not None and fromConcept.isDimensionItem:
                            arcrole = XbrlConst.dimensionDomain
                        else:
                            arcrole = XbrlConst.domainMember
                    else:
                        arcrole = lbEntry.arcrole
                    # pick the arc element: custom relationship arc, generic arc, or standard <lbType>Arc
                    if arcrole in relationshipArcqnames:
                        arcqname = relationshipArcqnames[arcrole]
                        arcNS = arcqname.namespaceURI
                        arcLocalname = arcqname.localName
                    elif lbType == "generic":
                        arcNS = XbrlConst.gen
                        arcLocalname = "arc"
                    else:
                        arcNS = XbrlConst.link
                        arcLocalname = lbType + "Arc"
                    XmlUtil.addChild(parentElt,
                                     arcNS, arcLocalname,
                                     attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
                                                 ("{http://www.w3.org/1999/xlink}arcrole", arcrole),
                                                 ("{http://www.w3.org/1999/xlink}from", fromLabel),
                                                 ("{http://www.w3.org/1999/xlink}to", toLabel),
                                                 ("order", order)) + otherAttrs )
                    order += 1.0
                if lbType != "calculation" or lbEntry.isRoot:
                    lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, toPrefix, toName)
def lbTreeHasDimDefault(lbStruct):
for lbEntry in lbStruct:
if lbEntry.isELR:
if not lbEntry.childStruct:
continue
if lbTreeHasDimDefault(lbEntry.childStruct):
return True
else:
if not lbEntry.isRoot and (lbEntry.arcrole == "_dimensions_" and lbEntry.role == "_dimensionDefault_"):
return True
if lbTreeHasDimDefault(lbEntry.childStruct):
return True
return False
for hasLB, lbType, lbLB in ((hasPreLB and thisDoc.hasPreLB, "presentation", preLB),
(hasDefLB and thisDoc.hasDefLB, "definition", defLB),
(hasCalLB and thisDoc.hasCalLB, "calculation", calLB),
(hasGenLB and thisDoc.hasGenLB, "generic", genLB)):
if hasLB:
for lbRefType, filename, generate in thisDoc.linkbaseRefs:
thisDoc.thisLBdir = posixpath.dirname(filename)
if generate and lbType == lbRefType:
# output presentation linkbase
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base='', initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}"
/>
""".format("""
xmlns:generic="http://xbrl.org/2008/generic"
""" if lbType == "generic" else "",
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
""" if lbType == "generic" else ""
),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
addLinkbaseRef(lbRefType, filename, lbDoc)
lbElt = lbDoc.xmlRootElement
roleRefs = set()
if lbType == "definition":
roleRefs.update((("arcroleRef", XbrlConst.all, "http://www.xbrl.org/2005/xbrldt-2005.xsd#all"),
("arcroleRef", XbrlConst.dimensionDefault, "http://www.xbrl.org/2005/xbrldt-2005.xsd#dimension-default"),
("arcroleRef", XbrlConst.dimensionDomain, "http://www.xbrl.org/2005/xbrldt-2005.xsd#dimension-domain"),
("arcroleRef", XbrlConst.domainMember, "http://www.xbrl.org/2005/xbrldt-2005.xsd#domain-member"),
("arcroleRef", XbrlConst.hypercubeDimension, "http://www.xbrl.org/2005/xbrldt-2005.xsd#hypercube-dimension")))
elif lbType == "generic":
for _arcroleURI in relationshipArcroles.values():
for _arcroleType in modelXbrl.arcroleTypes[_arcroleURI]:
roleRefs.add(("arcroleRef", _arcroleURI, _arcroleType.modelDocument.uri + "#" + _arcroleType.id))
break
lbTreeWalk(lbType, lbElt, lbLB, roleRefs)
if lbType == "definition" and lbTreeHasDimDefault(lbLB):
lbTreeWalk(lbType, lbElt, lbLB, roleRefs, dimDef=True) # second tree walk for any dimension-defaults
firstLinkElt = None
for firstLinkElt in lbElt.iterchildren():
break
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href",
docRelpath(href, thisDoc.thisLBdir))),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
break
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedRole",
"%(lbType)s linkbase roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, lbType=lbType, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
visitedDocNames.pop()
    def LBHref(thisDoc, prefix, name):
        """Return an xlink:href "relativeFile#prefix_name" locating concept prefix:name, or None.

        Resolution order: no prefix -> first loaded concept with that local
        name; extension schema's own prefix -> its schema file; imported
        prefix -> its import file; prefix of a sibling generated document ->
        generate it on demand first; otherwise search loaded concepts whose
        qname prefix matches.
        """
        if not prefix and name in modelXbrl.nameConcepts:
            _concept = modelXbrl.nameConcepts[name][0]
            filename = _concept.modelDocument.uri
            prefix = _concept.qname.prefix
        elif prefix == thisDoc.extensionSchemaPrefix:
            filename = thisDoc.extensionSchemaFilename
        elif prefix in thisDoc.importFilenames:
            filename = thisDoc.importFilenames[prefix]
        elif prefix in genDocs:
            doc = genDocs[prefix]
            if not doc.generated:
                # try to load recursively
                # NOTE(review): generateDoc is called with 2 args here but with 3
                # (visitedDocNames) elsewhere -- confirm the third parameter has a default
                generateDoc(doc, thisDoc)
            if doc.generated:
                filename = doc.extensionSchemaFilename
            else:
                return None  # dependency could not be generated
        elif name in modelXbrl.nameConcepts:
            # prefix not one of ours: find a loaded concept with a matching qname prefix
            filename = None
            for _concept in modelXbrl.nameConcepts[name]:
                if prefix == _concept.qname.prefix:
                    filename = _concept.modelDocument.uri
                    break
            if not filename:
                return None
        else:
            return None
        return "{0}#{1}_{2}".format(docRelpath(filename, thisDoc.thisLBdir), prefix, name)
for thisDoc in genOrder:
if not thisDoc.generated:
generateDoc(thisDoc, None, [])
#cntlr.addToLog("Completed in {0:.2} secs".format(time.time() - startedAt),
# messageCode="loadFromExcel:info")
if priorCWD:
os.chdir(priorCWD) # restore prior current working directory
return modelXbrl.modelDocument
def isExcelPath(filepath):
    """Return True if *filepath* has an Excel workbook extension (.xlsx, .xls, .xlsm)."""
    _root, ext = os.path.splitext(filepath)
    return ext in {".xlsx", ".xls", ".xlsm"}
def isExcelLoadable(modelXbrl, mappedUri, normalizedUri, filepath, **kwargs):
    """Plugin hook (ModelDocument.IsPullLoadable): loadable iff the file has an Excel extension."""
    return isExcelPath(filepath)
def excelLoaderFilingStart(cntlr, options, filesource, entrypointFiles, *args, **kwargs):
    """Plugin hook (CntlrCmdLine.Filing.Start): capture command line options into module globals.

    optparse's Values.ensure_value returns the existing attribute value, or
    sets and returns the supplied default when the attribute is absent, so
    these assignments are safe whether or not the options were given.
    """
    global excludeDesignatedEnumerations, annotateEnumerationsDocumentation, annotateElementDocumentation, saveXmlLang
    excludeDesignatedEnumerations = options.ensure_value("excludeDesignatedEnumerations", False)
    annotateEnumerationsDocumentation = options.ensure_value("annotateEnumerationsDocumentation", False)
    annotateElementDocumentation = options.ensure_value("annotateElementDocumentation", False)
    saveXmlLang = options.ensure_value("saveLang", None)
def excelLoader(modelXbrl, mappedUri, filepath, *args, **kwargs):
    """Plugin hook (ModelDocument.PullLoader): build a DTS from an Excel workbook.

    Returns the generated entry model document, or None when the file is not
    Excel-loadable or loading produced no document.
    """
    if not isExcelLoadable(modelXbrl, mappedUri, None, filepath):
        return None  # not a file this plugin handles
    cntlr = modelXbrl.modelManager.cntlr
    cntlr.showStatus(_("Loading Excel file: {0}").format(os.path.basename(filepath)))
    loadedDoc = loadFromExcel(cntlr, modelXbrl, filepath, mappedUri)
    if loadedDoc is not None:
        # mark the model so the save hooks know this DTS came from Excel
        modelXbrl.loadedFromExcel = True
    return loadedDoc
def saveDts(cntlr, modelXbrl, outputDtsDir):
    """Save all Excel-generated documents (and copy locally referenced files) under outputDtsDir."""
    from arelle import ModelDocument
    import shutil
    excelFileDir = os.path.dirname(modelXbrl.fileSource.url)
    def saveToFile(url):
        # map a relative document url to a path under outputDtsDir, creating parent directories;
        # absolute paths are used as-is
        if os.path.isabs(url):
            return url
        filepath = os.path.join(outputDtsDir, url)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        return filepath
    # save generated schema and their linkbases
    for doc in modelXbrl.urlDocs.values():
        if getattr(doc, "loadedFromExcel", False):
            doc.save(saveToFile(doc.uri), updateFileHistory=False)
            cntlr.showStatus(_("Saving XBRL DTS: {0}").format(os.path.basename(doc.uri)))
            # also save the generated linkbases this schema references
            for refDoc in doc.referencesDocument.keys():
                if refDoc.inDTS:
                    if refDoc.type == ModelDocument.Type.LINKBASE:
                        cntlr.showStatus(_("Saving XBRL DTS: {0}").format(os.path.basename(refDoc.uri)))
                        refDoc.save(saveToFile(refDoc.uri), updateFileHistory=False)
        elif not (UrlUtil.isAbsolute(doc.uri) or os.path.isabs(doc.uri) or outputDtsDir == excelFileDir):
            # non-generated document referenced by relative path: copy it alongside the output
            srcfile = os.path.join(excelFileDir, doc.uri)
            destfile = saveToFile(doc.uri)
            if os.path.exists(srcfile):
                if not os.path.exists(destfile):
                    shutil.copyfile(srcfile, destfile)
            else:
                modelXbrl.error("loadFromExcel:missingReference",
                    "Missing source file to copy to output DTS directory: %(missingFile)s",
                    modelXbrl=modelXbrl, missingFile=doc.uri)
def guiXbrlLoaded(cntlr, modelXbrl, attach, *args, **kwargs):
    """GUI hook (CntlrWinMain.Xbrl.Loaded): offer to save an Excel-generated DTS to a directory.

    Bug fix: the chosen directory is written back to the saved configuration
    only when the user actually selected one.  Previously a cancelled
    askdirectory dialog (empty string) overwrote and persisted the remembered
    "outputDtsDir" setting.
    """
    if cntlr.hasGui and getattr(modelXbrl, "loadedFromExcel", False):
        from tkinter.filedialog import askdirectory
        outputDtsDir = askdirectory(parent=cntlr.parent,
                                    initialdir=cntlr.config.setdefault("outputDtsDir","."),
                                    title='Please select a directory for output DTS Contents')
        if outputDtsDir:
            # remember the choice for next time only when the dialog was not cancelled
            cntlr.config["outputDtsDir"] = outputDtsDir
            cntlr.saveConfig()
            saveDts(cntlr, modelXbrl, outputDtsDir)
        cntlr.showStatus(_("Excel loading completed"), 3500)
def cmdLineXbrlLoaded(cntlr, options, modelXbrl, *args, **kwargs):
    """Command line hook (CntlrCmdLine.Xbrl.Loaded): save the Excel-generated DTS when requested."""
    outputDir = options.saveExcelDTSdirectory
    if outputDir and getattr(modelXbrl, "loadedFromExcel", False):
        saveDts(cntlr, modelXbrl, outputDir)
def excelLoaderOptionExtender(parser, *args, **kwargs):
    """Plugin hook (CntlrCmdLine.Options): add the load-from-Excel command line options.

    Bug fix: the three boolean flags previously carried copy-pasted help text
    from --save-Excel-DTS-directory ("Save a DTS loaded from Excel into this
    directory."); each now describes its own effect.
    """
    parser.add_option("--save-Excel-DTS-directory",
                      action="store",
                      dest="saveExcelDTSdirectory",
                      help=_("Save a DTS loaded from Excel into this directory."))
    parser.add_option("--exclude-designated-enumerations",
                      action="store_true",
                      dest="excludeDesignatedEnumerations",
                      help=_("Exclude designated enumerations when loading concepts from Excel."))
    parser.add_option("--annotate-enumerations-documentation",
                      action="store_true",
                      dest="annotateEnumerationsDocumentation",
                      help=_("Annotate enumeration values with documentation when loading from Excel."))
    parser.add_option("--annotate-element-documentation",
                      action="store_true",
                      dest="annotateElementDocumentation",
                      help=_("Annotate elements with documentation when loading from Excel."))
    parser.add_option("--save-lang",
                      action="store",
                      dest="saveLang",
                      help=_("Save an xml:lang on top level elements (schema, linkbase)."))
class LBentry:
    """One node of a parsed linkbase tree: an ELR, a root concept, or a relationship target.

    The arcrole slot doubles as a node-kind marker: "_ELR_" for extended link
    role entries, "_root_" for root concepts, otherwise the relationship
    arcrole.  For calculation (summationItem) entries the role slot holds the
    arc weight instead of a resource role -- see the weight property.
    """
    # note: weight is intentionally absent from __slots__; it is stored in the role slot
    __slots__ = ("prefix", "name", "arcrole", "role", "childStruct", "preferredLabel", "relAttrs")
    def __init__(self, prefix=None, name=None, arcrole=None, role=None, weight=None,
                 isELR=False, isRoot=False, childStruct=None, preferredLabel=None, relAttrs=None):
        if childStruct is not None:
            self.childStruct = childStruct
        else:
            self.childStruct = []
        self.prefix = prefix
        self.name = name
        if isELR:
            self.arcrole = "_ELR_"
        elif isRoot:
            self.arcrole = "_root_"
        else:
            self.arcrole = arcrole
        if weight is not None: # summationItem
            self.role = weight
        else:
            self.role = role # resource role, or "default" if concept is a default dimension
        self.preferredLabel = preferredLabel
        self.relAttrs = relAttrs
    @property
    def isELR(self):
        # True for extended link role entries (see arcrole sentinel in __init__)
        return self.arcrole == "_ELR_"
    @property
    def isRoot(self):
        # True for root concept entries of an ELR subtree
        return self.arcrole == "_root_"
    @property
    def weight(self):
        # calculation weight is stored in the role slot for summationItem arcs; None otherwise
        if self.arcrole == summationItem:
            return self.role
        return None
    def __repr__(self):
        return "LBentry(prefix={},name={})".format(self.prefix,self.name)
# plug-in registration consumed by Arelle's plugin manager
__pluginInfo__ = {
    'name': 'Load From Excel',
    'version': '1.02',
    'description': "This plug-in loads XBRL from Excel and saves the resulting XBRL DTS.",
    'license': 'Apache-2',
    'author': 'Mark V Systems Limited',
    'copyright': '(c) Copyright 2013-2017 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    'ModelDocument.IsPullLoadable': isExcelLoadable,
    'ModelDocument.PullLoader': excelLoader,
    'CntlrWinMain.Xbrl.Loaded': guiXbrlLoaded,
    'CntlrCmdLine.Filing.Start': excelLoaderFilingStart,
    'CntlrCmdLine.Options': excelLoaderOptionExtender,
    'CntlrCmdLine.Xbrl.Loaded': cmdLineXbrlLoaded
}
| 61.984917 | 187 | 0.503301 |
import os, io, sys, time, re, traceback, json, posixpath
from fnmatch import fnmatch
from collections import defaultdict, OrderedDict
from arelle import PythonUtil, XbrlConst, ModelDocument, UrlUtil
from arelle.PythonUtil import OrderedDefaultDict, OrderedSet
from arelle.ModelDocument import Type, create as createModelDocument
from arelle.ModelValue import qname, QName
from arelle.XbrlConst import (qnLinkLabel, standardLabelRoles, qnLinkReference, standardReferenceRoles,
qnLinkPart, gen, link, defaultLinkRole,
conceptLabel, elementLabel, conceptReference, summationItem
)
# qname of the xbrldt:closed attribute placed on hypercube (all) definition arcs
qnXbrldtClosed = qname("{http://xbrl.org/2005/xbrldt}xbrldt:closed")
# presumably maps canonical import column headers to alternate spellings -- confirm at use site
importColHeaderMap = defaultdict(list)
# parses resource column headers such as "label, terse (en)" or "relationship to, MyArcrole"
resourceParsePattern = re.compile(r"(label[s]?|reference[s]?|relationship to),?\s*([\w][\w\s#+-:/]+[\w#+-/])(\s*[(]([^)]+)[)])?$")
# splits a role from a trailing "#<number>" disambiguator
roleNumberPattern = re.compile(r"(.*)[#]([0-9][0-9A-Za-z]*)")
# Excel's _xHHHH_ escape for characters not representable in sheet XML (decoded by xlUnicodeChar)
xlUnicodePattern = re.compile("_x([0-9A-F]{4})_")
# module-level option settings, overwritten by excelLoaderFilingStart from command line options
excludeDesignatedEnumerations = False
annotateEnumerationsDocumentation = False
annotateElementDocumentation = False
saveXmlLang = None
NULLENTRY = ({},)
# sort-key map for facet names -- presumably forces facet elements into schema order; confirm at use site
facetSortOrder = {
    "fractionDigits" : "_00",
    "length": "_01",
    "minInclusive": "_02",
    "maxInclusive": "_03",
    "minExclusive": "_04",
    "maxExclusive": "_05",
    "minLength": "_06",
    "maxLength": "_07",
    "pattern": "_08",
    "totalDigits": "_09",
    "whiteSpace": "_10",
    "enumeration": "_11"}
def loadFromExcel(cntlr, modelXbrl, excelFile, mappedUri):
from openpyxl import load_workbook
from arelle import ModelDocument, ModelXbrl, XmlUtil
from arelle.ModelDocument import ModelDocumentReference
from arelle.ModelValue import qname
def xlUnicodeChar(match):
return chr(int(match.group(1), 16))
def xlValue(cell):
v = cell.value
if isinstance(v, str):
return xlUnicodePattern.sub(xlUnicodeChar, v).replace('\r\n','\n').replace('\r','\n')
return v
defaultLabelLang = saveXmlLang or "en"
importColumnHeaders = {
"名前空間プレフィックス": "prefix",
"prefix": "prefix",
"要素名": "name",
"name": "name",
"type": "type",
"typePrefix": "typePrefix",
"substitutionGroup": "substitutionGroup",
"periodType": "periodType",
"balance": "balance",
"abstract": "abstract",
"nillable": "nillable",
"depth": "depth",
"minLength": "minLength",
"maxLength": "maxLength",
"minInclusive": "minInclusive",
"maxInclusive": "maxInclusive",
"length": "length",
"fixed": "fixed",
"pattern": "pattern",
"enumeration": "enumeration",
"excludedEnumeration": "excludedEnumeration",
"preferred label": "preferredLabel",
"preferredLabel": "preferredLabel",
"presentation parent": "presentationParent",
"calculation parent": "calculationParent",
"calculation weight": "calculationWeight",
"標準ラベル(日本語)": ("label", XbrlConst.standardLabel, "ja", "indented"),
"冗長ラベル(日本語)": ("label", XbrlConst.verboseLabel, "ja"),
"標準ラベル(英語)": ("label", XbrlConst.standardLabel, "en"),
"冗長ラベル(英語)": ("label", XbrlConst.verboseLabel, "en"),
"用途区分、財務諸表区分及び業種区分のラベル(日本語)": ("labels", XbrlConst.standardLabel, "ja"),
"用途区分、財務諸表区分及び業種区分のラベル(英語)": ("labels", XbrlConst.standardLabel, "en"),
"label": ("label", XbrlConst.standardLabel, defaultLabelLang, "indented"),
"label, standard": ("label", XbrlConst.standardLabel, defaultLabelLang, "overridePreferred"),
"label, terse": ("label", XbrlConst.terseLabel, defaultLabelLang),
"label, verbose": ("label", XbrlConst.verboseLabel, defaultLabelLang),
"label, documentation": ("label", XbrlConst.documentationLabel, defaultLabelLang),
"group": "linkrole",
"linkrole": "linkrole",
"ELR": "linkrole",
"dimension default": "dimensionDefault"
# attribute, qname (attribute on element in xsd)
}
fatalLoadingErrors = []
startedAt = time.time()
if os.path.isabs(excelFile):
# allow relative filenames to loading directory
priorCWD = os.getcwd()
os.chdir(os.path.dirname(excelFile))
else:
priorCWD = None
importExcelBook = load_workbook(excelFile, data_only=True)
sheetNames = importExcelBook.get_sheet_names()
dtsSheet = None
if "XBRL DTS" in sheetNames:
dtsSheet = "XBRL DTS"
elif "DTS" in sheetNames:
dtsSheet = "DTS"
elif "Sheet2" in sheetNames:
dtsSheet = "Sheet2"
if dtsSheet:
dtsWs = importExcelBook[dtsSheet]
else:
dtsWs = None
imports = {"xbrli": ( ("namespace", XbrlConst.xbrli),
("schemaLocation", "http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd") )} # xml of imports
importXmlns = {}
hasPreLB = hasCalLB = hasDefLB = hasRefLB = hasGenLB = False
# xxxLB structure [ (elr1, def1, "_ELR_", [roots]), (elr2, def2, "_ELR_", [rootw]) ...]
# roots = (rootHref, None, "_root_", [children])
# children = (childPrefix, childName, arcrole, [grandChildren])
preLB = []
defLB = []
calLB = []
refLB = []
genLB = []
def lbDepthList(lbStruct, depth, parentList=None):
if len(lbStruct) > 0:
if depth == topDepth or not hasDepthColumn:
return lbStruct[-1].childStruct
return lbDepthList(lbStruct[-1].childStruct, depth-1, list)
else:
if hasDepthColumn:
cntlr.addToLog("Depth error, Excel sheet: {excelSheet} row: {excelRow}"
.format(excelSheet=importSheetName, excelRow=iRow),
messageCode="importExcel:depth")
return None
splitString = None # to split repeating groups (order, depth)
importFileName = None # for alternate import file
importSheetNames = []
skipRows = [] # [(from,to),(from,to)] row number starting at 1
genDocs = {} # generated documents (schema + referenced linkbases)
genElementsDoc = None
    def newDoc(name):
        """Create, register under *name* in genDocs, and return a fresh
        generated-document record (a PythonUtil.attrdict) that accumulates
        everything for one generated schema and its linkbases while the
        workbook rows are processed."""
        genDocs[name] = PythonUtil.attrdict(
            name = name,
            initialComment = None,
            schemaDocumentation = None,
            extensionSchemaPrefix = "",
            extensionSchemaFilename = "",
            extensionSchemaRelDirname = None, # only non-null for relative directory path
            extensionSchemaNamespaceURI = "",
            extensionSchemaVersion = None, # <schema @version>
            extensionRoles = {}, # key is roleURI, value is role definition
            extensionRoleLabels= defaultdict(set), # key is roleURI, value is set( (lang, label) )
            extensionElements = {}, # key = element name, value = (attrs dict, facets dict or None)
            extensionTypes = {}, # attrs are name, base. has facets in separate dict same as elements
            extensionLabels = {}, # key = (prefix, name, lang, role), value = label text
            extensionReferences = OrderedDefaultDict(OrderedSet), # key = (prefix, name, role) values = (partQn, text)
            hasEnumerationDocumentation = False,
            imports = {"xbrli": ( ("namespace", XbrlConst.xbrli),
                                  ("schemaLocation", "http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd") )}, # xml of imports
            includes = [], # just schemaLocation
            importXmlns = {},
            importFilenames = {}, # file names relative to base
            childGenDocs = [],
            linkbaseRefs = [],
            labelLinkbases = [],
            referenceLinkbases = [],
            # which linkbase kinds this generated schema carries
            hasPreLB = False,
            hasCalLB = False,
            hasDefLB = False,
            hasRefLB = False,
            hasGenLB = False,
            generated = False
        )
        return genDocs[name]
thisDoc = newDoc(None)
excelDir = os.path.dirname(excelFile) + os.path.sep
def docRelpath(filename, baseDir=None):
if baseDir is None:
baseDir = thisDoc.extensionSchemaRelDirname
if (baseDir is not None and
not (UrlUtil.isAbsolute(filename) or os.path.isabs(filename))):
return posixpath.relpath(filename, baseDir)
return filename
isUSGAAP = False
isGenerateAndImport = True
extensionPrefixForCoreLabels = None
dtsActionColIndex = 0
dtsFiletypeColIndex = 1
dtsPrefixColIndex = 2
dtsFilenameColIndex = 3
dtsNamespaceURIColIndex = 4
for iRow, row in enumerate(dtsWs.rows if dtsWs else ()):
try:
if (len(row) < 1): # skip if col 1 is non-existent
continue
_col0 = row[0].value
if isinstance(_col0, str) and _col0.startswith(" continue
if iRow == 0:
# title row may have columns differently laid out
for i, col in enumerate(row):
v = xlValue(col)
if isinstance(v, str):
if v == "specification": dtsActionColIndex = i
if v.startswith("file type"): dtsFiletypeColIndex = i
if v.startswith("prefix"): dtsPrefixColIndex = i
if v.startswith("file, href or role definition"): dtsFilenameColIndex = i
if v.startswith("namespace URI"): dtsNamespaceURIColIndex = i
continue
action = filetype = prefix = filename = namespaceURI = None
if len(row) > dtsActionColIndex: action = xlValue(row[dtsActionColIndex])
if len(row) > dtsFiletypeColIndex: filetype = xlValue(row[dtsFiletypeColIndex])
if len(row) > dtsPrefixColIndex: prefix = xlValue(row[dtsPrefixColIndex])
if len(row) > dtsFilenameColIndex: filename = xlValue(row[dtsFilenameColIndex])
if len(row) > dtsNamespaceURIColIndex: namespaceURI = xlValue(row[dtsNamespaceURIColIndex])
lbType = lang = None
if action == "import":
if filetype in ("role", "arcrole"):
continue
elif filetype == "schema":
thisDoc.imports[prefix] = ( ("namespace", namespaceURI), ("schemaLocation", docRelpath(filename)) )
thisDoc.importXmlns[prefix] = namespaceURI
thisDoc.importFilenames[prefix] = filename
if re.match(r"http://[^/]+/us-gaap/", namespaceURI):
isUSGAAP = True
elif filetype == "linkbase":
typeLang = prefix.split()
if len(typeLang) > 0:
lbType = typeLang[0]
else:
lbType = "unknown"
thisDoc.linkbaseRefs.append( (lbType, filename, False) )
elif action == "include" and filename:
thisDoc.includes.append(docRelpath(filename))
elif action == "xmlns" and prefix and namespaceURI:
thisDoc.importXmlns[prefix] = namespaceURI
elif action in ("extension", "generate"):
if filetype == "schema":
if prefix:
# starts new document.
if not thisDoc.name:
del genDocs[thisDoc.name] # remove anonymous doc
thisDoc = newDoc(prefix) # new doc with prefix as its name
thisDoc.extensionSchemaPrefix = prefix
thisDoc.extensionSchemaFilename = filename
thisDoc.extensionSchemaNamespaceURI = namespaceURI
if not UrlUtil.isAbsolute(filename) and not os.path.isabs(filename):
thisDoc.extensionSchemaRelDirname = posixpath.dirname(filename)
else:
thisDoc.extensionSchemaRelDirname = None
elif filetype == "linkbase":
typeLang = prefix.split()
if len(typeLang) > 0:
lbType = typeLang[0]
else:
lbType = "unknown"
if len(typeLang) > 1:
lang = referenceRole = typeLang[1]
else:
lang = None
referenceRole = XbrlConst.standardReference
if lbType in ("label", "generic-label"):
# lang, if provided, is a regex pattern
thisDoc.labelLinkbases.append((lbType, lang, filename))
if action == "extension" and not extensionPrefixForCoreLabels:
extensionPrefixForCoreLabels = thisDoc.extensionSchemaPrefix
elif lbType in ("reference", "generic-reference"):
hasRefLB = True
thisDoc.referenceLinkbases.append((lbType, referenceRole, filename))
elif lbType == "presentation":
thisDoc.hasPreLB = hasPreLB = True
elif lbType == "definition":
thisDoc.hasDefLB = hasDefLB = True
elif lbType == "calculation":
thisDoc.hasCalLB = hasCalLB = True
elif lbType == "generic":
thisDoc.hasGenLB = hasGenLB = True
thisDoc.linkbaseRefs.append( (lbType, filename, True) )
elif filetype == "initialComment" and prefix:
thisDoc.initialComment = prefix
elif filetype == "schemaDocumentation" and prefix:
thisDoc.schemaDocumentation = prefix
elif filetype == "enumerationDocumentation":
thisDoc.hasEnumerationDocumentation = True
elif filetype == "role" and namespaceURI: # filename is definition, prefix is optional used-on QNames
thisDoc.extensionRoles[namespaceURI] = (filename, prefix)
elif filetype == "role label" and namespaceURI and prefix: # filename is label, prefix is language
thisDoc.extensionRoleLabels[namespaceURI].add( (filename, prefix) )
elif filetype == "schema-version" and filename:
thisDoc.extensionSchemaVersion = filename
elif filetype == "table-style" and filename == "xbrl-us":
isUSGAAP = True
elif filetype == "elements":
genElementsDoc = thisDoc
elif action == "meta" and filetype == "table-style" and filename == "xbrl-us":
isUSGAAP = True
elif action == "meta" and filetype == "generate-style" and filename == "import-separately":
isGenerateAndImport = False
elif action == "workbook" and filename:
importFileName = filename
elif action == "worksheet" and filename:
importSheetNames.append(filename)
elif action == "colheader" and filename and namespaceURI:
if namespaceURI == "split":
splitString = filename
else:
importColHeaderMap[filename].append(namespaceURI)
if namespaceURI not in importColumnHeaders:
fatalLoadingErrors.append("colheader {} definition {} not recognized.".format(filename, namespaceURI))
elif action == "skip rows" and filename:
fromRow, _sep, toRow = filename.partition("-")
try:
skipRows.append((int(fromRow), int(toRow) if toRow else int(fromRow)))
except (ValueError, TypeError):
fatalLoadingErrors.append("Exception (at skip rows): {error}, Excel sheet: {excelSheet} row: {excelRow}"
.format(error=err, excelSheet=dtsSheet, excelRow=iRow))
except Exception as err:
fatalLoadingErrors.append("Exception: {error}, Excel sheet: {excelSheet} row: {excelRow}, Traceback: {traceback}"
.format(error=err, excelSheet=dtsSheet, excelRow=iRow, traceback=traceback.format_tb(sys.exc_info()[2])))
# remove any imported linkbaseRefs that are also generated
for thisDoc in genDocs.values():
linkbaseRefsToRemove = [i
for i, (lbType, filename, generate) in enumerate(thisDoc.linkbaseRefs)
if not generate and (lbType, filename, True) in thisDoc.linkbaseRefs]
while len(linkbaseRefsToRemove):
i = linkbaseRefsToRemove.pop()
thisDoc.linkbaseRefs.pop(i)
dtsWs = None # dereference
genOrder = []
for name, doc in genDocs.items():
insertPos = len(genOrder)
for i, otherDoc in enumerate(genOrder):
if doc.name in otherDoc.imports:
insertPos = i # put this doc before any firstr doc that imports it
break
genOrder.insert(insertPos, doc)
if importFileName: # alternative workbook
importExcelBook = load_workbook(importFileName, read_only=True, data_only=True)
sheetNames = importExcelBook.get_sheet_names()
if importSheetNames:
for importSheetName in importSheetNames:
if importSheetName not in sheetNames:
fatalLoadingErrors.append("Worksheet {} specified for Excel importing, but not present in workbook.".format(importSheetName))
else:
for s in sheetNames:
if s.endswith("Concepts"):
importSheetNames.append(s)
if not importSheetNames:
for s in sheetNames:
if "xbrl" in s.lower() and "dts" not in s:
importSheetNames.append(s)
if not importSheetNames:
fatalLoadingErrors.append("Worksheet {} specified for Excel importing, but not present in workbook.".format(importSheetName))
if not isUSGAAP and genOrder: # need extra namespace declaration
genOrder[0].importXmlns["iod"] = "http://disclosure.edinet-fsa.go.jp/taxonomy/common/2013-03-31/iod"
# find column headers row
headerCols = OrderedDict()
headerColsAllElrs = set()
hasLinkroleSeparateRow = True
hasPreferredLabelTextColumn = False
hasConceptAttributeColumn = False
hasDepthColumn = False
hasPresentationParentColumn = False
hasRelationshipToCol = False
hasrelationshipAttributeColumn = False
headerRows = set()
topDepth = 999999
for importSheetName in importSheetNames:
if importSheetName not in sheetNames:
continue
headerCols.clear()
headerRows.clear()
hasConceptAttributeColumn = False
hasDepthColumn = False
hasPresentationParentColumn = False
hasRelationshipToCol = False
hasrelationshipAttributeColumn = False
conceptsWs = importExcelBook[importSheetName]
        def setHeaderCols(row):
            """Rebuild the headerCols mapping from a candidate header row.

            Keys are either standard header names (values of
            importColumnHeaders), (resourceType, "/roleSegment", langOrPart)
            tuples for label/reference/relationship columns, or the raw header
            text for unrecognized custom columns; values are column indices.
            Lookup order matters: workbook-specific aliases (importColHeaderMap)
            take precedence over the standard header names.
            """
            headerCols.clear()
            for iCol, colCell in enumerate(row):
                v = xlValue(colCell)
                if isinstance(v,str):
                    v = v.strip()
                if v in importColHeaderMap:
                    # alias declared on the DTS control sheet; may expand to
                    # several standard headers
                    for hdr in importColHeaderMap[v]:
                        if hdr in importColumnHeaders:
                            headerCols[importColumnHeaders[hdr]] = iCol
                elif v in importColumnHeaders:
                    headerCols[importColumnHeaders[v]] = iCol
                elif isinstance(v,str):
                    if any(v.startswith(r) for r in ("label,", "labels,", "reference,", "references,", "relationship to,")):
                        # custom/extension label/reference
                        m = resourceParsePattern.match(v)
                        if m:
                            _resourceType = m.group(1)
                            _resourceRole = "/" + m.group(2) # last path seg of role
                            _resourceLangOrPart = m.group(4) # lang or part
                            headerCols[(_resourceType, _resourceRole, _resourceLangOrPart)] = iCol
                    else:
                        # custom/extension non-label/reference value column
                        headerCols[v] = iCol
# find out which rows are header rows
for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
if any(fromRow <= iRow+1 <= toRow for fromRow,toRow in skipRows):
continue
#for iCol, colCell in enumerate(row):
setHeaderCols(row)
# must have some of these to be a header col
if (sum(1 for h in headerCols if h in ("name", "type", "depth", "periodType")) >= 3 or
sum(1 for h in headerCols if h == "name" or (isinstance(h, tuple) and h[0] == "relationship to")) >= 2):
# it's a header col
headerRows.add(iRow+1)
if 'linkrole' in headerCols:
hasLinkroleSeparateRow = False
if 'preferredLabel' in headerCols and any(isinstance(h, tuple) and h[0] == 'label' and h[1] == '/preferredLabel'
for h in headerCols):
hasPreferredLabelTextColumn = True
if 'depth' in headerCols:
hasDepthColumn = True
if 'presentationParent' in headerCols:
hasPresentationParentColumn = True
if not hasDepthColumn and hasPresentationParentColumn:
topDepth = 0
hasRelationshipToCol = any(h[0] == "relationship to" for h in headerCols if isinstance(h, tuple))
headerCols.clear()
def cellHasValue(row, header, _type):
if header in headerCols:
iCol = headerCols[header]
return iCol < len(row) and isinstance(row[iCol].value, _type)
return False
def cellValue(row, header, strip=False, nameChars=False, default=None):
if header in headerCols:
iCol = headerCols[header]
if iCol < len(row):
v = xlValue(row[iCol])
if strip and isinstance(v, str):
v = v.strip()
if nameChars and isinstance(v, str):
v = ''.join(c for c in v if c.isalnum() or c in ('.', '_', '-'))
if v is None:
return default
return v
return default
def valueNameChars(v):
return ''.join(c for c in v if c.isalnum() or c in ('.', '_', '-'))
        def rowPrefixNameValues(row):
            """Return the (prefix, name) pair for this row's element.

            The prefix comes from the 'prefix' column when present; otherwise
            a "prefix:name" QName in the 'name' column is split.  Note the
            name-character filter drops the ':' too, so the prefix is removed
            from the filtered name by slicing off len(prefix) characters.
            When no prefix column exists at all, the element-generating
            document's schema prefix is assumed.  name is None when the
            'name' cell holds no string.
            """
            prefix = cellValue(row, 'prefix', nameChars=True)
            if cellHasValue(row, 'name', str):
                if not prefix: # maybe name is a qname
                    prefix, _sep, _name = cellValue(row, 'name').partition(":")
                    if not _sep: # no prefix at all, whole string is name
                        prefix = ""
                    # nameChars filtering removed the ':', so strip the prefix
                    # by length rather than re-partitioning
                    name = cellValue(row, 'name', nameChars=True)[len(prefix):]
                else:
                    name = cellValue(row, 'name', nameChars=True)
            else:
                name = None
            if not prefix and "prefix" not in headerCols and genElementsDoc is not None:
                prefix = genElementsDoc.extensionSchemaPrefix
            return prefix, name
def checkImport(thisDoc, qname):
prefix, sep, localName = qname.partition(":")
if sep:
if prefix not in thisDoc.imports:
if prefix == "xbrldt":
thisDoc.imports["xbrldt"] = ("namespace", XbrlConst.xbrldt), ("schemaLocation", "http://www.xbrl.org/2005/xbrldt-2005.xsd")
elif prefix == "nonnum":
thisDoc.imports["nonnum"] = ("namespace", "http://www.xbrl.org/dtr/type/non-numeric"), ("schemaLocation", "http://www.xbrl.org/dtr/type/nonNumeric-2009-12-16.xsd")
elif prefix != thisDoc.extensionSchemaPrefix and prefix != "xs":
cntlr.addToLog("Warning: prefix schema file is not imported for: {qname}"
.format(qname=qname),
messageCode="importExcel:warning", file=thisDoc.extensionSchemaFilename)
# find top depth
for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
if (iRow + 1) in headerRows:
setHeaderCols(row)
hasConceptAttributeColumn = any(v.startswith("attribute, ") for v in headerCols if isinstance(v,str))
hasRelationshipAttributeColumn = any(v.startswith("relationship attribute, ") for v in headerCols if isinstance(v,str))
elif not (hasLinkroleSeparateRow and (iRow + 1) in headerRows) and 'depth' in headerCols:
depth = cellValue(row, 'depth')
if isinstance(depth, int) and depth < topDepth:
topDepth = depth
# find header rows
currentELR = currentELRdefinition = None
for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
useLabels = False
eltEnumRefsParts = None
if any(fromRow <= iRow+1 <= toRow for fromRow,toRow in skipRows):
continue
if (all(col.value is None for col in row) or
all(isinstance(row[i].value, str) and row[i].value.strip() == "n/a"
for i in (headerCols.get("name"), headerCols.get("type"), headerCols.get("value"))
if i is not None)):
continue # skip blank row
try:
isHeaderRow = (iRow + 1) in headerRows
isELRrow = hasLinkroleSeparateRow and (iRow + 2) in headerRows
if isHeaderRow:
setHeaderCols(row)
headerColsAllElrs |= _DICT_SET(headerCols.keys()) # accumulate all header cols for role checks
elif isELRrow:
currentELR = currentELRdefinition = None
for colCell in row:
v = str(xlValue(colCell) or '')
if v.startswith("http://"):
currentELR = v
elif not currentELRdefinition and v.endswith(" 科目一覧"):
currentELRdefinition = v[0:-5]
elif not currentELRdefinition:
currentELRdefinition = v
if currentELR or currentELRdefinition:
if hasPreLB:
preLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasPresentationParentColumn:
preRels = set()
if hasDefLB:
defLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasCalLB:
calLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
calRels = set() # prevent duplications when same rel in different parts of tree
if hasGenLB:
genLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
elif headerCols:
if "linkrole" in headerCols and cellHasValue(row, 'linkrole', str):
v = cellValue(row, 'linkrole', strip=True)
_trialELR = _trialELRdefinition = None
if v.startswith("http://"):
_trialELR = v
elif v.endswith(" 科目一覧"):
_trialELRdefinition = v[0:-5]
else:
_trialELRdefinition = v
if (_trialELR and _trialELR != currentELR) or (_trialELRdefinition and _trialELRdefinition != currentELRdefinition):
currentELR = _trialELR
currentELRdefinition = _trialELRdefinition
if currentELR or currentELRdefinition:
if hasPreLB:
preLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasDefLB:
defLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasCalLB:
calLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
calRels = set() # prevent duplications when same rel in different parts of tree
if hasGenLB:
genLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
prefix, name = rowPrefixNameValues(row)
if cellHasValue(row, 'depth', int):
depth = cellValue(row, 'depth')
elif hasDepthColumn:
depth = None # non-ELR section, no depth
else: # depth provided by parent reference
depth = 0
subsGrp = cellValue(row, 'substitutionGroup')
isConcept = subsGrp in ("xbrli:item", "xbrli:tuple",
"xbrldt:hypercubeItem", "xbrldt:dimensionItem")
if (prefix in genDocs) and name not in genDocs[prefix].extensionElements and name:
thisDoc = genDocs[prefix]
# elements row
eltType = cellValue(row, 'type')
eltTypePrefix = cellValue(row, 'typePrefix')
if not eltType:
eltType = 'xbrli:stringItemType'
elif eltTypePrefix and ':' not in eltType:
eltType = eltTypePrefix + ':' + eltType
elif ':' not in eltType and eltType.endswith("ItemType"):
eltType = 'xbrli:' + eltType
abstract = cellValue(row, 'abstract')
nillable = cellValue(row, 'nillable')
balance = cellValue(row, 'balance')
periodType = cellValue(row, 'periodType')
eltAttrs = {"name": name, "id": (prefix or "") + "_" + name}
if eltType:
eltAttrs["type"] = eltType
checkImport(thisDoc, eltType)
if subsGrp:
eltAttrs["substitutionGroup"] = subsGrp
checkImport(thisDoc, subsGrp)
if abstract or subsGrp in ("xbrldt:hypercubeItem", "xbrldt:dimensionItem"):
eltAttrs["abstract"] = abstract or "true"
if nillable:
eltAttrs["nillable"] = nillable
if balance:
eltAttrs["{http://www.xbrl.org/2003/instance}balance"] = balance
if periodType:
eltAttrs["{http://www.xbrl.org/2003/instance}periodType"] = periodType
if hasConceptAttributeColumn:
# custom attributes (attribute, prefix:localName in header)
for header in headerCols:
if isinstance(header, str) and header.startswith("attribute, "):
value = cellValue(row, header)
if value not in (None, ""):
eltAttrs[header[11:]] = value # fix QName later after schemaElt exists
eltFacets = None
eltEnumRefParts = None
if eltType not in ("nonnum:domainItemType", "xbrli:booleanItemType", "xbrli:positiveIntegerItemType", "xbrli:dateItemType",
"xbrli:gYearItemType"):
for facet in ("minLength", "maxLength", "minInclusive", "maxInclusive",
"length", "fixed", "pattern", "enumeration", "excludedEnumeration"):
v = cellValue(row, facet)
if v is not None:
if facet == "enumeration" and v.startswith("See tab "): # check for local or tab-contained enumeration
_match = re.match(r"See tab ([^!]+)([!]([0-9]+):([0-9]+))?", v)
if _match:
_tab, _dummy, _rowFrom, _rowTo = _match.groups()
if _tab in sheetNames:
enumWs = importExcelBook[_tab]
if _rowFrom and _rowTo:
# take cols named "enumeration" and "reference parts"
colHdrs = [enumWs.cell(row=1,column=i).value for i in range(1,enumWs.max_column+1)]
eltEnumValues = []
eltEnumRefsParts = []
for i in range(int(_rowFrom), int(_rowTo)+1):
_parts = []
eltEnumRefsParts.append(_parts)
for j, h in enumerate(colHdrs):
c = enumWs.cell(row=i,column=j+1).value
if c is not None:
if h == "enumeration":
eltEnumValues.append(str(c))
else:
m = resourceParsePattern.match(h)
if m:
_resourceType = m.group(1)
_resourceRole = "/" + m.group(2) # last path seg of role
_resourceLangOrPart = m.group(4) # lang or part
_parts.append(((_resourceType, _resourceRole, _resourceLangOrPart), c))
v = "\n".join(eltEnumValues) if eltEnumValues else None
else: # cols 1 and 2 are enum and labels
v = "\n".join(" = ".join(xlValue(col) for col in row if xlValue(col))
for i, row in enumerate(enumWs.rows)
if i > 0) # skip heading row
if v is not None:
if eltFacets is None: eltFacets = {}
eltFacets[facet] = v
# if extension type is this schema, add extensionType for facets
if eltType and ':' in eltType:
_typePrefix, _sep, _typeName = eltType.rpartition(":")
baseType = cellValue(row, 'baseType')
baseTypePrefix = cellValue(row, 'baseTypePrefix')
if baseType and baseTypePrefix:
_baseType = "{}:{}".format(baseTypePrefix, baseType)
elif baseType:
_baseType = baseType
elif _typeName.endswith("ItemType"):
_baseType = "xbrli:tokenItemType" # should be a column??
else:
_baseType = "xs:token"
if _typePrefix in genDocs:
_typeDoc = genDocs[_typePrefix]
if _typeName not in _typeDoc.extensionTypes:
_typeDoc.extensionTypes[_typeName] = ({"name":_typeName, "base":_baseType},eltFacets)
thisDoc.extensionElements[name] = (eltAttrs, None)
else: # not declarable
thisDoc.extensionElements[name] = (eltAttrs, eltFacets)
else:
thisDoc.extensionElements[name] = (eltAttrs, eltFacets)
thisDoc = None # deref for debugging
useLabels = True
if depth is not None or hasPresentationParentColumn:
if name is None:
_label = None
for colCell in row:
if colCell.value is not None:
_label = xlValue(colCell)
break
print ("Sheet {} row {} has relationships and no \"name\" field, label: {}".format(importSheetName, iRow+1, _label))
if hasPreLB:
preferredLabel = cellValue(row, 'preferredLabel')
if hasDepthColumn:
entryList = lbDepthList(preLB, depth)
if entryList is not None and isConcept:
if not name or not prefix:
_name = "none"
if depth == topDepth:
entryList.append( LBentry(prefix=prefix, name=name, isRoot=True) )
else:
entryList.append( LBentry(prefix=prefix, name=name, arcrole=XbrlConst.parentChild,
role=preferredLabel) )
elif hasPresentationParentColumn:
preParent = cellValue(row, 'presentationParent', default='') # only one top parent makes sense
if preParent:
preParentPrefix, _sep, preParentName = preParent.rpartition(":")
preParentName = valueNameChars(preParentName)
entryList = lbDepthList(preLB, topDepth)
if entryList is not None:
preRel = (preParentPrefix, preParentName, prefix, name, currentELR or currentELRdefinition)
if preRel not in preRels:
entryList.append( LBentry(prefix=preParentPrefix, name=preParentName, isRoot=True, childStruct=
[LBentry(prefix=prefix, name=name, arcrole=XbrlConst.parentChild,
preferredLabel=preferredLabel )]) )
preRels.add(preRel)
else:
pass
if hasDefLB and topDepth != 999999:
entryList = lbDepthList(defLB, depth)
if entryList is not None:
if depth == topDepth:
if isConcept:
entryList.append( LBentry(prefix=prefix, name=name, isRoot=True) )
else:
if (not preferredLabel or # prevent start/end labels from causing duplicate dim-mem relationships
not any(lbEntry.prefix == prefix and lbEntry.name == name
for lbEntry in entryList)):
# check if entry is a typed dimension
eltAttrs = {}
parentLBentry = lbDepthList(defLB, depth - 1)[-1]
parentName = parentLBentry.name
parentEltAttrs = {}
for doc in genDocs.values():
if name in doc.extensionElements:
eltAttrs = doc.extensionElements.get(name, NULLENTRY)[0]
if parentName in doc.extensionElements:
parentEltAttrs = doc.extensionElements.get(parentName, NULLENTRY)[0]
if (isUSGAAP and # check for typed dimensions
parentEltAttrs.get("substitutionGroup") == "xbrldt:dimensionItem"
and eltAttrs.get("type") != "nonnum:domainItemType"):
# typed dimension, no LBentry
typedDomainRef = "#" + eltAttrs.get("id", "")
parentEltAttrs["{http://xbrl.org/2005/xbrldt}typedDomainRef"] = typedDomainRef
elif isConcept:
# explicit dimension
role = None # default for a default dimension
if "dimensionDefault" in headerCols and cellHasValue(row, 'dimensionDefault', (str,bool)):
v = cellValue(row, 'dimensionDefault', strip=True)
if v:
role = "_dimensionDefault_"
entryList.append( LBentry(prefix=prefix, name=name, arcrole="_dimensions_", role=role) )
if hasCalLB:
calcParents = cellValue(row, 'calculationParent', default='').split()
calcWeights = str(cellValue(row, 'calculationWeight', default='')).split() # may be float or string
if calcParents and calcWeights:
# may be multiple parents split by whitespace
for i, calcParent in enumerate(calcParents):
calcWeight = calcWeights[i] if i < len(calcWeights) else calcWeights[-1]
calcParentPrefix, _sep, calcParentName = calcParent.rpartition(":")
calcParentName = valueNameChars(calcParentName)
entryList = lbDepthList(calLB, topDepth)
if entryList is not None:
calRel = (calcParentPrefix, calcParentName, prefix, name)
if calRel not in calRels:
entryList.append( LBentry(prefix=calcParentPrefix, name=calcParentName, isRoot=True, childStruct=
[LBentry(prefix=prefix, name=name, arcrole=XbrlConst.summationItem, weight=calcWeight )]) )
calRels.add(calRel)
else:
pass
hasRelationshipToCol = any(h[0] == "relationship to" for h in headerCols if isinstance(h, tuple))
# accumulate extension labels and any reference parts
if useLabels or hasRelationshipToCol:
prefix, name = rowPrefixNameValues(row)
if name is not None and (prefix in genDocs or extensionPrefixForCoreLabels or hasRelationshipToCol):
thisDoc = genDocs.get(extensionPrefixForCoreLabels or prefix) # None for relationshipTo a imported concept
preferredLabel = cellValue(row, 'preferredLabel')
for colItem, iCol in headerCols.items():
if isinstance(colItem, tuple):
colItemType = colItem[0]
role = colItem[1]
lang = part = colItem[2] # lang for label, part for reference
cell = row[iCol]
v = xlValue(cell)
if v is None or (isinstance(v, str) and not v):
values = ()
else:
v = str(v) # may be an int or float instead of str
if colItemType in ("label", "reference", "relationship to"):
values = (v,)
elif colItemType in ("labels", "references"):
values = v.split('\n')
if preferredLabel and "indented" in colItem and not hasPreferredLabelTextColumn: # indented column sets preferredLabel if any
role = preferredLabel
for i, value in enumerate(values):
if colItemType == "relationship to": # doesn't require thisDoc
entryList = lbDepthList(genLB, topDepth)
if entryList is not None:
toName = value
if ":" in toName:
toPrefix, _sep, toName = value.partition(":")
else:
toPrefix = prefix
if hasRelationshipAttributeColumn:
# custom attributes (attribute, prefix:localName in header)
relAttrs = None
for header in headerCols:
if isinstance(header, str) and header.startswith("relationship attribute, "):
attrValue = cellValue(row, header)
if attrValue not in (None, ""):
if relAttrs is None: relAttrs = {}
relAttrs[header[24:]] = attrValue # fix QName later after schemaElt exists
entryList.append( LBentry(prefix=prefix, name=name, isRoot=True, childStruct=
[LBentry(prefix=toPrefix, name=toName, arcrole=role, relAttrs=relAttrs)]) )
elif thisDoc is None:
pass
# following options only apply to linkbases of generated taxonomies
elif colItemType in ("label", "labels"):
if isConcept:
if hasPreferredLabelTextColumn and role == "/preferredLabel":
role = preferredLabel
else:
if role == XbrlConst.standardLabel:
role = XbrlConst.genStandardLabel # must go in generic labels LB
elif role == XbrlConst.documentationLabel:
role = XbrlConst.genDocumentationLabel
else:
continue
thisDoc.extensionLabels[prefix, name, lang, role] = value.strip()
elif hasRefLB and colItemType == "reference":
if isConcept:
# keep parts in order and not duplicated
thisDoc.extensionReferences[prefix, name, role].add((part, value.strip()))
elif hasRefLB and colItemType == "references":
if isConcept:
# role ending in # is appended with the value ordinal
if role.endswith("
_role = "{}{:05.0f}".format(role, i)
else:
_role = role
_value = value.strip().replace("\\n", "\n")
if part is None: # part space value
_part, _sep, _value = _value.partition(" ")
else:
_part = part
# keep parts in order and not duplicated
thisDoc.extensionReferences[prefix, name, _role].add((_part, _value))
if isConcept and eltEnumRefsParts and thisDoc is not None:
for i, _enumRefParts in enumerate(eltEnumRefsParts):
for (colItemType, role, part), value in _enumRefParts:
if colItemType == "reference":
_role = "{}
thisDoc.extensionReferences[prefix, name, _role].add((part, value.strip()))
thisDoc = None # deref for debugging
except Exception as err:
fatalLoadingErrors.append("Excel sheet: {excelSheet}, row: {excelRow}, error: {error}, Traceback: {traceback}"
.format(error=err, excelSheet=importSheetName, excelRow=iRow, traceback=traceback.format_tb(sys.exc_info()[2]))) # uncomment to debug raise
if not headerCols:
if not conceptsWs:
fatalLoadingErrors.append("Neither control worksheet (XBRL DTS tab) nor standard columns found, no DTS imported.")
elif not currentELR:
fatalLoadingErrors.append("Extended link role not found, no DTS imported.")
if fatalLoadingErrors:
raise Exception(",\n ".join(fatalLoadingErrors))
if isUSGAAP and hasDefLB:
# move line items above table
        def fixUsggapTableDims(lvl1Struct, level=0):
            """Restructure a US-GAAP definition-linkbase tree so LineItems precede Tables.

            Walks lvl1Struct (a list of tree-entry objects with .name and
            .childStruct) looking for the pattern
            lvl1 -> *Table/*Cube -> *LineItems and rewires it in place so the
            LineItems node is hoisted above the Table node.  Also prunes
            heading-only entries under a Table-less lvl1, and (at level 0)
            removes lvl1 links in which no table was found at any depth.

            Returns True when a Table/LineItems pattern was found at this
            level or any nested level.

            NOTE(review): foundLineItems is accumulated across all lvl1
            entries while foundHeadingItems is cleared per entry — presumably
            intentional so only the last entry's headings are pruned; confirm.
            """
            foundTable = False
            emptyLinks = []          # lvl1 entries with no table anywhere below (level 0 only)
            foundHeadingItems = []   # (lvl1, lvl2) pairs under a non-table lvl2
            foundLineItems = []      # (lvl1, table, lineItems) triples to rewire
            for lvl1Entry in lvl1Struct:
                for lvl2Entry in lvl1Entry.childStruct:
                    if any(lvl2Entry.name.endswith(suffix) for suffix in ("Table", "_table", "Cube", "_cube")):
                        for lvl3Entry in lvl2Entry.childStruct:
                            if any(lvl3Entry.name.endswith(suffix) for suffix in ("LineItems", "_line_items")):
                                foundLineItems.append((lvl1Entry, lvl2Entry, lvl3Entry))
                                foundTable = True
                                break
                    else:
                        foundHeadingItems.append((lvl1Entry, lvl2Entry))
                if not foundLineItems:
                    # no direct table/lineItems under this entry; recurse deeper
                    foundNestedTable = fixUsggapTableDims(lvl1Entry.childStruct, level+1)
                    if level == 0 and not foundNestedTable:
                        emptyLinks.append(lvl1Entry)
                    foundTable |= foundNestedTable
                del foundHeadingItems[:]
            #if foundLineItems or foundHeadingItems:
            #    print("lvlentry {}\n headingITems {}\n emptyLinks {}\n\n".format(foundLineItems, foundHeadingItems, emptyLinks))
            for lvl1Entry, lvl2Entry, lvl3Entry in foundLineItems:
                # hoist lineItems (lvl3) above the table (lvl2): insert lvl3 where
                # lvl2 was, then nest lvl2 as lvl3's first child
                i1 = lvl1Entry.childStruct.index(lvl2Entry)
                lvl1Entry.childStruct.insert(i1, lvl3Entry) # must keep lvl1Rel if it is __root__
                lvl3Entry.childStruct.insert(0, lvl2Entry)
                if any(lvl1Entry.name.endswith(suffix)
                       for suffix in ("Abstract", "_abstract", "Root", "_root", "_package", "_heading")):
                    lvl1Entry.childStruct.remove(lvl2Entry)
                lvl2Entry.childStruct.remove(lvl3Entry)
            for lvl1Entry, lvl2Entry in foundHeadingItems:
                lvl1Entry.childStruct.remove(lvl2Entry)
            for emptyLink in emptyLinks:
                lvl1Struct.remove(emptyLink)
            return foundTable
fixUsggapTableDims(defLB)
modelDocuments = []
modelXbrl.blockDpmDBrecursion = True
def generateDoc(thisDoc, parentDoc, visitedDocNames):
if thisDoc.name in visitedDocNames:
modelXbrl.error("loadFromExcel:circularDependency",
"Generation order dependency is circular: %(circularDependency)s",
modelXbrl=modelXbrl, circularDependency=",".join(visitedDocNames) + ", " + thisDoc.name)
return
visitedDocNames.append(thisDoc.name)
if XbrlConst.xsd not in thisDoc.importXmlns.values():
eltName = 'schema xmlns="{}"'.format(XbrlConst.xsd)
else:
for k,v in thisDoc.importXmlns.items():
if v == XbrlConst.xsd:
eltName = "{}:schema".format(k)
break
doc = createModelDocument(
modelXbrl,
Type.SCHEMA,
thisDoc.extensionSchemaFilename,
isEntry=(parentDoc is None),
# initialComment="extracted from OIM {}".format(mappedUri),
documentEncoding="utf-8",
base='', # block pathname from becomming absolute
initialXml='''
<{eltName}
targetNamespace="{targetNamespace}"
attributeFormDefault="unqualified"
elementFormDefault="qualified"
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:{extensionPrefix}="{targetNamespace}"
{importXmlns}
xmlns:nonnum="http://www.xbrl.org/dtr/type/non-numeric"
xmlns:link="http://www.xbrl.org/2003/linkbase"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrldt="http://xbrl.org/2005/xbrldt"
{schemaVersion}{xmlLang} />
'''.format(eltName=eltName,
targetNamespace=thisDoc.extensionSchemaNamespaceURI,
extensionPrefix=thisDoc.extensionSchemaPrefix,
importXmlns=''.join('xmlns:{0}="{1}"\n'.format(prefix, namespaceURI)
for prefix, namespaceURI in thisDoc.importXmlns.items()),
schemaVersion='version="{}" '.format(thisDoc.extensionSchemaVersion) if thisDoc.extensionSchemaVersion else '',
xmlLang='\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
),
initialComment=thisDoc.initialComment
)
if parentDoc is None:
modelXbrl.modelDocument = doc
thisDoc.generated = True # prevent recursion
doc.loadedFromExcel = True # signal to save generated taoxnomy in saveToFile below
doc.inDTS = True # entry document always in DTS
doc.targetNamespace = thisDoc.extensionSchemaNamespaceURI # not set until schemaDiscover too late otherwise
schemaElt = doc.xmlRootElement
#foreach linkbase
annotationElt = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "annotation")
if thisDoc.schemaDocumentation:
XmlUtil.addChild(annotationElt, XbrlConst.xsd, "documentation", text=thisDoc.schemaDocumentation)
appinfoElt = XmlUtil.addChild(annotationElt, XbrlConst.xsd, "appinfo")
# add linkbaseRefs
appinfoElt = XmlUtil.descendant(schemaElt, XbrlConst.xsd, "appinfo")
# don't yet add linkbase refs, want to process imports first to get roleType definitions
# add includes
for filename in thisDoc.includes:
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "include", attributes=( ("schemaLocation", filename), ) )
# add imports
for importPrefix, importAttributes in sorted(thisDoc.imports.items(),
key=lambda item:item[1]):
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "import", attributes=importAttributes)
# is the import an xsd which we have to generate
if importPrefix in genDocs and not genDocs[importPrefix].generated:
generateDoc(genDocs[importPrefix], doc, visitedDocNames) # generate document
# add imports for gen LB if any role definitions (for discovery) and generic labels
if any(roleURI in thisDoc.extensionRoleLabels for roleURI in thisDoc.extensionRoles.keys()):
for importAttributes in ((("namespace", XbrlConst.gen), ("schemaLocation", "http://www.xbrl.org/2008/generic-link.xsd")),
(("namespace", XbrlConst.genLabel), ("schemaLocation", "http://www.xbrl.org/2008/generic-label.xsd"))):
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "import", attributes=importAttributes )
_enumNum = [1] # must be inside an object to be referenced in a nested procedure
        def addFacets(thisDoc, restrElt, facets):
            """Emit xs facet child elements (enumeration, length, pattern, ...) under restrElt.

            facets is a dict of facet-name -> value; "enumeration" values are
            multi-line "value=label" strings (a label of the form {...} is
            parsed as a JSON dict of lang -> label).  Depending on the
            excludedEnumeration flag ("X"/"D") and the closure's
            annotateEnumerationsDocumentation setting, enumerations are
            rendered either as a single xs:documentation summary or as
            individual xs:enumeration elements with ids and (generic
            standard) labels registered on thisDoc.extensionLabels.
            """
            if facets:
                excludedEnumeration = facets.get("excludedEnumeration")
                if ((annotateEnumerationsDocumentation and excludedEnumeration == "X")
                    or excludedEnumeration == "D"):
                    # if generateEnumerationsDocumentationOnly annotation must be first child element
                    for facet, facetValue in facets.items():
                        if facet == "enumeration":
                            enumerationsDocumentation = []
                            for valLbl in facetValue.split("\n"):
                                val, _sep, _label = valLbl.partition("=")
                                val = val.strip()
                                if len(val):
                                    if val == "(empty)":
                                        val = ""
                                    _label = _label.strip()
                                    enumerationsDocumentation.append("{}: {}".format(val, _label) if _label else val)
                            XmlUtil.addChild(XmlUtil.addChild(restrElt, XbrlConst.xsd, "annotation"),
                                             XbrlConst.xsd, "documentation", text=
                                             " \n".join(enumerationsDocumentation))
                # facetSortOrder (closure) fixes schema-valid facet ordering
                for facet, facetValue in sorted(facets.items(), key=lambda i:facetSortOrder.get(i[0],i[0])):
                    if facet == "enumeration":
                        if not annotateEnumerationsDocumentation and not excludedEnumeration:
                            for valLbl in facetValue.split("\n"):
                                val, _sep, _label = valLbl.partition("=")
                                val = val.strip()
                                _label = _label.strip()
                                if len(val):
                                    if val == "(empty)":
                                        val = ""
                                    _attributes = {"value":val}
                                    if _label:
                                        _labelsByLang = None
                                        if _label.startswith("{") and _label.endswith("}"):
                                            try:
                                                # multi-lingual labels are json dict
                                                _labelsByLang = json.loads(_label)
                                            except json.decoder.JSONDecodeError:
                                                _labelsByLang = None
                                        # _enumNum is a 1-elt list so the counter survives this nested scope
                                        _name = "enum{}".format(_enumNum[0])
                                        _attributes["id"] = thisDoc.extensionSchemaPrefix + "_" + _name
                                        _enumNum[0] += 1
                                        if _labelsByLang: #multilingual
                                            for _lang, _langLabel in _labelsByLang.items():
                                                thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, _lang, XbrlConst.genStandardLabel] = _langLabel
                                        else: # non-multi-lingual labels
                                            thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, defaultLabelLang, XbrlConst.genStandardLabel] = _label
                                    enumElt = XmlUtil.addChild(restrElt, XbrlConst.xsd, facet, attributes=_attributes)
                                    if thisDoc.hasEnumerationDocumentation and _label:
                                        if _labelsByLang: #multilingual
                                            annotationElt = XmlUtil.addChild(enumElt, XbrlConst.xsd, "annotation")
                                            for _lang, _langLabel in _labelsByLang.items():
                                                thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, _lang, XbrlConst.genStandardLabel] = _langLabel
                                                XmlUtil.addChild(annotationElt, XbrlConst.xsd, "documentation", text=_langLabel,
                                                                 attributes={"{http://www.w3.org/XML/1998/namespace}lang": _lang})
                                        else: # non-multi-lingual labels
                                            XmlUtil.addChild(XmlUtil.addChild(enumElt, XbrlConst.xsd, "annotation"),
                                                             XbrlConst.xsd, "documentation", text=_label)
                    elif facet != "excludedEnumeration":
                        # ordinary single-valued facet (minLength, pattern, totalDigits, ...)
                        XmlUtil.addChild(restrElt, XbrlConst.xsd, facet, attributes={"value":str(facetValue)})
# add elements
for eltName, eltDef in sorted(thisDoc.extensionElements.items(), key=lambda item: item[0]):
eltAttrs, eltFacets = eltDef
if eltFacets and "type" in eltAttrs:
eltType = eltAttrs["type"]
del eltAttrs["type"]
if any(':' in attrname for attrname in eltAttrs.keys()): # fix up any prefixed attr names to be clark notation
for attrname, attrvalue in eltAttrs.copy().items():
if not attrname.startswith('{') and ':' in attrname:
del eltAttrs[attrname]
eltAttrs[schemaElt.prefixedNameQname(attrname).clarkNotation] = attrvalue
isConcept = eltAttrs.get('substitutionGroup') in (
"xbrli:item", "xbrli:tuple", "xbrldt:hypercubeItem", "xbrldt:dimensionItem")
elt = XmlUtil.addChild(schemaElt,
XbrlConst.xsd, "element",
attributes=eltAttrs)
if annotateElementDocumentation:
for labelRole in (XbrlConst.documentationLabel, XbrlConst.genDocumentationLabel):
labelKey = (thisDoc.extensionSchemaPrefix, eltAttrs["name"], defaultLabelLang, labelRole)
if labelKey in thisDoc.extensionLabels:
XmlUtil.addChild(XmlUtil.addChild(elt, XbrlConst.xsd, "annotation"),
XbrlConst.xsd, "documentation", text=thisDoc.extensionLabels[labelKey])
break # if std doc label found, don't continue to look for generic doc labe
if elt is not None and eltFacets and isConcept:
cmplxType = XmlUtil.addChild(elt, XbrlConst.xsd, "complexType")
cmplxCont = XmlUtil.addChild(cmplxType, XbrlConst.xsd, "simpleContent")
restrElt = XmlUtil.addChild(cmplxCont, XbrlConst.xsd, "restriction", attributes={"base": eltType})
addFacets(thisDoc, restrElt, eltFacets)
del eltType
for roleURI, (roleDefinition, usedOnRoles) in sorted(thisDoc.extensionRoles.items(), key=lambda rd: rd[1]):
roleElt = XmlUtil.addChild(appinfoElt, XbrlConst.link, "roleType",
attributes=(("roleURI", roleURI),
("id", "roleType_" + roleURI.rpartition("/")[2])))
if roleDefinition:
XmlUtil.addChild(roleElt, XbrlConst.link, "definition", text=roleDefinition)
if usedOnRoles:
for usedOnRole in usedOnRoles.split():
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text=usedOnRole)
else:
if hasPreLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in preLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:presentationLink")
if hasDefLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in defLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:definitionLink")
if hasCalLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in calLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:calculationLink")
if hasGenLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in genLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text=qname("{http://xbrl.org/2008/generic}genlink:link"))
# add role definitions (for discovery) and generic labels
if any(roleURI in thisDoc.extensionRoleLabels for roleURI in thisDoc.extensionRoles.keys()):
# add appinfo generic linkbase for gen labels
genLabLB = XmlUtil.addChild(appinfoElt, XbrlConst.link, "linkbase")
XmlUtil.addChild(genLabLB, XbrlConst.link, "roleRef",
attributes=(("roleURI", XbrlConst.genStandardLabel),
("{http://www.w3.org/1999/xlink}href", "http://www.xbrl.org/2008/generic-label.xsd
("{http://www.w3.org/1999/xlink}type", "simple")))
XmlUtil.addChild(genLabLB, XbrlConst.link, "arcroleRef",
attributes=(("arcroleURI", elementLabel),
("{http://www.w3.org/1999/xlink}href", "http://www.xbrl.org/2008/generic-label.xsd
("{http://www.w3.org/1999/xlink}type", "simple")))
linkElt = XmlUtil.addChild(genLabLB, qname("{http://xbrl.org/2008/generic}genlink:link"),
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
for roleURI, _defLabel in sorted(thisDoc.extensionRoles.items(), key=lambda rd: rd[0]):
if roleURI in thisDoc.extensionRoleLabels:
xlLabel = roleURI.rpartition("/")[2]
XmlUtil.addChild(linkElt, XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", "
("{http://www.w3.org/1999/xlink}label", "loc_" + xlLabel)))
XmlUtil.addChild(linkElt, XbrlConst.qnGenArc,
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", elementLabel),
("{http://www.w3.org/1999/xlink}from", "loc_" + xlLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + xlLabel)))
for (text, lang) in thisDoc.extensionRoleLabels[roleURI]:
XmlUtil.addChild(linkElt, qname("{http://xbrl.org/2008/label}genlabel:label"),
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + xlLabel),
("{http://www.w3.org/1999/xlink}role", XbrlConst.genStandardLabel),
("{http://www.w3.org/XML/1998/namespace}lang", lang)),
text=text)
def addLinkbaseRef(lbType, lbFilename, lbDoc):
role = "http://www.xbrl.org/2003/role/{0}LinkbaseRef".format(lbType)
lbRefElt = XmlUtil.addChild(appinfoElt, XbrlConst.link, "linkbaseRef",
attributes=(("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href",
docRelpath(lbFilename, thisDoc.extensionSchemaRelDirname)),
("{http://www.w3.org/1999/xlink}arcrole", "http://www.w3.org/1999/xlink/properties/linkbase"),
# generic label ref has no role
) + (() if lbType.startswith("generic") else
(("{http://www.w3.org/1999/xlink}role", role),))
)
if lbDoc: # provided for generated linbase refs
doc.referencesDocument[lbDoc] = ModelDocumentReference("href", lbRefElt)
# add referenced (not generated) linkbases
for lbRefType, filename, generate in thisDoc.linkbaseRefs:
if not generate:
# if linkbase is generated by another doc which isn't generated yet, generate it
for otherGenDoc in genDocs.values():
if not otherGenDoc.generated and any(
_otherLbRefType == lbRefType and _otherFilename == filename and _otherGenerate
for _otherLbRefType, _otherFilename, _otherGenerate in otherGenDoc.linkbaseRefs):
generateDoc(otherGenDoc, doc, visitedDocNames) # generate document
addLinkbaseRef(lbRefType, filename, None)
doc.schemaDiscover(schemaElt, False, thisDoc.extensionSchemaNamespaceURI)
# add types after include and import are discovered
# block creating any type which was previously provided by an include of the same namespace
for typeName, typeDef in sorted(thisDoc.extensionTypes.items(), key=lambda item: item[0]):
if qname(thisDoc.extensionSchemaNamespaceURI, typeName) in modelXbrl.qnameTypes:
continue # type already exists, don't duplicate
typeAttrs, typeFacets = typeDef
if typeName.endswith("ItemType") or typeAttrs.get("base", "").endswith("ItemType"):
cmplxType = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "complexType", attributes={"name": typeAttrs["name"]})
contElt = XmlUtil.addChild(cmplxType, XbrlConst.xsd, "simpleContent")
else:
contElt = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "simpleType", attributes={"name": typeAttrs["name"]})
restrElt = XmlUtil.addChild(contElt, XbrlConst.xsd, "restriction", attributes={"base": typeAttrs["base"]})
# remove duplicitous facets already in base type
baseQn = qname(schemaElt, typeAttrs.get("base"))
if typeFacets:
if baseQn and baseQn.namespaceURI not in (XbrlConst.xsd, XbrlConst.xbrli) and baseQn in modelXbrl.qnameTypes:
# remove duplicated facets of underlying type
baseTypeFacets = modelXbrl.qnameTypes[baseQn].facets or () # allow iteration if None
typeFacets = dict((facet, value)
for facet, value in typeFacets.items()
if facet not in baseTypeFacets or str(baseTypeFacets[facet]) != value)
addFacets(thisDoc, restrElt, typeFacets)
# find extension label roles, reference roles and parts
extLabelRoles = {}
extReferenceRoles = {}
extReferenceParts = {}
extReferenceSchemaDocs = {}
extUnrecognizedRoles = set()
relationshipArcroles = {}
relationshipArcqnames = {}
def setExtRefPart(partLocalName):
if partLocalName not in extReferenceParts:
for partConcept in modelXbrl.nameConcepts.get(partLocalName, ()):
if partConcept is not None and partConcept.subGroupHeadQname == qnLinkPart:
extReferenceParts[partLocalName] = partConcept.qname
extReferenceSchemaDocs[partConcept.qname.namespaceURI] = (
partConcept.modelDocument.uri if partConcept.modelDocument.uri.startswith("http://") else
partConcept.modelDocument.basename)
break
for _headerColKey in headerColsAllElrs:
if isinstance(_headerColKey, tuple) and len(_headerColKey) >= 3 and not _headerColKey[1].startswith("http://"):
_resourceType = _headerColKey[0]
_resourceRole = _headerColKey[1]
_resourceLangOrPart = _headerColKey[2]
elif isinstance(_headerColKey, str) and "!reference" in _headerColKey:
m = resourceParsePattern.match(_headerColKey.partition("!")[2])
_resourceType = m.group(1)
_resourceRole = "/" + m.group(2)
_resourceLangOrPart = m.group(4)
else:
continue
_resourceQName, _standardRoles = {
"label": (qnLinkLabel, standardLabelRoles),
"labels": (qnLinkLabel, standardLabelRoles),
"reference": (qnLinkReference, standardReferenceRoles),
"references": (qnLinkReference, standardReferenceRoles)
}.get(_resourceType, (None,()))
_resourceRoleURI = None
# find resource role
for _roleURI in _standardRoles:
if _roleURI.endswith(_resourceRole):
_resourceRoleURI = _roleURI
_resourceRoleMatchPart = _resourceRole
break
if _resourceRoleURI is None: # try custom roles
_resourceRoleMatchPart = _resourceRole.partition("
for _roleURI in modelXbrl.roleTypes:
if _roleURI.endswith(_resourceRoleMatchPart):
for _roleType in modelXbrl.roleTypes[_roleURI]:
if _resourceQName in _roleType.usedOns:
_resourceRoleURI = _roleURI
break
if _resourceType in ("label", "labels"):
if _resourceRoleURI:
extLabelRoles[_resourceRoleMatchPart] = _resourceRoleURI
elif any(_resourceRoleMatchPart == k[2] for k in thisDoc.extensionLabels.keys()):
modelXbrl.error("loadFromExcel:labelResourceRole",
"Label resource role not found: %(role)s",
modelXbrl=modelXbrl, role=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
elif _resourceType in ("reference", "references"):
if _resourceRoleURI:
extReferenceRoles[_resourceRoleMatchPart] = _resourceRoleURI
# find part QName
setExtRefPart(_resourceLangOrPart)
elif any(_resourceRoleMatchPart == k[2] for k in thisDoc.extensionReferences.keys()):
modelXbrl.error("loadFromExcel:referenceResourceRole",
"Reference resource role not found: %(role)s",
modelXbrl=modelXbrl, role=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
elif _resourceType == "relationship to":
for _arcroleURI in modelXbrl.arcroleTypes:
if _arcroleURI.endswith(_resourceRoleMatchPart):
for _arcroleType in modelXbrl.arcroleTypes[_arcroleURI]:
for _resourceQName in _arcroleType.usedOns:
break
break
if _resourceQName is None:
modelXbrl.error("loadFromExcel:relationshipArcrole",
"Relationship arcrole not found: %(arcrole)s",
modelXbrl=modelXbrl, arcrole=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
else:
relationshipArcroles[_resourceRoleMatchPart] = _arcroleURI
relationshipArcqnames[_arcroleURI] = _resourceQName
# label linkbase
for lbType, lang, filename in thisDoc.labelLinkbases:
thisDoc.thisLBdir = posixpath.dirname(filename)
langPattern = re.compile(lang or ".*")
_isGeneric = lbType.startswith("generic")
if _isGeneric and "http://xbrl.org/2008/label" not in modelXbrl.namespaceDocs:
# must pre-load generic linkbases in order to create properly typed elements (before discovery because we're creating elements by lxml)
ModelDocument.load(modelXbrl, "http://www.xbrl.org/2008/generic-link.xsd", isDiscovered=True)
ModelDocument.load(modelXbrl, "http://www.xbrl.org/2008/generic-label.xsd", isDiscovered=True)
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base="", initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}"
{}>{}</linkbase>
""".format("""
xmlns:genlink="http://xbrl.org/2008/generic"
xmlns:genlabel="http://xbrl.org/2008/label"
""" if _isGeneric else "",
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
http://xbrl.org/2008/label http://www.xbrl.org/2008/generic-label.xsd
""" if _isGeneric else "",
'\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
"""
<arcroleRef arcroleURI="http://xbrl.org/arcrole/2008/element-label" xlink:href="http://www.xbrl.org/2008/generic-label.xsd#element-label" xlink:type="simple"/>
""" if _isGeneric else ""),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
if isGenerateAndImport:
addLinkbaseRef(lbType, filename, lbDoc) # must be explicitly imported
lbElt = lbDoc.xmlRootElement
linkElt = XmlUtil.addChild(lbElt,
gen if _isGeneric else link,
"link" if _isGeneric else "labelLink",
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
firstLinkElt = linkElt
locs = set()
roleRefs = set()
for labelKey, text in thisDoc.extensionLabels.items():
prefix, name, labelLang, role = labelKey
labelLang = labelLang or defaultLabelLang
role = role.partition("#")[0] # remove # part
role = extLabelRoles.get(role, role) # get custom role, if any
if langPattern.match(labelLang) and _isGeneric == (role in (XbrlConst.genStandardLabel, XbrlConst.genDocumentationLabel)):
locLabel = prefix + "_" + name
if locLabel not in locs:
locs.add(locLabel)
XmlUtil.addChild(linkElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", LBHref(thisDoc, prefix, name)),
("{http://www.w3.org/1999/xlink}label", locLabel)))
XmlUtil.addChild(linkElt,
gen if _isGeneric else link,
"arc" if _isGeneric else "labelArc",
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", elementLabel if _isGeneric else conceptLabel),
("{http://www.w3.org/1999/xlink}from", locLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + locLabel),
("order", 1.0)))
XmlUtil.addChild(linkElt,
XbrlConst.genLabel if _isGeneric else XbrlConst.link,
"label",
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + locLabel),
("{http://www.w3.org/1999/xlink}role", role)) + (
(("{http://www.w3.org/XML/1998/namespace}lang", labelLang),)
if True or lang != saveXmlLang else ()),
text=text)
if role:
if role in XbrlConst.standardLabelRoles:
pass # no roleRef
elif role in modelXbrl.roleTypes:
roleType = modelXbrl.roleTypes[role][0]
roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
elif role.startswith("http://www.xbrl.org/2009/role/negated"):
roleRefs.add(("roleRef", role, "http://www.xbrl.org/lrr/role/negated-2009-12-16.xsd#" + role.rpartition("/")[2]))
else:
extUnrecognizedRoles.add(role)
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
XbrlConst.link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href", href)),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedLabelRole",
"Label roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
# reference linkbase
for lbType, referenceRole, filename in thisDoc.referenceLinkbases:
thisDoc.thisLBdir = posixpath.dirname(filename)
_isGeneric = lbType.startswith("generic")
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base="", initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}{}"
{}>{}</linkbase>
""".format("""
xmlns:genlink="http://xbrl.org/2008/generic"
xmlns:genreference="http://xbrl.org/2008/rerference"
""" if _isGeneric else "",
"".join([" {} {}".format(_ns, _uri) for _ns, _uri in extReferenceSchemaDocs.items()]),
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
http://xbrl.org/2008/reference http://www.xbrl.org/2008/generic-reference.xsd
""" if _isGeneric else "",
'\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
"""
<roleRef roleURI="http://www.xbrl.org/2008/role/label" xlink:href="http://www.xbrl.org/2008/generic-label.xsd#standard-label" xlink:type="simple"/>
<arcroleRef arcroleURI="http://xbrl.org/arcrole/2008/element-reference" xlink:href="http://xbrl.org/2008/generic-reference.xsd#element-reference" xlink:type="simple"/>
""" if _isGeneric else ""),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
if isGenerateAndImport:
addLinkbaseRef(lbType, filename, lbDoc) # must be explicitly imported
lbElt = lbDoc.xmlRootElement
linkElt = XmlUtil.addChild(lbElt,
XbrlConst.gen if _isGeneric else XbrlConst.link,
"link" if _isGeneric else "referenceLink",
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
firstLinkElt = linkElt
locs = set()
roleRefs = set()
undefinedReferenceParts = set()
for referenceKey, references in thisDoc.extensionReferences.items():
prefix, name, role = referenceKey
role = role.partition("#")[0] # remove # part
role = extReferenceRoles.get(role, role) # get custom role, if any
if fnmatch(role, referenceRole):
locLabel = prefix + "_" + name
# must use separate arcs with order to force Altova to display parts in order
if locLabel not in locs:
locs.add(locLabel)
order = 1
else:
for order in range(2,1000):
_locLabel = "{}_{}".format(locLabel, order)
if _locLabel not in locs:
locLabel = _locLabel
locs.add(locLabel)
break
if order > 999:
print("resource order de-duplicate failure, too many reference parts")
XmlUtil.addChild(linkElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", LBHref(thisDoc, prefix, name)),
("{http://www.w3.org/1999/xlink}label", locLabel)))
XmlUtil.addChild(linkElt,
XbrlConst.link, "referenceArc",
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", conceptReference),
("{http://www.w3.org/1999/xlink}from", locLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + locLabel),
("order", order)))
referenceResource = XmlUtil.addChild(linkElt,
XbrlConst.genReference if _isGeneric else XbrlConst.link,
"reference",
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + locLabel),
("{http://www.w3.org/1999/xlink}role", role)))
for part, text in references: # list to preserve desired order
setExtRefPart(part)
if part in extReferenceParts:
partQn = extReferenceParts.get(part, part) # get part QName if any
XmlUtil.addChild(referenceResource, partQn, text=text)
else:
undefinedReferenceParts.add(part)
if role:
if role in XbrlConst.standardLabelRoles:
pass # no roleRef
elif role in modelXbrl.roleTypes:
roleType = modelXbrl.roleTypes[role][0]
roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
elif role.startswith("http://www.xbrl.org/2009/role/negated"):
roleRefs.add(("roleRef", role, "http://www.xbrl.org/lrr/role/negated-2009-12-16.xsd#" + role.rpartition("/")[2]))
else:
extUnrecognizedRoles.add(role)
for part in sorted(undefinedReferenceParts):
print("reference part not defined: {}".format(part))
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
XbrlConst.link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href", href)),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedReferenceRole",
"Reference roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
prefixedNamespaces = modelXbrl.prefixedNamespaces
def hrefConcept(prefix, name):
qn = qname(prefixedNamespaces[prefix], name)
if qn in modelXbrl.qnameConcepts:
return modelXbrl.qnameConcepts[qn]
elif name in modelXbrl.nameConcepts: # prefix may be null or ambiguous to multiple documents, try concept local name
return modelXbrl.nameConcepts[name][0]
if prefix not in prefixedNamespaces:
modelXbrl.error("loadFromExcel:undefinedRelationshipElementPrefix",
"Prefix not defined: %(prefix)s",
modelXbrl=modelXbrl, prefix=prefix)
return None
modelXbrl.error("loadFromExcel:undefinedRelationshipElement",
"QName not defined: %(prefix)s:%(localName)s",
modelXbrl=modelXbrl, prefix=prefix, localName=name)
return None
def prefixedNameQName(prefixedName):
if ":" not in prefixedName:
return prefixedName
prefix, _sep, name = prefixedName.rpartition(":")
if prefix not in prefixedNamespaces:
modelXbrl.error("loadFromExcel:undefinedRelationshipAttributePrefix",
"Prefix not defined: %(prefix)s",
modelXbrl=modelXbrl, prefix=prefix)
return prefixedName
return QName(prefix, prefixedNamespaces[prefix], name)
    def lbTreeWalk(lbType, parentElt, lbStruct, roleRefs, dimDef=False, locs=None, arcsFromTo=None, fromPrefix=None, fromName=None):
        """Emit locators and arcs for one linkbase tree.

        Recursively walks lbStruct (a list of LBentry) adding extendedLink,
        loc and arc elements under parentElt, collecting role/arcrole
        references into roleRefs.  When dimDef is True only dimension-default
        relationships are generated (second pass over a definition linkbase).
        locs and arcsFromTo carry per-ELR state; fromPrefix/fromName identify
        the parent concept during recursion.
        """
        order = 1.0
        for lbEntry in lbStruct:
            if lbEntry.isELR:
                # ELR heading entry: open an extended link and recurse
                if not lbEntry.childStruct: # skip empty ELRs
                    continue
                role = "unspecified"
                if lbEntry.role and lbEntry.role.startswith("http://"): # have a role specified
                    role = lbEntry.role
                elif lbEntry.name: #may be a definition
                    for linkroleUri, modelRoleTypes in modelXbrl.roleTypes.items():
                        definition = modelRoleTypes[0].definition
                        if lbEntry.name == definition and linkroleUri in thisDoc.extensionRoles:
                            role = linkroleUri
                            break
                if role == "unspecified":
                    # don't generate for roles not for this schema
                    continue
                #
                #modelXbrl.error("loadFromExcel:linkRoleDefinition",
                #    "Link role has no definition: %(role)s",
                #    modelXbrl=modelXbrl, role=lbEntry.name, filename=thisDoc.extensionSchemaNamespaceURI)
                if role not in thisDoc.extensionRoles:
                    # don't generate for roles not for this schema
                    continue
                if role == XbrlConst.defaultLinkRole:
                    pass
                elif role in thisDoc.extensionRoles:
                    roleRefs.add(("roleRef", role, doc.uri + "#roleType_" + role.rpartition("/")[2]))
                elif role in modelXbrl.roleTypes: # add roleRef
                    roleType = modelRoleTypes[0]
                    roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
                else:
                    extUnrecognizedRoles.add(role)
                linkElt = XmlUtil.addChild(parentElt,
                    XbrlConst.gen if lbType == "generic" else XbrlConst.link,
                    "link" if lbType == "generic" else lbType + "Link",
                    attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
                                ("{http://www.w3.org/1999/xlink}role", role)))
                # fresh per-ELR locator and arc-duplication state
                locs = set()
                arcsFromTo = set()
                lbTreeWalk(lbType, linkElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo)
            else:
                # concept entry: emit a loc, then (unless root) an arc to it
                toPrefix = lbEntry.prefix
                toName = lbEntry.name
                toHref = LBHref(thisDoc, toPrefix, toName)
                if toHref is None:
                    modelXbrl.error("loadFromExcel:invalidQName",
                        "%(linkbase)s relationship element with prefix '%(prefix)s' localName '%(localName)s' not found",
                        modelXbrl=modelXbrl, linkbase=lbType, prefix=lbEntry.prefix, localName=lbEntry.name)
                    continue
                if not toPrefix and toName in modelXbrl.nameConcepts:
                    toPrefix = modelXbrl.nameConcepts[toName][0].qname.prefix
                toLabel = "{}_{}".format(toPrefix, toName)
                toLabelAlt = None
                if not lbEntry.isRoot:
                    if not fromPrefix and fromName in modelXbrl.nameConcepts:
                        fromPrefix = modelXbrl.nameConcepts[fromName][0].qname.prefix
                    fromLabel = "{}_{}".format(fromPrefix, fromName)
                    if (fromLabel, toLabel) in arcsFromTo:
                        # need extra loc to prevent arc from/to duplication in ELR
                        for i in range(1, 1000):
                            toLabelAlt = "{}_{}".format(toLabel, i)
                            if (fromLabel, toLabelAlt) not in arcsFromTo:
                                toLabel = toLabelAlt
                                break
                if (toHref not in locs or toLabelAlt) and not dimDef:
                    XmlUtil.addChild(parentElt,
                        XbrlConst.link, "loc",
                        attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
                                    ("{http://www.w3.org/1999/xlink}href", toHref),
                                    ("{http://www.w3.org/1999/xlink}label", toLabel)))
                    locs.add(toHref)
                if not lbEntry.isRoot:
                    arcsFromTo.add( (fromLabel, toLabel) )
                    # per-linkbase-type attributes carried on the arc
                    if lbType == "calculation" and lbEntry.weight is not None:
                        otherAttrs = ( ("weight", lbEntry.weight), )
                    elif lbType == "presentation" and lbEntry.role:
                        if not lbEntry.role.startswith("http://"):
                            # check if any defined labels for this role
                            _labelRoleMatchPart = "/" + lbEntry.role
                            for _roleURI in modelXbrl.roleTypes:
                                if _roleURI.endswith(_labelRoleMatchPart):
                                    for _roleType in modelXbrl.roleTypes[_roleURI]:
                                        if XbrlConst.qnLinkLabel in _roleType.usedOns:
                                            lbEntry.role = _roleURI
                                            break
                        if not lbEntry.role.startswith("http://"):
                            # default to built in label roles
                            lbEntry.role = "http://www.xbrl.org/2003/role/" + lbEntry.role
                        otherAttrs = ( ("preferredLabel", lbEntry.role), )
                        if lbEntry.role and lbEntry.role not in XbrlConst.standardLabelRoles:
                            if lbEntry.role in modelXbrl.roleTypes:
                                roleType = modelXbrl.roleTypes[lbEntry.role][0]
                                roleRefs.add(("roleRef", lbEntry.role, roleType.modelDocument.uri + "#" + roleType.id))
                            else:
                                extUnrecognizedRoles.add(lbEntry.role)
                    elif lbType == "generic" and lbEntry.arcrole:
                        if not lbEntry.arcrole.startswith("http://"):
                            # check if any defined labels for this role
                            for _arcroleURI in modelXbrl.arcroleTypes:
                                if _arcroleURI.endswith(lbEntry.arcrole):
                                    lbEntry.arcrole = _arcroleURI
                                    break
                        otherAttrs = tuple( (prefixedNameQName(_key), _value) # may need to process qname in key into clark name
                                            for _key, _value in (lbEntry.relAttrs.items() if lbEntry.relAttrs is not None else ()))
                    else:
                        otherAttrs = ( )
                    if lbEntry.arcrole == "_dimensions_": # pick proper consecutive arcrole
                        fromConcept = hrefConcept(fromPrefix, fromName)
                        toConcept = hrefConcept(toPrefix, toName)
                        if dimDef: # special case for default dimension
                            if lbEntry.role != "_dimensionDefault_" and not lbTreeHasDimDefault(lbEntry.childStruct):
                                continue # forget subtree, no default
                            if toConcept is not None and (toConcept.isDimensionItem or lbEntry.role == "_dimensionDefault_"):
                                if (toHref not in locs or toLabelAlt):
                                    XmlUtil.addChild(parentElt,
                                        XbrlConst.link, "loc",
                                        attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
                                                    ("{http://www.w3.org/1999/xlink}href", toHref),
                                                    ("{http://www.w3.org/1999/xlink}label", toLabel)))
                                    locs.add(toHref)
                                if lbEntry.role != "_dimensionDefault_":
                                    lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, toPrefix, toName)
                                else:
                                    XmlUtil.addChild(parentElt, XbrlConst.link, "definitionArc",
                                        attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
                                                    ("{http://www.w3.org/1999/xlink}arcrole", XbrlConst.dimensionDefault),
                                                    ("{http://www.w3.org/1999/xlink}from", fromLabel),
                                                    ("{http://www.w3.org/1999/xlink}to", toLabel),
                                                    ("order", order)) + otherAttrs )
                                    order += 1.0
                            else:
                                lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, fromPrefix, fromName)
                            continue
                        elif toConcept is not None and toConcept.isHypercubeItem:
                            arcrole = XbrlConst.all
                            otherAttrs += ( (XbrlConst.qnXbrldtContextElement, "segment"),
                                            (qnXbrldtClosed, "true") )
                        elif toConcept is not None and toConcept.isDimensionItem:
                            arcrole = XbrlConst.hypercubeDimension
                        elif fromConcept is not None and fromConcept.isDimensionItem:
                            arcrole = XbrlConst.dimensionDomain
                        else:
                            arcrole = XbrlConst.domainMember
                    else:
                        arcrole = lbEntry.arcrole
                    # choose arc element QName: custom relationship arc,
                    # generic arc, or the standard <lbType>Arc element
                    if arcrole in relationshipArcqnames:
                        arcqname = relationshipArcqnames[arcrole]
                        arcNS = arcqname.namespaceURI
                        arcLocalname = arcqname.localName
                    elif lbType == "generic":
                        arcNS = XbrlConst.gen
                        arcLocalname = "arc"
                    else:
                        arcNS = XbrlConst.link
                        arcLocalname = lbType + "Arc"
                    XmlUtil.addChild(parentElt,
                        arcNS, arcLocalname,
                        attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
                                    ("{http://www.w3.org/1999/xlink}arcrole", arcrole),
                                    ("{http://www.w3.org/1999/xlink}from", fromLabel),
                                    ("{http://www.w3.org/1999/xlink}to", toLabel),
                                    ("order", order)) + otherAttrs )
                    order += 1.0
                if lbType != "calculation" or lbEntry.isRoot:
                    lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, toPrefix, toName)
def lbTreeHasDimDefault(lbStruct):
for lbEntry in lbStruct:
if lbEntry.isELR:
if not lbEntry.childStruct:
continue
if lbTreeHasDimDefault(lbEntry.childStruct):
return True
else:
if not lbEntry.isRoot and (lbEntry.arcrole == "_dimensions_" and lbEntry.role == "_dimensionDefault_"):
return True
if lbTreeHasDimDefault(lbEntry.childStruct):
return True
return False
for hasLB, lbType, lbLB in ((hasPreLB and thisDoc.hasPreLB, "presentation", preLB),
(hasDefLB and thisDoc.hasDefLB, "definition", defLB),
(hasCalLB and thisDoc.hasCalLB, "calculation", calLB),
(hasGenLB and thisDoc.hasGenLB, "generic", genLB)):
if hasLB:
for lbRefType, filename, generate in thisDoc.linkbaseRefs:
thisDoc.thisLBdir = posixpath.dirname(filename)
if generate and lbType == lbRefType:
# output presentation linkbase
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base='', initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}"
/>
""".format("""
xmlns:generic="http://xbrl.org/2008/generic"
""" if lbType == "generic" else "",
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
""" if lbType == "generic" else ""
),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
addLinkbaseRef(lbRefType, filename, lbDoc)
lbElt = lbDoc.xmlRootElement
roleRefs = set()
if lbType == "definition":
roleRefs.update((("arcroleRef", XbrlConst.all, "http://www.xbrl.org/2005/xbrldt-2005.xsd#all"),
("arcroleRef", XbrlConst.dimensionDefault, "http://www.xbrl.org/2005/xbrldt-2005.xsd#dimension-default"),
("arcroleRef", XbrlConst.dimensionDomain, "http://www.xbrl.org/2005/xbrldt-2005.xsd#dimension-domain"),
("arcroleRef", XbrlConst.domainMember, "http://www.xbrl.org/2005/xbrldt-2005.xsd#domain-member"),
("arcroleRef", XbrlConst.hypercubeDimension, "http://www.xbrl.org/2005/xbrldt-2005.xsd#hypercube-dimension")))
elif lbType == "generic":
for _arcroleURI in relationshipArcroles.values():
for _arcroleType in modelXbrl.arcroleTypes[_arcroleURI]:
roleRefs.add(("arcroleRef", _arcroleURI, _arcroleType.modelDocument.uri + "#" + _arcroleType.id))
break
lbTreeWalk(lbType, lbElt, lbLB, roleRefs)
if lbType == "definition" and lbTreeHasDimDefault(lbLB):
lbTreeWalk(lbType, lbElt, lbLB, roleRefs, dimDef=True) # second tree walk for any dimension-defaults
firstLinkElt = None
for firstLinkElt in lbElt.iterchildren():
break
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href",
docRelpath(href, thisDoc.thisLBdir))),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
break
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedRole",
"%(lbType)s linkbase roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, lbType=lbType, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
visitedDocNames.pop()
    def LBHref(thisDoc, prefix, name):
        """Build the xlink:href for a linkbase locator to concept prefix:name.

        Resolves which document declares the concept — the extension schema,
        an imported schema, another to-be-generated workbook document (which
        is generated on demand), or any already-loaded document — and returns
        "relative-path#prefix_name", or None when the concept can't be found.
        """
        if not prefix and name in modelXbrl.nameConcepts:
            # no prefix given: take the first concept with this local name
            _concept = modelXbrl.nameConcepts[name][0]
            filename = _concept.modelDocument.uri
            prefix = _concept.qname.prefix
        elif prefix == thisDoc.extensionSchemaPrefix:
            filename = thisDoc.extensionSchemaFilename
        elif prefix in thisDoc.importFilenames:
            filename = thisDoc.importFilenames[prefix]
        elif prefix in genDocs:
            # concept lives in another workbook-defined schema document
            doc = genDocs[prefix]
            if not doc.generated:
                # try to load recursively
                generateDoc(doc, thisDoc)
            if doc.generated:
                filename = doc.extensionSchemaFilename
            else:
                return None
        elif name in modelXbrl.nameConcepts:
            # search loaded concepts for one matching both prefix and name
            filename = None
            for _concept in modelXbrl.nameConcepts[name]:
                if prefix == _concept.qname.prefix:
                    filename = _concept.modelDocument.uri
                    break
            if not filename:
                return None
        else:
            return None
        return "{0}#{1}_{2}".format(docRelpath(filename, thisDoc.thisLBdir), prefix, name)
for thisDoc in genOrder:
if not thisDoc.generated:
generateDoc(thisDoc, None, [])
#cntlr.addToLog("Completed in {0:.2} secs".format(time.time() - startedAt),
# messageCode="loadFromExcel:info")
if priorCWD:
os.chdir(priorCWD) # restore prior current working directory
return modelXbrl.modelDocument
def isExcelPath(filepath):
    """Return True if *filepath* has an Excel workbook extension
    (.xlsx, .xls or .xlsm), judged by file name only (case sensitive)."""
    _root, extension = os.path.splitext(filepath)
    return extension in {".xlsx", ".xls", ".xlsm"}
def isExcelLoadable(modelXbrl, mappedUri, normalizedUri, filepath, **kwargs):
    # Plug-in hook (ModelDocument.IsPullLoadable): a file is loadable by this
    # plug-in iff its extension is an Excel workbook extension; the other
    # arguments are part of the hook signature but unused here.
    return isExcelPath(filepath)
def excelLoaderFilingStart(cntlr, options, filesource, entrypointFiles, *args, **kwargs):
    # Plug-in hook (CntlrCmdLine.Filing.Start): copy this plug-in's command
    # line options into module-level globals consulted while loading the
    # workbook.  ensure_value supplies the default when the option is absent.
    global excludeDesignatedEnumerations, annotateEnumerationsDocumentation, annotateElementDocumentation, saveXmlLang
    excludeDesignatedEnumerations = options.ensure_value("excludeDesignatedEnumerations", False)
    annotateEnumerationsDocumentation = options.ensure_value("annotateEnumerationsDocumentation", False)
    annotateElementDocumentation = options.ensure_value("annotateElementDocumentation", False)
    saveXmlLang = options.ensure_value("saveLang", None)
def excelLoader(modelXbrl, mappedUri, filepath, *args, **kwargs):
    """Plug-in pull-loader hook: load an XBRL DTS from an Excel workbook.

    Returns the generated model document, or None when the file is not an
    Excel workbook or could not be loaded.  Marks the model with
    loadedFromExcel so later hooks can offer to save the generated DTS.
    """
    if isExcelLoadable(modelXbrl, mappedUri, None, filepath):
        cntlr = modelXbrl.modelManager.cntlr
        cntlr.showStatus(_("Loading Excel file: {0}").format(os.path.basename(filepath)))
        doc = loadFromExcel(cntlr, modelXbrl, filepath, mappedUri)
        if doc is not None:
            modelXbrl.loadedFromExcel = True
            return doc
    return None # not an OIM file
def saveDts(cntlr, modelXbrl, outputDtsDir):
    """Save the DTS generated from an Excel workbook into outputDtsDir.

    Documents created by the Excel loader (schemas plus their in-DTS
    linkbases) are serialized under outputDtsDir preserving their relative
    paths; other locally-referenced files are copied alongside when absent.
    """
    from arelle import ModelDocument
    import shutil
    excelFileDir = os.path.dirname(modelXbrl.fileSource.url)
    def saveToFile(url):
        # map a document url to a path under outputDtsDir (absolute urls are
        # returned unchanged), creating intermediate directories as needed
        if os.path.isabs(url):
            return url
        filepath = os.path.join(outputDtsDir, url)
        os.makedirs(os.path.dirname(filepath), exist_ok=True)
        return filepath
    # save generated schema and their linkbases
    for doc in modelXbrl.urlDocs.values():
        if getattr(doc, "loadedFromExcel", False):
            doc.save(saveToFile(doc.uri), updateFileHistory=False)
            cntlr.showStatus(_("Saving XBRL DTS: {0}").format(os.path.basename(doc.uri)))
            for refDoc in doc.referencesDocument.keys():
                if refDoc.inDTS:
                    if refDoc.type == ModelDocument.Type.LINKBASE:
                        cntlr.showStatus(_("Saving XBRL DTS: {0}").format(os.path.basename(refDoc.uri)))
                        refDoc.save(saveToFile(refDoc.uri), updateFileHistory=False)
        elif not (UrlUtil.isAbsolute(doc.uri) or os.path.isabs(doc.uri) or outputDtsDir == excelFileDir):
            # not generated by the loader: copy the locally-referenced file
            # into the output directory if it isn't already there
            srcfile = os.path.join(excelFileDir, doc.uri)
            destfile = saveToFile(doc.uri)
            if os.path.exists(srcfile):
                if not os.path.exists(destfile):
                    shutil.copyfile(srcfile, destfile)
            else:
                modelXbrl.error("loadFromExcel:missingReference",
                                "Missing source file to copy to output DTS directory: %(missingFile)s",
                                modelXbrl=modelXbrl, missingFile=doc.uri)
def guiXbrlLoaded(cntlr, modelXbrl, attach, *args, **kwargs):
    """GUI hook: after a workbook-loaded model is attached, prompt for an
    output directory and save the generated DTS there."""
    if cntlr.hasGui and getattr(modelXbrl, "loadedFromExcel", False):
        from tkinter.filedialog import askdirectory
        outputDtsDir = askdirectory(parent=cntlr.parent,
                                    initialdir=cntlr.config.setdefault("outputDtsDir","."),
                                    title='Please select a directory for output DTS Contents')
        if outputDtsDir:
            # Persist the choice only when the user actually picked a
            # directory: a cancelled dialog returns "" and previously
            # clobbered the remembered outputDtsDir setting.
            cntlr.config["outputDtsDir"] = outputDtsDir
            cntlr.saveConfig()
            saveDts(cntlr, modelXbrl, outputDtsDir)
        cntlr.showStatus(_("Excel loading completed"), 3500)
def cmdLineXbrlLoaded(cntlr, options, modelXbrl, *args, **kwargs):
    """Command-line hook: save the generated DTS when the model came from an
    Excel workbook and --save-Excel-DTS-directory was supplied."""
    outputDtsDir = options.saveExcelDTSdirectory
    if outputDtsDir and getattr(modelXbrl, "loadedFromExcel", False):
        saveDts(cntlr, modelXbrl, outputDtsDir)
def excelLoaderOptionExtender(parser, *args, **kwargs):
    """Plug-in hook: register this plug-in's command line options.

    Fixes copy-pasted help texts: the three boolean options previously all
    reused the --save-Excel-DTS-directory help string.
    """
    parser.add_option("--save-Excel-DTS-directory",
                      action="store",
                      dest="saveExcelDTSdirectory",
                      help=_("Save a DTS loaded from Excel into this directory."))
    parser.add_option("--exclude-designated-enumerations",
                      action="store_true",
                      dest="excludeDesignatedEnumerations",
                      help=_("Exclude designated enumerations from a DTS loaded from Excel."))
    parser.add_option("--annotate-enumerations-documentation",
                      action="store_true",
                      dest="annotateEnumerationsDocumentation",
                      help=_("Annotate enumerations into element documentation of a DTS loaded from Excel."))
    parser.add_option("--annotate-element-documentation",
                      action="store_true",
                      dest="annotateElementDocumentation",
                      help=_("Annotate element documentation of a DTS loaded from Excel."))
    parser.add_option("--save-lang",
                      action="store",
                      dest="saveLang",
                      help=_("Save an xml:lang on top level elements (schema, linkbase)."))
class LBentry:
    """One node of a linkbase tree built while reading the workbook.

    An entry is either an extended link role heading, a root concept, or a
    child concept; the kind is multiplexed into the ``arcrole`` slot
    ("_ELR_", "_root_", or the relationship arcrole).  For calculation
    (summation-item) entries the ``role`` slot carries the weight.
    """
    # __slots__: many instances are created per workbook; avoid per-instance dicts
    __slots__ = ("prefix", "name", "arcrole", "role", "childStruct", "preferredLabel", "relAttrs")
    def __init__(self, prefix=None, name=None, arcrole=None, role=None, weight=None,
                 isELR=False, isRoot=False, childStruct=None, preferredLabel=None, relAttrs=None):
        if childStruct is not None:
            self.childStruct = childStruct
        else:
            self.childStruct = []  # nested LBentry children
        self.prefix = prefix
        self.name = name
        # encode the entry kind into the arcrole slot
        if isELR:
            self.arcrole = "_ELR_"
        elif isRoot:
            self.arcrole = "_root_"
        else:
            self.arcrole = arcrole
        if weight is not None: # summationItem
            self.role = weight
        else:
            self.role = role # resource role, or "default" if concept is a default dimension
        self.preferredLabel = preferredLabel
        self.relAttrs = relAttrs
    @property
    def isELR(self):
        # True for an extended link role heading entry
        return self.arcrole == "_ELR_"
    @property
    def isRoot(self):
        # True for a root concept entry of an ELR
        return self.arcrole == "_root_"
    @property
    def weight(self):
        # calculation weight (kept in the role slot), None for other arcroles
        if self.arcrole == summationItem:
            return self.role
        return None
    def __repr__(self):
        return "LBentry(prefix={},name={})".format(self.prefix,self.name)
# Arelle plug-in registration: metadata plus the hook (mount point) functions
# this module contributes.
__pluginInfo__ = {
    'name': 'Load From Excel',
    'version': '1.02',
    'description': "This plug-in loads XBRL from Excel and saves the resulting XBRL DTS.",
    'license': 'Apache-2',
    'author': 'Mark V Systems Limited',
    'copyright': '(c) Copyright 2013-2017 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    'ModelDocument.IsPullLoadable': isExcelLoadable,
    'ModelDocument.PullLoader': excelLoader,
    'CntlrWinMain.Xbrl.Loaded': guiXbrlLoaded,
    'CntlrCmdLine.Filing.Start': excelLoaderFilingStart,
    'CntlrCmdLine.Options': excelLoaderOptionExtender,
    'CntlrCmdLine.Xbrl.Loaded': cmdLineXbrlLoaded
}
| true | true |
7901e9e62fed81e76333d88028ff97cf25b274f1 | 25,423 | py | Python | tests/test_queryset.py | spacemanspiff2007/tortoise-orm | 2591bd1cae75236779c21be559bf191fcc41ffe2 | [
"Apache-2.0"
] | null | null | null | tests/test_queryset.py | spacemanspiff2007/tortoise-orm | 2591bd1cae75236779c21be559bf191fcc41ffe2 | [
"Apache-2.0"
] | null | null | null | tests/test_queryset.py | spacemanspiff2007/tortoise-orm | 2591bd1cae75236779c21be559bf191fcc41ffe2 | [
"Apache-2.0"
] | null | null | null | from tests.testmodels import Event, IntFields, MinRelation, Node, Reporter, Team, Tournament, Tree
from tortoise import Tortoise
from tortoise.contrib import test
from tortoise.exceptions import (
DoesNotExist,
FieldError,
IntegrityError,
MultipleObjectsReturned,
ParamsError,
)
from tortoise.expressions import F, RawSQL, Subquery
# TODO: Test the many exceptions in QuerySet
# TODO: .filter(intnum_null=None) does not work as expected
class TestQueryset(test.TestCase):
    async def asyncSetUp(self):
        # Create 30 IntFields rows with intnum = 10, 13, ..., 97 and cache
        # the "models" db connection for SQL-dialect checks.
        await super().asyncSetUp()
        # Build large dataset
        self.intfields = [await IntFields.create(intnum=val) for val in range(10, 100, 3)]
        self.db = Tortoise.get_connection("models")
    async def test_all_count(self):
        # count() over the whole table and over a filter matching nothing
        self.assertEqual(await IntFields.all().count(), 30)
        self.assertEqual(await IntFields.filter(intnum_null=80).count(), 0)
    async def test_exists(self):
        # exists() for equality and range filters (smallest intnum is 10)
        ret = await IntFields.filter(intnum=0).exists()
        self.assertFalse(ret)
        ret = await IntFields.filter(intnum=10).exists()
        self.assertTrue(ret)
        ret = await IntFields.filter(intnum__gt=10).exists()
        self.assertTrue(ret)
        ret = await IntFields.filter(intnum__lt=10).exists()
        self.assertFalse(ret)
    async def test_limit_count(self):
        # count() respects limit()
        self.assertEqual(await IntFields.all().limit(10).count(), 10)
    async def test_limit_negative(self):
        # negative limit raises ParamsError
        with self.assertRaisesRegex(ParamsError, "Limit should be non-negative number"):
            await IntFields.all().limit(-10)
    async def test_offset_count(self):
        # count() respects offset()
        self.assertEqual(await IntFields.all().offset(10).count(), 20)
    async def test_offset_negative(self):
        # negative offset raises ParamsError
        with self.assertRaisesRegex(ParamsError, "Offset should be non-negative number"):
            await IntFields.all().offset(-10)
    async def test_join_count(self):
        # count() across a foreign-key join filter
        tour = await Tournament.create(name="moo")
        await MinRelation.create(tournament=tour)
        self.assertEqual(await MinRelation.all().count(), 1)
        self.assertEqual(await MinRelation.filter(tournament__id=tour.id).count(), 1)
    async def test_modify_dataset(self):
        # bulk update() returns affected row count; NULL filtering via isnull/None
        # Modify dataset
        rows_affected = await IntFields.filter(intnum__gte=70).update(intnum_null=80)
        self.assertEqual(rows_affected, 10)
        self.assertEqual(await IntFields.filter(intnum_null=80).count(), 10)
        self.assertEqual(await IntFields.filter(intnum_null__isnull=True).count(), 20)
        await IntFields.filter(intnum_null__isnull=True).update(intnum_null=-1)
        self.assertEqual(await IntFields.filter(intnum_null=None).count(), 0)
        self.assertEqual(await IntFields.filter(intnum_null=-1).count(), 20)
    async def test_distinct(self):
        # distinct() with values()/values_list() after splitting the data into two groups
        # Test distinct
        await IntFields.filter(intnum__gte=70).update(intnum_null=80)
        await IntFields.filter(intnum_null__isnull=True).update(intnum_null=-1)
        self.assertEqual(
            await IntFields.all()
            .order_by("intnum_null")
            .distinct()
            .values_list("intnum_null", flat=True),
            [-1, 80],
        )
        self.assertEqual(
            await IntFields.all().order_by("intnum_null").distinct().values("intnum_null"),
            [{"intnum_null": -1}, {"intnum_null": 80}],
        )
    async def test_limit_offset_values_list(self):
        # limit/offset/order_by combinations with values_list(flat=True)
        # Test limit/offset/ordering values_list
        self.assertEqual(
            await IntFields.all().order_by("intnum").limit(10).values_list("intnum", flat=True),
            [10, 13, 16, 19, 22, 25, 28, 31, 34, 37],
        )
        self.assertEqual(
            await IntFields.all()
            .order_by("intnum")
            .limit(10)
            .offset(10)
            .values_list("intnum", flat=True),
            [40, 43, 46, 49, 52, 55, 58, 61, 64, 67],
        )
        self.assertEqual(
            await IntFields.all()
            .order_by("intnum")
            .limit(10)
            .offset(20)
            .values_list("intnum", flat=True),
            [70, 73, 76, 79, 82, 85, 88, 91, 94, 97],
        )
        self.assertEqual(
            await IntFields.all()
            .order_by("intnum")
            .limit(10)
            .offset(30)
            .values_list("intnum", flat=True),
            [],
        )
        self.assertEqual(
            await IntFields.all().order_by("-intnum").limit(10).values_list("intnum", flat=True),
            [97, 94, 91, 88, 85, 82, 79, 76, 73, 70],
        )
        self.assertEqual(
            await IntFields.all()
            .order_by("intnum")
            .limit(10)
            .filter(intnum__gte=40)
            .values_list("intnum", flat=True),
            [40, 43, 46, 49, 52, 55, 58, 61, 64, 67],
        )
    async def test_limit_offset_values(self):
        # limit/offset/order_by combinations with values()
        # Test limit/offset/ordering values
        self.assertEqual(
            await IntFields.all().order_by("intnum").limit(5).values("intnum"),
            [{"intnum": 10}, {"intnum": 13}, {"intnum": 16}, {"intnum": 19}, {"intnum": 22}],
        )
        self.assertEqual(
            await IntFields.all().order_by("intnum").limit(5).offset(10).values("intnum"),
            [{"intnum": 40}, {"intnum": 43}, {"intnum": 46}, {"intnum": 49}, {"intnum": 52}],
        )
        self.assertEqual(
            await IntFields.all().order_by("intnum").limit(5).offset(30).values("intnum"), []
        )
        self.assertEqual(
            await IntFields.all().order_by("-intnum").limit(5).values("intnum"),
            [{"intnum": 97}, {"intnum": 94}, {"intnum": 91}, {"intnum": 88}, {"intnum": 85}],
        )
        self.assertEqual(
            await IntFields.all()
            .order_by("intnum")
            .limit(5)
            .filter(intnum__gte=40)
            .values("intnum"),
            [{"intnum": 40}, {"intnum": 43}, {"intnum": 46}, {"intnum": 49}, {"intnum": 52}],
        )
    async def test_in_bulk(self):
        # in_bulk() returns a pk -> instance mapping preserving the given order
        id_list = [item.pk for item in await IntFields.all().only("id").limit(2)]
        ret = await IntFields.in_bulk(id_list=id_list)
        self.assertEqual(list(ret.keys()), id_list)
    async def test_first(self):
        # first() on matching and non-matching filters, also via values()/values_list()
        # Test first
        self.assertEqual(
            (await IntFields.all().order_by("intnum").filter(intnum__gte=40).first()).intnum, 40
        )
        self.assertEqual(
            (await IntFields.all().order_by("intnum").filter(intnum__gte=40).first().values())[
                "intnum"
            ],
            40,
        )
        self.assertEqual(
            (await IntFields.all().order_by("intnum").filter(intnum__gte=40).first().values_list())[
                1
            ],
            40,
        )
        self.assertEqual(
            await IntFields.all().order_by("intnum").filter(intnum__gte=400).first(), None
        )
        self.assertEqual(
            await IntFields.all().order_by("intnum").filter(intnum__gte=400).first().values(), None
        )
        self.assertEqual(
            await IntFields.all().order_by("intnum").filter(intnum__gte=400).first().values_list(),
            None,
        )
    async def test_get_or_none(self):
        # get_or_none(): single match, no match (None), multiple matches raise
        self.assertEqual((await IntFields.all().get_or_none(intnum=40)).intnum, 40)
        self.assertEqual((await IntFields.all().get_or_none(intnum=40).values())["intnum"], 40)
        self.assertEqual((await IntFields.all().get_or_none(intnum=40).values_list())[1], 40)
        self.assertEqual(
            await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400), None
        )
        self.assertEqual(
            await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400).values(), None
        )
        self.assertEqual(
            await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400).values_list(),
            None,
        )
        with self.assertRaises(MultipleObjectsReturned):
            await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40)
        with self.assertRaises(MultipleObjectsReturned):
            await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40).values()
        with self.assertRaises(MultipleObjectsReturned):
            await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40).values_list()
    async def test_get(self):
        # get(): single match, DoesNotExist, MultipleObjectsReturned — via
        # queryset and model-class entry points, plain and values()/values_list()
        await IntFields.filter(intnum__gte=70).update(intnum_null=80)
        # Test get
        self.assertEqual((await IntFields.all().get(intnum=40)).intnum, 40)
        self.assertEqual((await IntFields.all().get(intnum=40).values())["intnum"], 40)
        self.assertEqual((await IntFields.all().get(intnum=40).values_list())[1], 40)
        self.assertEqual((await IntFields.all().all().all().all().all().get(intnum=40)).intnum, 40)
        self.assertEqual(
            (await IntFields.all().all().all().all().all().get(intnum=40).values())["intnum"], 40
        )
        self.assertEqual(
            (await IntFields.all().all().all().all().all().get(intnum=40).values_list())[1], 40
        )
        self.assertEqual((await IntFields.get(intnum=40)).intnum, 40)
        self.assertEqual((await IntFields.get(intnum=40).values())["intnum"], 40)
        self.assertEqual((await IntFields.get(intnum=40).values_list())[1], 40)
        with self.assertRaises(DoesNotExist):
            await IntFields.all().get(intnum=41)
        with self.assertRaises(DoesNotExist):
            await IntFields.all().get(intnum=41).values()
        with self.assertRaises(DoesNotExist):
            await IntFields.all().get(intnum=41).values_list()
        with self.assertRaises(DoesNotExist):
            await IntFields.get(intnum=41)
        with self.assertRaises(DoesNotExist):
            await IntFields.get(intnum=41).values()
        with self.assertRaises(DoesNotExist):
            await IntFields.get(intnum=41).values_list()
        with self.assertRaises(MultipleObjectsReturned):
            await IntFields.all().get(intnum_null=80)
        with self.assertRaises(MultipleObjectsReturned):
            await IntFields.all().get(intnum_null=80).values()
        with self.assertRaises(MultipleObjectsReturned):
            await IntFields.all().get(intnum_null=80).values_list()
        with self.assertRaises(MultipleObjectsReturned):
            await IntFields.get(intnum_null=80)
        with self.assertRaises(MultipleObjectsReturned):
            await IntFields.get(intnum_null=80).values()
        with self.assertRaises(MultipleObjectsReturned):
            await IntFields.get(intnum_null=80).values_list()
    async def test_delete(self):
        # instance delete() plus bulk queryset delete() with affected-row count
        # Test delete
        await (await IntFields.get(intnum=40)).delete()
        with self.assertRaises(DoesNotExist):
            await IntFields.get(intnum=40)
        self.assertEqual(await IntFields.all().count(), 29)
        rows_affected = (
            await IntFields.all().order_by("intnum").limit(10).filter(intnum__gte=70).delete()
        )
        self.assertEqual(rows_affected, 10)
        self.assertEqual(await IntFields.all().count(), 19)
    @test.requireCapability(support_update_limit_order_by=True)
    async def test_delete_limit(self):
        # DELETE with LIMIT (dialect-dependent capability)
        await IntFields.all().limit(1).delete()
        self.assertEqual(await IntFields.all().count(), 29)
    @test.requireCapability(support_update_limit_order_by=True)
    async def test_delete_limit_order_by(self):
        # DELETE with LIMIT + ORDER BY removes the highest-id row (intnum 97)
        await IntFields.all().limit(1).order_by("-id").delete()
        self.assertEqual(await IntFields.all().count(), 29)
        with self.assertRaises(DoesNotExist):
            await IntFields.get(intnum=97)
    async def test_async_iter(self):
        # async iteration over a queryset yields every row exactly once
        counter = 0
        async for _ in IntFields.all():
            counter += 1
        self.assertEqual(await IntFields.all().count(), counter)
    async def test_update_basic(self):
        # update() changes only the named field; others keep their values
        obj0 = await IntFields.create(intnum=2147483647)
        await IntFields.filter(id=obj0.id).update(intnum=2147483646)
        obj = await IntFields.get(id=obj0.id)
        self.assertEqual(obj.intnum, 2147483646)
        self.assertEqual(obj.intnum_null, None)
    async def test_update_f_expression(self):
        # update() with an F() arithmetic expression evaluated server-side
        obj0 = await IntFields.create(intnum=2147483647)
        await IntFields.filter(id=obj0.id).update(intnum=F("intnum") - 1)
        obj = await IntFields.get(id=obj0.id)
        self.assertEqual(obj.intnum, 2147483646)
    async def test_update_badparam(self):
        # update() with an unknown field name raises FieldError
        obj0 = await IntFields.create(intnum=2147483647)
        with self.assertRaisesRegex(FieldError, "Unknown keyword argument"):
            await IntFields.filter(id=obj0.id).update(badparam=1)
    async def test_update_pk(self):
        # updating the primary key is rejected with IntegrityError
        obj0 = await IntFields.create(intnum=2147483647)
        with self.assertRaisesRegex(IntegrityError, "is PK and can not be updated"):
            await IntFields.filter(id=obj0.id).update(id=1)
    async def test_update_virtual(self):
        # virtual (relational) fields cannot be set through update()
        tour = await Tournament.create(name="moo")
        obj0 = await MinRelation.create(tournament=tour)
        with self.assertRaisesRegex(FieldError, "is virtual and can not be updated"):
            await MinRelation.filter(id=obj0.id).update(participants=[])
    async def test_bad_ordering(self):
        # order_by() on an unknown field raises FieldError
        with self.assertRaisesRegex(FieldError, "Unknown field moo1fip for model IntFields"):
            await IntFields.all().order_by("moo1fip")
    async def test_duplicate_values(self):
        # values() rejects a field listed twice
        with self.assertRaisesRegex(FieldError, "Duplicate key intnum"):
            await IntFields.all().values("intnum", "intnum")
    async def test_duplicate_values_list(self):
        # values_list() tolerates duplicate fields — asserts no exception raised
        await IntFields.all().values_list("intnum", "intnum")
    async def test_duplicate_values_kw(self):
        # values() rejects a positional field duplicated by a keyword alias
        with self.assertRaisesRegex(FieldError, "Duplicate key intnum"):
            await IntFields.all().values("intnum", intnum="intnum_null")
    async def test_duplicate_values_kw_badmap(self):
        # values() keyword alias mapping to an unknown field raises FieldError
        with self.assertRaisesRegex(FieldError, 'Unknown field "intnum2" for model "IntFields"'):
            await IntFields.all().values(intnum="intnum2")
    async def test_bad_values(self):
        # values() with an unknown field raises FieldError
        with self.assertRaisesRegex(FieldError, 'Unknown field "int2num" for model "IntFields"'):
            await IntFields.all().values("int2num")
    async def test_bad_values_list(self):
        # values_list() with an unknown field raises FieldError
        with self.assertRaisesRegex(FieldError, 'Unknown field "int2num" for model "IntFields"'):
            await IntFields.all().values_list("int2num")
    async def test_many_flat_values_list(self):
        # flat=True is only valid with exactly one field
        with self.assertRaisesRegex(
            TypeError, "You can flat value_list only if contains one field"
        ):
            await IntFields.all().values_list("intnum", "intnum_null", flat=True)
    async def test_all_flat_values_list(self):
        # flat=True without naming a field is also rejected
        with self.assertRaisesRegex(
            TypeError, "You can flat value_list only if contains one field"
        ):
            await IntFields.all().values_list(flat=True)
    async def test_all_values_list(self):
        # values_list() with no fields returns all columns as tuples
        data = await IntFields.all().order_by("id").values_list()
        self.assertEqual(data[2], (self.intfields[2].id, 16, None))
    async def test_all_values(self):
        # values() with no fields returns all columns as dicts
        data = await IntFields.all().order_by("id").values()
        self.assertEqual(data[2], {"id": self.intfields[2].id, "intnum": 16, "intnum_null": None})
    async def test_order_by_bad_value(self):
        # order_by() on an unknown field raises even when combined with values_list()
        with self.assertRaisesRegex(FieldError, "Unknown field badid for model IntFields"):
            await IntFields.all().order_by("badid").values_list()
    async def test_annotate_order_expression(self):
        # annotate() with an F() expression can be ordered on and selected
        data = (
            await IntFields.annotate(idp=F("id") + 1)
            .order_by("-idp")
            .first()
            .values_list("id", "idp")
        )
        self.assertEqual(data[0] + 1, data[1])
    async def test_annotate_expression_filter(self):
        # filtering on an annotation that shadows a real field (23 of the 30
        # rows have intnum + 1 > 30)
        count = await IntFields.annotate(intnum=F("intnum") + 1).filter(intnum__gt=30).count()
        self.assertEqual(count, 23)
    async def test_get_raw_sql(self):
        # sql() renders the queryset as a SELECT statement
        sql = IntFields.all().sql()
        self.assertRegex(sql, r"^SELECT.+FROM.+")
    @test.requireCapability(support_index_hint=True)
    async def test_force_index(self):
        # FORCE INDEX hint appears in generated SQL and is stable across calls
        sql = IntFields.filter(pk=1).only("id").force_index("index_name").sql()
        self.assertEqual(
            sql,
            "SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
        )
        sql_again = IntFields.filter(pk=1).only("id").force_index("index_name").sql()
        self.assertEqual(
            sql_again,
            "SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
        )
    @test.requireCapability(support_index_hint=True)
    async def test_force_index_avaiable_in_more_query(self):
        # FORCE INDEX propagates into values/values_list/count/exists queries
        # (method name keeps its historical "avaiable" spelling for stability)
        sql_ValuesQuery = IntFields.filter(pk=1).force_index("index_name").values("id").sql()
        self.assertEqual(
            sql_ValuesQuery,
            "SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
        )
        sql_ValuesListQuery = (
            IntFields.filter(pk=1).force_index("index_name").values_list("id").sql()
        )
        self.assertEqual(
            sql_ValuesListQuery,
            "SELECT `id` `0` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
        )
        sql_CountQuery = IntFields.filter(pk=1).force_index("index_name").count().sql()
        self.assertEqual(
            sql_CountQuery,
            "SELECT COUNT(*) FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
        )
        sql_ExistsQuery = IntFields.filter(pk=1).force_index("index_name").exists().sql()
        self.assertEqual(
            sql_ExistsQuery,
            "SELECT 1 FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1 LIMIT 1",
        )
    @test.requireCapability(support_index_hint=True)
    async def test_use_index(self):
        # USE INDEX hint appears in generated SQL and is stable across calls
        sql = IntFields.filter(pk=1).only("id").use_index("index_name").sql()
        self.assertEqual(
            sql,
            "SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
        )
        sql_again = IntFields.filter(pk=1).only("id").use_index("index_name").sql()
        self.assertEqual(
            sql_again,
            "SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
        )
    @test.requireCapability(support_index_hint=True)
    async def test_use_index_avaiable_in_more_query(self):
        # USE INDEX propagates into values/values_list/count/exists queries
        # (method name keeps its historical "avaiable" spelling for stability)
        sql_ValuesQuery = IntFields.filter(pk=1).use_index("index_name").values("id").sql()
        self.assertEqual(
            sql_ValuesQuery,
            "SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
        )
        sql_ValuesListQuery = IntFields.filter(pk=1).use_index("index_name").values_list("id").sql()
        self.assertEqual(
            sql_ValuesListQuery,
            "SELECT `id` `0` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
        )
        sql_CountQuery = IntFields.filter(pk=1).use_index("index_name").count().sql()
        self.assertEqual(
            sql_CountQuery,
            "SELECT COUNT(*) FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
        )
        sql_ExistsQuery = IntFields.filter(pk=1).use_index("index_name").exists().sql()
        self.assertEqual(
            sql_ExistsQuery,
            "SELECT 1 FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1 LIMIT 1",
        )
    @test.requireCapability(support_for_update=True)
    async def test_select_for_update(self):
        """select_for_update() renders the dialect-specific FOR UPDATE suffix.

        Covers the plain form plus the nowait / skip_locked / of variants for
        both postgres and mysql; other dialects are implicitly skipped by the
        if/elif (no assertion runs for them).
        """
        sql1 = IntFields.filter(pk=1).only("id").select_for_update().sql()
        sql2 = IntFields.filter(pk=1).only("id").select_for_update(nowait=True).sql()
        sql3 = IntFields.filter(pk=1).only("id").select_for_update(skip_locked=True).sql()
        sql4 = IntFields.filter(pk=1).only("id").select_for_update(of=("intfields",)).sql()
        dialect = self.db.schema_generator.DIALECT
        if dialect == "postgres":
            self.assertEqual(
                sql1,
                'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE',
            )
            self.assertEqual(
                sql2,
                'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE NOWAIT',
            )
            self.assertEqual(
                sql3,
                'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE SKIP LOCKED',
            )
            self.assertEqual(
                sql4,
                'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE OF "intfields"',
            )
        elif dialect == "mysql":
            self.assertEqual(
                sql1,
                "SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE",
            )
            self.assertEqual(
                sql2,
                "SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE NOWAIT",
            )
            self.assertEqual(
                sql3,
                "SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE SKIP LOCKED",
            )
            self.assertEqual(
                sql4,
                "SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE OF `intfields`",
            )
    async def test_select_related(self):
        """select_related() fetches both FK targets in the same query."""
        tournament = await Tournament.create(name="1")
        reporter = await Reporter.create(name="Reporter")
        event = await Event.create(name="1", tournament=tournament, reporter=reporter)
        event = await Event.all().select_related("tournament", "reporter").get(pk=event.pk)
        self.assertEqual(event.tournament.pk, tournament.pk)
        self.assertEqual(event.reporter.pk, reporter.pk)
    async def test_select_related_with_two_same_models(self):
        """Two FK relations to the same model don't clobber each other's rows."""
        parent_node = await Node.create(name="1")
        child_node = await Node.create(name="2")
        tree = await Tree.create(parent=parent_node, child=child_node)
        tree = await Tree.all().select_related("parent", "child").get(pk=tree.pk)
        self.assertEqual(tree.parent.pk, parent_node.pk)
        self.assertEqual(tree.parent.name, parent_node.name)
        self.assertEqual(tree.child.pk, child_node.pk)
        self.assertEqual(tree.child.name, child_node.name)
    @test.requireCapability(dialect="postgres")
    async def test_postgres_search(self):
        """__search lookup (postgres full-text search) matches a word in the name."""
        name = "hello world"
        await Tournament.create(name=name)
        ret = await Tournament.filter(name__search="hello").first()
        self.assertEqual(ret.name, name)
    async def test_subquery_select(self):
        """A Subquery annotation can be selected alongside regular columns."""
        t1 = await Tournament.create(name="1")
        ret = (
            await Tournament.filter(pk=t1.pk)
            .annotate(ids=Subquery(Tournament.filter(pk=t1.pk).values("id")))
            .values("ids", "id")
        )
        self.assertEqual(ret, [{"id": t1.pk, "ids": t1.pk}])
    async def test_subquery_access(self):
        """This test ensures that accessing a query does not modify it (#780)"""
        tournament_1 = await Tournament.create(name="1")
        event_1 = await Event.create(event_id=1, name="event 1", tournament=tournament_1)
        event_2 = await Event.create(event_id=2, name="event 2", tournament=tournament_1)
        team_1 = await Team.create(id=1, name="team 1")
        team_2 = await Team.create(id=2, name="team 2")
        await event_1.participants.add(team_1)
        await event_2.participants.add(team_1, team_2)
        self.assertEqual(await event_1.participants.all(), [team_1])
        self.assertEqual(await event_2.participants.all(), [team_1, team_2])
        sub_query_team_1 = Subquery(Event.filter(participants__id=1).values("event_id"))
        sub_query_team_2 = Subquery(Event.filter(participants__id=2).values("event_id"))
        query = Event.filter(pk__in=sub_query_team_1)  # should select event 1 and event 2
        query = query.filter(pk__in=sub_query_team_2)  # should select only event 2
        # Each operation below is performed twice: idempotence is the point —
        # rendering/counting must leave the query object unchanged.
        self.assertEqual(query.sql(), query.sql())
        self.assertEqual(await query.count(), await query.count())
        self.assertEqual(await query.count(), 1)
        self.assertEqual(await query.all(), [event_2])
    async def test_subquery_filter(self):
        """A Subquery can be used as a filter value (pk=<subquery>)."""
        t1 = await Tournament.create(name="1")
        ret = await Tournament.filter(pk=Subquery(Tournament.filter(pk=t1.pk).values("id"))).first()
        self.assertEqual(ret, t1)
    async def test_raw_sql_count(self):
        """RawSQL annotation with an aggregate renders and evaluates correctly."""
        t1 = await Tournament.create(name="1")
        ret = await Tournament.filter(pk=t1.pk).annotate(count=RawSQL("count(*)")).values("count")
        self.assertEqual(ret, [{"count": 1}])
    async def test_raw_sql_select(self):
        """A RawSQL annotation can be both filtered on and selected."""
        t1 = await Tournament.create(id=1, name="1")
        ret = (
            await Tournament.filter(pk=t1.pk)
            .annotate(idp=RawSQL("id + 1"))
            .filter(idp=2)
            .values("idp")
        )
        self.assertEqual(ret, [{"idp": 2}])
    async def test_raw_sql_filter(self):
        """RawSQL as a filter value: pk = id + 1 matches no row."""
        ret = await Tournament.filter(pk=RawSQL("id + 1"))
        self.assertEqual(ret, [])
    async def test_annotation_field_priorior_to_model_field(self):
        """An annotation named like a model field wins in values() output."""
        # Sometimes, field name in annotates also exist in model field sets
        # and may need lift the former's priority in select query construction.
        t1 = await Tournament.create(name="1")
        ret = await Tournament.filter(pk=t1.pk).annotate(id=RawSQL("id + 1")).values("id")
        self.assertEqual(ret, [{"id": t1.pk + 1}])
| 40.353968 | 100 | 0.619911 | from tests.testmodels import Event, IntFields, MinRelation, Node, Reporter, Team, Tournament, Tree
from tortoise import Tortoise
from tortoise.contrib import test
from tortoise.exceptions import (
DoesNotExist,
FieldError,
IntegrityError,
MultipleObjectsReturned,
ParamsError,
)
from tortoise.expressions import F, RawSQL, Subquery
class TestQueryset(test.TestCase):
async def asyncSetUp(self):
await super().asyncSetUp()
self.intfields = [await IntFields.create(intnum=val) for val in range(10, 100, 3)]
self.db = Tortoise.get_connection("models")
async def test_all_count(self):
self.assertEqual(await IntFields.all().count(), 30)
self.assertEqual(await IntFields.filter(intnum_null=80).count(), 0)
async def test_exists(self):
ret = await IntFields.filter(intnum=0).exists()
self.assertFalse(ret)
ret = await IntFields.filter(intnum=10).exists()
self.assertTrue(ret)
ret = await IntFields.filter(intnum__gt=10).exists()
self.assertTrue(ret)
ret = await IntFields.filter(intnum__lt=10).exists()
self.assertFalse(ret)
async def test_limit_count(self):
self.assertEqual(await IntFields.all().limit(10).count(), 10)
async def test_limit_negative(self):
with self.assertRaisesRegex(ParamsError, "Limit should be non-negative number"):
await IntFields.all().limit(-10)
async def test_offset_count(self):
self.assertEqual(await IntFields.all().offset(10).count(), 20)
async def test_offset_negative(self):
with self.assertRaisesRegex(ParamsError, "Offset should be non-negative number"):
await IntFields.all().offset(-10)
async def test_join_count(self):
tour = await Tournament.create(name="moo")
await MinRelation.create(tournament=tour)
self.assertEqual(await MinRelation.all().count(), 1)
self.assertEqual(await MinRelation.filter(tournament__id=tour.id).count(), 1)
async def test_modify_dataset(self):
rows_affected = await IntFields.filter(intnum__gte=70).update(intnum_null=80)
self.assertEqual(rows_affected, 10)
self.assertEqual(await IntFields.filter(intnum_null=80).count(), 10)
self.assertEqual(await IntFields.filter(intnum_null__isnull=True).count(), 20)
await IntFields.filter(intnum_null__isnull=True).update(intnum_null=-1)
self.assertEqual(await IntFields.filter(intnum_null=None).count(), 0)
self.assertEqual(await IntFields.filter(intnum_null=-1).count(), 20)
async def test_distinct(self):
await IntFields.filter(intnum__gte=70).update(intnum_null=80)
await IntFields.filter(intnum_null__isnull=True).update(intnum_null=-1)
self.assertEqual(
await IntFields.all()
.order_by("intnum_null")
.distinct()
.values_list("intnum_null", flat=True),
[-1, 80],
)
self.assertEqual(
await IntFields.all().order_by("intnum_null").distinct().values("intnum_null"),
[{"intnum_null": -1}, {"intnum_null": 80}],
)
async def test_limit_offset_values_list(self):
self.assertEqual(
await IntFields.all().order_by("intnum").limit(10).values_list("intnum", flat=True),
[10, 13, 16, 19, 22, 25, 28, 31, 34, 37],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(10)
.offset(10)
.values_list("intnum", flat=True),
[40, 43, 46, 49, 52, 55, 58, 61, 64, 67],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(10)
.offset(20)
.values_list("intnum", flat=True),
[70, 73, 76, 79, 82, 85, 88, 91, 94, 97],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(10)
.offset(30)
.values_list("intnum", flat=True),
[],
)
self.assertEqual(
await IntFields.all().order_by("-intnum").limit(10).values_list("intnum", flat=True),
[97, 94, 91, 88, 85, 82, 79, 76, 73, 70],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(10)
.filter(intnum__gte=40)
.values_list("intnum", flat=True),
[40, 43, 46, 49, 52, 55, 58, 61, 64, 67],
)
async def test_limit_offset_values(self):
self.assertEqual(
await IntFields.all().order_by("intnum").limit(5).values("intnum"),
[{"intnum": 10}, {"intnum": 13}, {"intnum": 16}, {"intnum": 19}, {"intnum": 22}],
)
self.assertEqual(
await IntFields.all().order_by("intnum").limit(5).offset(10).values("intnum"),
[{"intnum": 40}, {"intnum": 43}, {"intnum": 46}, {"intnum": 49}, {"intnum": 52}],
)
self.assertEqual(
await IntFields.all().order_by("intnum").limit(5).offset(30).values("intnum"), []
)
self.assertEqual(
await IntFields.all().order_by("-intnum").limit(5).values("intnum"),
[{"intnum": 97}, {"intnum": 94}, {"intnum": 91}, {"intnum": 88}, {"intnum": 85}],
)
self.assertEqual(
await IntFields.all()
.order_by("intnum")
.limit(5)
.filter(intnum__gte=40)
.values("intnum"),
[{"intnum": 40}, {"intnum": 43}, {"intnum": 46}, {"intnum": 49}, {"intnum": 52}],
)
async def test_in_bulk(self):
id_list = [item.pk for item in await IntFields.all().only("id").limit(2)]
ret = await IntFields.in_bulk(id_list=id_list)
self.assertEqual(list(ret.keys()), id_list)
async def test_first(self):
self.assertEqual(
(await IntFields.all().order_by("intnum").filter(intnum__gte=40).first()).intnum, 40
)
self.assertEqual(
(await IntFields.all().order_by("intnum").filter(intnum__gte=40).first().values())[
"intnum"
],
40,
)
self.assertEqual(
(await IntFields.all().order_by("intnum").filter(intnum__gte=40).first().values_list())[
1
],
40,
)
self.assertEqual(
await IntFields.all().order_by("intnum").filter(intnum__gte=400).first(), None
)
self.assertEqual(
await IntFields.all().order_by("intnum").filter(intnum__gte=400).first().values(), None
)
self.assertEqual(
await IntFields.all().order_by("intnum").filter(intnum__gte=400).first().values_list(),
None,
)
async def test_get_or_none(self):
self.assertEqual((await IntFields.all().get_or_none(intnum=40)).intnum, 40)
self.assertEqual((await IntFields.all().get_or_none(intnum=40).values())["intnum"], 40)
self.assertEqual((await IntFields.all().get_or_none(intnum=40).values_list())[1], 40)
self.assertEqual(
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400), None
)
self.assertEqual(
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400).values(), None
)
self.assertEqual(
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=400).values_list(),
None,
)
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40)
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40).values()
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().order_by("intnum").get_or_none(intnum__gte=40).values_list()
async def test_get(self):
await IntFields.filter(intnum__gte=70).update(intnum_null=80)
self.assertEqual((await IntFields.all().get(intnum=40)).intnum, 40)
self.assertEqual((await IntFields.all().get(intnum=40).values())["intnum"], 40)
self.assertEqual((await IntFields.all().get(intnum=40).values_list())[1], 40)
self.assertEqual((await IntFields.all().all().all().all().all().get(intnum=40)).intnum, 40)
self.assertEqual(
(await IntFields.all().all().all().all().all().get(intnum=40).values())["intnum"], 40
)
self.assertEqual(
(await IntFields.all().all().all().all().all().get(intnum=40).values_list())[1], 40
)
self.assertEqual((await IntFields.get(intnum=40)).intnum, 40)
self.assertEqual((await IntFields.get(intnum=40).values())["intnum"], 40)
self.assertEqual((await IntFields.get(intnum=40).values_list())[1], 40)
with self.assertRaises(DoesNotExist):
await IntFields.all().get(intnum=41)
with self.assertRaises(DoesNotExist):
await IntFields.all().get(intnum=41).values()
with self.assertRaises(DoesNotExist):
await IntFields.all().get(intnum=41).values_list()
with self.assertRaises(DoesNotExist):
await IntFields.get(intnum=41)
with self.assertRaises(DoesNotExist):
await IntFields.get(intnum=41).values()
with self.assertRaises(DoesNotExist):
await IntFields.get(intnum=41).values_list()
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().get(intnum_null=80)
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().get(intnum_null=80).values()
with self.assertRaises(MultipleObjectsReturned):
await IntFields.all().get(intnum_null=80).values_list()
with self.assertRaises(MultipleObjectsReturned):
await IntFields.get(intnum_null=80)
with self.assertRaises(MultipleObjectsReturned):
await IntFields.get(intnum_null=80).values()
with self.assertRaises(MultipleObjectsReturned):
await IntFields.get(intnum_null=80).values_list()
async def test_delete(self):
await (await IntFields.get(intnum=40)).delete()
with self.assertRaises(DoesNotExist):
await IntFields.get(intnum=40)
self.assertEqual(await IntFields.all().count(), 29)
rows_affected = (
await IntFields.all().order_by("intnum").limit(10).filter(intnum__gte=70).delete()
)
self.assertEqual(rows_affected, 10)
self.assertEqual(await IntFields.all().count(), 19)
@test.requireCapability(support_update_limit_order_by=True)
async def test_delete_limit(self):
await IntFields.all().limit(1).delete()
self.assertEqual(await IntFields.all().count(), 29)
@test.requireCapability(support_update_limit_order_by=True)
async def test_delete_limit_order_by(self):
await IntFields.all().limit(1).order_by("-id").delete()
self.assertEqual(await IntFields.all().count(), 29)
with self.assertRaises(DoesNotExist):
await IntFields.get(intnum=97)
async def test_async_iter(self):
counter = 0
async for _ in IntFields.all():
counter += 1
self.assertEqual(await IntFields.all().count(), counter)
async def test_update_basic(self):
obj0 = await IntFields.create(intnum=2147483647)
await IntFields.filter(id=obj0.id).update(intnum=2147483646)
obj = await IntFields.get(id=obj0.id)
self.assertEqual(obj.intnum, 2147483646)
self.assertEqual(obj.intnum_null, None)
async def test_update_f_expression(self):
obj0 = await IntFields.create(intnum=2147483647)
await IntFields.filter(id=obj0.id).update(intnum=F("intnum") - 1)
obj = await IntFields.get(id=obj0.id)
self.assertEqual(obj.intnum, 2147483646)
async def test_update_badparam(self):
obj0 = await IntFields.create(intnum=2147483647)
with self.assertRaisesRegex(FieldError, "Unknown keyword argument"):
await IntFields.filter(id=obj0.id).update(badparam=1)
async def test_update_pk(self):
obj0 = await IntFields.create(intnum=2147483647)
with self.assertRaisesRegex(IntegrityError, "is PK and can not be updated"):
await IntFields.filter(id=obj0.id).update(id=1)
async def test_update_virtual(self):
tour = await Tournament.create(name="moo")
obj0 = await MinRelation.create(tournament=tour)
with self.assertRaisesRegex(FieldError, "is virtual and can not be updated"):
await MinRelation.filter(id=obj0.id).update(participants=[])
async def test_bad_ordering(self):
with self.assertRaisesRegex(FieldError, "Unknown field moo1fip for model IntFields"):
await IntFields.all().order_by("moo1fip")
async def test_duplicate_values(self):
with self.assertRaisesRegex(FieldError, "Duplicate key intnum"):
await IntFields.all().values("intnum", "intnum")
async def test_duplicate_values_list(self):
await IntFields.all().values_list("intnum", "intnum")
async def test_duplicate_values_kw(self):
with self.assertRaisesRegex(FieldError, "Duplicate key intnum"):
await IntFields.all().values("intnum", intnum="intnum_null")
async def test_duplicate_values_kw_badmap(self):
with self.assertRaisesRegex(FieldError, 'Unknown field "intnum2" for model "IntFields"'):
await IntFields.all().values(intnum="intnum2")
async def test_bad_values(self):
with self.assertRaisesRegex(FieldError, 'Unknown field "int2num" for model "IntFields"'):
await IntFields.all().values("int2num")
async def test_bad_values_list(self):
with self.assertRaisesRegex(FieldError, 'Unknown field "int2num" for model "IntFields"'):
await IntFields.all().values_list("int2num")
async def test_many_flat_values_list(self):
with self.assertRaisesRegex(
TypeError, "You can flat value_list only if contains one field"
):
await IntFields.all().values_list("intnum", "intnum_null", flat=True)
async def test_all_flat_values_list(self):
with self.assertRaisesRegex(
TypeError, "You can flat value_list only if contains one field"
):
await IntFields.all().values_list(flat=True)
async def test_all_values_list(self):
data = await IntFields.all().order_by("id").values_list()
self.assertEqual(data[2], (self.intfields[2].id, 16, None))
async def test_all_values(self):
data = await IntFields.all().order_by("id").values()
self.assertEqual(data[2], {"id": self.intfields[2].id, "intnum": 16, "intnum_null": None})
async def test_order_by_bad_value(self):
with self.assertRaisesRegex(FieldError, "Unknown field badid for model IntFields"):
await IntFields.all().order_by("badid").values_list()
async def test_annotate_order_expression(self):
data = (
await IntFields.annotate(idp=F("id") + 1)
.order_by("-idp")
.first()
.values_list("id", "idp")
)
self.assertEqual(data[0] + 1, data[1])
async def test_annotate_expression_filter(self):
count = await IntFields.annotate(intnum=F("intnum") + 1).filter(intnum__gt=30).count()
self.assertEqual(count, 23)
async def test_get_raw_sql(self):
sql = IntFields.all().sql()
self.assertRegex(sql, r"^SELECT.+FROM.+")
@test.requireCapability(support_index_hint=True)
async def test_force_index(self):
sql = IntFields.filter(pk=1).only("id").force_index("index_name").sql()
self.assertEqual(
sql,
"SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
)
sql_again = IntFields.filter(pk=1).only("id").force_index("index_name").sql()
self.assertEqual(
sql_again,
"SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
)
@test.requireCapability(support_index_hint=True)
async def test_force_index_avaiable_in_more_query(self):
sql_ValuesQuery = IntFields.filter(pk=1).force_index("index_name").values("id").sql()
self.assertEqual(
sql_ValuesQuery,
"SELECT `id` `id` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
)
sql_ValuesListQuery = (
IntFields.filter(pk=1).force_index("index_name").values_list("id").sql()
)
self.assertEqual(
sql_ValuesListQuery,
"SELECT `id` `0` FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
)
sql_CountQuery = IntFields.filter(pk=1).force_index("index_name").count().sql()
self.assertEqual(
sql_CountQuery,
"SELECT COUNT(*) FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1",
)
sql_ExistsQuery = IntFields.filter(pk=1).force_index("index_name").exists().sql()
self.assertEqual(
sql_ExistsQuery,
"SELECT 1 FROM `intfields` FORCE INDEX (`index_name`) WHERE `id`=1 LIMIT 1",
)
@test.requireCapability(support_index_hint=True)
async def test_use_index(self):
sql = IntFields.filter(pk=1).only("id").use_index("index_name").sql()
self.assertEqual(
sql,
"SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
)
sql_again = IntFields.filter(pk=1).only("id").use_index("index_name").sql()
self.assertEqual(
sql_again,
"SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
)
@test.requireCapability(support_index_hint=True)
async def test_use_index_avaiable_in_more_query(self):
sql_ValuesQuery = IntFields.filter(pk=1).use_index("index_name").values("id").sql()
self.assertEqual(
sql_ValuesQuery,
"SELECT `id` `id` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
)
sql_ValuesListQuery = IntFields.filter(pk=1).use_index("index_name").values_list("id").sql()
self.assertEqual(
sql_ValuesListQuery,
"SELECT `id` `0` FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
)
sql_CountQuery = IntFields.filter(pk=1).use_index("index_name").count().sql()
self.assertEqual(
sql_CountQuery,
"SELECT COUNT(*) FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1",
)
sql_ExistsQuery = IntFields.filter(pk=1).use_index("index_name").exists().sql()
self.assertEqual(
sql_ExistsQuery,
"SELECT 1 FROM `intfields` USE INDEX (`index_name`) WHERE `id`=1 LIMIT 1",
)
@test.requireCapability(support_for_update=True)
async def test_select_for_update(self):
sql1 = IntFields.filter(pk=1).only("id").select_for_update().sql()
sql2 = IntFields.filter(pk=1).only("id").select_for_update(nowait=True).sql()
sql3 = IntFields.filter(pk=1).only("id").select_for_update(skip_locked=True).sql()
sql4 = IntFields.filter(pk=1).only("id").select_for_update(of=("intfields",)).sql()
dialect = self.db.schema_generator.DIALECT
if dialect == "postgres":
self.assertEqual(
sql1,
'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE',
)
self.assertEqual(
sql2,
'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE NOWAIT',
)
self.assertEqual(
sql3,
'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE SKIP LOCKED',
)
self.assertEqual(
sql4,
'SELECT "id" "id" FROM "intfields" WHERE "id"=1 FOR UPDATE OF "intfields"',
)
elif dialect == "mysql":
self.assertEqual(
sql1,
"SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE",
)
self.assertEqual(
sql2,
"SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE NOWAIT",
)
self.assertEqual(
sql3,
"SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE SKIP LOCKED",
)
self.assertEqual(
sql4,
"SELECT `id` `id` FROM `intfields` WHERE `id`=1 FOR UPDATE OF `intfields`",
)
async def test_select_related(self):
tournament = await Tournament.create(name="1")
reporter = await Reporter.create(name="Reporter")
event = await Event.create(name="1", tournament=tournament, reporter=reporter)
event = await Event.all().select_related("tournament", "reporter").get(pk=event.pk)
self.assertEqual(event.tournament.pk, tournament.pk)
self.assertEqual(event.reporter.pk, reporter.pk)
async def test_select_related_with_two_same_models(self):
parent_node = await Node.create(name="1")
child_node = await Node.create(name="2")
tree = await Tree.create(parent=parent_node, child=child_node)
tree = await Tree.all().select_related("parent", "child").get(pk=tree.pk)
self.assertEqual(tree.parent.pk, parent_node.pk)
self.assertEqual(tree.parent.name, parent_node.name)
self.assertEqual(tree.child.pk, child_node.pk)
self.assertEqual(tree.child.name, child_node.name)
@test.requireCapability(dialect="postgres")
async def test_postgres_search(self):
name = "hello world"
await Tournament.create(name=name)
ret = await Tournament.filter(name__search="hello").first()
self.assertEqual(ret.name, name)
async def test_subquery_select(self):
t1 = await Tournament.create(name="1")
ret = (
await Tournament.filter(pk=t1.pk)
.annotate(ids=Subquery(Tournament.filter(pk=t1.pk).values("id")))
.values("ids", "id")
)
self.assertEqual(ret, [{"id": t1.pk, "ids": t1.pk}])
async def test_subquery_access(self):
tournament_1 = await Tournament.create(name="1")
event_1 = await Event.create(event_id=1, name="event 1", tournament=tournament_1)
event_2 = await Event.create(event_id=2, name="event 2", tournament=tournament_1)
team_1 = await Team.create(id=1, name="team 1")
team_2 = await Team.create(id=2, name="team 2")
await event_1.participants.add(team_1)
await event_2.participants.add(team_1, team_2)
self.assertEqual(await event_1.participants.all(), [team_1])
self.assertEqual(await event_2.participants.all(), [team_1, team_2])
sub_query_team_1 = Subquery(Event.filter(participants__id=1).values("event_id"))
sub_query_team_2 = Subquery(Event.filter(participants__id=2).values("event_id"))
query = Event.filter(pk__in=sub_query_team_1)
query = query.filter(pk__in=sub_query_team_2)
self.assertEqual(query.sql(), query.sql())
self.assertEqual(await query.count(), await query.count())
self.assertEqual(await query.count(), 1)
self.assertEqual(await query.all(), [event_2])
async def test_subquery_filter(self):
t1 = await Tournament.create(name="1")
ret = await Tournament.filter(pk=Subquery(Tournament.filter(pk=t1.pk).values("id"))).first()
self.assertEqual(ret, t1)
async def test_raw_sql_count(self):
t1 = await Tournament.create(name="1")
ret = await Tournament.filter(pk=t1.pk).annotate(count=RawSQL("count(*)")).values("count")
self.assertEqual(ret, [{"count": 1}])
async def test_raw_sql_select(self):
t1 = await Tournament.create(id=1, name="1")
ret = (
await Tournament.filter(pk=t1.pk)
.annotate(idp=RawSQL("id + 1"))
.filter(idp=2)
.values("idp")
)
self.assertEqual(ret, [{"idp": 2}])
async def test_raw_sql_filter(self):
ret = await Tournament.filter(pk=RawSQL("id + 1"))
self.assertEqual(ret, [])
async def test_annotation_field_priorior_to_model_field(self):
t1 = await Tournament.create(name="1")
ret = await Tournament.filter(pk=t1.pk).annotate(id=RawSQL("id + 1")).values("id")
self.assertEqual(ret, [{"id": t1.pk + 1}])
| true | true |
7901ea333d5165e5889b2bbfea30ba4301065462 | 33 | py | Python | app/api/rates/__init__.py | erkandem/rates_app | d7f8eaf9aeb606edb14f8e6766fe644319191037 | [
"MIT"
] | null | null | null | app/api/rates/__init__.py | erkandem/rates_app | d7f8eaf9aeb606edb14f8e6766fe644319191037 | [
"MIT"
] | null | null | null | app/api/rates/__init__.py | erkandem/rates_app | d7f8eaf9aeb606edb14f8e6766fe644319191037 | [
"MIT"
] | null | null | null | from .euro import api as euro_ns
| 16.5 | 32 | 0.787879 | from .euro import api as euro_ns
| true | true |
7901ea9f970a56936e3d9a1d4eaa2dfca3a7c203 | 9,906 | py | Python | _ext.py | cyco/osgameclones | 7c11e8128f14efb182fe2d391d5ccb91bd55082c | [
"CC-BY-4.0",
"MIT"
] | null | null | null | _ext.py | cyco/osgameclones | 7c11e8128f14efb182fe2d391d5ccb91bd55082c | [
"CC-BY-4.0",
"MIT"
] | null | null | null | _ext.py | cyco/osgameclones | 7c11e8128f14efb182fe2d391d5ccb91bd55082c | [
"CC-BY-4.0",
"MIT"
] | null | null | null | import copy
import sys
import pprint
import os, os.path as op
from datetime import date, datetime, timedelta
from collections import OrderedDict
from functools import partial
from urllib.parse import urlparse
import yaml
from natsort import natsorted, ns
from pykwalify.core import Core
def abort(msg):
    """Write *msg* (plus a newline) to stderr and exit with status 1."""
    print(msg, file=sys.stderr)
    sys.exit(1)
def validate(item, key):
    """Sanity-check one original-game entry.

    Every name must be a string or a two-string pair, and ``item[key]`` must
    be a list of dicts; any violation aborts the whole build.  Returns a
    truthy pair on success (callers only test truthiness).
    """
    for alias in names(item):
        well_formed = isinstance(alias, str) or (
            len(alias) == 2 and all(isinstance(part, str) for part in alias)
        )
        if not well_formed:
            abort('Error: %r should be a string or a list of two strings' % alias)
    games = item[key]
    if not (isinstance(games, list) and all(isinstance(entry, dict) for entry in games)):
        print('Error: this should be a list of dicts:')
        abort(pprint.pformat(games))
    return names, games
def names(item):
    """Return the primary name followed by any alternate names."""
    return [item['name'], *item.get('names', [])]
def game_name(game):
    """Primary display name: first element when 'name' is a list, else the string."""
    name = game['name']
    if isinstance(name, list):
        return name[0]
    return name
def parse_tag(tag):
    """Slugify a tag: lowercase it and turn spaces into hyphens."""
    return tag.lower().replace(' ', '-')
def parse_unicode(text):
    """Recursively map strings through; lists/tuples become lists of results.

    Strings pass through unchanged; any other type yields None implicitly
    (matching the historical behavior of this helper).
    """
    if isinstance(text, str):
        return text
    if isinstance(text, (list, tuple)):
        return [parse_unicode(element) for element in text]
def parse_unicode_tag(tag):
    # Slugify a tag after unicode normalization.  NOTE(review): for plain
    # strings parse_unicode is currently a passthrough, so this equals
    # parse_tag(tag); presumably it once transliterated non-ASCII — confirm.
    return parse_tag(parse_unicode(tag))
def parse_tags(entry, keys):
    """Collect slug tags from ``entry`` for each key in ``keys``.

    Each value may be a string or a list of strings; every value is slugged
    both directly and through the unicode normalizer.  Aborts the build on
    any other value type.  First-seen order is preserved, duplicates dropped.
    """
    tags = []
    for key in keys:
        if key in entry:
            val = entry.get(key)
            if isinstance(val, str):
                tags.append(parse_tag(val))
                tags.append(parse_unicode_tag(val))
            elif isinstance(val, list):
                tags += [parse_tag(v) for v in val]
                tags += [parse_unicode_tag(v) for v in val]
            else:
                abort('Error: %s\'s key "%s" is not valid (%s)' %
                      (entry['name'], key, type(val).__name__))
    # dict.fromkeys is an order-preserving dedupe: same result as the old
    # "append if not seen" loop, but O(n) instead of O(n^2).
    return list(dict.fromkeys(tags))
def parse_global_tags(site, item, tag, item_key: str):
    """Accumulate site-wide usage stats for one tag kind of one entry.

    Maintains ``site.<tag>`` as an alphabetically-ordered mapping of
    value -> {'tag_count': <distinct entries>, 'keys': <set of entry keys>}.
    A string value in ``item[tag]`` is normalized to a one-element list
    (mutating ``item`` in place, as before).  No-op when ``tag`` is absent.
    """
    if tag not in item:
        return
    registry = getattr(site, tag, None)
    if not registry:
        registry = {}
        setattr(site, tag, registry)
    values = item[tag]
    if isinstance(values, str):
        values = [values]
        item[tag] = values
    for value in values:
        stats = registry.setdefault(value, {'tag_count': 0, 'keys': set()})
        if item_key not in stats['keys']:
            stats['tag_count'] += 1
            stats['keys'].add(item_key)
    setattr(site, tag, OrderedDict(sorted(registry.items())))
def parse_item(entry, entry_tags=(), meta=None, meta_tags=()):
    """Normalize one clone/remake entry into a template-ready dict.

    Adds to a copy of ``entry``:
    - ``new``: True when updated within the last 30 days
    - ``tags``: slugs from the entry's own keys plus the original's meta keys
    - ``updated``: always a ``date`` (missing values fall back to 1970-01-01)
    - repo presentation hints (repoiconname/repoiconstyle/repotitle/repobadge)
      derived from the host of the ``repo`` URL.

    The defaults were mutable ([] / {}); they are never mutated here, so the
    immutable replacements are a safe, backward-compatible fix.
    """
    if meta is None:
        meta = {}
    updated = entry.get('updated') or date(1970, 1, 1)
    if isinstance(updated, str):
        updated = datetime.strptime(updated, "%Y-%m-%d").date()
    result = dict(entry,
                  new=(date.today() - updated) < timedelta(days=30),
                  tags=parse_tags(entry, entry_tags) + parse_tags(meta, meta_tags),
                  updated=updated)
    if "repo" in result:
        # Try to add extra repo information, like icons, badges
        repo_parsed = urlparse(result["repo"])
        domain = repo_parsed.netloc
        ext = os.path.splitext(result["repo"])[1]
        if "github.com" in domain:
            try:
                # https://github.com/<user>/<repo>
                _, user, repo, *_ = repo_parsed.path.split("/")
            except ValueError:
                # Not a user/repo URL (e.g. bare github.com): plain icon only.
                result["repoiconname"] = "github"
                result["repoiconstyle"] = "fab"
                result["repotitle"] = "GitHub"
            else:
                result["repobadge"] = f'<img class="badge lazyload" alt="GitHub stars" data-src="https://img.shields.io/github/stars/{user}/{repo}?style=flat-square&logo=github" src="https://img.shields.io/badge/stars-%3F-blue?style=flat-square&logo=github">'
        elif (".google.com" in domain or
                "googlecode.com" in domain):
            result["repoiconname"] = "google"
            result["repoiconstyle"] = "fab"
            result["repotitle"] = "Google Code"
        elif "bitbucket.org" in domain:
            result["repoiconname"] = "bitbucket"
            result["repoiconstyle"] = "fab"
            result["repotitle"] = "Bitbucket"
        elif "gitlab.com" in domain or domain.startswith("gitlab."):
            result["repoiconname"] = "gitlab"
            result["repoiconstyle"] = "fab"
            result["repotitle"] = "GitLab"
        elif "sourceforge.net" in domain:
            try:
                # https://sourceforge.net/projects/<repo>
                _, _, repo, *_ = repo_parsed.path.split("/")
            except ValueError:
                # Not a project URL: no badge, icon defaults apply elsewhere.
                pass
            else:
                result["repobadge"] = f'<img class="badge lazyload" alt="Sourceforge downloads" data-src="https://img.shields.io/sourceforge/dt/{repo}?style=flat-square" src="https://img.shields.io/badge/downloads-%3F-brightgreen?style=flat-square">'
        elif ext in (".gz", ".zip", ".tar", ".tgz", ".tbz2", ".bz2", ".xz", ".rar"):
            result["repoiconname"] = "box"
            result["repoiconstyle"] = "fas"
            result["repotitle"] = "Archive"
    return result
def parse_items(site, item, key):
    """Validate and parse one original game's clone/remake list onto ``site``.

    Appends a ``(names, meta, parsed_games)`` triple to ``site.<key>`` and
    updates the site-wide genre/subgenre/theme/lang tag registries.  Skips
    silently when the entry has no games under ``key``; aborts the build on
    schema violations (via validate()).
    """
    if not (item.get(key) and validate(item, key)):
        return
    if not getattr(site, key, False):
        setattr(site, key, [])
    # Tag sources: meta_tags come from the original game's metadata,
    # game_tags from each individual clone entry.
    meta_tags = ['genre', 'subgenre', 'theme']
    game_tags = [
        'status',
        'development',
        'lang',
        'framework',
        'content',
        'license',
        'multiplayer',
        'type'
    ]
    # NOTE: meta is mutated in place — names_ascii/external are injected into
    # the item's own 'meta' dict (when present), not a copy.
    meta = item.get('meta', {})
    meta["names_ascii"] = parse_unicode(names(item))
    meta["external"] = item.get('external', {})
    parse_global_tags(site, meta, 'genre', item['name'])
    parse_global_tags(site, meta, 'subgenre', item['name'])
    parse_global_tags(site, meta, 'theme', item['name'])
    parse_fn = partial(parse_item, entry_tags=game_tags, meta=meta, meta_tags=meta_tags)
    for game in item[key]:
        # Register each clone's language(s); keyed by the clone's own name.
        parse_global_tags(site, game, 'lang', game['name'])
    item = (names(item), meta, [parse_fn(i) for i in item[key]])
    getattr(site, key).append(item)
def show_error(game_name, error_str):
    """Print one error entry: the game name in red, then the message."""
    print('\033[91m  %s\033[0m' % game_name)
    print('  %s' % error_str)
def show_errors(errors):
    """Render every collected error via show_error, print a summary count,
    and abort the process with exit status 1."""
    print('\n')
    for entry in errors:
        show_error(entry["name"], entry["error"])
    print('\n  %d errors\n' % len(errors))
    sys.exit(1)
def show_validation_errors(data, validation_errors):
    """Map pykwalify validation errors back to the offending game entries
    and report them via show_errors (which exits the process)."""
    collected = []
    for err in validation_errors:
        # err.path looks like '/<index>/...'; the second segment is the
        # position of the failing entry in the source list.
        idx = int(err.path.split('/')[1])
        entry = data[idx]
        collected.append({"name": game_name(entry), "error": repr(err)})
    show_errors(collected)
def validate_with_schema(source_data, schema_file):
    """Validate *source_data* against the pykwalify schema in *schema_file*.

    Schema violations are reported per-game via show_validation_errors
    (which exits); any other failure is re-raised unchanged.
    """
    core = Core(source_data=source_data, schema_files=[schema_file])
    try:
        core.validate(raise_exception=True)
    except Exception:
        if core.errors:
            show_validation_errors(source_data, core.errors)
        else:
            raise
def parse_data(site):
    """Load original games and their clones from the YAML data directories,
    validate both sets, cross-check clone references, and attach the
    combined entries to *site* via parse_items.

    Exits the process (through show_errors) on duplicate original names,
    clones missing an 'originals' list, or references to unknown originals.
    """
    base = op.dirname(__file__)

    originals = []
    for fn in os.listdir(op.join(base, 'originals')):
        if fn.endswith('.yaml'):
            # Use a context manager so the YAML file handle is closed
            # (the previous code leaked the handles from open()).
            with open(op.join(base, 'originals', fn), encoding="utf-8") as fp:
                originals.extend(yaml.safe_load(fp))

    def sort_key(game):
        name = game_name(game)
        # Always sort SCUMM first
        if name == 'SCUMM':
            return '0'
        if name.startswith('The '):
            return name[4:]
        return name
    originals = natsorted(originals, key=sort_key, alg=ns.IGNORECASE)
    print(str(len(originals)) + ' games in total')
    validate_with_schema(originals, 'schema/originals.yaml')

    clones = []
    for fn in sorted(os.listdir(op.join(base, 'games'))):
        if fn.endswith('.yaml'):
            with open(op.join(base, 'games', fn), encoding="utf-8") as fp:
                clones.extend(yaml.safe_load(fp))
    print(str(len(clones)) + ' clones in total')
    validate_with_schema(clones, 'schema/games.yaml')

    errors = []

    # Index originals by name, collecting duplicates as errors.
    originals_map = {}
    for item in originals:
        name = game_name(item)
        if name in originals_map:
            errors.append({
                "name": name,
                "error": "Duplicate original game '%s'" % name
            })
        originals_map[name] = item
    if len(errors) > 0:
        show_errors(errors)

    for clone in clones:
        if 'originals' not in clone:
            show_errors([{
                "name": clone["name"],
                "error": "Unable to find 'remakes' or 'clones' in game"
            }])
        for original in clone['originals']:
            if original not in originals_map:
                errors.append({
                    "name": clone["name"],
                    "error": "Original game '%s' not found" % original
                })
        if "updated" not in clone:
            print(f"{clone['name']} has no updated field")
        else:
            # Normalise string dates to date objects so the sort below
            # compares a single type.
            if isinstance(clone['updated'], str):
                clone['updated'] = datetime.strptime(clone['updated'], "%Y-%m-%d").date()
        if "status" not in clone:
            print(f"{clone['name']} has no status field")

    oldest_games = sorted([(clone['name'], clone['updated']) for clone in clones if 'updated' in clone], key=lambda x: x[1])[:5]
    print(f"Oldest 5 games: {oldest_games}")

    if len(errors) > 0:
        show_errors(errors)

    for item in originals:
        # Recombine originals and clones
        combined = copy.deepcopy(item)
        name = game_name(combined)
        combined['games'] = [
            clone for clone in clones
            if name in clone['originals']
        ]
        parse_items(site, combined, 'games')
| 32.058252 | 259 | 0.569352 | import copy
import sys
import pprint
import os, os.path as op
from datetime import date, datetime, timedelta
from collections import OrderedDict
from functools import partial
from urllib.parse import urlparse
import yaml
from natsort import natsorted, ns
from pykwalify.core import Core
def abort(msg):
sys.stderr.write(msg + '\n')
sys.exit(1)
def validate(item, key):
for name in names(item):
if not (isinstance(name, str) or
(len(name) == 2 and
all(isinstance(x, str) for x in name))):
abort('Error: %r should be a string or a list of two strings' % name)
games = item[key]
if (not isinstance(games, list) or
not all(isinstance(x, dict) for x in games)):
print('Error: this should be a list of dicts:')
abort(pprint.pformat(games))
return names, games
def names(item):
return [item['name']] + item.get('names', [])
def game_name(game):
return game['name'][0] if isinstance(game['name'], list) else game['name']
def parse_tag(tag):
return tag.replace(' ', '-').lower()
def parse_unicode(text):
if isinstance(text, str):
return text
if isinstance(text, (list, tuple)):
result = []
for item in text:
result.append(parse_unicode(item))
return result
def parse_unicode_tag(tag):
return parse_tag(parse_unicode(tag))
def parse_tags(entry, keys):
tags = []
for key in keys:
if key in entry:
val = entry.get(key)
if isinstance(val, str):
tags.append(parse_tag(val))
tags.append(parse_unicode_tag(val))
elif isinstance(val, list):
tags += [parse_tag(v) for v in val]
tags += [parse_unicode_tag(v) for v in val]
else:
abort('Error: %s\'s key "%s" is not valid (%s)' %
(entry['name'], key, type(val).__name__))
result = []
for tag in tags:
if tag not in result:
result.append(tag)
return result
def parse_global_tags(site, item, tag, item_key: str):
if tag in item:
if not getattr(site, tag, False):
setattr(site, tag, {})
if isinstance(item[tag], str):
item[tag] = [item[tag]]
for t in item[tag]:
tagObj = getattr(site, tag, False)
if not tagObj.get(t, False):
tagObj[t] = {'tag_count': 0, 'keys': set()}
if item_key not in tagObj[t]['keys']:
tagObj[t]['tag_count'] += 1
tagObj[t]['keys'].add(item_key)
setattr(site, tag, OrderedDict(sorted(getattr(site, tag, {}).items())))
def parse_item(entry, entry_tags=[], meta={}, meta_tags=[]):
updated = entry.get('updated') or date(1970, 1, 1)
if isinstance(updated, str):
updated = datetime.strptime(updated, "%Y-%m-%d").date()
result = dict(entry,
new=(date.today() - updated) < timedelta(days=30),
tags=parse_tags(entry, entry_tags) + parse_tags(meta, meta_tags),
updated=updated)
if "repo" in result:
# Try to add extra repo information, like icons, badges
repo_parsed = urlparse(result["repo"])
domain = repo_parsed.netloc
ext = os.path.splitext(result["repo"])[1]
if "github.com" in domain:
try:
# https://github.com/<user>/<repo>
_, user, repo, *_ = repo_parsed.path.split("/")
except ValueError:
result["repoiconname"] = "github"
result["repoiconstyle"] = "fab"
result["repotitle"] = "GitHub"
else:
result["repobadge"] = f'<img class="badge lazyload" alt="GitHub stars" data-src="https://img.shields.io/github/stars/{user}/{repo}?style=flat-square&logo=github" src="https://img.shields.io/badge/stars-%3F-blue?style=flat-square&logo=github">'
elif (".google.com" in domain or
"googlecode.com" in domain):
result["repoiconname"] = "google"
result["repoiconstyle"] = "fab"
result["repotitle"] = "Google Code"
elif "bitbucket.org" in domain:
result["repoiconname"] = "bitbucket"
result["repoiconstyle"] = "fab"
result["repotitle"] = "Bitbucket"
elif "gitlab.com" in domain or domain.startswith("gitlab."):
result["repoiconname"] = "gitlab"
result["repoiconstyle"] = "fab"
result["repotitle"] = "GitLab"
elif "sourceforge.net" in domain:
try:
# https://sourceforge.net/projects/<repo>
_, _, repo, *_ = repo_parsed.path.split("/")
except ValueError:
pass
else:
result["repobadge"] = f'<img class="badge lazyload" alt="Sourceforge downloads" data-src="https://img.shields.io/sourceforge/dt/{repo}?style=flat-square" src="https://img.shields.io/badge/downloads-%3F-brightgreen?style=flat-square">'
elif ext in (".gz", ".zip", ".tar", ".tgz", ".tbz2", ".bz2", ".xz", ".rar"):
result["repoiconname"] = "box"
result["repoiconstyle"] = "fas"
result["repotitle"] = "Archive"
return result
def parse_items(site, item, key):
if not (item.get(key) and validate(item, key)):
return
if not getattr(site, key, False):
setattr(site, key, [])
meta_tags = ['genre', 'subgenre', 'theme']
game_tags = [
'status',
'development',
'lang',
'framework',
'content',
'license',
'multiplayer',
'type'
]
meta = item.get('meta', {})
meta["names_ascii"] = parse_unicode(names(item))
meta["external"] = item.get('external', {})
parse_global_tags(site, meta, 'genre', item['name'])
parse_global_tags(site, meta, 'subgenre', item['name'])
parse_global_tags(site, meta, 'theme', item['name'])
parse_fn = partial(parse_item, entry_tags=game_tags, meta=meta, meta_tags=meta_tags)
for game in item[key]:
parse_global_tags(site, game, 'lang', game['name'])
item = (names(item), meta, [parse_fn(i) for i in item[key]])
getattr(site, key).append(item)
def show_error(game_name, error_str):
print(f'\033[91m {game_name}\033[0m')
print(f' {error_str}')
def show_errors(errors):
print('\n')
for error in errors:
show_error(error["name"], error["error"])
print(f'\n {len(errors)} errors\n')
sys.exit(1)
def show_validation_errors(data, validation_errors):
errors = []
for error in validation_errors:
path = error.path.split('/')
game = data[int(path[1])]
name = game_name(game)
errors.append({"name": name, "error": error.__repr__()})
show_errors(errors)
def validate_with_schema(source_data, schema_file):
core = Core(source_data=source_data, schema_files=[schema_file])
try:
core.validate(raise_exception=True)
except Exception as error:
if len(core.errors) > 0:
show_validation_errors(source_data, core.errors)
else:
raise error
def parse_data(site):
base = op.dirname(__file__)
originals = []
for fn in os.listdir(op.join(base, 'originals')):
if fn.endswith('.yaml'):
originals.extend(yaml.safe_load(open(op.join(base, 'originals', fn), encoding="utf-8")))
def sort_key(game):
name = game_name(game)
# Always sort SCUMM first
if name == 'SCUMM':
return '0'
if name.startswith('The '):
return name[4:]
return name
originals = natsorted(originals, key=sort_key, alg=ns.IGNORECASE)
print(str(len(originals)) + ' games in total')
validate_with_schema(originals, 'schema/originals.yaml')
clones = []
for fn in sorted(os.listdir(op.join(base, 'games'))):
if fn.endswith('.yaml'):
clones.extend(yaml.safe_load(open(op.join(base, 'games', fn), encoding="utf-8")))
print(str(len(clones)) + ' clones in total')
validate_with_schema(clones, 'schema/games.yaml')
errors = []
originals_map = {}
for item in originals:
name = game_name(item)
if name in originals_map:
errors.append({
"name": name,
"error": "Duplicate original game '%s'" % name
})
originals_map[name] = item
if len(errors) > 0:
show_errors(errors)
for clone in clones:
if 'originals' not in clone:
show_errors([{
"name": clone["name"],
"error": "Unable to find 'remakes' or 'clones' in game"
}])
for original in clone['originals']:
if original not in originals_map:
errors.append({
"name": clone["name"],
"error": "Original game '%s' not found" % original
})
if "updated" not in clone:
print(f"{clone['name']} has no updated field")
else:
if isinstance(clone['updated'], str):
clone['updated'] = datetime.strptime(clone['updated'], "%Y-%m-%d").date()
if "status" not in clone:
print(f"{clone['name']} has no status field")
oldest_games = sorted([(clone['name'], clone['updated']) for clone in clones if 'updated' in clone], key=lambda x: x[1])[:5]
print(f"Oldest 5 games: {oldest_games}")
if len(errors) > 0:
show_errors(errors)
for item in originals:
# Recombine originals and clones
combined = copy.deepcopy(item)
name = game_name(combined)
combined['games'] = [
clone for clone in clones
if name in clone['originals']
]
parse_items(site, combined, 'games')
| true | true |
7901eb0fa2d34276c12c21fe340944d342e953ee | 12,417 | py | Python | lithops/job/job.py | pablogs98/lithops | ab73af96d168b17594477d36195c652ec58199da | [
"Apache-2.0"
] | null | null | null | lithops/job/job.py | pablogs98/lithops | ab73af96d168b17594477d36195c652ec58199da | [
"Apache-2.0"
] | null | null | null | lithops/job/job.py | pablogs98/lithops | ab73af96d168b17594477d36195c652ec58199da | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 PyWren Team
# (C) Copyright IBM Corp. 2020
# (C) Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import pickle
import logging
from lithops import utils
from lithops.job.partitioner import create_partitions
from lithops.utils import is_object_processing_function, sizeof_fmt
from lithops.storage.utils import create_func_key, create_agg_data_key
from lithops.job.serialize import SerializeIndependent, create_module_data
from lithops.constants import MAX_AGG_DATA_SIZE, JOBS_PREFIX, LOCALHOST,\
SERVERLESS, STANDALONE, LITHOPS_TEMP_DIR
from types import SimpleNamespace
import os
import hashlib
import inspect
from lithops.utils import b64str_to_bytes
logger = logging.getLogger(__name__)
def create_map_job(config, internal_storage, executor_id, job_id, map_function,
                   iterdata, runtime_meta, runtime_memory, extra_env,
                   include_modules, exclude_modules, execution_timeout,
                   extra_args=None, obj_chunk_size=None, obj_chunk_number=None,
                   invoke_pool_threads=128):
    """Build a map job.

    When *map_function* is an object-processing function, the iterdata is
    first expanded into object-storage partitions (per obj_chunk_size /
    obj_chunk_number) before the job is created.
    """
    job_metadata = {'host_job_create_tstamp': time.time()}
    call_iterdata = utils.verify_args(map_function, iterdata, extra_args)

    if config['lithops'].get('rabbitmq_monitor', False):
        amqp_url = config['rabbitmq'].get('amqp_url')
        utils.create_rabbitmq_resources(amqp_url, executor_id, job_id)

    # Object processing: replace the iterdata with per-partition work items.
    obj_parts = None
    if is_object_processing_function(map_function):
        partitions_started = time.time()
        logger.debug('ExecutorID {} | JobID {} - Calling map on partitions '
                     'from object storage flow'.format(executor_id, job_id))
        call_iterdata, obj_parts = create_partitions(
            config, internal_storage, call_iterdata,
            obj_chunk_size, obj_chunk_number)
        job_metadata['host_job_create_partitions_time'] = round(
            time.time() - partitions_started, 6)

    job = _create_job(config=config,
                      internal_storage=internal_storage,
                      executor_id=executor_id,
                      job_id=job_id,
                      func=map_function,
                      iterdata=call_iterdata,
                      runtime_meta=runtime_meta,
                      runtime_memory=runtime_memory,
                      extra_env=extra_env,
                      include_modules=include_modules,
                      exclude_modules=exclude_modules,
                      execution_timeout=execution_timeout,
                      host_job_meta=job_metadata,
                      invoke_pool_threads=invoke_pool_threads)

    if obj_parts:
        job.parts_per_object = obj_parts

    return job
def create_reduce_job(config, internal_storage, executor_id, reduce_job_id,
                      reduce_function, map_job, map_futures, runtime_meta,
                      runtime_memory, reducer_one_per_object, extra_env,
                      include_modules, exclude_modules, execution_timeout=None):
    """Build a reduce job that applies *reduce_function* across the futures
    of a finished map job — either one reducer over all futures, or one per
    processed storage object when *reducer_one_per_object* is set."""
    job_metadata = {'host_job_create_tstamp': time.time()}

    if hasattr(map_job, 'parts_per_object') and reducer_one_per_object:
        # Slice the futures list into the per-object partition groups that
        # the map job recorded, one reducer invocation per group.
        iterdata = []
        offset = 0
        for n_parts in map_job.parts_per_object:
            iterdata.append((map_futures[offset:offset + n_parts],))
            offset += n_parts
    else:
        iterdata = [(map_futures, )]

    # Mark the invocation as a reduce job through the environment.
    env = {} if extra_env is None else extra_env.copy()
    env['__LITHOPS_REDUCE_JOB'] = True

    iterdata = utils.verify_args(reduce_function, iterdata, None)

    return _create_job(config=config,
                       internal_storage=internal_storage,
                       executor_id=executor_id,
                       job_id=reduce_job_id,
                       func=reduce_function,
                       iterdata=iterdata,
                       runtime_meta=runtime_meta,
                       runtime_memory=runtime_memory,
                       extra_env=env,
                       include_modules=include_modules,
                       exclude_modules=exclude_modules,
                       execution_timeout=execution_timeout,
                       host_job_meta=job_metadata)
def _store_func_and_modules(func_key, func_str, module_data):
    """Store the serialized function and its module dependencies in the
    local temp directory so they can later be baked into an optimized
    (customized) runtime image.

    :param func_key: storage-style key; used as the path under
        LITHOPS_TEMP_DIR.
    :param func_str: pickled function bytes.
    :param module_data: mapping of module filename -> base64-encoded source.
    """
    # save function
    func_path = '/'.join([LITHOPS_TEMP_DIR, func_key])
    os.makedirs(os.path.dirname(func_path), exist_ok=True)
    with open(func_path, "wb") as f:
        f.write(func_str)

    if module_data:
        logger.debug("Writing Function dependencies to local disk")
        modules_path = '/'.join([os.path.dirname(func_path), 'modules'])
        for m_filename, m_data in module_data.items():
            m_path = os.path.dirname(m_filename)

            # Strip a leading '/' so the module lands under modules_path.
            if len(m_path) > 0 and m_path[0] == "/":
                m_path = m_path[1:]
            to_make = os.path.join(modules_path, m_path)
            # exist_ok=True replaces the old errno-17 (EEXIST) try/except
            # and matches the makedirs call above.
            os.makedirs(to_make, exist_ok=True)
            full_filename = os.path.join(to_make, os.path.basename(m_filename))
            with open(full_filename, 'wb') as fid:
                fid.write(b64str_to_bytes(m_data))

    logger.debug("Finished storing function and modules")
def _create_job(config, internal_storage, executor_id, job_id, func,
                iterdata, runtime_meta, runtime_memory, extra_env,
                include_modules, exclude_modules, execution_timeout,
                host_job_meta, invoke_pool_threads=128):
    """
    Serialize the function and its data, upload both to storage, and return
    a job namespace ready for the invoker.

    :param func: the function to map over the data
    :param iterdata: An iterable of input data
    :param extra_env: Additional environment variables for CF environment. Default None.
    :param include_modules: Explicitly include these modules in the pickled dependencies.
    :param exclude_modules: Explicitly keep these modules from pickled dependencies.
    :param invoke_pool_threads: Number of threads to use to invoke.
    :return: A SimpleNamespace describing the job (func/data keys, ranges,
        timeouts and host-side timing metadata).
    """
    ext_env = {} if extra_env is None else extra_env.copy()
    if ext_env:
        ext_env = utils.convert_bools_to_string(ext_env)
        logger.debug("Extra environment vars {}".format(ext_env))

    job = SimpleNamespace()
    job.executor_id = executor_id
    job.job_id = job_id
    job.extra_env = ext_env
    job.execution_timeout = execution_timeout or config['lithops']['execution_timeout']
    job.function_name = func.__name__
    job.total_calls = len(iterdata)

    mode = config['lithops']['mode']

    # Clamp the execution timeout below the backend's own hard timeout so
    # the function can finish gracefully before the platform kills it.
    if mode == SERVERLESS:
        job.invoke_pool_threads = invoke_pool_threads
        job.runtime_memory = runtime_memory or config['serverless']['runtime_memory']
        job.runtime_timeout = config['serverless']['runtime_timeout']
        if job.execution_timeout >= job.runtime_timeout:
            job.execution_timeout = job.runtime_timeout - 5

    elif mode == STANDALONE:
        job.runtime_memory = None
        runtime_timeout = config['standalone']['hard_dismantle_timeout']
        if job.execution_timeout >= runtime_timeout:
            job.execution_timeout = runtime_timeout - 10

    elif mode == LOCALHOST:
        job.runtime_memory = None
        job.runtime_timeout = execution_timeout

    # Merge config-level and call-level include/exclude module lists.
    exclude_modules_cfg = config['lithops'].get('exclude_modules', [])
    include_modules_cfg = config['lithops'].get('include_modules', [])

    exc_modules = set()
    inc_modules = set()
    if exclude_modules_cfg:
        exc_modules.update(exclude_modules_cfg)
    if exclude_modules:
        exc_modules.update(exclude_modules)
    # NOTE(review): include_modules_cfg comes from .get(..., []) above, so
    # the 'is None' branches below only trigger when the config explicitly
    # sets include_modules to null; inc_modules = None appears to mean
    # "include everything" downstream — TODO confirm against the serializer.
    if include_modules_cfg is not None:
        inc_modules.update(include_modules_cfg)
    if include_modules_cfg is None and not include_modules:
        inc_modules = None
    if include_modules is not None and include_modules:
        inc_modules.update(include_modules)
    if include_modules is None:
        inc_modules = None

    logger.debug('ExecutorID {} | JobID {} - Serializing function and data'.format(executor_id, job_id))
    job_serialize_start = time.time()
    serializer = SerializeIndependent(runtime_meta['preinstalls'])
    func_and_data_ser, mod_paths = serializer([func] + iterdata, inc_modules, exc_modules)
    data_strs = func_and_data_ser[1:]
    data_size_bytes = sum(len(x) for x in data_strs)
    module_data = create_module_data(mod_paths)
    func_str = func_and_data_ser[0]
    func_module_str = pickle.dumps({'func': func_str, 'module_data': module_data}, -1)
    func_module_size_bytes = len(func_module_str)
    total_size = utils.sizeof_fmt(data_size_bytes+func_module_size_bytes)
    host_job_meta['host_job_serialize_time'] = round(time.time()-job_serialize_start, 6)

    host_job_meta['data_size_bytes'] = data_size_bytes
    host_job_meta['func_module_size_bytes'] = func_module_size_bytes

    if 'data_limit' in config['lithops']:
        data_limit = config['lithops']['data_limit']
    else:
        data_limit = MAX_AGG_DATA_SIZE

    if data_limit and data_size_bytes > data_limit*1024**2:
        log_msg = ('ExecutorID {} | JobID {} - Total data exceeded maximum size '
                   'of {}'.format(executor_id, job_id, sizeof_fmt(data_limit*1024**2)))
        raise Exception(log_msg)

    logger.info('ExecutorID {} | JobID {} - Uploading function and data '
                '- Total: {}'.format(executor_id, job_id, total_size))

    # Upload data: all call payloads aggregated into one object, with
    # per-call byte ranges recorded on the job.
    data_key = create_agg_data_key(JOBS_PREFIX, executor_id, job_id)
    job.data_key = data_key
    data_bytes, data_ranges = utils.agg_data(data_strs)
    job.data_ranges = data_ranges
    data_upload_start = time.time()
    internal_storage.put_data(data_key, data_bytes)
    data_upload_end = time.time()
    host_job_meta['host_data_upload_time'] = round(data_upload_end-data_upload_start, 6)

    func_upload_start = time.time()

    # Upload function and modules
    if config[mode].get('customized_runtime'):
        # Prepare function and modules locally to store in the runtime
        # image later. The key is content-addressed so identical code
        # reuses the same customized runtime.
        function_file = func.__code__.co_filename
        # Read through a context manager so the file handle is closed
        # (the previous code leaked it).
        with open(function_file, 'rb') as fp:
            function_hash = hashlib.md5(fp.read()).hexdigest()[:16]
        mod_hash = hashlib.md5(repr(sorted(mod_paths)).encode('utf-8')).hexdigest()[:16]

        uuid = f'{function_hash}{mod_hash}'
        func_key = create_func_key(JOBS_PREFIX, uuid, "")

        _store_func_and_modules(func_key, func_str, module_data)

        job.ext_runtime_uuid = uuid
    else:
        func_key = create_func_key(JOBS_PREFIX, executor_id, job_id)
        internal_storage.put_func(func_key, func_module_str)

    job.func_key = func_key
    func_upload_end = time.time()

    host_job_meta['host_func_upload_time'] = round(func_upload_end - func_upload_start, 6)

    host_job_meta['host_job_created_time'] = round(time.time() - host_job_meta['host_job_create_tstamp'], 6)

    job.metadata = host_job_meta

    return job
| 41.115894 | 108 | 0.67464 |
import time
import pickle
import logging
from lithops import utils
from lithops.job.partitioner import create_partitions
from lithops.utils import is_object_processing_function, sizeof_fmt
from lithops.storage.utils import create_func_key, create_agg_data_key
from lithops.job.serialize import SerializeIndependent, create_module_data
from lithops.constants import MAX_AGG_DATA_SIZE, JOBS_PREFIX, LOCALHOST,\
SERVERLESS, STANDALONE, LITHOPS_TEMP_DIR
from types import SimpleNamespace
import os
import hashlib
import inspect
from lithops.utils import b64str_to_bytes
logger = logging.getLogger(__name__)
def create_map_job(config, internal_storage, executor_id, job_id, map_function,
iterdata, runtime_meta, runtime_memory, extra_env,
include_modules, exclude_modules, execution_timeout,
extra_args=None, obj_chunk_size=None, obj_chunk_number=None,
invoke_pool_threads=128):
host_job_meta = {'host_job_create_tstamp': time.time()}
map_iterdata = utils.verify_args(map_function, iterdata, extra_args)
if config['lithops'].get('rabbitmq_monitor', False):
rabbit_amqp_url = config['rabbitmq'].get('amqp_url')
utils.create_rabbitmq_resources(rabbit_amqp_url, executor_id, job_id)
parts_per_object = None
if is_object_processing_function(map_function):
create_partitions_start = time.time()
logger.debug('ExecutorID {} | JobID {} - Calling map on partitions '
'from object storage flow'.format(executor_id, job_id))
map_iterdata, parts_per_object = create_partitions(config, internal_storage,
map_iterdata, obj_chunk_size,
obj_chunk_number)
host_job_meta['host_job_create_partitions_time'] = round(time.time()-create_partitions_start, 6)
g,
internal_storage=internal_storage,
executor_id=executor_id,
job_id=job_id,
func=map_function,
iterdata=map_iterdata,
runtime_meta=runtime_meta,
runtime_memory=runtime_memory,
extra_env=extra_env,
include_modules=include_modules,
exclude_modules=exclude_modules,
execution_timeout=execution_timeout,
host_job_meta=host_job_meta,
invoke_pool_threads=invoke_pool_threads)
if parts_per_object:
job.parts_per_object = parts_per_object
return job
def create_reduce_job(config, internal_storage, executor_id, reduce_job_id,
reduce_function, map_job, map_futures, runtime_meta,
runtime_memory, reducer_one_per_object, extra_env,
include_modules, exclude_modules, execution_timeout=None):
host_job_meta = {'host_job_create_tstamp': time.time()}
iterdata = [(map_futures, )]
if hasattr(map_job, 'parts_per_object') and reducer_one_per_object:
prev_total_partitons = 0
iterdata = []
for total_partitions in map_job.parts_per_object:
iterdata.append((map_futures[prev_total_partitons:prev_total_partitons+total_partitions],))
prev_total_partitons += total_partitions
reduce_job_env = {'__LITHOPS_REDUCE_JOB': True}
if extra_env is None:
ext_env = reduce_job_env
else:
ext_env = extra_env.copy()
ext_env.update(reduce_job_env)
iterdata = utils.verify_args(reduce_function, iterdata, None)
return _create_job(config=config,
internal_storage=internal_storage,
executor_id=executor_id,
job_id=reduce_job_id,
func=reduce_function,
iterdata=iterdata,
runtime_meta=runtime_meta,
runtime_memory=runtime_memory,
extra_env=ext_env,
include_modules=include_modules,
exclude_modules=exclude_modules,
execution_timeout=execution_timeout,
host_job_meta=host_job_meta)
def _store_func_and_modules(func_key, func_str, module_data):
func_path = '/'.join([LITHOPS_TEMP_DIR, func_key])
os.makedirs(os.path.dirname(func_path), exist_ok=True)
with open(func_path, "wb") as f:
f.write(func_str)
if module_data:
logger.debug("Writing Function dependencies to local disk")
modules_path = '/'.join([os.path.dirname(func_path), 'modules'])
for m_filename, m_data in module_data.items():
m_path = os.path.dirname(m_filename)
if len(m_path) > 0 and m_path[0] == "/":
m_path = m_path[1:]
to_make = os.path.join(modules_path, m_path)
try:
os.makedirs(to_make)
except OSError as e:
if e.errno == 17:
pass
else:
raise e
full_filename = os.path.join(to_make, os.path.basename(m_filename))
with open(full_filename, 'wb') as fid:
fid.write(b64str_to_bytes(m_data))
logger.debug("Finished storing function and modules")
def _create_job(config, internal_storage, executor_id, job_id, func,
iterdata, runtime_meta, runtime_memory, extra_env,
include_modules, exclude_modules, execution_timeout,
host_job_meta, invoke_pool_threads=128):
ext_env = {} if extra_env is None else extra_env.copy()
if ext_env:
ext_env = utils.convert_bools_to_string(ext_env)
logger.debug("Extra environment vars {}".format(ext_env))
job = SimpleNamespace()
job.executor_id = executor_id
job.job_id = job_id
job.extra_env = ext_env
job.execution_timeout = execution_timeout or config['lithops']['execution_timeout']
job.function_name = func.__name__
job.total_calls = len(iterdata)
mode = config['lithops']['mode']
if mode == SERVERLESS:
job.invoke_pool_threads = invoke_pool_threads
job.runtime_memory = runtime_memory or config['serverless']['runtime_memory']
job.runtime_timeout = config['serverless']['runtime_timeout']
if job.execution_timeout >= job.runtime_timeout:
job.execution_timeout = job.runtime_timeout - 5
elif mode == STANDALONE:
job.runtime_memory = None
runtime_timeout = config['standalone']['hard_dismantle_timeout']
if job.execution_timeout >= runtime_timeout:
job.execution_timeout = runtime_timeout - 10
elif mode == LOCALHOST:
job.runtime_memory = None
job.runtime_timeout = execution_timeout
exclude_modules_cfg = config['lithops'].get('exclude_modules', [])
include_modules_cfg = config['lithops'].get('include_modules', [])
exc_modules = set()
inc_modules = set()
if exclude_modules_cfg:
exc_modules.update(exclude_modules_cfg)
if exclude_modules:
exc_modules.update(exclude_modules)
if include_modules_cfg is not None:
inc_modules.update(include_modules_cfg)
if include_modules_cfg is None and not include_modules:
inc_modules = None
if include_modules is not None and include_modules:
inc_modules.update(include_modules)
if include_modules is None:
inc_modules = None
logger.debug('ExecutorID {} | JobID {} - Serializing function and data'.format(executor_id, job_id))
job_serialize_start = time.time()
serializer = SerializeIndependent(runtime_meta['preinstalls'])
func_and_data_ser, mod_paths = serializer([func] + iterdata, inc_modules, exc_modules)
data_strs = func_and_data_ser[1:]
data_size_bytes = sum(len(x) for x in data_strs)
module_data = create_module_data(mod_paths)
func_str = func_and_data_ser[0]
func_module_str = pickle.dumps({'func': func_str, 'module_data': module_data}, -1)
func_module_size_bytes = len(func_module_str)
total_size = utils.sizeof_fmt(data_size_bytes+func_module_size_bytes)
host_job_meta['host_job_serialize_time'] = round(time.time()-job_serialize_start, 6)
host_job_meta['data_size_bytes'] = data_size_bytes
host_job_meta['func_module_size_bytes'] = func_module_size_bytes
if 'data_limit' in config['lithops']:
data_limit = config['lithops']['data_limit']
else:
data_limit = MAX_AGG_DATA_SIZE
if data_limit and data_size_bytes > data_limit*1024**2:
log_msg = ('ExecutorID {} | JobID {} - Total data exceeded maximum size '
'of {}'.format(executor_id, job_id, sizeof_fmt(data_limit*1024**2)))
raise Exception(log_msg)
logger.info('ExecutorID {} | JobID {} - Uploading function and data '
'- Total: {}'.format(executor_id, job_id, total_size))
data_key = create_agg_data_key(JOBS_PREFIX, executor_id, job_id)
job.data_key = data_key
data_bytes, data_ranges = utils.agg_data(data_strs)
job.data_ranges = data_ranges
data_upload_start = time.time()
internal_storage.put_data(data_key, data_bytes)
data_upload_end = time.time()
host_job_meta['host_data_upload_time'] = round(data_upload_end-data_upload_start, 6)
func_upload_start = time.time()
if config[mode].get('customized_runtime'):
function_file = func.__code__.co_filename
function_hash = hashlib.md5(open(function_file,'rb').read()).hexdigest()[:16]
mod_hash = hashlib.md5(repr(sorted(mod_paths)).encode('utf-8')).hexdigest()[:16]
uuid = f'{function_hash}{mod_hash}'
func_key = create_func_key(JOBS_PREFIX, uuid, "")
_store_func_and_modules(func_key, func_str, module_data)
job.ext_runtime_uuid = uuid
else:
func_key = create_func_key(JOBS_PREFIX, executor_id, job_id)
internal_storage.put_func(func_key, func_module_str)
job.func_key = func_key
func_upload_end = time.time()
host_job_meta['host_func_upload_time'] = round(func_upload_end - func_upload_start, 6)
host_job_meta['host_job_created_time'] = round(time.time() - host_job_meta['host_job_create_tstamp'], 6)
job.metadata = host_job_meta
return job
| true | true |
7901eb0fc2ac903ef84d28f8fdfcc366ca81912e | 811 | py | Python | Code/word_jumble.py | Nyapal/CS1.3 | 2e10953bd141d02694ac90e50c0af3f40f8c0924 | [
"MIT"
] | null | null | null | Code/word_jumble.py | Nyapal/CS1.3 | 2e10953bd141d02694ac90e50c0af3f40f8c0924 | [
"MIT"
] | 4 | 2020-02-18T00:09:46.000Z | 2020-03-10T21:05:48.000Z | Code/word_jumble.py | Nyapal/CS1.3 | 2e10953bd141d02694ac90e50c0af3f40f8c0924 | [
"MIT"
] | null | null | null | class Jumble(object):
    def __init__(self):
        # Build the sorted-letters -> word lookup table once at construction.
        self.dict = self.make_dict()
def make_dict(self):
dic = {}
f = open('/usr/share/dict/words', 'r')
for word in f:
word = word.strip().lower()
sort = ''.join(sorted(word))
dic[sort] = word
return dic
def unjumble(self, lst):
for word in lst:
word = word.strip().lower()
sorted_word = "".join(sorted(word))
if sorted_word in self.dict:
self.dict[sorted_word]
else:
return None
if __name__ == "__main__":
f_list = ['prouot', 'laurr', 'jobum', 'lethem']
s_list = ['siconu', 'tefon', 'tarfd', 'laisa']
t_list = ['sokik', 'niumem', 'tenjuk', 'doore']
unjumble = Jumble()
print(unjumble.unjumble(f_list))
print(unjumble.unjumble(s_list))
print(unjumble.unjumble(t_list)) | 26.16129 | 49 | 0.59926 | class Jumble(object):
def __init__(self):
self.dict = self.make_dict()
def make_dict(self):
dic = {}
f = open('/usr/share/dict/words', 'r')
for word in f:
word = word.strip().lower()
sort = ''.join(sorted(word))
dic[sort] = word
return dic
def unjumble(self, lst):
for word in lst:
word = word.strip().lower()
sorted_word = "".join(sorted(word))
if sorted_word in self.dict:
self.dict[sorted_word]
else:
return None
if __name__ == "__main__":
f_list = ['prouot', 'laurr', 'jobum', 'lethem']
s_list = ['siconu', 'tefon', 'tarfd', 'laisa']
t_list = ['sokik', 'niumem', 'tenjuk', 'doore']
unjumble = Jumble()
print(unjumble.unjumble(f_list))
print(unjumble.unjumble(s_list))
print(unjumble.unjumble(t_list)) | true | true |
7901eb4e813bc62439b385f6f317294cffa84a67 | 3,862 | py | Python | env/Lib/site-packages/sqlalchemy/dialects/oracle/provision.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | env/Lib/site-packages/sqlalchemy/dialects/oracle/provision.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | env/Lib/site-packages/sqlalchemy/dialects/oracle/provision.py | aammjian/cotton | f72b814f795f79a4054688e465c8b0ae5560f3b7 | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | from ... import create_engine
from ... import exc
from ...engine import url as sa_url
from ...testing.provision import configure_follower
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import follower_url_from_main
from ...testing.provision import log
from ...testing.provision import run_reap_dbs
from ...testing.provision import temp_table_keyword_args
from ...testing.provision import update_db_opts
@create_db.for_db("oracle")
def _oracle_create_db(cfg, eng, ident):
# NOTE: make sure you've run "ALTER DATABASE default tablespace users" or
# similar, so that the default tablespace is not "system"; reflection will
# fail otherwise
with eng.connect() as conn:
conn.execute("create user %s identified by xe" % ident)
conn.execute("create user %s_ts1 identified by xe" % ident)
conn.execute("create user %s_ts2 identified by xe" % ident)
conn.execute("grant dba to %s" % (ident,))
conn.execute("grant unlimited tablespace to %s" % ident)
conn.execute("grant unlimited tablespace to %s_ts1" % ident)
conn.execute("grant unlimited tablespace to %s_ts2" % ident)
@configure_follower.for_db("oracle")
def _oracle_configure_follower(config, ident):
config.test_schema = "%s_ts1" % ident
config.test_schema_2 = "%s_ts2" % ident
def _ora_drop_ignore(conn, dbname):
try:
conn.execute("drop user %s cascade" % dbname)
log.info("Reaped db: %s", dbname)
return True
except exc.DatabaseError as err:
log.warning("couldn't drop db: %s", err)
return False
@drop_db.for_db("oracle")
def _oracle_drop_db(cfg, eng, ident):
with eng.connect() as conn:
# cx_Oracle seems to occasionally leak open connections when a large
# suite it run, even if we confirm we have zero references to
# connection objects.
# while there is a "kill session" command in Oracle,
# it unfortunately does not release the connection sufficiently.
_ora_drop_ignore(conn, ident)
_ora_drop_ignore(conn, "%s_ts1" % ident)
_ora_drop_ignore(conn, "%s_ts2" % ident)
@update_db_opts.for_db("oracle")
def _oracle_update_db_opts(db_url, db_opts):
pass
@run_reap_dbs.for_db("oracle")
def _reap_oracle_dbs(url, idents):
log.info("db reaper connecting to %r", url)
eng = create_engine(url)
with eng.connect() as conn:
log.info("identifiers in file: %s", ", ".join(idents))
to_reap = conn.execute(
"select u.username from all_users u where username "
"like 'TEST_%' and not exists (select username "
"from v$session where username=u.username)"
)
all_names = {username.lower() for (username,) in to_reap}
to_drop = set()
for name in all_names:
if name.endswith("_ts1") or name.endswith("_ts2"):
continue
elif name in idents:
to_drop.add(name)
if "%s_ts1" % name in all_names:
to_drop.add("%s_ts1" % name)
if "%s_ts2" % name in all_names:
to_drop.add("%s_ts2" % name)
dropped = total = 0
for total, username in enumerate(to_drop, 1):
if _ora_drop_ignore(conn, username):
dropped += 1
log.info(
"Dropped %d out of %d stale databases detected", dropped, total
)
@follower_url_from_main.for_db("oracle")
def _oracle_follower_url_from_main(url, ident):
url = sa_url.make_url(url)
url.username = ident
url.password = "xe"
return url
@temp_table_keyword_args.for_db("oracle")
def _oracle_temp_table_keyword_args(cfg, eng):
return {
"prefixes": ["GLOBAL TEMPORARY"],
"oracle_on_commit": "PRESERVE ROWS",
}
| 34.792793 | 78 | 0.651217 | from ... import create_engine
from ... import exc
from ...engine import url as sa_url
from ...testing.provision import configure_follower
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import follower_url_from_main
from ...testing.provision import log
from ...testing.provision import run_reap_dbs
from ...testing.provision import temp_table_keyword_args
from ...testing.provision import update_db_opts
@create_db.for_db("oracle")
def _oracle_create_db(cfg, eng, ident):
# similar, so that the default tablespace is not "system"; reflection will
# fail otherwise
with eng.connect() as conn:
conn.execute("create user %s identified by xe" % ident)
conn.execute("create user %s_ts1 identified by xe" % ident)
conn.execute("create user %s_ts2 identified by xe" % ident)
conn.execute("grant dba to %s" % (ident,))
conn.execute("grant unlimited tablespace to %s" % ident)
conn.execute("grant unlimited tablespace to %s_ts1" % ident)
conn.execute("grant unlimited tablespace to %s_ts2" % ident)
@configure_follower.for_db("oracle")
def _oracle_configure_follower(config, ident):
config.test_schema = "%s_ts1" % ident
config.test_schema_2 = "%s_ts2" % ident
def _ora_drop_ignore(conn, dbname):
try:
conn.execute("drop user %s cascade" % dbname)
log.info("Reaped db: %s", dbname)
return True
except exc.DatabaseError as err:
log.warning("couldn't drop db: %s", err)
return False
@drop_db.for_db("oracle")
def _oracle_drop_db(cfg, eng, ident):
with eng.connect() as conn:
_ora_drop_ignore(conn, ident)
_ora_drop_ignore(conn, "%s_ts1" % ident)
_ora_drop_ignore(conn, "%s_ts2" % ident)
@update_db_opts.for_db("oracle")
def _oracle_update_db_opts(db_url, db_opts):
pass
@run_reap_dbs.for_db("oracle")
def _reap_oracle_dbs(url, idents):
log.info("db reaper connecting to %r", url)
eng = create_engine(url)
with eng.connect() as conn:
log.info("identifiers in file: %s", ", ".join(idents))
to_reap = conn.execute(
"select u.username from all_users u where username "
"like 'TEST_%' and not exists (select username "
"from v$session where username=u.username)"
)
all_names = {username.lower() for (username,) in to_reap}
to_drop = set()
for name in all_names:
if name.endswith("_ts1") or name.endswith("_ts2"):
continue
elif name in idents:
to_drop.add(name)
if "%s_ts1" % name in all_names:
to_drop.add("%s_ts1" % name)
if "%s_ts2" % name in all_names:
to_drop.add("%s_ts2" % name)
dropped = total = 0
for total, username in enumerate(to_drop, 1):
if _ora_drop_ignore(conn, username):
dropped += 1
log.info(
"Dropped %d out of %d stale databases detected", dropped, total
)
@follower_url_from_main.for_db("oracle")
def _oracle_follower_url_from_main(url, ident):
url = sa_url.make_url(url)
url.username = ident
url.password = "xe"
return url
@temp_table_keyword_args.for_db("oracle")
def _oracle_temp_table_keyword_args(cfg, eng):
return {
"prefixes": ["GLOBAL TEMPORARY"],
"oracle_on_commit": "PRESERVE ROWS",
}
| true | true |
7901ec460b66a712ed8b79d133890a9061c52085 | 4,337 | py | Python | spiders/a55_crawl.py | senlyu163/crawler | ecf95f7b356c726922b5e5d90000fda3e16ae90d | [
"Apache-2.0"
] | null | null | null | spiders/a55_crawl.py | senlyu163/crawler | ecf95f7b356c726922b5e5d90000fda3e16ae90d | [
"Apache-2.0"
] | null | null | null | spiders/a55_crawl.py | senlyu163/crawler | ecf95f7b356c726922b5e5d90000fda3e16ae90d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_splash import SplashRequest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sys
from ..utils import extract_CN_from_content
from ..items import ScrapySpiderItem
import re
import requests
class A55CrawlSpider(CrawlSpider):
name = '55_crawl'
allowed_domains = ['fsx.sxxz.gov.cn']
# start_urls = ['http://fsx.sxxz.gov.cn/fsxzw/zwgk/xxgkzn/']
start_urls = ['http://fsx.sxxz.gov.cn/fsxzw/zwgk/wj/']
# start_urls = ['http://fsx.sxxz.gov.cn/fsxzw/zwgk/jgsz_6342/']
rules = (
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul//li'), follow=True),
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul/li[1]/dl//dd'), follow=True),
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul/li[5]/dl//dd'), follow=True),
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul/li[10]/dl//dd'), follow=True),
Rule(LinkExtractor(allow=r'/\d+/t\d+_\d+\.html'), callback='parse_item', follow=True),
Rule(LinkExtractor(restrict_xpaths='//*[@id="searchsection"]/div[2]/a[3]'), follow=True),
Rule(LinkExtractor(allow=r'index_\d+\.html/'), follow=True),
)
# def start_requests(self):
# for url in self.start_urls:
# yield scrapy.Request(url)
def _build_request(self, rule, link):
r = SplashRequest(url=link.url, callback=self._response_downloaded, args={"wait": 0.5})
r.meta.update(rule=rule, link_text=link.text)
return r
def _requests_to_follow(self, response):
# if not isinstance(response, HtmlResponse):
# return
seen = set()
for n, rule in enumerate(self._rules):
links = [lnk for lnk in rule.link_extractor.extract_links(response)
if lnk not in seen]
if links and rule.process_links:
links = rule.process_links(links)
for link in links:
seen.add(link)
r = self._build_request(n, link)
yield rule.process_request(r)
def parse_item(self, response):
try:
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('/html/body/div[2]/div/div/div[1]/p').extract_first()
date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/div[2]/div/div/div[1]/h2[1]/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="article-con"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
try:
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('/html/body/div[2]/div/div/div[2]/p').extract_first()
date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/div[2]/div/div/div[2]/h2/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="article-con"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
try:
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('/html/body/div[3]/div[2]/div/div[1]/ul/li[8]').extract_first()
date = re.search(r"(\d{4}年\d{2}月\d{2}日)", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/div[3]/div[2]/div/div[1]/ul/li[6]/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="article-body"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
pass
| 41.701923 | 121 | 0.572516 |
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_splash import SplashRequest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sys
from ..utils import extract_CN_from_content
from ..items import ScrapySpiderItem
import re
import requests
class A55CrawlSpider(CrawlSpider):
name = '55_crawl'
allowed_domains = ['fsx.sxxz.gov.cn']
start_urls = ['http://fsx.sxxz.gov.cn/fsxzw/zwgk/wj/']
rules = (
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul//li'), follow=True),
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul/li[1]/dl//dd'), follow=True),
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul/li[5]/dl//dd'), follow=True),
Rule(LinkExtractor(restrict_xpaths='/html/body/div[2]/div/div[1]/ul/li[5]/ul/li/ul/li[10]/dl//dd'), follow=True),
Rule(LinkExtractor(allow=r'/\d+/t\d+_\d+\.html'), callback='parse_item', follow=True),
Rule(LinkExtractor(restrict_xpaths='//*[@id="searchsection"]/div[2]/a[3]'), follow=True),
Rule(LinkExtractor(allow=r'index_\d+\.html/'), follow=True),
)
def _build_request(self, rule, link):
r = SplashRequest(url=link.url, callback=self._response_downloaded, args={"wait": 0.5})
r.meta.update(rule=rule, link_text=link.text)
return r
def _requests_to_follow(self, response):
seen = set()
for n, rule in enumerate(self._rules):
links = [lnk for lnk in rule.link_extractor.extract_links(response)
if lnk not in seen]
if links and rule.process_links:
links = rule.process_links(links)
for link in links:
seen.add(link)
r = self._build_request(n, link)
yield rule.process_request(r)
def parse_item(self, response):
try:
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('/html/body/div[2]/div/div/div[1]/p').extract_first()
date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/div[2]/div/div/div[1]/h2[1]/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="article-con"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
try:
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('/html/body/div[2]/div/div/div[2]/p').extract_first()
date = re.search(r"(\d{4}-\d{2}-\d{2})", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/div[2]/div/div/div[2]/h2/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="article-con"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
try:
item = ScrapySpiderItem()
item['url'] = response.url
date = response.xpath('/html/body/div[3]/div[2]/div/div[1]/ul/li[8]').extract_first()
date = re.search(r"(\d{4}年\d{2}月\d{2}日)", date).groups()[0]
item['date'] = date
title = response.xpath('/html/body/div[3]/div[2]/div/div[1]/ul/li[6]/text()').extract_first()
item['title'] = title
contents = response.xpath('//div[@class="article-body"]').extract()
item['contents'] = extract_CN_from_content(contents)
return item
except:
pass
| true | true |
7901ecb76b0a2b423dce5989be647783db851010 | 751 | py | Python | threathunter_common_python/test/testphone.py | threathunterX/python_lib | e2d4052de04c82cb7bccd08042f28db824cab442 | [
"Apache-2.0"
] | 2 | 2019-03-17T04:03:08.000Z | 2019-05-01T09:42:23.000Z | threathunter_common_python/test/testphone.py | threathunterX/python_lib | e2d4052de04c82cb7bccd08042f28db824cab442 | [
"Apache-2.0"
] | null | null | null | threathunter_common_python/test/testphone.py | threathunterX/python_lib | e2d4052de04c82cb7bccd08042f28db824cab442 | [
"Apache-2.0"
] | 4 | 2019-06-24T05:47:24.000Z | 2020-09-29T05:00:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from threathunter_common.geo.phonelocator import *
__author__ = "nebula"
def test_phone():
print check_phone_number("+13482345020", None)
assert check_phone_number("13482345020", 'CN')
assert not check_phone_number("+134823450", None)
print get_carrier("13482121123", 'CN')
print get_carrier("13815430576", 'CN')
print get_carrier("13093705423", 'CN')
print get_geo("13482121123", 'CN')
print get_geo("13815430576", 'CN')
print get_geo("13093705423", 'CN')
print 111, get_geo("020 8366 1177", "GB")
print 111, get_geo("+442083661177")
print phonenumbers.parse("020 8366 1177", "GB")
print phonenumbers.parse("+442083661177")
assert False
| 24.225806 | 53 | 0.679095 |
from threathunter_common.geo.phonelocator import *
__author__ = "nebula"
def test_phone():
print check_phone_number("+13482345020", None)
assert check_phone_number("13482345020", 'CN')
assert not check_phone_number("+134823450", None)
print get_carrier("13482121123", 'CN')
print get_carrier("13815430576", 'CN')
print get_carrier("13093705423", 'CN')
print get_geo("13482121123", 'CN')
print get_geo("13815430576", 'CN')
print get_geo("13093705423", 'CN')
print 111, get_geo("020 8366 1177", "GB")
print 111, get_geo("+442083661177")
print phonenumbers.parse("020 8366 1177", "GB")
print phonenumbers.parse("+442083661177")
assert False
| false | true |
7901ee355af4c47981a157ca6ec9680464390f97 | 11,439 | py | Python | Task2/Client_dev.py | Aiemu/CourseCN-Proj-RTP | 3cac199dcada9c96eaeb2f28fbfbe1b55d6bd02c | [
"MIT"
] | null | null | null | Task2/Client_dev.py | Aiemu/CourseCN-Proj-RTP | 3cac199dcada9c96eaeb2f28fbfbe1b55d6bd02c | [
"MIT"
] | null | null | null | Task2/Client_dev.py | Aiemu/CourseCN-Proj-RTP | 3cac199dcada9c96eaeb2f28fbfbe1b55d6bd02c | [
"MIT"
] | null | null | null | import socket, threading, sys, traceback, os, tkinter
from ui import Ui_MainWindow
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import QtCore, QtGui, QtWidgets
from tkinter import *
from PIL import Image, ImageTk
from tkinter import messagebox, Tk
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from RtpPacket import RtpPacket
RECV_SIZE = 20480 + 14
HIGHT = 500
CACHE_FILE_NAME = "cache-"
CACHE_FILE_EXT = ".jpg"
class Client:
INIT = 0
READY = 1
PLAYING = 2
state = INIT
SETUP = 0
PLAY = 1
PAUSE = 2
TEARDOWN = 3
FASTER = 4
SLOWER = 5
# Initiation..
def __init__(self, serveraddr, serverport, rtpport, filename):
self.page_main = Ui_MainWindow()
self.state == self.READY
self.serverAddr = serveraddr
self.serverPort = int(serverport)
self.rtpPort = int(rtpport)
self.fileName = filename
self.rtspSeq = 0
self.sessionId = 0
self.requestSent = -1
self.teardownAcked = 0
self.connectToServer()
self.frameNbr = 0
self.createWidgets()
def createWidgets(self):
app = QtWidgets.QApplication(sys.argv)
page_tmp = QtWidgets.QMainWindow()
self.page_main.setupUi(page_tmp)
page_tmp.show()
self.page_main.btn_setup.clicked.connect(lambda: self.setupMovie())
self.page_main.btn_play.clicked.connect(lambda: self.playMovie())
self.page_main.btn_pause.clicked.connect(lambda: self.pauseMovie())
self.page_main.btn_teardown.clicked.connect(lambda: self.exitClient())
self.page_main.btn_faster.clicked.connect(lambda: self.fasterMovie())
self.page_main.btn_slower.clicked.connect(lambda: self.slowerMovie())
sys.exit(app.exec_())
def fasterMovie(self):
"""Let movie faster."""
if self.state == self.PLAYING or self.state == self.READY:
self.sendRtspRequest(self.FASTER)
def slowerMovie(self):
"""Let movie slower."""
if self.state == self.PLAYING or self.state == self.READY:
self.sendRtspRequest(self.SLOWER)
def setupMovie(self):
"""Setup init."""
if self.state == self.INIT:
self.sendRtspRequest(self.SETUP)
def exitClient(self):
"""Teardown the client."""
self.sendRtspRequest(self.TEARDOWN)
sys.exit(0) # Close the gui window
print(os.remove(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT)) # Delete the cache image from video
def pauseMovie(self):
"""Pause movie."""
if self.state == self.PLAYING:
self.sendRtspRequest(self.PAUSE)
def playMovie(self):
"""Play movie."""
if self.state == self.READY:
# Create a new thread to listen for RTP packets
threading.Thread(target=self.listenRtp).start()
self.playEvent = threading.Event()
self.playEvent.clear()
self.sendRtspRequest(self.PLAY)
def listenRtp(self):
"""Listen for RTP packets."""
while 1:
try:
cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT
file = open(cachename, "wb+")
while 1:
data = self.rtpSocket.recv(RECV_SIZE)
if data:
rtpPacket = RtpPacket()
rtpPacket.decode(data)
# self.cutFrameList.append(rtpPacket.getPayload())
currFrameNbr = rtpPacket.seqNum()
file.write(rtpPacket.getPayload())
print("Current Seq Num: " + str(currFrameNbr))
if currFrameNbr > self.frameNbr and rtpPacket.getIfEnd(): # Discard the late packet
self.frameNbr = currFrameNbr
self.updateMovie(cachename)
file.close()
break
except:
# Stop listening upon requesting PAUSE or TEARDOWN
if self.playEvent.isSet():
break
print('Frame receiving failed!')
# Upon receiving ACK for TEARDOWN request,
# close the RTP socket
if self.teardownAcked == 1:
self.rtpSocket.shutdown(socket.SHUT_RDWR)
self.rtpSocket.close()
break
def writeFrame(self):
"""Write the received frame to a temp image file. Return the image file."""
cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT
file = open(cachename, "wb")
for item in self.cutFrameList:
file.write(item)
file.close()
return cachename
def updateMovie(self, imageFile):
"""Update the image file as video frame in the GUI."""
pixmap = QtGui.QPixmap(imageFile)
self.page_main.label_display.setPixmap(pixmap)
self.page_main.label_display.setScaledContents(True)
def connectToServer(self):
"""Connect to the Server. Start a new RTSP/TCP session."""
self.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.rtspSocket.connect((self.serverAddr, self.serverPort))
except:
# tkMessageBox.showwarning('Connection Failed', 'Connection to \'%s\' failed.' %self.serverAddr)
messagebox.showwarning('Connection Failed', 'Connection to \'%s\' failed.' %self.serverAddr)
def sendRtspRequest(self, requestCode):
"""Send RTSP request to the server."""
# Setup
if requestCode == self.SETUP and self.state == self.INIT:
threading.Thread(target=self.recvRtspReply).start()
# Update RTSP sequence number.
self.rtspSeq += 1
# Write the RTSP request to be sent.
request = 'SETUP ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nTransport: RTP/UDP; client_port= ' + str(self.rtpPort)
# Keep track of the sent request.
self.requestSent = self.SETUP
# Play
elif requestCode == self.PLAY and self.state == self.READY:
self.rtspSeq += 1
request = 'PLAY ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.PLAY
# Pause
elif requestCode == self.PAUSE and self.state == self.PLAYING:
self.rtspSeq += 1
request = 'PAUSE ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.PAUSE
# Teardown
elif requestCode == self.TEARDOWN and not self.state == self.INIT:
self.rtspSeq += 1
request = 'TEARDOWN ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.TEARDOWN
# Faster
elif requestCode == self.FASTER and (self.state == self.PLAYING or self.state == self.READY):
self.rtspSeq += 1
request = 'FASTER ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
# Slower
elif requestCode == self.SLOWER and (self.state == self.PLAYING or self.state == self.READY):
self.rtspSeq += 1
request = 'SLOWER ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
else:
return
# Send the RTSP request using rtspSocket.
self.rtspSocket.send(request.encode())
print('\nData sent:\n' + request)
def recvRtspReply(self):
"""Receive RTSP reply from the server."""
while True:
reply = self.rtspSocket.recv(1024)
if reply:
self.parseRtspReply(reply.decode("utf-8"))
# Close the RTSP socket upon requesting Teardown
if self.requestSent == self.TEARDOWN:
self.rtspSocket.shutdown(socket.SHUT_RDWR)
self.rtspSocket.close()
break
def parseRtspReply(self, data):
"""Parse the RTSP reply from the server."""
lines = str(data).split('\n')
seqNum = int(lines[1].split(' ')[1])
# Process only if the server reply's sequence number is the same as the request's
if seqNum == self.rtspSeq:
session = int(lines[2].split(' ')[1])
# New RTSP session ID
if self.sessionId == 0:
self.sessionId = session
# Process only if the session ID is the same
if self.sessionId == session:
if int(lines[0].split(' ')[1]) == 200:
if self.requestSent == self.SETUP:
# Update RTSP state.
self.state = self.READY
# Open RTP port.
self.openRtpPort()
elif self.requestSent == self.PLAY:
self.state = self.PLAYING
elif self.requestSent == self.PAUSE:
self.state = self.READY
# The play thread exits. A new thread is created on resume.
self.playEvent.set()
elif self.requestSent == self.TEARDOWN:
self.state = self.INIT
# Flag the teardownAcked to close the socket.
self.teardownAcked = 1
def openRtpPort(self):
"""Open RTP socket binded to a specified port."""
# Create a new datagram socket to receive RTP packets from the server
self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set the timeout value of the socket to 0.5sec
self.rtpSocket.settimeout(0.5)
try:
# Bind the socket to the address using the RTP port given by the client user
self.rtpSocket.bind(("", self.rtpPort))
except:
messagebox.showwarning('Unable to Bind', 'Unable to bind PORT=%d' %self.rtpPort)
def handler(self):
"""Handler on explicitly closing the GUI window."""
self.pauseMovie()
if messagebox.askokcancel("Quit?", "Are you sure you want to quit?"):
self.exitClient()
else: # When the user presses cancel, resume playing.
self.playMovie()
if __name__ == "__main__":
try:
# serverAddr = sys.argv[1]
# serverPort = sys.argv[2]
# rtpPort = sys.argv[3]
# fileName = sys.argv[4]
serverAddr = sys.argv[1]
serverPort = sys.argv[4]
rtpPort = sys.argv[3]
fileName = sys.argv[2]
except:
print ("[Usage: ClientLauncher.py Server_name Server_port RTP_port Video_file]\n")
# root = tkinter.Tk()
client = Client(serverAddr, serverPort, rtpPort, fileName)
# client.master.title('RTP Client')
# root.mainloop() | 38.13 | 148 | 0.559577 | import socket, threading, sys, traceback, os, tkinter
from ui import Ui_MainWindow
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import QtCore, QtGui, QtWidgets
from tkinter import *
from PIL import Image, ImageTk
from tkinter import messagebox, Tk
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from RtpPacket import RtpPacket
RECV_SIZE = 20480 + 14
HIGHT = 500
CACHE_FILE_NAME = "cache-"
CACHE_FILE_EXT = ".jpg"
class Client:
INIT = 0
READY = 1
PLAYING = 2
state = INIT
SETUP = 0
PLAY = 1
PAUSE = 2
TEARDOWN = 3
FASTER = 4
SLOWER = 5
def __init__(self, serveraddr, serverport, rtpport, filename):
self.page_main = Ui_MainWindow()
self.state == self.READY
self.serverAddr = serveraddr
self.serverPort = int(serverport)
self.rtpPort = int(rtpport)
self.fileName = filename
self.rtspSeq = 0
self.sessionId = 0
self.requestSent = -1
self.teardownAcked = 0
self.connectToServer()
self.frameNbr = 0
self.createWidgets()
def createWidgets(self):
app = QtWidgets.QApplication(sys.argv)
page_tmp = QtWidgets.QMainWindow()
self.page_main.setupUi(page_tmp)
page_tmp.show()
self.page_main.btn_setup.clicked.connect(lambda: self.setupMovie())
self.page_main.btn_play.clicked.connect(lambda: self.playMovie())
self.page_main.btn_pause.clicked.connect(lambda: self.pauseMovie())
self.page_main.btn_teardown.clicked.connect(lambda: self.exitClient())
self.page_main.btn_faster.clicked.connect(lambda: self.fasterMovie())
self.page_main.btn_slower.clicked.connect(lambda: self.slowerMovie())
sys.exit(app.exec_())
def fasterMovie(self):
if self.state == self.PLAYING or self.state == self.READY:
self.sendRtspRequest(self.FASTER)
def slowerMovie(self):
if self.state == self.PLAYING or self.state == self.READY:
self.sendRtspRequest(self.SLOWER)
def setupMovie(self):
if self.state == self.INIT:
self.sendRtspRequest(self.SETUP)
def exitClient(self):
self.sendRtspRequest(self.TEARDOWN)
sys.exit(0)
print(os.remove(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT))
def pauseMovie(self):
if self.state == self.PLAYING:
self.sendRtspRequest(self.PAUSE)
def playMovie(self):
if self.state == self.READY:
threading.Thread(target=self.listenRtp).start()
self.playEvent = threading.Event()
self.playEvent.clear()
self.sendRtspRequest(self.PLAY)
def listenRtp(self):
while 1:
try:
cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT
file = open(cachename, "wb+")
while 1:
data = self.rtpSocket.recv(RECV_SIZE)
if data:
rtpPacket = RtpPacket()
rtpPacket.decode(data)
currFrameNbr = rtpPacket.seqNum()
file.write(rtpPacket.getPayload())
print("Current Seq Num: " + str(currFrameNbr))
if currFrameNbr > self.frameNbr and rtpPacket.getIfEnd():
self.frameNbr = currFrameNbr
self.updateMovie(cachename)
file.close()
break
except:
if self.playEvent.isSet():
break
print('Frame receiving failed!')
if self.teardownAcked == 1:
self.rtpSocket.shutdown(socket.SHUT_RDWR)
self.rtpSocket.close()
break
def writeFrame(self):
cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT
file = open(cachename, "wb")
for item in self.cutFrameList:
file.write(item)
file.close()
return cachename
def updateMovie(self, imageFile):
pixmap = QtGui.QPixmap(imageFile)
self.page_main.label_display.setPixmap(pixmap)
self.page_main.label_display.setScaledContents(True)
def connectToServer(self):
self.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.rtspSocket.connect((self.serverAddr, self.serverPort))
except:
messagebox.showwarning('Connection Failed', 'Connection to \'%s\' failed.' %self.serverAddr)
def sendRtspRequest(self, requestCode):
if requestCode == self.SETUP and self.state == self.INIT:
threading.Thread(target=self.recvRtspReply).start()
self.rtspSeq += 1
request = 'SETUP ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nTransport: RTP/UDP; client_port= ' + str(self.rtpPort)
self.requestSent = self.SETUP
elif requestCode == self.PLAY and self.state == self.READY:
self.rtspSeq += 1
request = 'PLAY ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.PLAY
elif requestCode == self.PAUSE and self.state == self.PLAYING:
self.rtspSeq += 1
request = 'PAUSE ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.PAUSE
elif requestCode == self.TEARDOWN and not self.state == self.INIT:
self.rtspSeq += 1
request = 'TEARDOWN ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.TEARDOWN
elif requestCode == self.FASTER and (self.state == self.PLAYING or self.state == self.READY):
self.rtspSeq += 1
request = 'FASTER ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
elif requestCode == self.SLOWER and (self.state == self.PLAYING or self.state == self.READY):
self.rtspSeq += 1
request = 'SLOWER ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
else:
return
self.rtspSocket.send(request.encode())
print('\nData sent:\n' + request)
def recvRtspReply(self):
while True:
reply = self.rtspSocket.recv(1024)
if reply:
self.parseRtspReply(reply.decode("utf-8"))
if self.requestSent == self.TEARDOWN:
self.rtspSocket.shutdown(socket.SHUT_RDWR)
self.rtspSocket.close()
break
def parseRtspReply(self, data):
lines = str(data).split('\n')
seqNum = int(lines[1].split(' ')[1])
if seqNum == self.rtspSeq:
session = int(lines[2].split(' ')[1])
if self.sessionId == 0:
self.sessionId = session
if self.sessionId == session:
if int(lines[0].split(' ')[1]) == 200:
if self.requestSent == self.SETUP:
self.state = self.READY
self.openRtpPort()
elif self.requestSent == self.PLAY:
self.state = self.PLAYING
elif self.requestSent == self.PAUSE:
self.state = self.READY
self.playEvent.set()
elif self.requestSent == self.TEARDOWN:
self.state = self.INIT
self.teardownAcked = 1
def openRtpPort(self):
self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rtpSocket.settimeout(0.5)
try:
self.rtpSocket.bind(("", self.rtpPort))
except:
messagebox.showwarning('Unable to Bind', 'Unable to bind PORT=%d' %self.rtpPort)
def handler(self):
self.pauseMovie()
if messagebox.askokcancel("Quit?", "Are you sure you want to quit?"):
self.exitClient()
else:
self.playMovie()
if __name__ == "__main__":
try:
serverAddr = sys.argv[1]
serverPort = sys.argv[4]
rtpPort = sys.argv[3]
fileName = sys.argv[2]
except:
print ("[Usage: ClientLauncher.py Server_name Server_port RTP_port Video_file]\n")
client = Client(serverAddr, serverPort, rtpPort, fileName)
| true | true |
7901ef919420395bd2c98b4da2de6f7d45324374 | 4,958 | py | Python | modules/ptboard/__init__.py | iigxdehuli/PT-help-server | 2bc76564116e1135b2efcc02595310e7b8c6047d | [
"MIT"
] | 155 | 2018-01-13T12:05:06.000Z | 2022-02-25T08:04:24.000Z | modules/ptboard/__init__.py | iigxdehuli/PT-help-server | 2bc76564116e1135b2efcc02595310e7b8c6047d | [
"MIT"
] | 23 | 2018-01-10T09:06:36.000Z | 2020-09-22T00:35:42.000Z | modules/ptboard/__init__.py | iigxdehuli/PT-help-server | 2bc76564116e1135b2efcc02595310e7b8c6047d | [
"MIT"
] | 100 | 2018-01-08T10:43:30.000Z | 2021-12-24T06:24:47.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2020 Rhilip <rhilipruan@gmail.com>
import re
import time
from flask import Blueprint, request, jsonify
from app import mysql, app, cache
from pymysql import escape_string
ptboard_blueprint = Blueprint('ptboard', __name__)
# Default values for the /ptboard query parameters; each may be overridden
# through Flask app config (PTBOARD_*) or per-request query arguments.
search_default = app.config.get("PTBOARD_SEARCH", "")
site_default = app.config.get("PTBOARD_SITE", "")
no_site_default = app.config.get("PTBOARD_NO_SITE", "")
order_default = app.config.get("PTBOARD_ORDER", "desc")
limit_default = app.config.get("PTBOARD_LIMIT", 100)
offset_default = app.config.get("PTBOARD_OFFSET", 0)
start_time_default = app.config.get("PTBOARD_START_TIME", 0)
# Note: the end-time default is the literal SQL keyword CURRENT_TIMESTAMP,
# interpolated verbatim into the BETWEEN clause when no end_time is given.
end_time_default = app.config.get("PTBOARD_END_TIME", "CURRENT_TIMESTAMP")
# Prefix used to build search links for PreDB records (see fix_predb()).
predb_prefix = "https://trace.corrupt-net.org/?q="
def recover_int_to_default(value, default):
    """Coerce *value* to int, returning *default* when conversion fails."""
    try:
        return int(value)
    except (ValueError, TypeError):
        return default
def warp_str(string):
    """Wrap *string* (str-converted if needed) in parentheses."""
    return f"({string})"
@ptboard_blueprint.route("/ptboard", methods=["GET"])
def ptboard():
    """Search the aggregated torrent records of the tracked PT sites.

    Query string arguments (all optional except ``token``):
      token            -- 32-char API token, checked against api.ptboard_token
      search           -- keywords split on space/punctuation (max 10 terms)
      order            -- "asc" or "desc" sort on pubDate (default "desc")
      site / no_site   -- comma-separated site include / exclude lists
      limit / offset   -- paging; limit is capped at 200
      start_time / end_time -- pubDate range bounds (epoch ints)

    Returns a JSON envelope: success, error, rows, total, cost (seconds).
    """
    t0 = time.time()
    ret = {
        "success": False,
        "error": None
    }
    token = request.args.get("token") or ""
    # Token validity is memoized for a day so repeated calls skip the DB.
    @cache.memoize(timeout=86400)
    def token_valid(token_):
        # Tokens are fixed-length 32-character strings.
        if len(token_) != 32:
            return False
        row, data = mysql.exec("SELECT * FROM `api`.`ptboard_token` WHERE token = %s", token_, ret_row=True)
        if row > 0:
            return True
        else:
            return False
    if not token_valid(token):
        ret["error"] = "Token is not exist."
        return jsonify(ret)
    mysql.exec('UPDATE `api`.`ptboard_token` set `useage_count` = `useage_count` + 1 WHERE token = %s', (token,))
    # 1. Get user requests (falling back to the PTBOARD_* config defaults)
    search_raw = request.args.get("search") or search_default
    order_raw = request.args.get("order") or order_default
    site_raw = request.args.get("site") or site_default
    no_site_raw = request.args.get("no_site") or no_site_default
    limit = request.args.get("limit") or limit_default
    offset = request.args.get("offset") or offset_default
    start_time = request.args.get("start_time") or start_time_default
    end_time = request.args.get("end_time") or end_time_default
    # 2. Clean user requests
    search = re.sub(r"[ _\-,.+]", " ", search_raw)
    search = search.split()
    search = list(filter(lambda l: len(l) > 1, search))  # drop single-character terms
    search = search[:10]
    # "1=1" is the no-op predicate used when a filter is not supplied.
    search_opt = site_opt = no_site_opt = "1=1"
    if search:
        # Each search term is passed through escape_string before being
        # interpolated into the LIKE clause.
        search_opt = warp_str(" AND ".join(map(lambda i: "title LIKE '%{}%'".format(escape_string(i)), search)))
    start_time = recover_int_to_default(start_time, start_time_default)
    end_time = recover_int_to_default(end_time, end_time_default)
    time_opt = warp_str("ptboard_record.pubDate BETWEEN {start} AND {end}".format(start=start_time, end=end_time))
    # The known-site list changes rarely; cache it for a day.
    @cache.cached(timeout=86400)
    def get_site_list():
        return [i[0] for i in mysql.exec("SELECT `site` FROM `api`.`ptboard_site`", fetch_all=True)]
    site_list = get_site_list()
    # Only whitelisted site names survive, so they are safe to interpolate.
    site = list(filter(lambda i: i in site_list, site_raw.split(",")))
    no_site = list(filter(lambda i: i in site_list, no_site_raw.split(",")))
    if site:
        site_opt = warp_str(" OR ".join(["ptboard_record.site = '{site}'".format(site=s) for s in site]))
    if no_site:
        no_site_opt = warp_str(" AND ".join(["ptboard_record.site != '{site}'".format(site=s) for s in no_site]))
    limit = recover_int_to_default(limit, limit_default)
    offset = recover_int_to_default(offset, offset_default)
    if limit > 200:
        limit = 200
    order = "desc" if order_raw.lower() not in ["desc", "asc"] else order_raw
    # 3. Get response data from Database.  Every interpolated fragment is
    # either escaped (search), whitelisted (site/no_site/order), or coerced
    # to int (limit/offset/start/end) above.
    opt = " AND ".join([time_opt, site_opt, no_site_opt, search_opt])
    sql = ("SELECT ptboard_record.sid, ptboard_site.site, ptboard_record.title, "
           "concat(ptboard_site.torrent_prefix,ptboard_record.sid, ptboard_site.torrent_suffix) AS link, "
           "ptboard_record.pubDate FROM api.ptboard_record "
           "INNER JOIN api.ptboard_site ON api.ptboard_site.site = api.ptboard_record.site "
           "WHERE {opt} ORDER BY `pubDate` {_da} "
           "LIMIT {_offset}, {_limit}".format(opt=opt, _da=order.upper(), _offset=offset, _limit=limit)
           )
    record_count, rows_data = mysql.exec(sql=sql, r_dict=True, fetch_all=True, ret_row=True)
    # 4. Sort Response data (echo the SQL in debug mode for troubleshooting)
    if app.config.get("DEBUG"):
        ret["sql"] = sql
    def fix_predb(d: dict):
        # PreDB rows have no native link; rebuild one from the title's
        # second " | "-separated segment.
        if d["site"] == "PreDB":
            d["link"] = predb_prefix + d["title"].split(" | ")[1]
        return d
    ret.update({
        "success": True,
        "rows": list(map(fix_predb, rows_data)),
        "total": record_count if search else mysql.exec("SELECT count(*) FROM `api`.`ptboard_record`")[0],
    })
    ret["cost"] = time.time() - t0
    return jsonify(ret)
| 35.669065 | 114 | 0.656313 |
import re
import time
from flask import Blueprint, request, jsonify
from app import mysql, app, cache
from pymysql import escape_string
ptboard_blueprint = Blueprint('ptboard', __name__)
search_default = app.config.get("PTBOARD_SEARCH", "")
site_default = app.config.get("PTBOARD_SITE", "")
no_site_default = app.config.get("PTBOARD_NO_SITE", "")
order_default = app.config.get("PTBOARD_ORDER", "desc")
limit_default = app.config.get("PTBOARD_LIMIT", 100)
offset_default = app.config.get("PTBOARD_OFFSET", 0)
start_time_default = app.config.get("PTBOARD_START_TIME", 0)
end_time_default = app.config.get("PTBOARD_END_TIME", "CURRENT_TIMESTAMP")
predb_prefix = "https://trace.corrupt-net.org/?q="
def recover_int_to_default(value, default):
try:
ret = int(value)
except(ValueError, TypeError):
ret = default
return ret
def warp_str(string):
return "({})".format(string)
@ptboard_blueprint.route("/ptboard", methods=["GET"])
def ptboard():
t0 = time.time()
ret = {
"success": False,
"error": None
}
token = request.args.get("token") or ""
@cache.memoize(timeout=86400)
def token_valid(token_):
if len(token_) != 32:
return False
row, data = mysql.exec("SELECT * FROM `api`.`ptboard_token` WHERE token = %s", token_, ret_row=True)
if row > 0:
return True
else:
return False
if not token_valid(token):
ret["error"] = "Token is not exist."
return jsonify(ret)
mysql.exec('UPDATE `api`.`ptboard_token` set `useage_count` = `useage_count` + 1 WHERE token = %s', (token,))
search_raw = request.args.get("search") or search_default
order_raw = request.args.get("order") or order_default
site_raw = request.args.get("site") or site_default
no_site_raw = request.args.get("no_site") or no_site_default
limit = request.args.get("limit") or limit_default
offset = request.args.get("offset") or offset_default
start_time = request.args.get("start_time") or start_time_default
end_time = request.args.get("end_time") or end_time_default
search = re.sub(r"[ _\-,.+]", " ", search_raw)
search = search.split()
search = list(filter(lambda l: len(l) > 1, search))
search = search[:10]
search_opt = site_opt = no_site_opt = "1=1"
if search:
search_opt = warp_str(" AND ".join(map(lambda i: "title LIKE '%{}%'".format(escape_string(i)), search)))
start_time = recover_int_to_default(start_time, start_time_default)
end_time = recover_int_to_default(end_time, end_time_default)
time_opt = warp_str("ptboard_record.pubDate BETWEEN {start} AND {end}".format(start=start_time, end=end_time))
@cache.cached(timeout=86400)
def get_site_list():
return [i[0] for i in mysql.exec("SELECT `site` FROM `api`.`ptboard_site`", fetch_all=True)]
site_list = get_site_list()
site = list(filter(lambda i: i in site_list, site_raw.split(",")))
no_site = list(filter(lambda i: i in site_list, no_site_raw.split(",")))
if site:
site_opt = warp_str(" OR ".join(["ptboard_record.site = '{site}'".format(site=s) for s in site]))
if no_site:
no_site_opt = warp_str(" AND ".join(["ptboard_record.site != '{site}'".format(site=s) for s in no_site]))
limit = recover_int_to_default(limit, limit_default)
offset = recover_int_to_default(offset, offset_default)
if limit > 200:
limit = 200
order = "desc" if order_raw.lower() not in ["desc", "asc"] else order_raw
opt = " AND ".join([time_opt, site_opt, no_site_opt, search_opt])
sql = ("SELECT ptboard_record.sid, ptboard_site.site, ptboard_record.title, "
"concat(ptboard_site.torrent_prefix,ptboard_record.sid, ptboard_site.torrent_suffix) AS link, "
"ptboard_record.pubDate FROM api.ptboard_record "
"INNER JOIN api.ptboard_site ON api.ptboard_site.site = api.ptboard_record.site "
"WHERE {opt} ORDER BY `pubDate` {_da} "
"LIMIT {_offset}, {_limit}".format(opt=opt, _da=order.upper(), _offset=offset, _limit=limit)
)
record_count, rows_data = mysql.exec(sql=sql, r_dict=True, fetch_all=True, ret_row=True)
if app.config.get("DEBUG"):
ret["sql"] = sql
def fix_predb(d: dict):
if d["site"] == "PreDB":
d["link"] = predb_prefix + d["title"].split(" | ")[1]
return d
ret.update({
"success": True,
"rows": list(map(fix_predb, rows_data)),
"total": record_count if search else mysql.exec("SELECT count(*) FROM `api`.`ptboard_record`")[0],
})
ret["cost"] = time.time() - t0
return jsonify(ret)
| true | true |
7901efaec7f53f1176ace988ca5c872964685c57 | 12,058 | py | Python | sdks/python/client/argo_workflows/model/lifecycle_handler.py | BearerPipelineTest/argo-workflows | ecd91b1c4215a2ab8742f7c43eaade98a1d47eba | [
"Apache-2.0"
] | 1 | 2022-02-24T01:45:03.000Z | 2022-02-24T01:45:03.000Z | sdks/python/client/argo_workflows/model/lifecycle_handler.py | BearerPipelineTest/argo-workflows | ecd91b1c4215a2ab8742f7c43eaade98a1d47eba | [
"Apache-2.0"
] | 18 | 2022-02-01T23:09:58.000Z | 2022-03-31T23:28:41.000Z | sdks/python/client/argo_workflows/model/lifecycle_handler.py | BearerPipelineTest/argo-workflows | ecd91b1c4215a2ab8742f7c43eaade98a1d47eba | [
"Apache-2.0"
] | null | null | null | """
Argo Workflows API
Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. For more information, please see https://argoproj.github.io/argo-workflows/ # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
    """Import the referenced model classes and publish them in module globals.

    Imports are deferred until first use (see ``openapi_types``), presumably
    to avoid circular imports between generated model modules -- standard
    OpenAPI-generator pattern.
    """
    from argo_workflows.model.exec_action import ExecAction
    from argo_workflows.model.http_get_action import HTTPGetAction
    from argo_workflows.model.tcp_socket_action import TCPSocketAction
    globals()['ExecAction'] = ExecAction
    globals()['HTTPGetAction'] = HTTPGetAction
    globals()['TCPSocketAction'] = TCPSocketAction
class LifecycleHandler(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This model defines no enum-constrained fields.
    allowed_values = {
    }

    # This model defines no field validations.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            '_exec': (ExecAction,),  # noqa: E501
            'http_get': (HTTPGetAction,),  # noqa: E501
            'tcp_socket': (TCPSocketAction,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    # Python attribute name -> JSON key.  'exec' is prefixed with '_',
    # presumably to avoid shadowing the Python builtin.
    attribute_map = {
        '_exec': 'exec',  # noqa: E501
        'http_get': 'httpGet',  # noqa: E501
        'tcp_socket': 'tcpSocket',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """LifecycleHandler - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            _exec (ExecAction): [optional]  # noqa: E501
            http_get (HTTPGetAction): [optional]  # noqa: E501
            tcp_socket (TCPSocketAction): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """LifecycleHandler - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            _exec (ExecAction): [optional]  # noqa: E501
            http_get (HTTPGetAction): [optional]  # noqa: E501
            tcp_socket (TCPSocketAction): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 44.007299 | 206 | 0.579366 |
import re
import sys
from argo_workflows.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
from argo_workflows.model.exec_action import ExecAction
from argo_workflows.model.http_get_action import HTTPGetAction
from argo_workflows.model.tcp_socket_action import TCPSocketAction
globals()['ExecAction'] = ExecAction
globals()['HTTPGetAction'] = HTTPGetAction
globals()['TCPSocketAction'] = TCPSocketAction
class LifecycleHandler(ModelNormal):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'_exec': (ExecAction,),
'http_get': (HTTPGetAction,),
'tcp_socket': (TCPSocketAction,),
}
@cached_property
def discriminator():
return None
attribute_map = {
'_exec': 'exec',
'http_get': 'httpGet',
'tcp_socket': 'tcpSocket',
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| true | true |
7901f032a000f62e27cc566898a31918f4a99544 | 3,111 | py | Python | ETL-data-with-postgres/etl.py | cdiswine/data-engineering-nanodegree | a5895e3ba2f94128a16b9da6d07327451bacb173 | [
"MIT"
] | null | null | null | ETL-data-with-postgres/etl.py | cdiswine/data-engineering-nanodegree | a5895e3ba2f94128a16b9da6d07327451bacb173 | [
"MIT"
] | null | null | null | ETL-data-with-postgres/etl.py | cdiswine/data-engineering-nanodegree | a5895e3ba2f94128a16b9da6d07327451bacb173 | [
"MIT"
] | null | null | null | import os
import glob
import psycopg2
import pandas as pd
import numpy as np
from sql_queries import *
def process_song_file(cur, filepath):
    """Insert the song and artist rows contained in one song JSON file.

    Args:
        cur: open database cursor.
        filepath: path to a single-record, line-delimited JSON song file.
    """
    record = pd.read_json(filepath, lines=True)

    # song row
    song_fields = ["song_id", "title", "artist_id", "year", "duration"]
    cur.execute(song_table_insert, record[song_fields].values[0])

    # artist row
    artist_fields = ["artist_id", "artist_name", "artist_location",
                     "artist_latitude", "artist_longitude"]
    cur.execute(artist_table_insert, record[artist_fields].values[0])
def process_log_file(cur, filepath):
    """Load one log JSON file and populate the time, user and songplay tables.

    Only ``NextSong`` page events are processed.  ``ts`` is a Unix epoch in
    milliseconds; the raw value is stored in the time table alongside the
    derived calendar fields.

    Args:
        cur: open database cursor.
        filepath: path to a line-delimited JSON log file.
    """
    # open log file and keep only song-play events
    df = pd.read_json(filepath, lines=True)
    df = df.query("page=='NextSong'")

    # convert the millisecond epoch to datetime for the calendar breakdown
    t = pd.to_datetime(df["ts"] / 1000, unit='s')

    # insert time data records
    # NOTE: Series.dt.week was removed in pandas 2.0; isocalendar().week is
    # the supported equivalent and yields the same ISO week numbers.
    time_data = np.transpose(np.array([df["ts"].values, t.dt.hour.values, t.dt.day.values,
                                       t.dt.isocalendar().week.values, t.dt.month.values,
                                       t.dt.year.values, t.dt.weekday.values]))
    column_labels = ("timestamp", "hour", "day", "week of year", "month", "year", "weekday")
    time_df = pd.DataFrame(data=time_data, columns=column_labels)

    for i, row in time_df.iterrows():
        cur.execute(time_table_insert, list(row))

    # insert user records
    user_df = df[["userId", "firstName", "lastName", "gender", "level"]]
    for i, row in user_df.iterrows():
        cur.execute(user_table_insert, row)

    # insert songplay records; song/artist ids are looked up by title,
    # artist name and duration, and are None when no match is found
    for index, row in df.iterrows():
        cur.execute(song_select, (row.song, row.artist, row.length))
        results = cur.fetchone()

        if results:
            songid, artistid = results
        else:
            songid, artistid = None, None

        songplay_data = (row.ts, row.userId, row.level, songid, artistid, row.sessionId, \
                         row.location, row.userAgent)
        cur.execute(songplay_table_insert, songplay_data)
def process_data(cur, conn, filepath, func):
    """Walk *filepath*, apply *func* to every JSON file, committing after each.

    Args:
        cur: database cursor handed through to *func*.
        conn: connection whose commit() is called after each file.
        filepath: root directory to search recursively for ``*.json`` files.
        func: callable of (cur, absolute_file_path).
    """
    # collect the absolute path of every JSON file under the root
    json_files = []
    for root, _dirs, _files in os.walk(filepath):
        json_files.extend(
            os.path.abspath(match) for match in glob.glob(os.path.join(root, '*.json'))
        )

    total = len(json_files)
    print('{} files found in {}'.format(total, filepath))

    # process each file, committing as we go so progress survives a crash
    for count, datafile in enumerate(json_files, 1):
        func(cur, datafile)
        conn.commit()
        print('{}/{} files processed.'.format(count, total))
def main():
    """Run the full sparkifydb ETL: song files first, then log files."""
    connection = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
    cursor = connection.cursor()

    for data_dir, handler in (('data/song_data', process_song_file),
                              ('data/log_data', process_log_file)):
        process_data(cursor, connection, filepath=data_dir, func=handler)

    connection.close()
if __name__ == "__main__":
main() | 32.40625 | 119 | 0.641916 | import os
import glob
import psycopg2
import pandas as pd
import numpy as np
from sql_queries import *
def process_song_file(cur, filepath):
df = pd.read_json(filepath, lines = True)
song_data = df[["song_id", "title", "artist_id", "year", "duration"]].values[0]
cur.execute(song_table_insert, song_data)
artist_data = df[["artist_id", "artist_name", "artist_location", "artist_latitude", "artist_longitude",]].values[0]
cur.execute(artist_table_insert, artist_data)
def process_log_file(cur, filepath):
df = pd.read_json(filepath, lines = True)
df = df.query("page=='NextSong'")
t = pd.to_datetime(df["ts"]/1000, unit = 's')
time_data = np.transpose(np.array([df["ts"].values, t.dt.hour.values, t.dt.day.values, t.dt.week.values, \
t.dt.month.values, t.dt.year.values, t.dt.weekday.values]))
column_labels = ("timestamp", "hour", "day", "week of year", "month", "year", "weekday")
time_df = pd.DataFrame(data = time_data, columns = column_labels)
for i, row in time_df.iterrows():
cur.execute(time_table_insert, list(row))
user_df = df[["userId", "firstName", "lastName", "gender", "level"]]
for i, row in user_df.iterrows():
cur.execute(user_table_insert, row)
for index, row in df.iterrows():
cur.execute(song_select, (row.song, row.artist, row.length))
results = cur.fetchone()
if results:
songid, artistid = results
else:
songid, artistid = None, None
songplay_data = (row.ts, row.userId, row.level, songid, artistid, row.sessionId, \
row.location, row.userAgent)
cur.execute(songplay_table_insert, songplay_data)
def process_data(cur, conn, filepath, func):
all_files = []
for root, dirs, files in os.walk(filepath):
files = glob.glob(os.path.join(root,'*.json'))
for f in files :
all_files.append(os.path.abspath(f))
num_files = len(all_files)
print('{} files found in {}'.format(num_files, filepath))
for i, datafile in enumerate(all_files, 1):
func(cur, datafile)
conn.commit()
print('{}/{} files processed.'.format(i, num_files))
def main():
conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()
process_data(cur, conn, filepath='data/song_data', func=process_song_file)
process_data(cur, conn, filepath='data/log_data', func=process_log_file)
conn.close()
if __name__ == "__main__":
main() | true | true |
7901f082786cfa18d39230194f4199dad5d96bce | 2,100 | py | Python | spider.py | yeonzi/163course_spider | 238731f55320e885ea5300059b82e347e48b8845 | [
"MIT"
] | null | null | null | spider.py | yeonzi/163course_spider | 238731f55320e885ea5300059b82e347e48b8845 | [
"MIT"
] | null | null | null | spider.py | yeonzi/163course_spider | 238731f55320e885ea5300059b82e347e48b8845 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#coding:utf-8
import time
import json
import requests
from selenium import webdriver
filename = 'a.csv'
url = 'http://www.icourse163.org/university/view/all.htm#/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}

# CSV output: write the header row first (also echoed to stdout).
file = open(filename, 'w+')
print("大学,课程,课程时长,课程负载,内容类型,课程分类")
file.write("大学,课程,课程时长,课程负载,内容类型,课程分类\n")

# Three headless browsers: index page, university page, course page.
browser = webdriver.PhantomJS()
browser2 = webdriver.PhantomJS()
browser3 = webdriver.PhantomJS()
browser.get(url)

# 大学 -- one entry per university card on the index page
university = browser.find_elements_by_class_name("u-usity")
for i in university:
    university_url = i.get_attribute("href")
    university_name = i.find_element_by_id("").get_attribute("alt")
    browser2.get(university_url)
    # 课程 -- every course card on the university page
    course = browser2.find_elements_by_class_name("g-cell1")
    for j in course:
        course_url = "http://www.icourse163.org" + j.get_attribute("data-href")
        course_name = j.find_element_by_class_name("card").find_element_by_class_name("f-f0").text
        browser3.get(course_url)
        # 课程信息 -- up to four info blocks on the course page
        course_text = browser3.find_elements_by_class_name("block")
        # Reset before each course so a failed lookup cannot leak the
        # previous course's values into this row.
        k0 = k1 = k2 = k3 = None
        try:
            k0 = course_text[0].find_element_by_class_name("t2").text
            k1 = course_text[1].find_element_by_class_name("t2").text
            k2 = course_text[2].find_element_by_class_name("t2").text
            k3 = course_text[3].find_element_by_class_name("t2").text
        except Exception:
            # Fewer than four info blocks: shift the captured values right so
            # the missing leading column is reported as empty.  (Fixes the
            # original `k1 = None` / `K0 = None` typo, which dropped the
            # first captured value and left a dead capital-K variable.)
            k3 = k2
            k2 = k1
            k1 = k0
            k0 = None
        finally:
            print("%s,%s,%s,%s,%s,%s" % (university_name,course_name,k0,k1,k2,k3))
            file.write("%s,%s,%s,%s,%s,%s\n" % (university_name,course_name,k0,k1,k2,k3))

browser3.close()
browser2.close()
browser.close()
# Flush and release the CSV (the original never closed the handle).
file.close()
| 32.307692 | 183 | 0.645714 |
import time
import json
import requests
from selenium import webdriver
filename = 'a.csv'
url = 'http://www.icourse163.org/university/view/all.htm#/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
file = open(filename, 'w+')
print("大学,课程,课程时长,课程负载,内容类型,课程分类")
file.write("大学,课程,课程时长,课程负载,内容类型,课程分类\n")
browser = webdriver.PhantomJS()
browser2 = webdriver.PhantomJS()
browser3 = webdriver.PhantomJS()
browser.get(url)
university = browser.find_elements_by_class_name("u-usity")
for i in university:
university_url = i.get_attribute("href")
university_name = i.find_element_by_id("").get_attribute("alt")
browser2.get(university_url)
course = browser2.find_elements_by_class_name("g-cell1")
for j in course:
course_url = "http://www.icourse163.org" + j.get_attribute("data-href")
course_name = j.find_element_by_class_name("card").find_element_by_class_name("f-f0").text
browser3.get(course_url)
course_text = browser3.find_elements_by_class_name("block")
try:
k0 = course_text[0].find_element_by_class_name("t2").text
k1 = course_text[1].find_element_by_class_name("t2").text
k2 = course_text[2].find_element_by_class_name("t2").text
k3 = course_text[3].find_element_by_class_name("t2").text
except Exception as e:
k3 = k2
k2 = k1
k1 = None
K0 = None
finally:
print("%s,%s,%s,%s,%s,%s" % (university_name,course_name,k0,k1,k2,k3))
file.write("%s,%s,%s,%s,%s,%s\n" % (university_name,course_name,k0,k1,k2,k3))
browser3.close()
browser2.close()
browser.close()
| true | true |
7901f195922865b42cc3498967ba347cca40919e | 5,086 | py | Python | openstack/message/v2/queue.py | allenLew1991/openstacksdk | 445a5491db0d701bf1e01290014abcd38a11a41a | [
"Apache-2.0"
] | null | null | null | openstack/message/v2/queue.py | allenLew1991/openstacksdk | 445a5491db0d701bf1e01290014abcd38a11a41a | [
"Apache-2.0"
] | null | null | null | openstack/message/v2/queue.py | allenLew1991/openstacksdk | 445a5491db0d701bf1e01290014abcd38a11a41a | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from openstack import resource
class Queue(resource.Resource):
    """A Zaqar v2 message queue.

    Every Zaqar API call must carry a ``Client-ID`` header, plus an
    ``X-PROJECT-ID`` header when keystone auth is not enabled in the
    Zaqar service, so each request method below merges those headers
    into the prepared request before calling the session.
    """

    # FIXME(anyone): The name string of `location` field of Zaqar API response
    # is lower case. That is inconsistent with the guide from API-WG. This is
    # a workaround for this issue.
    location = resource.Header("location")

    resources_key = "queues"
    base_path = "/queues"

    # capabilities
    allow_create = True
    allow_list = True
    allow_fetch = True
    allow_delete = True

    # Properties
    #: The default TTL of messages defined for a queue, which will effect for
    #: any messages posted to the queue.
    default_message_ttl = resource.Body("_default_message_ttl")
    #: Description of the queue.
    description = resource.Body("description")
    #: The max post size of messages defined for a queue, which will effect
    #: for any messages posted to the queue.
    max_messages_post_size = resource.Body("_max_messages_post_size")
    #: Name of the queue. The name is the unique identity of a queue. It
    #: must not exceed 64 bytes in length, and it is limited to US-ASCII
    #: letters, digits, underscores, and hyphens.
    name = resource.Body("name", alternate_id=True)
    #: The ID to identify the client accessing Zaqar API. Must be specified
    #: in header for each API request.
    client_id = resource.Header("Client-ID")
    #: The ID to identify the project accessing Zaqar API. Must be specified
    #: in case keystone auth is not enabled in Zaqar service.
    project_id = resource.Header("X-PROJECT-ID")

    def _zaqar_headers(self, session):
        """Build the ``Client-ID``/``X-PROJECT-ID`` headers Zaqar requires.

        A random client UUID is generated when this resource does not
        carry one, and the project id falls back to the session's
        project.
        """
        return {
            "Client-ID": self.client_id or str(uuid.uuid4()),
            "X-PROJECT-ID": self.project_id or session.get_project_id()
        }

    def create(self, session, prepend_key=True):
        """Create the queue with a PUT request and return ``self``."""
        request = self._prepare_request(requires_id=True,
                                        prepend_key=prepend_key)
        request.headers.update(self._zaqar_headers(session))
        response = session.put(request.url,
                               json=request.body, headers=request.headers)

        self._translate_response(response, has_body=False)
        return self

    @classmethod
    def list(cls, session, paginated=False, **params):
        """This method is a generator which yields queue objects.

        This is almost the copy of list method of resource.Resource class.
        The only difference is the request header now includes `Client-ID`
        and `X-PROJECT-ID` fields which are required by Zaqar v2 API.
        """
        more_data = True
        query_params = cls._query_mapping._transpose(params)
        uri = cls.base_path % params
        headers = {
            "Client-ID": params.get('client_id', None) or str(uuid.uuid4()),
            "X-PROJECT-ID": params.get('project_id', None
                                       ) or session.get_project_id()
        }

        while more_data:
            resp = session.get(uri,
                               headers=headers, params=query_params)
            resp = resp.json()
            resp = resp[cls.resources_key]

            # An empty page terminates the pagination loop.
            if not resp:
                more_data = False

            yielded = 0
            new_marker = None
            for data in resp:
                value = cls.existing(**data)
                new_marker = value.id
                yielded += 1
                yield value

            if not paginated:
                return
            if "limit" in query_params and yielded < query_params["limit"]:
                return
            query_params["limit"] = yielded
            query_params["marker"] = new_marker

    def fetch(self, session, requires_id=True, error_message=None):
        """Fetch the queue with a GET request and return ``self``."""
        request = self._prepare_request(requires_id=requires_id)
        request.headers.update(self._zaqar_headers(session))
        # Send the merged request headers (not just the Zaqar ones) so any
        # headers set by _prepare_request are preserved, as in create().
        response = session.get(request.url, headers=request.headers)
        self._translate_response(response)
        return self

    def delete(self, session):
        """Delete the queue with a DELETE request and return ``self``."""
        request = self._prepare_request()
        request.headers.update(self._zaqar_headers(session))
        # As in fetch(), pass the merged request headers rather than
        # discarding what _prepare_request set up.
        response = session.delete(request.url, headers=request.headers)
        self._translate_response(response, has_body=False)
        return self
| 38.240602 | 78 | 0.624853 |
import uuid
from openstack import resource
class Queue(resource.Resource):
location = resource.Header("location")
resources_key = "queues"
base_path = "/queues"
allow_create = True
allow_list = True
allow_fetch = True
allow_delete = True
default_message_ttl = resource.Body("_default_message_ttl")
description = resource.Body("description")
max_messages_post_size = resource.Body("_max_messages_post_size")
name = resource.Body("name", alternate_id=True)
client_id = resource.Header("Client-ID")
project_id = resource.Header("X-PROJECT-ID")
def create(self, session, prepend_key=True):
request = self._prepare_request(requires_id=True,
prepend_key=prepend_key)
headers = {
"Client-ID": self.client_id or str(uuid.uuid4()),
"X-PROJECT-ID": self.project_id or session.get_project_id()
}
request.headers.update(headers)
response = session.put(request.url,
json=request.body, headers=request.headers)
self._translate_response(response, has_body=False)
return self
@classmethod
def list(cls, session, paginated=False, **params):
more_data = True
query_params = cls._query_mapping._transpose(params)
uri = cls.base_path % params
headers = {
"Client-ID": params.get('client_id', None) or str(uuid.uuid4()),
"X-PROJECT-ID": params.get('project_id', None
) or session.get_project_id()
}
while more_data:
resp = session.get(uri,
headers=headers, params=query_params)
resp = resp.json()
resp = resp[cls.resources_key]
if not resp:
more_data = False
yielded = 0
new_marker = None
for data in resp:
value = cls.existing(**data)
new_marker = value.id
yielded += 1
yield value
if not paginated:
return
if "limit" in query_params and yielded < query_params["limit"]:
return
query_params["limit"] = yielded
query_params["marker"] = new_marker
def fetch(self, session, requires_id=True, error_message=None):
request = self._prepare_request(requires_id=requires_id)
headers = {
"Client-ID": self.client_id or str(uuid.uuid4()),
"X-PROJECT-ID": self.project_id or session.get_project_id()
}
request.headers.update(headers)
response = session.get(request.url,
headers=headers)
self._translate_response(response)
return self
def delete(self, session):
request = self._prepare_request()
headers = {
"Client-ID": self.client_id or str(uuid.uuid4()),
"X-PROJECT-ID": self.project_id or session.get_project_id()
}
request.headers.update(headers)
response = session.delete(request.url,
headers=headers)
self._translate_response(response, has_body=False)
return self
| true | true |
7901f32508d570308256e3c16671ca7e2216c254 | 443 | py | Python | src/ambianic/device.py | ivelin/ambianic-edge | a0cbb2a2369024735ea896b15b93d3d50d58cac1 | [
"Apache-2.0"
] | 95 | 2019-12-12T02:20:40.000Z | 2022-03-30T18:23:52.000Z | src/ambianic/device.py | ambianic/ambianic | cad14a3ae8a2149c9e17e1bd41ee7d1b75568271 | [
"Apache-2.0"
] | 313 | 2019-11-04T21:31:26.000Z | 2022-01-01T11:00:38.000Z | src/ambianic/device.py | ambianic/ambianic | cad14a3ae8a2149c9e17e1bd41ee7d1b75568271 | [
"Apache-2.0"
] | 49 | 2020-02-28T22:09:36.000Z | 2022-03-23T03:26:33.000Z | """Base classes for an Ambianic Edge device abstraction"""
from pydantic import BaseModel, Field
class DeviceInfo(BaseModel):
    """Pydantic model describing an Ambianic Edge device."""

    # Software version string; None when unknown/unreported.
    version: str = Field(None, description="Ambianic Edge software version.")
    # Human-friendly label for this device.
    display_name: str = Field(
        None, description="User friendly display name for this device."
    )
    # Whether the device should emit notifications; disabled by default.
    notifications_enabled: bool = Field(
        False, description="Indicates whether device notifications are enabled."
    )
| 34.076923 | 80 | 0.722348 | from pydantic import BaseModel, Field
class DeviceInfo(BaseModel):
version: str = Field(None, description="Ambianic Edge software version.")
display_name: str = Field(
None, description="User friendly display name for this device."
)
notifications_enabled: bool = Field(
False, description="Indicates whether device notifications are enabled."
)
| true | true |
7901f3b4ea1cec4c15b29d2c80e86faad3993e13 | 409 | py | Python | tutorial/grid_data_demo_run.py | viz4biz/PyDataNYC2015 | 066154ea9f1837c355e6108a28b85889f3020da3 | [
"Apache-2.0"
] | 11 | 2015-11-11T13:57:21.000Z | 2019-08-14T15:53:43.000Z | tutorial/grid_data_demo_run.py | viz4biz/PyDataNYC2015 | 066154ea9f1837c355e6108a28b85889f3020da3 | [
"Apache-2.0"
] | null | null | null | tutorial/grid_data_demo_run.py | viz4biz/PyDataNYC2015 | 066154ea9f1837c355e6108a28b85889f3020da3 | [
"Apache-2.0"
] | 6 | 2015-11-11T13:57:25.000Z | 2018-09-12T07:53:03.000Z | """
Mplot demo runner
"""
import enaml
from enaml.qt.qt_application import QtApplication
def run_demo():
    """Build the grid-data demo window and run the Qt event loop."""
    with enaml.imports():
        from griddata_demo_model_ui import Main

    application = QtApplication()
    main_window = Main(custom_title='Matplotlib demo', mplot_style='darkish')
    main_window.show()
    # Block here until the user closes the window.
    application.start()


run_demo()
| 17.041667 | 70 | 0.691932 |
import enaml
from enaml.qt.qt_application import QtApplication
def run_demo():
with enaml.imports():
from griddata_demo_model_ui import Main
app = QtApplication()
view = Main(custom_title='Matplotlib demo', mplot_style='darkish')
view.show()
app.start()
run_demo()
| true | true |
7901f431e59b465571dab45305550b18d3198ad3 | 6,293 | py | Python | bin/fixup_oslogin_v1_keywords.py | fahmiduldul/tmdb | a80e441a98273a9ba823c52638b381da26c3adfe | [
"MIT"
] | null | null | null | bin/fixup_oslogin_v1_keywords.py | fahmiduldul/tmdb | a80e441a98273a9ba823c52638b381da26c3adfe | [
"MIT"
] | null | null | null | bin/fixup_oslogin_v1_keywords.py | fahmiduldul/tmdb | a80e441a98273a9ba823c52638b381da26c3adfe | [
"MIT"
] | 1 | 2022-03-09T03:17:22.000Z | 2022-03-09T03:17:22.000Z | #!/Users/fahmi.abdulaziz/PycharmProjects/tmdb/bin/python3.8
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
    predicate: Callable[[Any], bool],
    iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
    """Split *iterator* into ``(matches, non_matches)``, preserving order."""
    matching: List[Any] = []
    non_matching: List[Any] = []
    for element in iterator:
        # Route each element to the list its predicate result selects.
        bucket = matching if predicate(element) else non_matching
        bucket.append(element)
    return matching, non_matching
class osloginCallTransformer(cst.CSTTransformer):
    """libcst transformer that folds flattened oslogin API call arguments
    into a single ``request={...}`` dict argument, keeping the control
    parameters (retry/timeout/metadata) as ordinary keyword arguments.
    """
    # Control-plane parameters that stay as plain keyword arguments.
    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
    # Each oslogin API method name mapped to its ordered request parameters.
    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
        'delete_posix_account': ('name', ),
        'delete_ssh_public_key': ('name', ),
        'get_login_profile': ('name', 'project_id', 'system_id', ),
        'get_ssh_public_key': ('name', ),
        'import_ssh_public_key': ('parent', 'ssh_public_key', 'project_id', ),
        'update_ssh_public_key': ('name', 'ssh_public_key', 'update_mask', ),
    }
    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
        """Return *updated* with its API parameters repacked into a
        ``request`` dict; unrecognized calls are returned unchanged."""
        try:
            key = original.func.attr.value
            kword_params = self.METHOD_TO_PARAMS[key]
        except (AttributeError, KeyError):
            # Either not a method from the API or too convoluted to be sure.
            return updated
        # If the existing code is valid, keyword args come after positional args.
        # Therefore, all positional args must map to the first parameters.
        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
        if any(k.keyword.value == "request" for k in kwargs):
            # We've already fixed this file, don't fix it again.
            return updated
        # Separate the control parameters from the API request parameters.
        kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
            kwargs
        )
        # Any positional args beyond the request params must be control args.
        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
        request_arg = cst.Arg(
            value=cst.Dict([
                cst.DictElement(
                    cst.SimpleString("'{}'".format(name)),
                    cst.Element(value=arg.value)
                )
                # Note: the args + kwargs looks silly, but keep in mind that
                # the control parameters had to be stripped out, and that
                # those could have been passed positionally or by keyword.
                for name, arg in zip(kword_params, args + kwargs)]),
            keyword=cst.Name("request")
        )
        return updated.with_changes(
            args=[request_arg] + ctrl_kwargs
        )
def fix_files(
    in_dir: pathlib.Path,
    out_dir: pathlib.Path,
    *,
    transformer=osloginCallTransformer(),
):
    """Mirror *in_dir* into *out_dir*, rewriting API calls in every .py file.

    Preconditions:
    * in_dir is a real directory
    * out_dir is a real, empty directory
    """
    python_files = (
        pathlib.Path(root, filename)
        for root, _, filenames in os.walk(in_dir)
        for filename in filenames
        if os.path.splitext(filename)[1] == ".py"
    )
    for source_path in python_files:
        source_code = source_path.read_text()

        # Parse the module and apply the call-fixing transformer.
        fixed_module = cst.parse_module(source_code).visit(transformer)

        # Recreate the corresponding path under the output directory.
        target_path = out_dir / source_path.relative_to(in_dir)
        target_path.parent.mkdir(parents=True, exist_ok=True)
        target_path.write_text(fixed_module.code)
if __name__ == '__main__':
    # Command-line entry point: parse args, validate directories, fix files.
    parser = argparse.ArgumentParser(
        description="""Fix up source that uses the oslogin client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
    parser.add_argument(
        '-d',
        '--input-directory',
        required=True,
        dest='input_dir',
        help='the input directory to walk for python files to fix up',
    )
    parser.add_argument(
        '-o',
        '--output-directory',
        required=True,
        dest='output_dir',
        help='the directory to output files fixed via un-flattening',
    )
    args = parser.parse_args()
    input_dir = pathlib.Path(args.input_dir)
    output_dir = pathlib.Path(args.output_dir)
    # Fail fast (exit code -1) when either path is not a directory.
    if not input_dir.is_dir():
        print(
            f"input directory '{input_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    if not output_dir.is_dir():
        print(
            f"output directory '{output_dir}' does not exist or is not a directory",
            file=sys.stderr,
        )
        sys.exit(-1)
    # Require an empty output directory so existing files are never clobbered.
    if os.listdir(output_dir):
        print(
            f"output directory '{output_dir}' is not empty",
            file=sys.stderr,
        )
        sys.exit(-1)
    fix_files(input_dir, output_dir)
| 34.576923 | 88 | 0.630701 |
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
return results[1], results[0]
class osloginCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'delete_posix_account': ('name', ),
'delete_ssh_public_key': ('name', ),
'get_login_profile': ('name', 'project_id', 'system_id', ),
'get_ssh_public_key': ('name', ),
'import_ssh_public_key': ('parent', 'ssh_public_key', 'project_id', ),
'update_ssh_public_key': ('name', 'ssh_public_key', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
return updated
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=osloginCallTransformer(),
):
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
tree = cst.parse_module(src)
updated = tree.visit(transformer)
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the oslogin client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
| true | true |
7901f4c8582b1321cbc6a3da41a1b1158f064b3d | 2,432 | py | Python | multipleterm.py | sylvainmouquet/multipleterm | 427f223cfda53ef88872678c6d81ec35cb972770 | [
"MIT"
] | null | null | null | multipleterm.py | sylvainmouquet/multipleterm | 427f223cfda53ef88872678c6d81ec35cb972770 | [
"MIT"
] | null | null | null | multipleterm.py | sylvainmouquet/multipleterm | 427f223cfda53ef88872678c6d81ec35cb972770 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import subprocess
try:
import gtk
except:
print >> sys.stderr, "You need to install the python gtk bindings"
sys.exit(1)
# import vte
try:
import vte
except:
error = gtk.MessageDialog (None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
'You need to install python bindings for libvte')
error.run()
sys.exit (1)
def on_key_press_event(widget, event):
    """Broadcast a key press from the command Entry to every terminal.

    Connected to the Entry's "key_press_event" signal; forwards the
    event to all terminals in ``terms`` and clears the Entry when
    Return is pressed.
    """
    keyname = gtk.gdk.keyval_name(event.keyval)
    '''print "Key %s (%d) was pressed" % (keyname, event.keyval)
    v.feed_child(keyname, len(keyname))
    v2.feed_child(keyname, len(keyname))'''
    for i in terms:
        # Re-emit the same key event on each terminal widget.
        i.emit("key-press-event", event)
    if (event.keyval == 65293):
        # 65293 (0xFF0D) is the GDK keyval for Return: clear the entry.
        text.set_text("")
# NOTE(review): nbterm appears unused; the terminal count is driven by
# len(sys.argv) below.
nbterm = 3
# All spawned vte terminals; key presses are broadcast to each of these.
terms = []
if __name__ == '__main__':
    w = gtk.Window()
    hbox = gtk.HBox()
    x = 0
    y = 0
    # One terminal window per command-line argument (plus one for argv[0]).
    for i in range(0, len(sys.argv)):
        v = vte.Terminal ()
        v.connect ("child-exited", lambda term: gtk.main_quit())
        v.fork_command()
        window = gtk.Window()
        if (i > 0):
            print sys.argv[i]
            r=subprocess.Popen(["/bin/bash", "-i", "-c", sys.argv[i]], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            #v.feed_child(sys.argv[i], len(sys.argv[i]))
            #line=r.stdout.readline()
            #print line
            # Type the argument into the terminal, then synthesize a
            # Return key press (keyval 65293 / 0xFF0D) to execute it.
            v.feed_child(sys.argv[i], len(sys.argv[i]))
            e = gtk.gdk.Event(gtk.gdk.KEY_PRESS)
            e.keyval = 65293
            e.send_event = True
            window.set_title("Window %s" % (sys.argv[i]))
        else:
            window.set_title("Window %d" % (i+1))
        terms.append(v)
        window.add(v)
        window.connect('delete-event', lambda window, event: gtk.main_quit())
        window.move(x, y)
        window.set_default_size(200, 100)
        #window.set_title("Window %d" % (i+1))
        window.show_all()
        if (i > 0):
            # The event needs a realized GDK window before it can be emitted.
            e.window = window.get_window()
            v.emit("key-press-event", e)
        x += 780
        # NOTE(review): "i-1 % 3" parses as i - (1 % 3) == i - 1;
        # "(i-1) % 3" was probably intended for a 3-column layout.
        if (i-1 % 3 == 0):
            y += 450
            x = 0
    # Small control window holding the Entry that broadcasts keystrokes.
    text = gtk.Entry()
    text.connect("key_press_event", on_key_press_event)
    w.set_default_size(200, 15)
    w.move(0, 0)
    hbox.pack_start(text, True, True, 0)
    w.add(hbox)
    w.connect('delete-event', lambda window, event: gtk.main_quit())
    w.show_all()
    text.set_can_focus(True)
    text.grab_focus()
    gtk.main()
| 27.325843 | 153 | 0.576891 |
import sys
import subprocess
try:
import gtk
except:
print >> sys.stderr, "You need to install the python gtk bindings"
sys.exit(1)
try:
import vte
except:
error = gtk.MessageDialog (None, gtk.DIALOG_MODAL, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK,
'You need to install python bindings for libvte')
error.run()
sys.exit (1)
def on_key_press_event(widget, event):
keyname = gtk.gdk.keyval_name(event.keyval)
'''print "Key %s (%d) was pressed" % (keyname, event.keyval)
v.feed_child(keyname, len(keyname))
v2.feed_child(keyname, len(keyname))'''
for i in terms:
i.emit("key-press-event", event)
if (event.keyval == 65293):
text.set_text("")
nbterm = 3
terms = []
if __name__ == '__main__':
w = gtk.Window()
hbox = gtk.HBox()
x = 0
y = 0
for i in range(0, len(sys.argv)):
v = vte.Terminal ()
v.connect ("child-exited", lambda term: gtk.main_quit())
v.fork_command()
window = gtk.Window()
if (i > 0):
print sys.argv[i]
r=subprocess.Popen(["/bin/bash", "-i", "-c", sys.argv[i]], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
v.feed_child(sys.argv[i], len(sys.argv[i]))
e = gtk.gdk.Event(gtk.gdk.KEY_PRESS)
e.keyval = 65293
e.send_event = True
window.set_title("Window %s" % (sys.argv[i]))
else:
window.set_title("Window %d" % (i+1))
terms.append(v)
window.add(v)
window.connect('delete-event', lambda window, event: gtk.main_quit())
window.move(x, y)
window.set_default_size(200, 100)
window.show_all()
if (i > 0):
e.window = window.get_window()
v.emit("key-press-event", e)
x += 780
if (i-1 % 3 == 0):
y += 450
x = 0
text = gtk.Entry()
text.connect("key_press_event", on_key_press_event)
w.set_default_size(200, 15)
w.move(0, 0)
hbox.pack_start(text, True, True, 0)
w.add(hbox)
w.connect('delete-event', lambda window, event: gtk.main_quit())
w.show_all()
text.set_can_focus(True)
text.grab_focus()
gtk.main()
| false | true |
7901f568e227dd7dab0639e6d05af4c5fcda55c2 | 8,861 | py | Python | pubmedpy/tests/test_pmc_oai.py | dhimmel/pubmedpy | 9d716768f5ab798ec448154588e4fd99afd7584a | [
"BlueOak-1.0.0"
] | 7 | 2019-11-13T09:14:19.000Z | 2022-03-09T01:35:06.000Z | pubmedpy/tests/test_pmc_oai.py | dhimmel/pubmedpy | 9d716768f5ab798ec448154588e4fd99afd7584a | [
"BlueOak-1.0.0"
] | 2 | 2020-08-24T15:05:57.000Z | 2020-10-21T04:12:56.000Z | pubmedpy/tests/test_pmc_oai.py | dhimmel/pubmedpy | 9d716768f5ab798ec448154588e4fd99afd7584a | [
"BlueOak-1.0.0"
] | 1 | 2021-02-18T00:01:09.000Z | 2021-02-18T00:01:09.000Z | import pathlib
from lxml import etree
import requests
import pytest
from ..pmc_oai import get_sets_for_pmcid, extract_authors_from_article
directory = pathlib.Path(__file__).parent
def test_get_sets_for_pmcid():
    """PMC2092437 should belong to its journal set and the open-access set."""
    # NOTE(review): presumably queries the live PMC OAI service — needs network.
    set_specs = get_sets_for_pmcid("PMC2092437")
    assert "bmcbioi" in set_specs
    assert "pmc-open" in set_specs
def get_frontmatter_etree(pmcid):
    """Parse a cached PMC front-matter XML fixture into an lxml element."""
    xml_path = directory / "data" / "pmc-frontmatter" / f"{pmcid}.xml"
    # utf-8-sig transparently strips a leading BOM if one is present.
    return etree.fromstring(xml_path.read_text(encoding="utf-8-sig"))
def get_frontmatter_etree_via_api(pmcid):
    """Fetch front-matter for *pmcid* from the live PMC OAI-PMH endpoint.

    Returns the ``<article>`` element of the OAI GetRecord response.
    """
    numeric_id = pmcid[3:]  # drop the leading "PMC"
    url = (
        "https://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi"
        f"?verb=GetRecord&identifier=oai:pubmedcentral.nih.gov:{numeric_id}"
        "&metadataPrefix=pmc_fm"
    )
    tree = etree.fromstring(requests.get(url).content)
    return tree.find("{*}GetRecord/{*}record/{*}metadata/{*}article")
pcmid_to_authors = dict()
pcmid_to_authors["PMC65048"] = [
{
"pmcid": "PMC65048",
"position": 1,
"fore_name": "Kevin",
"last_name": "Truong",
"corresponding": 0,
"reverse_position": 2,
"affiliations": [
"1 Division of Molecular and Structural Biology, Ontario Cancer Institute and Department of Medical Biophysics, University of Toronto, Toronto, Ontario, Canada"
],
},
{
"pmcid": "PMC65048",
"position": 2,
"fore_name": "Mitsuhiko",
"last_name": "Ikura",
"corresponding": 1,
"reverse_position": 1,
"affiliations": [
"1 Division of Molecular and Structural Biology, Ontario Cancer Institute and Department of Medical Biophysics, University of Toronto, Toronto, Ontario, Canada"
],
},
]
pcmid_to_authors["PMC1183515"] = [
{
"pmcid": "PMC1183515",
"position": 1,
"fore_name": "Boris E",
"last_name": "Shakhnovich",
"corresponding": 1,
"reverse_position": 1,
"affiliations": [
"Bioinformatics Program, Boston University, Boston, Massachusetts, United States of America"
],
}
]
pcmid_to_authors["PMC5870622"] = [
{
"pmcid": "PMC5870622",
"position": 1,
"fore_name": "Chao",
"last_name": "Pang",
"corresponding": 0,
"reverse_position": 13,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands",
"2 Department of Epidemiology, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands",
],
},
{
"pmcid": "PMC5870622",
"position": 2,
"fore_name": "Fleur",
"last_name": "Kelpin",
"corresponding": 0,
"reverse_position": 12,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 3,
"fore_name": "David",
"last_name": "van Enckevort",
"corresponding": 0,
"reverse_position": 11,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 4,
"fore_name": "Niina",
"last_name": "Eklund",
"corresponding": 0,
"reverse_position": 10,
"affiliations": [
"3 Department of Public Health Solutions, National Institute for Health and Welfare, Helsinki, Finland"
],
},
{
"pmcid": "PMC5870622",
"position": 5,
"fore_name": "Kaisa",
"last_name": "Silander",
"corresponding": 0,
"reverse_position": 9,
"affiliations": [
"3 Department of Public Health Solutions, National Institute for Health and Welfare, Helsinki, Finland"
],
},
{
"pmcid": "PMC5870622",
"position": 6,
"fore_name": "Dennis",
"last_name": "Hendriksen",
"corresponding": 0,
"reverse_position": 8,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 7,
"fore_name": "Mark",
"last_name": "de Haan",
"corresponding": 0,
"reverse_position": 7,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 8,
"fore_name": "Jonathan",
"last_name": "Jetten",
"corresponding": 0,
"reverse_position": 6,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 9,
"fore_name": "Tommy",
"last_name": "de Boer",
"corresponding": 0,
"reverse_position": 5,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 10,
"fore_name": "Bart",
"last_name": "Charbon",
"corresponding": 0,
"reverse_position": 4,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 11,
"fore_name": "Petr",
"last_name": "Holub",
"corresponding": 0,
"reverse_position": 3,
"affiliations": [
"4 Biobanking and BioMolecular Resources Research Infrastructure (BBMRI-ERIC), Graz, Austria"
],
},
{
"pmcid": "PMC5870622",
"position": 12,
"fore_name": "Hans",
"last_name": "Hillege",
"corresponding": 0,
"reverse_position": 2,
"affiliations": [
"2 Department of Epidemiology, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 13,
"fore_name": "Morris A",
"last_name": "Swertz",
"corresponding": 1,
"reverse_position": 1,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands",
"2 Department of Epidemiology, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands",
],
},
]
@pytest.mark.parametrize(
    ["pmcid", "expected"],
    [
        # One parametrized case per entry in the module-level fixture dict.
        pytest.param(pmcid, authors, id=pmcid)
        for pmcid, authors in pcmid_to_authors.items()
    ],
)
def test_extract_authors_from_article(pmcid, expected):
    """Extract authors from cached front-matter and compare to the fixture.

    NOTE: PMC2373917 is an example of where affiliations are encoded in a non-semantic way.
    https://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi?verb=GetRecord&identifier=oai:pubmedcentral.nih.gov:2373917&metadataPrefix=pmc_fm
    """
    article = get_frontmatter_etree(pmcid)
    authors = extract_authors_from_article(article)
    # pytest shows captured output on failure, which helps debug mismatches.
    print(authors)
    assert authors == expected
def test_extract_authors_from_article_PMC3003546():
    """Regression test for an alternate XML layout.

    aff is a child of contrib-group rather than article-meta for PMC3003546
    https://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi?verb=GetRecord&identifier=oai:pubmedcentral.nih.gov:3003546&metadataPrefix=pmc_fm
    """
    pmcid = "PMC3003546"
    # NOTE(review): fetches from the live OAI endpoint — requires network.
    article = get_frontmatter_etree_via_api(pmcid)
    authors = extract_authors_from_article(article)
    assert "University of California San Diego" in authors[0]["affiliations"][0]
def test_extract_authors_from_article_PMC4372613():
    """Regression test for affiliation text nested in <addr-line>.

    Affiliation name is under <aff><addr-line> for PMC4372613.
    https://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi?verb=GetRecord&identifier=oai:pubmedcentral.nih.gov:4372613&metadataPrefix=pmc_fm
    """
    pmcid = "PMC4372613"
    # NOTE(review): fetches from the live OAI endpoint — requires network.
    article = get_frontmatter_etree_via_api(pmcid)
    authors = extract_authors_from_article(article)
    assert "California Institute of Technology" in authors[0]["affiliations"][0]
| 34.344961 | 172 | 0.621262 | import pathlib
from lxml import etree
import requests
import pytest
from ..pmc_oai import get_sets_for_pmcid, extract_authors_from_article
directory = pathlib.Path(__file__).parent
def test_get_sets_for_pmcid():
set_specs = get_sets_for_pmcid("PMC2092437")
assert "bmcbioi" in set_specs
assert "pmc-open" in set_specs
def get_frontmatter_etree(pmcid):
frontmatter_dir = directory.joinpath("data", "pmc-frontmatter")
text = frontmatter_dir.joinpath(f"{pmcid}.xml").read_text(encoding="utf-8-sig")
return etree.fromstring(text)
def get_frontmatter_etree_via_api(pmcid):
    """Fetch *pmcid* frontmatter from the PMC OAI endpoint; return the <article> element."""
    endpoint = f"https://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi?verb=GetRecord&identifier=oai:pubmedcentral.nih.gov:{pmcid[3:]}&metadataPrefix=pmc_fm"
    reply = requests.get(endpoint)
    record_tree = etree.fromstring(reply.content)
    return record_tree.find("{*}GetRecord/{*}record/{*}metadata/{*}article")
pcmid_to_authors = dict()
pcmid_to_authors["PMC65048"] = [
{
"pmcid": "PMC65048",
"position": 1,
"fore_name": "Kevin",
"last_name": "Truong",
"corresponding": 0,
"reverse_position": 2,
"affiliations": [
"1 Division of Molecular and Structural Biology, Ontario Cancer Institute and Department of Medical Biophysics, University of Toronto, Toronto, Ontario, Canada"
],
},
{
"pmcid": "PMC65048",
"position": 2,
"fore_name": "Mitsuhiko",
"last_name": "Ikura",
"corresponding": 1,
"reverse_position": 1,
"affiliations": [
"1 Division of Molecular and Structural Biology, Ontario Cancer Institute and Department of Medical Biophysics, University of Toronto, Toronto, Ontario, Canada"
],
},
]
pcmid_to_authors["PMC1183515"] = [
{
"pmcid": "PMC1183515",
"position": 1,
"fore_name": "Boris E",
"last_name": "Shakhnovich",
"corresponding": 1,
"reverse_position": 1,
"affiliations": [
"Bioinformatics Program, Boston University, Boston, Massachusetts, United States of America"
],
}
]
pcmid_to_authors["PMC5870622"] = [
{
"pmcid": "PMC5870622",
"position": 1,
"fore_name": "Chao",
"last_name": "Pang",
"corresponding": 0,
"reverse_position": 13,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands",
"2 Department of Epidemiology, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands",
],
},
{
"pmcid": "PMC5870622",
"position": 2,
"fore_name": "Fleur",
"last_name": "Kelpin",
"corresponding": 0,
"reverse_position": 12,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 3,
"fore_name": "David",
"last_name": "van Enckevort",
"corresponding": 0,
"reverse_position": 11,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 4,
"fore_name": "Niina",
"last_name": "Eklund",
"corresponding": 0,
"reverse_position": 10,
"affiliations": [
"3 Department of Public Health Solutions, National Institute for Health and Welfare, Helsinki, Finland"
],
},
{
"pmcid": "PMC5870622",
"position": 5,
"fore_name": "Kaisa",
"last_name": "Silander",
"corresponding": 0,
"reverse_position": 9,
"affiliations": [
"3 Department of Public Health Solutions, National Institute for Health and Welfare, Helsinki, Finland"
],
},
{
"pmcid": "PMC5870622",
"position": 6,
"fore_name": "Dennis",
"last_name": "Hendriksen",
"corresponding": 0,
"reverse_position": 8,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 7,
"fore_name": "Mark",
"last_name": "de Haan",
"corresponding": 0,
"reverse_position": 7,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 8,
"fore_name": "Jonathan",
"last_name": "Jetten",
"corresponding": 0,
"reverse_position": 6,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 9,
"fore_name": "Tommy",
"last_name": "de Boer",
"corresponding": 0,
"reverse_position": 5,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 10,
"fore_name": "Bart",
"last_name": "Charbon",
"corresponding": 0,
"reverse_position": 4,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 11,
"fore_name": "Petr",
"last_name": "Holub",
"corresponding": 0,
"reverse_position": 3,
"affiliations": [
"4 Biobanking and BioMolecular Resources Research Infrastructure (BBMRI-ERIC), Graz, Austria"
],
},
{
"pmcid": "PMC5870622",
"position": 12,
"fore_name": "Hans",
"last_name": "Hillege",
"corresponding": 0,
"reverse_position": 2,
"affiliations": [
"2 Department of Epidemiology, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands"
],
},
{
"pmcid": "PMC5870622",
"position": 13,
"fore_name": "Morris A",
"last_name": "Swertz",
"corresponding": 1,
"reverse_position": 1,
"affiliations": [
"1 Department of Genetics, Genomics Coordination Center, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands",
"2 Department of Epidemiology, University Medical Center Groningen, University of Groningen, Groningen, The Netherlands",
],
},
]
# One parametrized case per PMCID in the module-level ``pcmid_to_authors``
# fixture dict; ``id=pmcid`` names each generated test case after its PMCID.
@pytest.mark.parametrize(
    ["pmcid", "expected"],
    [
        pytest.param(pmcid, authors, id=pmcid)
        for pmcid, authors in pcmid_to_authors.items()
    ],
)
def test_extract_authors_from_article(pmcid, expected):
    """Author extraction from a cached frontmatter file must match the fixture."""
    article = get_frontmatter_etree(pmcid)
    authors = extract_authors_from_article(article)
    # Captured by pytest; only shown on failure.
    print(authors)
    assert authors == expected
def test_extract_authors_from_article_PMC3003546():
    """Regression: <aff> is nested under <contrib-group> for PMC3003546."""
    frontmatter = get_frontmatter_etree_via_api("PMC3003546")
    extracted = extract_authors_from_article(frontmatter)
    assert "University of California San Diego" in extracted[0]["affiliations"][0]
def test_extract_authors_from_article_PMC4372613():
    """Regression: affiliation name lives under <aff><addr-line> for PMC4372613."""
    frontmatter = get_frontmatter_etree_via_api("PMC4372613")
    extracted = extract_authors_from_article(frontmatter)
    assert "California Institute of Technology" in extracted[0]["affiliations"][0]
| true | true |
7901f57fe02e6fa850f845939ebf87c16e12624a | 2,221 | py | Python | airflow/contrib/sensors/emr_base_sensor.py | shrutimantri/airflow | 61eaaacd20ab0f743786df895cf8f232b3b2a48c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 15 | 2017-04-06T09:01:50.000Z | 2021-10-02T13:54:31.000Z | airflow/contrib/sensors/emr_base_sensor.py | shrutimantri/airflow | 61eaaacd20ab0f743786df895cf8f232b3b2a48c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 26 | 2019-08-05T13:44:11.000Z | 2022-03-30T10:06:18.000Z | airflow/contrib/sensors/emr_base_sensor.py | shrutimantri/airflow | 61eaaacd20ab0f743786df895cf8f232b3b2a48c | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 21 | 2017-08-20T03:01:05.000Z | 2021-09-07T06:47:51.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils import apply_defaults
from airflow.exceptions import AirflowException
class EmrBaseSensor(BaseSensorOperator):
    """
    Contains general sensor behavior for EMR.

    Subclasses should implement get_emr_response() and state_from_response()
    methods. Subclasses should also implement NON_TERMINAL_STATES and
    FAILED_STATE constants.
    """
    ui_color = '#66c3ff'

    @apply_defaults
    def __init__(
            self,
            aws_conn_id='aws_default',
            *args, **kwargs):
        super(EmrBaseSensor, self).__init__(*args, **kwargs)
        # Connection id subclasses use to build their EMR client.
        self.aws_conn_id = aws_conn_id

    def poke(self, context):
        """Return True once the job reaches a terminal, non-failed state.

        Returns False (sensor keeps waiting) on a bad HTTP response or a
        non-terminal state; raises AirflowException on a failed state.
        """
        response = self.get_emr_response()
        # Idiomatic `!= 200` (was `not ... == 200`); non-200 is treated as
        # transient: log and let the sensor retry on the next poke.
        if response['ResponseMetadata']['HTTPStatusCode'] != 200:
            self.log.info('Bad HTTP response: %s', response)
            return False
        state = self.state_from_response(response)
        self.log.info('Job flow currently %s', state)
        if state in self.NON_TERMINAL_STATES:
            return False
        if state in self.FAILED_STATE:
            final_message = 'EMR job failed'
            failure_message = self.failure_message_from_response(response)
            if failure_message:
                final_message += ' ' + failure_message
            raise AirflowException(final_message)
        return True
| 36.409836 | 85 | 0.701036 |
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils import apply_defaults
from airflow.exceptions import AirflowException
class EmrBaseSensor(BaseSensorOperator):
    """Base sensor behavior for EMR.

    Subclasses provide get_emr_response() / state_from_response() plus the
    NON_TERMINAL_STATES and FAILED_STATE constants referenced in poke().
    """
    # Color used for this operator's box in the Airflow UI.
    ui_color = '#66c3ff'
    @apply_defaults
    def __init__(
            self,
            aws_conn_id='aws_default',
            *args, **kwargs):
        super(EmrBaseSensor, self).__init__(*args, **kwargs)
        # Connection id subclasses use to build their EMR client.
        self.aws_conn_id = aws_conn_id
    def poke(self, context):
        """True when the job is terminal and not failed; False keeps waiting."""
        response = self.get_emr_response()
        # Non-200 responses are treated as transient: log and retry later.
        if not response['ResponseMetadata']['HTTPStatusCode'] == 200:
            self.log.info('Bad HTTP response: %s', response)
            return False
        state = self.state_from_response(response)
        self.log.info('Job flow currently %s', state)
        if state in self.NON_TERMINAL_STATES:
            return False
        if state in self.FAILED_STATE:
            # Append the service-provided failure detail when available.
            final_message = 'EMR job failed'
            failure_message = self.failure_message_from_response(response)
            if failure_message:
                final_message += ' ' + failure_message
            raise AirflowException(final_message)
        return True
| true | true |
7901f866a2b390b84cd531e9660dfa586136c759 | 404 | py | Python | yfinance_trending.py | pikamegan/GMS-hackathon | 1a4e0bfc0912120c812470be8b84282695834f60 | [
"MIT"
] | null | null | null | yfinance_trending.py | pikamegan/GMS-hackathon | 1a4e0bfc0912120c812470be8b84282695834f60 | [
"MIT"
] | null | null | null | yfinance_trending.py | pikamegan/GMS-hackathon | 1a4e0bfc0912120c812470be8b84282695834f60 | [
"MIT"
] | null | null | null | import requests
url = "https://apidojo-yahoo-finance-v1.p.rapidapi.com/market/get-trending-tickers"
querystring = {"region":"US"}
headers = {
'x-rapidapi-host': "apidojo-yahoo-finance-v1.p.rapidapi.com",
'x-rapidapi-key': "86bb0847c2msh62ec4f10fcc7ed9p17aea2jsn6b82733f81a1"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text) | 28.857143 | 84 | 0.717822 | import requests
url = "https://apidojo-yahoo-finance-v1.p.rapidapi.com/market/get-trending-tickers"
querystring = {"region":"US"}
headers = {
'x-rapidapi-host': "apidojo-yahoo-finance-v1.p.rapidapi.com",
'x-rapidapi-key': "86bb0847c2msh62ec4f10fcc7ed9p17aea2jsn6b82733f81a1"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text) | true | true |
7901f8712e7c9a9d62614608e1db94ba004f877a | 3,488 | py | Python | a2ml/api/auger/model.py | augerai/a2ml | 9d9ce0ac1b51cc81f1cb5ae331c4523131bc6a86 | [
"Apache-2.0"
] | 30 | 2019-07-01T13:23:27.000Z | 2022-03-16T21:19:33.000Z | a2ml/api/auger/model.py | augerai/a2ml | 9d9ce0ac1b51cc81f1cb5ae331c4523131bc6a86 | [
"Apache-2.0"
] | 234 | 2019-07-04T13:56:15.000Z | 2021-11-04T10:12:55.000Z | a2ml/api/auger/model.py | augerai/a2ml | 9d9ce0ac1b51cc81f1cb5ae331c4523131bc6a86 | [
"Apache-2.0"
] | 13 | 2019-07-04T14:00:34.000Z | 2020-07-13T11:18:44.000Z | from .impl.cloud.rest_api import RestApi
from .impl.decorators import with_project
from a2ml.api.utils.decorators import error_handler, authenticated
from .impl.model import Model
from .credentials import Credentials
class AugerModel(object):
    """Auger (cloud) implementation of the a2ml model API.

    Methods are wrapped with @error_handler/@authenticated and, where a
    project context is needed, @with_project, which injects ``project`` as
    the first argument after ``self`` (callers do not pass it). Methods with
    the decorator commented out operate with ``project=None`` instead.
    """
    def __init__(self, ctx):
        # ctx: shared a2ml context; an authenticated RestApi client is
        # attached to it for use by the downstream Model calls.
        self.ctx = ctx
        self.credentials = Credentials(ctx).load()
        self.ctx.rest_api = RestApi(
            self.credentials.api_url, self.credentials.token)
    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def deploy(self, project, model_id, locally, review, name, algorithm, score, data_path, metadata=None):
        """Deploy a trained model (to the cloud, or locally when ``locally``)."""
        model_id = Model(self.ctx, project).deploy(model_id, locally, review, name, algorithm, score, data_path, metadata)
        return {'model_id': model_id}
    @error_handler
    @authenticated
    #@with_project(autocreate=False)
    def predict(self, filename, model_id, threshold, locally, data, columns, predicted_at, output,
            no_features_in_result, score, score_true_data):
        """Run predictions; first deploys the model locally when ``locally``."""
        if locally:
            self.deploy(model_id, locally, review=False, name=None, algorithm=None, score=None, data_path=None)
        predicted = Model(self.ctx, project=None).predict(
            filename, model_id, threshold, locally, data, columns, predicted_at, output,
            no_features_in_result, score, score_true_data)
        if filename:
            self.ctx.log('Predictions stored in %s' % predicted)
        # Normalize the return shape to a dict with a 'predicted' key.
        if isinstance(predicted, dict) and 'predicted' in predicted:
            return predicted
        return {'predicted': predicted}
    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def actuals(self, project, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
        """Upload actual outcome values for a deployed model."""
        return Model(self.ctx, project).actuals(model_id, filename, data, columns, actuals_at, actual_date_column, locally)
    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def delete_actuals(self, project, model_id, with_predictions=False, begin_date=None, end_date=None, locally=False):
        """Delete stored actuals (optionally with predictions) in a date range."""
        return Model(self.ctx, project).delete_actuals(model_id, with_predictions, begin_date, end_date, locally)
    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def review_alert(self, project, model_id, parameters, name):
        """Configure a review alert for the model."""
        return Model(self.ctx, project).review_alert(model_id, parameters, name)
    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def build_review_data(self, project, model_id, locally, output):
        """Build the review dataset for the model."""
        return Model(self.ctx, project).build_review_data(model_id, locally, output)
    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def review(self, project, model_id):
        """Return review status/metrics for the model."""
        return Model(self.ctx, project).review(model_id)
    @error_handler
    @authenticated
    @with_project(autocreate=False)
    def undeploy(self, project, model_id, locally):
        """Undeploy the model and echo back its id."""
        Model(self.ctx, project).undeploy(model_id, locally)
        return {'model_id': model_id}
    @error_handler
    @authenticated
    #@with_project(autocreate=False)
    def get_info(self, model_id, locally):
        """Return metadata for a deployed model (no project context needed)."""
        return Model(self.ctx, project=None).get_info(model_id, locally)
    @error_handler
    @authenticated
    #@with_project(autocreate=False)
    def update(self, model_id, metadata, locally):
        """Update a deployed model's metadata (no project context needed)."""
        return Model(self.ctx, project=None).update(model_id, metadata, locally)
| 38.32967 | 138 | 0.711869 | from .impl.cloud.rest_api import RestApi
from .impl.decorators import with_project
from a2ml.api.utils.decorators import error_handler, authenticated
from .impl.model import Model
from .credentials import Credentials
class AugerModel(object):
def __init__(self, ctx):
self.ctx = ctx
self.credentials = Credentials(ctx).load()
self.ctx.rest_api = RestApi(
self.credentials.api_url, self.credentials.token)
@error_handler
@authenticated
@with_project(autocreate=False)
def deploy(self, project, model_id, locally, review, name, algorithm, score, data_path, metadata=None):
model_id = Model(self.ctx, project).deploy(model_id, locally, review, name, algorithm, score, data_path, metadata)
return {'model_id': model_id}
@error_handler
@authenticated
def predict(self, filename, model_id, threshold, locally, data, columns, predicted_at, output,
no_features_in_result, score, score_true_data):
if locally:
self.deploy(model_id, locally, review=False, name=None, algorithm=None, score=None, data_path=None)
predicted = Model(self.ctx, project=None).predict(
filename, model_id, threshold, locally, data, columns, predicted_at, output,
no_features_in_result, score, score_true_data)
if filename:
self.ctx.log('Predictions stored in %s' % predicted)
if isinstance(predicted, dict) and 'predicted' in predicted:
return predicted
return {'predicted': predicted}
@error_handler
@authenticated
@with_project(autocreate=False)
def actuals(self, project, model_id, filename=None, data=None, columns=None, actuals_at=None, actual_date_column=None, locally=False):
return Model(self.ctx, project).actuals(model_id, filename, data, columns, actuals_at, actual_date_column, locally)
@error_handler
@authenticated
@with_project(autocreate=False)
def delete_actuals(self, project, model_id, with_predictions=False, begin_date=None, end_date=None, locally=False):
return Model(self.ctx, project).delete_actuals(model_id, with_predictions, begin_date, end_date, locally)
@error_handler
@authenticated
@with_project(autocreate=False)
def review_alert(self, project, model_id, parameters, name):
return Model(self.ctx, project).review_alert(model_id, parameters, name)
@error_handler
@authenticated
@with_project(autocreate=False)
def build_review_data(self, project, model_id, locally, output):
return Model(self.ctx, project).build_review_data(model_id, locally, output)
@error_handler
@authenticated
@with_project(autocreate=False)
def review(self, project, model_id):
return Model(self.ctx, project).review(model_id)
@error_handler
@authenticated
@with_project(autocreate=False)
def undeploy(self, project, model_id, locally):
Model(self.ctx, project).undeploy(model_id, locally)
return {'model_id': model_id}
@error_handler
@authenticated
def get_info(self, model_id, locally):
return Model(self.ctx, project=None).get_info(model_id, locally)
@error_handler
@authenticated
def update(self, model_id, metadata, locally):
return Model(self.ctx, project=None).update(model_id, metadata, locally)
| true | true |
7901f88050817b7d944fde8456d5af6133e7ce35 | 723 | py | Python | app/main/forms.py | edumorris/pomodoro | cde372be1d5c37dd8221ebd40b684d07fbb472b5 | [
"MIT"
] | null | null | null | app/main/forms.py | edumorris/pomodoro | cde372be1d5c37dd8221ebd40b684d07fbb472b5 | [
"MIT"
] | null | null | null | app/main/forms.py | edumorris/pomodoro | cde372be1d5c37dd8221ebd40b684d07fbb472b5 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField, ValidationError, BooleanField, TextAreaField,SelectField
from wtforms.validators import Required,Email,EqualTo
from ..models import User
class CommentForm(FlaskForm):
comment = TextAreaField('Your comment:', validators=[Required()])
submit = SubmitField('Comment')
pitch_category = [('Pickup Lines', 'Pickup Lines'), ('Interview Pitch', 'Inteview Pitch'), ('Product Pitch', 'Product Pitch'), ('Promo Pitch', 'Promo Pitch')]
class PitchForm(FlaskForm):
category = SelectField('Category', choices=pitch_category)
pitch = TextAreaField('Your pitch:', validators=[Required()])
submit = SubmitField('Submit Pitch') | 48.2 | 158 | 0.75657 | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField, ValidationError, BooleanField, TextAreaField,SelectField
from wtforms.validators import Required,Email,EqualTo
from ..models import User
class CommentForm(FlaskForm):
comment = TextAreaField('Your comment:', validators=[Required()])
submit = SubmitField('Comment')
pitch_category = [('Pickup Lines', 'Pickup Lines'), ('Interview Pitch', 'Inteview Pitch'), ('Product Pitch', 'Product Pitch'), ('Promo Pitch', 'Promo Pitch')]
class PitchForm(FlaskForm):
category = SelectField('Category', choices=pitch_category)
pitch = TextAreaField('Your pitch:', validators=[Required()])
submit = SubmitField('Submit Pitch') | true | true |
7901fa1cbb65084bec19ce9b5114920ec080192e | 698 | py | Python | Python/climbing-stairs.py | buptcszh/LeetCode | f0cf11fee2f01b8d166a5d210c5006154e0cde2e | [
"MIT"
] | 3 | 2020-11-04T01:01:23.000Z | 2022-01-10T14:23:04.000Z | Python/climbing-stairs.py | buptcszh/LeetCode | f0cf11fee2f01b8d166a5d210c5006154e0cde2e | [
"MIT"
] | null | null | null | Python/climbing-stairs.py | buptcszh/LeetCode | f0cf11fee2f01b8d166a5d210c5006154e0cde2e | [
"MIT"
] | 3 | 2017-10-09T00:15:30.000Z | 2020-05-12T23:52:41.000Z | # Time: O(n)
# Space: O(1)
#
# You are climbing a stair case. It takes n steps to reach to the top.
#
# Each time you can either climb 1 or 2 steps.
# In how many distinct ways can you climb to the top?
class Solution:
    """Count distinct ways to climb ``n`` stairs taking 1 or 2 steps at a time."""

    def climbStairs(self, n):
        """Iterative Fibonacci-style solution: O(n) time, O(1) space.

        :type n: int
        :rtype: int
        """
        # current == ways to reach step i; prev == ways to reach step i - 1.
        prev, current = 0, 1
        # range (not xrange) keeps this working on Python 3 as well as 2;
        # iteration behavior is identical.
        for _ in range(n):
            prev, current = current, prev + current
        return current

    def climbStairs1(self, n):
        """Recursive formulation; delegates subproblems to the O(n) helper.

        :type n: int
        :rtype: int
        """
        if n == 1:
            return 1
        if n == 2:
            return 2
        return self.climbStairs(n - 1) + self.climbStairs(n - 2)
if __name__ == "__main__":
    # Smoke test: 2 stairs -> 2 ways ([1,1] and [2]).
    result = Solution().climbStairs(2)
    # Parenthesized print so the script runs under both Python 2 and 3
    # (the original `print result` is a SyntaxError on Python 3).
    print(result)
| 22.516129 | 70 | 0.561605 |
class Solution:
"""
:type n: int
:rtype: int
"""
def climbStairs(self, n):
prev, current = 0, 1
for i in xrange(n):
prev, current = current, prev + current,
return current
def climbStairs1(self, n):
if n == 1:
return 1
if n == 2:
return 2
return self.climbStairs(n - 1) + self.climbStairs(n - 2)
if __name__ == "__main__":
result = Solution().climbStairs(2)
print result
| false | true |
7901fa6264ca55182e1c5eb41d5b5639610722a6 | 224 | py | Python | face_recognition/__init__.py | Jones174/Testrepo | b391468e92bf78337673bc60be884644bafa7eae | [
"MIT"
] | 19 | 2017-07-19T17:44:44.000Z | 2021-12-23T05:56:01.000Z | face_recognition/__init__.py | Jones174/Testrepo | b391468e92bf78337673bc60be884644bafa7eae | [
"MIT"
] | 3 | 2020-10-05T14:47:27.000Z | 2021-09-07T13:05:05.000Z | face_recognition/__init__.py | Jones174/Testrepo | b391468e92bf78337673bc60be884644bafa7eae | [
"MIT"
] | 7 | 2020-06-21T20:43:53.000Z | 2021-09-07T11:55:23.000Z | # -*- coding: utf-8 -*-
__author__ = """Adam Geitgey"""
__email__ = 'ageitgey@gmail.com'
__version__ = '0.1.0'
from .api import load_image_file, face_locations, face_landmarks, face_encodings, compare_faces, face_distance
| 28 | 110 | 0.741071 |
__author__ = """Adam Geitgey"""
__email__ = 'ageitgey@gmail.com'
__version__ = '0.1.0'
from .api import load_image_file, face_locations, face_landmarks, face_encodings, compare_faces, face_distance
| true | true |
7901fad835676fe2caacdaecfd63c9bb22d1e158 | 3,979 | py | Python | benchmarks/lasso_replicas/bench_plot_lasso_path_83.py | Giannos-G/scikit-learn_modified | 03df71bbea1bcb3423262b711191552420422cda | [
"BSD-3-Clause"
] | 1 | 2022-03-03T23:54:50.000Z | 2022-03-03T23:54:50.000Z | benchmarks/lasso_replicas/bench_plot_lasso_path_83.py | Giannos-G/scikit-learn_modified | 03df71bbea1bcb3423262b711191552420422cda | [
"BSD-3-Clause"
] | null | null | null | benchmarks/lasso_replicas/bench_plot_lasso_path_83.py | Giannos-G/scikit-learn_modified | 03df71bbea1bcb3423262b711191552420422cda | [
"BSD-3-Clause"
] | null | null | null | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, lars_path_gram
from sklearn.linear_model import lasso_path
from sklearn.datasets import make_regression
def compute_bench(samples_range, features_range):
    """Time lasso/lars path computation over a grid of problem sizes.

    Parameters
    ----------
    samples_range, features_range : sequence of int
        Grid of n_samples / n_features values to benchmark (must support
        ``len``).

    Returns
    -------
    dict
        Maps a method label to the list of elapsed times in seconds, one
        entry per (n_samples, n_features) combination, in grid order.
    """
    it = 0
    # defaultdict(list) is the idiomatic form of defaultdict(lambda: []).
    results = defaultdict(list)
    max_it = len(samples_range) * len(features_range)
    for n_samples in samples_range:
        for n_features in features_range:
            it += 1
            print('====================')
            print('Iteration %03d of %03d' % (it, max_it))
            print('====================')
            # Mostly low-rank data with a fat tail (rank ~ min(n, p) / 10).
            dataset_kwargs = {
                'n_samples': n_samples,
                'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) / 10,
                #'effective_rank': None,
                'bias': 0.0,
            }
            print("n_samples: %d" % n_samples)
            print("n_features: %d" % n_features)
            X, y = make_regression(**dataset_kwargs)

            # gc.collect() before each timing keeps collector pauses out of
            # the measured interval.
            gc.collect()
            print("benchmarking lars_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            G = np.dot(X.T, X)  # precomputed Gram matrix
            Xy = np.dot(X.T, y)
            lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (with Gram)'].append(delta)

            gc.collect()
            print("benchmarking lars_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lars_path(X, y, method='lasso')
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lars_path (without Gram)'].append(delta)

            gc.collect()
            print("benchmarking lasso_path (with Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=True)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (with Gram)'].append(delta)

            gc.collect()
            print("benchmarking lasso_path (without Gram):", end='')
            sys.stdout.flush()
            tstart = time()
            lasso_path(X, y, precompute=False)
            delta = time() - tstart
            print("%0.3fs" % delta)
            results['lasso_path (without Gram)'].append(delta)
    return results
if __name__ == '__main__':
    from mpl_toolkits.mplot3d import axes3d  # register the 3d projection
    import matplotlib.pyplot as plt
    # 3x3 grid of problem sizes: up to 500 samples x 1400 features.
    samples_range = np.linspace(10, 500, 3).astype(int)
    features_range = np.linspace(10, 1400 , 3).astype(int)
    results = compute_bench(samples_range, features_range)
    # Shared z-axis ceiling so the four surfaces are visually comparable.
    max_time = max(max(t) for t in results.values())
    fig = plt.figure('scikit-learn Lasso path benchmark results')
    i = 1
    # One 3-D subplot per benchmarked method, each with its own color code.
    for c, (label, timings) in zip('bcry', sorted(results.items())):
        ax = fig.add_subplot(2, 2, i, projection='3d')
        X, Y = np.meshgrid(samples_range, features_range)
        Z = np.asarray(timings).reshape(samples_range.shape[0],
                                       features_range.shape[0])
        # plot the actual surface
        ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plot do not
        # support legends (yet?)
        # ax.plot([1], [1], [1], color=c, label=label)
        ax.set_xlabel('n_samples')
        ax.set_ylabel('n_features')
        ax.set_zlabel('Time (s)')
        ax.set_zlim3d(0.0, max_time * 1.1)
        ax.set_title(label)
        # ax.legend()
        i += 1
    # Figure is built but never displayed/saved here; plt.show() is disabled.
    #plt.show()
| 34.301724 | 76 | 0.563961 | from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, lars_path_gram
from sklearn.linear_model import lasso_path
from sklearn.datasets import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features // 10,
'effective_rank': min(n_samples, n_features) / 10,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 500, 3).astype(int)
features_range = np.linspace(10, 1400 , 3).astype(int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
i += 1
| true | true |
7901fd13db9a01ba1cec85c30a28a62cbd3aa085 | 1,088 | py | Python | kubernetes/test/test_v1_scale_io_persistent_volume_source.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_scale_io_persistent_volume_source.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_scale_io_persistent_volume_source.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | 1 | 2019-01-10T11:13:52.000Z | 2019-01-10T11:13:52.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_scale_io_persistent_volume_source import V1ScaleIOPersistentVolumeSource
class TestV1ScaleIOPersistentVolumeSource(unittest.TestCase):
    """Generated unit-test skeleton for V1ScaleIOPersistentVolumeSource."""

    def setUp(self):
        pass  # no fixtures required yet

    def tearDown(self):
        pass  # nothing to clean up

    def testV1ScaleIOPersistentVolumeSource(self):
        """Placeholder: construction of V1ScaleIOPersistentVolumeSource."""
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v1_scale_io_persistent_volume_source.V1ScaleIOPersistentVolumeSource()
        pass
if __name__ == '__main__':
unittest.main()
| 24.177778 | 112 | 0.742647 |
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_scale_io_persistent_volume_source import V1ScaleIOPersistentVolumeSource
class TestV1ScaleIOPersistentVolumeSource(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1ScaleIOPersistentVolumeSource(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
7901fd5e6782e5ea4e57747c9d931ddc4508f949 | 2,036 | py | Python | package/spack-glew/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-glew/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-glew/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Glew(Package):
    """The OpenGL Extension Wrangler Library."""

    homepage = "http://glew.sourceforge.net/"
    url = "https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download"

    version('2.0.0', '2a2cd7c98f13854d2fcddae0d2b20411')

    depends_on("cmake", type='build')
    depends_on("gl")

    def install(self, spec, prefix):
        # Drive the upstream CMake project from its bundled ./cmake directory.
        cmake_opts = list(std_cmake_args)
        with working_dir('build'):
            cmake('./cmake/', *cmake_opts)
            # https://github.com/Homebrew/legacy-homebrew/issues/22025
            # Note: This file is generated only after cmake is run
            filter_file(r'Requires: glu',
                        (''), '../glew.pc')
            make()
            make("install")
| 38.415094 | 95 | 0.63556 | true | true | |
7901fe6ac320f868b9f6379bf974bacc48b8b001 | 18,801 | py | Python | old_stuff/harold/_time_domain.py | weightan/quaternion_polynomials | 50d00bb883c4a4249f13154cffcb459a1319ecb9 | [
"MIT"
] | 154 | 2015-05-11T03:33:54.000Z | 2022-02-23T23:34:28.000Z | old_stuff/harold/_time_domain.py | weightan/quaternion_polynomials | 50d00bb883c4a4249f13154cffcb459a1319ecb9 | [
"MIT"
] | 43 | 2015-05-11T10:35:43.000Z | 2022-02-22T10:02:10.000Z | old_stuff/harold/_time_domain.py | weightan/quaternion_polynomials | 50d00bb883c4a4249f13154cffcb459a1319ecb9 | [
"MIT"
] | 22 | 2015-06-01T21:32:57.000Z | 2021-09-27T13:35:38.000Z | import numpy as np
from numpy import (reciprocal, einsum, maximum, minimum, zeros_like,
atleast_1d, squeeze)
from scipy.linalg import eig, eigvals, matrix_balance, norm
from harold._classes import Transfer, transfer_to_state
from harold._discrete_funcs import discretize
from harold._arg_utils import _check_for_state, _check_for_state_or_transfer
__all__ = ['simulate_linear_system', 'simulate_step_response',
'simulate_impulse_response']
def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False):
"""
Compute the linear model response to an input array sampled at given time
instances.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
u : array_like
The real-valued input sequence to force the model. 1D arrays for single
input models and 2D arrays that has as many columns as the number of
inputs are valid inputs.
t : array_like, optional
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced otherwise an error is raised. For
discrete time models increments different than the sampling period also
raises an error. On the other hand for discrete models this can be
omitted and a time sequence will be generated automatically.
x0 : array_like, optional
The initial condition array. If omitted an array of zeros is assumed.
Note that Transfer models by definition assume zero initial conditions
and will raise an error.
per_channel : bool, optional
If this is set to True and if the system has multiple inputs, the
response of each input is returned individually. For example, if a
system has 4 inputs and 3 outputs then the response shape becomes
(num, p, m) instead of (num, p) where k-th slice (:, :, k) is the
response from the k-th input channel. For single input systems, this
keyword has no effect.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs.
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
Notes
-----
For Transfer models, first conversion to a state model is performed and
then the resulting model is used for computations.
"""
_check_for_state_or_transfer(sys)
# Quick initial condition checks
if x0 is not None:
if sys._isgain:
raise ValueError('Static system models can\'t have initial '
'conditions set.')
if isinstance(sys, Transfer):
raise ValueError('Transfer models can\'t have initial conditions '
'set.')
x0 = np.asarray(x0, dtype=float).squeeze()
if x0.ndim > 1:
raise ValueError('Initial condition can only be a 1D array.')
else:
x0 = x0[:, None]
if sys.NumberOfStates != x0.size:
raise ValueError('The initial condition size does not match the '
'number of states of the model.')
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
n, m = sys.NumberOfStates, sys.shape[1]
is_discrete = sys.SamplingSet == 'Z'
u = np.asarray(u, dtype=float).squeeze()
if u.ndim == 1:
u = u[:, None]
t = _check_u_and_t_for_simulation(m, sys._dt, u, t, is_discrete)
# input and time arrays are regular move on
# Static gains are simple matrix multiplications with no x0
if sys._isgain:
if sys._isSISO:
yout = u * sys.d.squeeze()
else:
# don't bother for single inputs
if m == 1:
per_channel = False
if per_channel:
yout = np.einsum('ij,jk->ikj', u, sys.d.T)
else:
yout = u @ sys.d.T
# Dynamic model
else:
# TODO: Add FOH discretization for funky input
# ZOH discretize the continuous system based on the time increment
if not is_discrete:
sys = discretize(sys, t[1]-t[0], method='zoh')
sample_num = len(u)
a, b, c, d = sys.matrices
# Bu and Du are constant matrices so get them ready (transposed)
M_u = np.block([b.T, d.T])
at = a.T
# Explicitly skip single inputs for per_channel
if m == 1:
per_channel = False
# Shape the response as a 3D array
if per_channel:
xout = np.empty([sample_num, n, m], dtype=float)
for col in range(m):
xout[0, :, col] = 0. if x0 is None else x0.T
Bu = u[:, [col]] @ b.T[[col], :]
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row, :, col] = xout[row-1, :, col] @ at + Bu[row-1]
# Get the output equation for each slice of inputs
# Cx + Du
yout = np.einsum('ijk,jl->ilk', xout, c.T) + \
np.einsum('ij,jk->ikj', u, d.T)
# Combined output
else:
BDu = u @ M_u
xout = np.empty([sample_num, n], dtype=float)
xout[0] = 0. if x0 is None else x0.T
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row] = (xout[row-1] @ at) + BDu[row-1, :n]
# Now we have all the state evolution get the output equation
yout = xout @ c.T + BDu[:, n:]
return yout, t
def simulate_step_response(sys, t=None):
    """
    Compute the linear model response to a Heaviside function (or all-ones
    array) sampled at given time instances.

    If the time array is omitted then a time sequence is generated based on
    the poles of the model.

    Parameters
    ----------
    sys : {State, Transfer}
        The system model to be simulated
    t : array_like, optional
        The real-valued sequence to be used for the evolution of the system.
        The values should be equally spaced otherwise an error is raised. For
        discrete time models increments different than the sampling period also
        raises an error. On the other hand for discrete models this can be
        omitted and a time sequence will be generated automatically.

    Returns
    -------
    yout : ndarray
        The resulting response array. The array is 1D if sys is SISO and
        has p columns if sys has p outputs. If there are also m inputs the
        array is 3D array with the shape (<num of samples>, p, m)
    tout : ndarray
        The time sequence used in the simulation. If the parameter t is not
        None then a copy of t is given.
    """
    _check_for_state_or_transfer(sys)
    # Transfer models carry no state; convert once so the simulation core
    # only needs to handle State models.
    try:
        _check_for_state(sys)
    except ValueError:
        sys = transfer_to_state(sys)
    if t is None:
        # Estimate a simulation horizon and step size from the model dynamics.
        tf, ts = _compute_tfinal_and_dt(sys)
        t = np.arange(0, tf + ts, ts, dtype=float)
    else:
        t, ts = _check_custom_time_input(t)
    m = sys.shape[1]
    # Unit step on every input channel; responses are returned per channel.
    u = np.ones([len(t), m], dtype=float)
    return simulate_linear_system(sys, u=u, t=t, per_channel=True)
def simulate_impulse_response(sys, t=None):
    """
    Compute the linear model response to a Dirac delta pulse (or all-zeros
    array except the first sample being 1/dt at each channel) sampled at given
    time instances.

    If the time array is omitted then a time sequence is generated based on
    the poles of the model.

    Parameters
    ----------
    sys : {State, Transfer}
        The system model to be simulated
    t : array_like, optional
        The real-valued sequence to be used for the evolution of the system.
        The values should be equally spaced otherwise an error is raised. For
        discrete time models increments different than the sampling period also
        raises an error. On the other hand for discrete models this can be
        omitted and a time sequence will be generated automatically.

    Returns
    -------
    yout : ndarray
        The resulting response array. The array is 1D if sys is SISO and
        has p columns if sys has p outputs. If there are also m inputs the
        array is 3D array with the shape (<num of samples>, p, m)
    tout : ndarray
        The time sequence used in the simulation. If the parameter t is not
        None then a copy of t is given.
    """
    _check_for_state_or_transfer(sys)
    # Transfer models carry no state; convert once so the simulation core
    # only needs to handle State models.
    try:
        _check_for_state(sys)
    except ValueError:
        sys = transfer_to_state(sys)
    if t is None:
        # is_step=False skips the 1/|pole| scaling used for step responses
        # when sizing the horizon.
        tf, ts = _compute_tfinal_and_dt(sys, is_step=False)
        t = np.arange(0, tf + ts, ts, dtype=float)
    else:
        t, ts = _check_custom_time_input(t)
    m = sys.shape[1]
    # Discrete approximation of the Dirac impulse: one sample of height 1/dt.
    u = np.zeros([len(t), m], dtype=float)
    u[0] = 1. / ts
    return simulate_linear_system(sys, u=u, t=t, per_channel=True)
def _compute_tfinal_and_dt(sys, is_step=True):
    """
    Helper function to estimate a final time and a sampling period for
    time domain simulations. It is essentially geared towards impulse response
    but is also used for step responses.

    For discrete-time models, obviously dt is inherent and only tfinal is
    computed.

    Parameters
    ----------
    sys : {State, Transfer}
        The system to be investigated
    is_step : bool
        Scales the dc value by the magnitude of the nonzero mode since
        integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.
        Default is True.

    Returns
    -------
    tfinal : float
        The final time instance for which the simulation will be performed.
    dt : float
        The estimated sampling period for the simulation.

    Notes
    -----
    Just by evaluating the fastest mode for dt and slowest for tfinal often
    leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000]))
    since dt will be very small and tfinal will be too large though the fast
    mode hardly ever contributes. Similarly, change the numerator to [1, 2, 0]
    and the simulation would be unnecessarily long and the plot is virtually
    an L shape since the decay is so fast.

    Instead, a modal decomposition in time domain hence a truncated ZIR and ZSR
    can be used such that only the modes that have significant effect on the
    time response are taken. But the sensitivity of the eigenvalues complicate
    the matter since dλ = <w, dA*v> with <w,v> = 1. Hence we can only work
    with simple poles with this formulation. See Golub, Van Loan Section 7.2.2
    for simple eigenvalue sensitivity about the nonunity of <w,v>. The size of
    the response is dependent on the size of the eigenshapes rather than the
    eigenvalues themselves.
    """
    sqrt_eps = np.sqrt(np.spacing(1.))
    min_points = 100  # min number of points
    min_points_z = 20  # min number of points
    max_points = 10000  # max number of points
    max_points_z = 75000  # max number of points for discrete models
    default_tfinal = 5  # Default simulation horizon
    total_cycles = 5  # number of cycles for oscillating modes
    pts_per_cycle = 25  # Number of points divide a period of oscillation
    log_decay_percent = np.log(100)  # Factor of reduction for real pole decays
    # if a static model is given, don't bother with checks
    if sys._isgain:
        if sys._isdiscrete:
            return sys._dt*min_points_z, sys._dt
        else:
            return default_tfinal, default_tfinal / min_points
    if sys._isdiscrete:
        # System already has sampling fixed hence we can't fall into the same
        # trap mentioned above. Just get nonintegrating slow modes together
        # with the damping.
        dt = sys._dt
        tfinal = default_tfinal
        p = eigvals(sys.a)
        # Array Masks
        # unstable
        m_u = (np.abs(p) >= 1 + sqrt_eps)
        p_u, p = p[m_u], p[~m_u]
        if p_u.size > 0:
            m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
            t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u])/dt))
            tfinal = max(tfinal, t_emp)
        # zero - negligible effect on tfinal
        m_z = np.abs(p) < sqrt_eps
        p = p[~m_z]
        # Negative reals- treated as oscillary mode
        m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
        p_nr, p = p[m_nr], p[~m_nr]
        if p_nr.size > 0:
            t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr)/dt).real))
            tfinal = max(tfinal, t_emp)
        # discrete integrators
        # NOTE(review): this mask matches ANY nearly-real pole with
        # real part below 1 + sqrt_eps, not only p ≈ 1; presumably
        # np.abs(p.real - 1) < sqrt_eps was intended -- verify.
        m_int = (p.real - 1 < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
        p_int, p = p[m_int], p[~m_int]
        # pure oscillatory modes
        m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
        p_w, p = p[m_w], p[~m_w]
        if p_w.size > 0:
            t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w)/dt).min()
            tfinal = max(tfinal, t_emp)
        if p.size > 0:
            t_emp = log_decay_percent / np.abs((np.log(p)/dt).real).min()
            tfinal = max(tfinal, t_emp)
        if p_int.size > 0:
            tfinal = tfinal * 5
        # Make tfinal an integer multiple of dt
        num_samples = tfinal // dt
        if num_samples > max_points_z:
            tfinal = dt * max_points_z
        else:
            tfinal = dt * num_samples
        return tfinal, dt
    # Improve conditioning via balancing and zeroing tiny entries
    # See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance
    b, (sca, perm) = matrix_balance(sys.a, separate=True)
    p, l, r = eig(b, left=True, right=True)
    # Reciprocal of inner product <w,v> for each λ, (bound the ~infs by 1e12)
    # G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
    eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
    eig_sens = minimum(1e12, eig_sens)
    # Tolerances
    p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
    # Incorporate balancing to outer factors
    l[perm, :] *= reciprocal(sca)[:, None]
    r[perm, :] *= sca[:, None]
    w, v = sys.c @ r, l.T.conj() @ sys.b
    origin = False
    # Computing the "size" of the response of each simple mode
    wn = np.abs(p)
    if np.any(wn == 0.):
        origin = True
    dc = zeros_like(p, dtype=float)
    # well-conditioned nonzero poles, np.abs just in case
    ok = np.abs(eig_sens) <= 1/sqrt_eps
    # the averaged t→∞ response of each simple λ on each i/o channel
    # See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is
    # R/L eigenvector dependent)
    dc[ok] = norm(v[ok, :], axis=1)*norm(w[:, ok], axis=0)*eig_sens[ok]
    dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
    dc[wn == 0.] = 0.
    # double the oscillating mode magnitude for the conjugate
    dc[p.imag != 0.] *= 2
    # Now get rid of noncontributing integrators and simple modes if any
    relevance = (dc > 0.1*dc.max()) | ~ok
    psub = p[relevance]
    wnsub = wn[relevance]
    tfinal, dt = [], []
    ints = wnsub == 0.
    iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)
    # Pure imaginary?
    if np.any(iw):
        tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
        dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
    # The rest ~ts = log(%ss value) / exp(Re(λ)t)
    texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
    tfinal += texp_mode.tolist()
    dt += minimum(texp_mode / 50,
                  (2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()
    # All integrators?
    if len(tfinal) == 0:
        return default_tfinal*5, default_tfinal*5/min_points
    tfinal = np.max(tfinal)*(5 if origin else 1)
    dt = np.min(dt)
    # Clamp the sample count into [min_points, max_points].
    dt = tfinal / max_points if tfinal // dt > max_points else dt
    tfinal = dt * min_points if tfinal // dt < min_points else tfinal
    return tfinal, dt
def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete):
"""
Helper function to validate the input arguments for simulate_linear_system
"""
# Discrete models can omit t array, make one here for convenience
if t is None:
if not isdiscrete:
raise ValueError('Continuous time models need an evenly spaced '
'time sequence from which the sampling period '
'will be obtained.')
else:
u_samples = len(u)
t = np.linspace(0, (u_samples-1)*dt, num=u_samples)
else:
t = np.asarray(t, dtype=float).squeeze()
if t.ndim != 1:
raise ValueError('Time array needs to be a 1D array.')
t_diff = np.diff(t)
if not np.allclose(t_diff, t_diff[0]) or not t_diff[0] > 0.:
raise ValueError('Time array should be equally spaced and '
'increasing.')
if isdiscrete and not np.isclose(dt, t_diff[0]):
raise ValueError('Time array increment {} is not equal to the'
' model sampling period {}.'.format(t_diff[0],
dt))
if u.size < 1:
raise ValueError('The input array should at least have one point.')
# First dimension is always # of samples
if len(u) != len(t):
raise ValueError('The input and time arrays should have the same'
' length. t: {} vs. u: {}'.format(t.shape,
u.shape))
if u.shape[1] != m:
raise ValueError('Number of input columns ({}) don\'t match the number'
' of inputs ({}) of the given model.'
''.format(u.shape[1], m))
return t
def _check_custom_time_input(t):
"""
Helper function for simple and rather expensive checks for sanity
"""
t = atleast_1d(t)
if t.ndim > 1:
t = squeeze(t)
if t.ndim > 1:
raise ValueError('Time array should be a 1D array but has '
'{} nontrivial dimensions'.format(t.ndim))
if t.size < 2:
raise ValueError('Time array should have at least two data points.')
dt = t[1] - t[0]
if dt <= 0.:
raise ValueError('The time increment dt cannot be negative; '
'Difference of the first two samples t1 - t0 = {}'
''.format(dt))
# np.diff is somewhat slower than the diff of the views
if not np.allclose(t[1:] - t[:-1], dt):
raise ValueError('Supplied time array is not numerically equally '
'spaced (checked via numpy.allclose).')
return t, dt
| 37.602 | 79 | 0.604489 | import numpy as np
from numpy import (reciprocal, einsum, maximum, minimum, zeros_like,
atleast_1d, squeeze)
from scipy.linalg import eig, eigvals, matrix_balance, norm
from harold._classes import Transfer, transfer_to_state
from harold._discrete_funcs import discretize
from harold._arg_utils import _check_for_state, _check_for_state_or_transfer
__all__ = ['simulate_linear_system', 'simulate_step_response',
'simulate_impulse_response']
def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False):
_check_for_state_or_transfer(sys)
if x0 is not None:
if sys._isgain:
raise ValueError('Static system models can\'t have initial '
'conditions set.')
if isinstance(sys, Transfer):
raise ValueError('Transfer models can\'t have initial conditions '
'set.')
x0 = np.asarray(x0, dtype=float).squeeze()
if x0.ndim > 1:
raise ValueError('Initial condition can only be a 1D array.')
else:
x0 = x0[:, None]
if sys.NumberOfStates != x0.size:
raise ValueError('The initial condition size does not match the '
'number of states of the model.')
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
n, m = sys.NumberOfStates, sys.shape[1]
is_discrete = sys.SamplingSet == 'Z'
u = np.asarray(u, dtype=float).squeeze()
if u.ndim == 1:
u = u[:, None]
t = _check_u_and_t_for_simulation(m, sys._dt, u, t, is_discrete)
if sys._isgain:
if sys._isSISO:
yout = u * sys.d.squeeze()
else:
if m == 1:
per_channel = False
if per_channel:
yout = np.einsum('ij,jk->ikj', u, sys.d.T)
else:
yout = u @ sys.d.T
# Dynamic model
else:
# TODO: Add FOH discretization for funky input
# ZOH discretize the continuous system based on the time increment
if not is_discrete:
sys = discretize(sys, t[1]-t[0], method='zoh')
sample_num = len(u)
a, b, c, d = sys.matrices
# Bu and Du are constant matrices so get them ready (transposed)
M_u = np.block([b.T, d.T])
at = a.T
# Explicitly skip single inputs for per_channel
if m == 1:
per_channel = False
# Shape the response as a 3D array
if per_channel:
xout = np.empty([sample_num, n, m], dtype=float)
for col in range(m):
xout[0, :, col] = 0. if x0 is None else x0.T
Bu = u[:, [col]] @ b.T[[col], :]
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row, :, col] = xout[row-1, :, col] @ at + Bu[row-1]
# Get the output equation for each slice of inputs
# Cx + Du
yout = np.einsum('ijk,jl->ilk', xout, c.T) + \
np.einsum('ij,jk->ikj', u, d.T)
# Combined output
else:
BDu = u @ M_u
xout = np.empty([sample_num, n], dtype=float)
xout[0] = 0. if x0 is None else x0.T
# Main loop for xdot eq.
for row in range(1, sample_num):
xout[row] = (xout[row-1] @ at) + BDu[row-1, :n]
# Now we have all the state evolution get the output equation
yout = xout @ c.T + BDu[:, n:]
return yout, t
def simulate_step_response(sys, t=None):
_check_for_state_or_transfer(sys)
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if t is None:
tf, ts = _compute_tfinal_and_dt(sys)
t = np.arange(0, tf+ts, ts, dtype=float)
else:
t, ts = _check_custom_time_input(t)
m = sys.shape[1]
u = np.ones([len(t), m], dtype=float)
return simulate_linear_system(sys, u=u, t=t, per_channel=1)
def simulate_impulse_response(sys, t=None):
_check_for_state_or_transfer(sys)
# Always works with State Models
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if t is None:
tf, ts = _compute_tfinal_and_dt(sys, is_step=False)
t = np.arange(0, tf+ts, ts, dtype=float)
else:
t, ts = _check_custom_time_input(t)
m = sys.shape[1]
u = np.zeros([len(t), m], dtype=float)
u[0] = 1./ts
return simulate_linear_system(sys, u=u, t=t, per_channel=1)
def _compute_tfinal_and_dt(sys, is_step=True):
sqrt_eps = np.sqrt(np.spacing(1.))
min_points = 100 # min number of points
min_points_z = 20 # min number of points
max_points = 10000 # max number of points
max_points_z = 75000 # max number of points for discrete models
default_tfinal = 5 # Default simulation horizon
total_cycles = 5 # number of cycles for oscillating modes
pts_per_cycle = 25 # Number of points divide a period of oscillation
log_decay_percent = np.log(100) # Factor of reduction for real pole decays
# if a static model is given, don't bother with checks
if sys._isgain:
if sys._isdiscrete:
return sys._dt*min_points_z, sys._dt
else:
return default_tfinal, default_tfinal / min_points
if sys._isdiscrete:
# trap mentioned above. Just get nonintegrating slow modes together
# with the damping.
dt = sys._dt
tfinal = default_tfinal
p = eigvals(sys.a)
# Array Masks
# unstable
m_u = (np.abs(p) >= 1 + sqrt_eps)
p_u, p = p[m_u], p[~m_u]
if p_u.size > 0:
m_u = (p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps)
t_emp = np.max(log_decay_percent / np.abs(np.log(p_u[~m_u])/dt))
tfinal = max(tfinal, t_emp)
# zero - negligible effect on tfinal
m_z = np.abs(p) < sqrt_eps
p = p[~m_z]
# Negative reals- treated as oscillary mode
m_nr = (p.real < 0) & (np.abs(p.imag) < sqrt_eps)
p_nr, p = p[m_nr], p[~m_nr]
if p_nr.size > 0:
t_emp = np.max(log_decay_percent / np.abs((np.log(p_nr)/dt).real))
tfinal = max(tfinal, t_emp)
# discrete integrators
m_int = (p.real - 1 < sqrt_eps) & (np.abs(p.imag) < sqrt_eps)
p_int, p = p[m_int], p[~m_int]
# pure oscillatory modes
m_w = (np.abs(np.abs(p) - 1) < sqrt_eps)
p_w, p = p[m_w], p[~m_w]
if p_w.size > 0:
t_emp = total_cycles * 2 * np.pi / np.abs(np.log(p_w)/dt).min()
tfinal = max(tfinal, t_emp)
if p.size > 0:
t_emp = log_decay_percent / np.abs((np.log(p)/dt).real).min()
tfinal = max(tfinal, t_emp)
if p_int.size > 0:
tfinal = tfinal * 5
# Make tfinal an integer multiple of dt
num_samples = tfinal // dt
if num_samples > max_points_z:
tfinal = dt * max_points_z
else:
tfinal = dt * num_samples
return tfinal, dt
# Improve conditioning via balancing and zeroing tiny entries
# See <w,v> for [[1,2,0], [9,1,0.01], [1,2,10*np.pi]] before/after balance
b, (sca, perm) = matrix_balance(sys.a, separate=True)
p, l, r = eig(b, left=True, right=True)
# Reciprocal of inner product <w,v> for each λ, (bound the ~infs by 1e12)
# G = Transfer([1], [1,0,1]) gives zero sensitivity (bound by 1e-12)
eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
eig_sens = minimum(1e12, eig_sens)
# Tolerances
p[np.abs(p) < np.spacing(eig_sens * norm(b, 1))] = 0.
# Incorporate balancing to outer factors
l[perm, :] *= reciprocal(sca)[:, None]
r[perm, :] *= sca[:, None]
w, v = sys.c @ r, l.T.conj() @ sys.b
origin = False
# Computing the "size" of the response of each simple mode
wn = np.abs(p)
if np.any(wn == 0.):
origin = True
dc = zeros_like(p, dtype=float)
# well-conditioned nonzero poles, np.abs just in case
ok = np.abs(eig_sens) <= 1/sqrt_eps
# the averaged t→∞ response of each simple λ on each i/o channel
# See, A = [[-1, k], [0, -2]], response sizes are k-dependent (that is
# R/L eigenvector dependent)
dc[ok] = norm(v[ok, :], axis=1)*norm(w[:, ok], axis=0)*eig_sens[ok]
dc[wn != 0.] /= wn[wn != 0] if is_step else 1.
dc[wn == 0.] = 0.
# double the oscillating mode magnitude for the conjugate
dc[p.imag != 0.] *= 2
# Now get rid of noncontributing integrators and simple modes if any
relevance = (dc > 0.1*dc.max()) | ~ok
psub = p[relevance]
wnsub = wn[relevance]
tfinal, dt = [], []
ints = wnsub == 0.
iw = (psub.imag != 0.) & (np.abs(psub.real) <= sqrt_eps)
# Pure imaginary?
if np.any(iw):
tfinal += (total_cycles * 2 * np.pi / wnsub[iw]).tolist()
dt += (2 * np.pi / pts_per_cycle / wnsub[iw]).tolist()
# The rest ~ts = log(%ss value) / exp(Re(λ)t)
texp_mode = log_decay_percent / np.abs(psub[~iw & ~ints].real)
tfinal += texp_mode.tolist()
dt += minimum(texp_mode / 50,
(2 * np.pi / pts_per_cycle / wnsub[~iw & ~ints])).tolist()
# All integrators?
if len(tfinal) == 0:
return default_tfinal*5, default_tfinal*5/min_points
tfinal = np.max(tfinal)*(5 if origin else 1)
dt = np.min(dt)
dt = tfinal / max_points if tfinal // dt > max_points else dt
tfinal = dt * min_points if tfinal // dt < min_points else tfinal
return tfinal, dt
def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete):
# Discrete models can omit t array, make one here for convenience
if t is None:
if not isdiscrete:
raise ValueError('Continuous time models need an evenly spaced '
'time sequence from which the sampling period '
'will be obtained.')
else:
u_samples = len(u)
t = np.linspace(0, (u_samples-1)*dt, num=u_samples)
else:
t = np.asarray(t, dtype=float).squeeze()
if t.ndim != 1:
raise ValueError('Time array needs to be a 1D array.')
t_diff = np.diff(t)
if not np.allclose(t_diff, t_diff[0]) or not t_diff[0] > 0.:
raise ValueError('Time array should be equally spaced and '
'increasing.')
if isdiscrete and not np.isclose(dt, t_diff[0]):
raise ValueError('Time array increment {} is not equal to the'
' model sampling period {}.'.format(t_diff[0],
dt))
if u.size < 1:
raise ValueError('The input array should at least have one point.')
# First dimension is always # of samples
if len(u) != len(t):
raise ValueError('The input and time arrays should have the same'
' length. t: {} vs. u: {}'.format(t.shape,
u.shape))
if u.shape[1] != m:
raise ValueError('Number of input columns ({}) don\'t match the number'
' of inputs ({}) of the given model.'
''.format(u.shape[1], m))
return t
def _check_custom_time_input(t):
t = atleast_1d(t)
if t.ndim > 1:
t = squeeze(t)
if t.ndim > 1:
raise ValueError('Time array should be a 1D array but has '
'{} nontrivial dimensions'.format(t.ndim))
if t.size < 2:
raise ValueError('Time array should have at least two data points.')
dt = t[1] - t[0]
if dt <= 0.:
raise ValueError('The time increment dt cannot be negative; '
'Difference of the first two samples t1 - t0 = {}'
''.format(dt))
if not np.allclose(t[1:] - t[:-1], dt):
raise ValueError('Supplied time array is not numerically equally '
'spaced (checked via numpy.allclose).')
return t, dt
| true | true |
7901ff17cd3616c64108bab6ef18e9d01f5c3274 | 817 | py | Python | tests/bomb1.py | thefab/restful-distributed-lock-manager | 68e775a956c52dd770e68e15f731a12d756e4103 | [
"MIT"
] | 18 | 2015-01-24T15:54:33.000Z | 2022-01-05T13:08:46.000Z | tests/bomb1.py | thefab/restful-distributed-lock-manager | 68e775a956c52dd770e68e15f731a12d756e4103 | [
"MIT"
] | 3 | 2016-05-19T09:47:53.000Z | 2019-01-03T14:07:18.000Z | tests/bomb1.py | thefab/restful-distributed-lock-manager | 68e775a956c52dd770e68e15f731a12d756e4103 | [
"MIT"
] | 6 | 2018-03-13T13:53:19.000Z | 2021-11-14T14:32:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiprocessing import Pool
import requests
PROCESS_POOL_SIZE = 10
REQUESTS = 10000
BASE_URL = "http://localhost:8888"
RESOURCE_NAME = "resource"
def f(process_number):
    """Acquire then immediately release one lock on the RDLM server.

    Raises on any unexpected HTTP status so the pool worker fails loudly.
    """
    body = '{"title": "%i", "lifetime": 300, "wait": 20}' % process_number
    post_response = requests.post("%s/locks/%s" % (BASE_URL, RESOURCE_NAME),
                                  data=body)
    if post_response.status_code != 201:
        raise Exception("bad status code %i from post request"
                        % post_response.status_code)
    delete_response = requests.delete(post_response.headers['Location'])
    if delete_response.status_code != 204:
        raise Exception("bad status code %i from delete request"
                        % delete_response.status_code)
if __name__ == '__main__':
    # Context manager guarantees the worker pool is terminated even when a
    # request in f() raises; the original left the pool dangling.
    with Pool(processes=PROCESS_POOL_SIZE) as pool:
        pool.map(f, range(0, REQUESTS))
| 30.259259 | 81 | 0.679315 |
from multiprocessing import Pool
import requests
PROCESS_POOL_SIZE = 10
REQUESTS = 10000
BASE_URL = "http://localhost:8888"
RESOURCE_NAME = "resource"
def f(process_number):
resource_name = RESOURCE_NAME
raw_body = '{"title": "%i", "lifetime": 300, "wait": 20}' % process_number
r = requests.post("%s/locks/%s" % (BASE_URL, resource_name), data=raw_body)
if r.status_code != 201:
raise Exception("bad status code %i from post request" % r.status_code)
lock_url = r.headers['Location']
r = requests.delete(lock_url)
if r.status_code != 204:
raise Exception("bad status code %i from delete request" % r.status_code)
if __name__ == '__main__':
pool = Pool(processes=PROCESS_POOL_SIZE)
pool.map(f, range(0, REQUESTS))
| true | true |
7901ff7b12e14a5ed2137abf84f344f77316038f | 3,854 | py | Python | motor/frameworks/tornado/__init__.py | smurfix/motor | 67cdcd9b9ae7f966e957113ca9b283f4b2c53b28 | [
"Apache-2.0"
] | null | null | null | motor/frameworks/tornado/__init__.py | smurfix/motor | 67cdcd9b9ae7f966e957113ca9b283f4b2c53b28 | [
"Apache-2.0"
] | null | null | null | motor/frameworks/tornado/__init__.py | smurfix/motor | 67cdcd9b9ae7f966e957113ca9b283f4b2c53b28 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tornado compatibility layer for Motor, an asynchronous MongoDB driver.
See "Frameworks" in the Developer Guide.
"""
import functools
import os
import tornado.process
import warnings
from concurrent.futures import ThreadPoolExecutor
from tornado import concurrent, gen, ioloop, version as tornado_version
from tornado.gen import chain_future, coroutine # For framework interface.
from .. import DummySession as Session
try:
import contextvars
except ImportError:
contextvars = None
CLASS_PREFIX = ''
def get_event_loop():
    """Return the current Tornado IOLoop for this thread."""
    return ioloop.IOLoop.current()
def is_event_loop(loop):
    """Return True if *loop* is a Tornado IOLoop instance."""
    return isinstance(loop, ioloop.IOLoop)
def check_event_loop(loop):
    """Raise ``TypeError`` unless *loop* is a Tornado IOLoop instance."""
    if is_event_loop(loop):
        return
    raise TypeError(
        "io_loop must be instance of IOLoop, not %r" % loop)
def get_future(loop):
    """Return a new tornado.concurrent.Future.

    *loop* is accepted for framework-interface compatibility but unused here.
    """
    return concurrent.Future()
# Size the shared thread pool from MOTOR_MAX_WORKERS when set, otherwise
# default to 5 threads per CPU (tornado.process.cpu_count()).
if 'MOTOR_MAX_WORKERS' in os.environ:
    max_workers = int(os.environ['MOTOR_MAX_WORKERS'])
else:
    max_workers = tornado.process.cpu_count() * 5
# Single module-wide executor used by run_on_executor below.
_EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
def run_on_executor(loop, fn, *args, **kwargs):
    """Run blocking *fn(*args, **kwargs)* on the module thread pool.

    Returns the awaitable produced by ``loop.run_in_executor``. When
    ``contextvars`` is importable, the caller's context is copied so the
    executor thread observes the same context variables.
    """
    if contextvars:
        context = contextvars.copy_context()
        fn = functools.partial(context.run, fn)
    return loop.run_in_executor(
        _EXECUTOR, functools.partial(fn, *args, **kwargs))
def chain_return_value(future, loop, return_value):
    """Compatible way to return a value in all Pythons.

    PEP 479, raise StopIteration(value) from a coroutine won't work forever,
    but "return value" doesn't work in Python 2. Instead, Motor methods that
    return values resolve a Future with it, and are implemented with callbacks
    rather than a coroutine internally.
    """
    chained = concurrent.Future()
    def copy(_future):
        # Return early if the task was cancelled.
        if chained.done():
            return
        if _future.exception() is not None:
            # Propagate the upstream failure unchanged.
            chained.set_exception(_future.exception())
        else:
            # Success: resolve with the caller-supplied value, not the
            # upstream future's own result.
            chained.set_result(return_value)
    # Run copy() on the loop thread once the upstream future completes.
    future.add_done_callback(functools.partial(loop.add_callback, copy))
    return chained
def is_future(f):
    """Return True if *f* is a tornado.concurrent.Future."""
    return isinstance(f, concurrent.Future)
def call_soon(loop, callback, *args, **kwargs):
    """Schedule *callback* on *loop*, binding any extra arguments first."""
    if not (args or kwargs):
        # Nothing to bind; hand the callback over unchanged.
        loop.add_callback(callback)
        return
    loop.add_callback(functools.partial(callback, *args, **kwargs))
def add_future(loop, future, callback, *args):
    """Run ``callback(*args, future)`` on *loop* when *future* resolves."""
    loop.add_future(future, functools.partial(callback, *args))
def pymongo_class_wrapper(f, pymongo_class):
    """Execute the coroutine *f* and wrap its result in a Motor class.

    Only a result whose class is exactly *pymongo_class* is wrapped (via
    ``self.wrap``); subclasses pass through untouched. See WrapAsync.
    """
    @functools.wraps(f)
    async def _wrapper(self, *args, **kwargs):
        result = await f(self, *args, **kwargs)
        # Exact class comparison on purpose — not isinstance().
        if result.__class__ == pymongo_class:
            return self.wrap(result)
        return result

    return _wrapper
def yieldable(future):
    """Deprecated pass-through kept only for backward compatibility."""
    warnings.warn(
        "The yieldable function is deprecated and will be removed in "
        "Motor 3.0", DeprecationWarning, stacklevel=2)
    return future
def platform_info():
    """Return a 'Tornado <version>' string identifying this framework."""
    return 'Tornado {}'.format(tornado_version)
| 27.140845 | 78 | 0.704203 |
import functools
import os
import tornado.process
import warnings
from concurrent.futures import ThreadPoolExecutor
from tornado import concurrent, gen, ioloop, version as tornado_version
from tornado.gen import chain_future, coroutine
from .. import DummySession as Session
try:
    import contextvars
except ImportError:
    # Python < 3.7: no contextvars; run_on_executor() checks this sentinel
    # and skips context propagation into executor threads.
    contextvars = None
# NOTE(review): prefix for generated class names — consumer not visible here.
CLASS_PREFIX = ''
def get_event_loop():
    """Return the Tornado IOLoop for the current thread."""
    return ioloop.IOLoop.current()
def is_event_loop(loop):
    """Return True when *loop* is a Tornado IOLoop instance."""
    return isinstance(loop, ioloop.IOLoop)
def check_event_loop(loop):
    """Raise TypeError unless *loop* is a Tornado IOLoop."""
    if not is_event_loop(loop):
        raise TypeError(
            "io_loop must be instance of IOLoop, not %r" % loop)
def get_future(loop):
    """Create a new Future; *loop* is accepted for API symmetry but unused."""
    return concurrent.Future()
# Worker-pool size: MOTOR_MAX_WORKERS env override, else 5 threads per CPU.
if 'MOTOR_MAX_WORKERS' in os.environ:
    max_workers = int(os.environ['MOTOR_MAX_WORKERS'])
else:
    max_workers = tornado.process.cpu_count() * 5
# Single shared pool used by run_on_executor().
_EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
def run_on_executor(loop, fn, *args, **kwargs):
    """Run ``fn(*args, **kwargs)`` on the shared thread pool; return a Future."""
    if contextvars:
        # Carry the caller's context variables into the worker thread (3.7+).
        context = contextvars.copy_context()
        fn = functools.partial(context.run, fn)
    return loop.run_in_executor(
        _EXECUTOR, functools.partial(fn, *args, **kwargs))
def chain_return_value(future, loop, return_value):
    """Resolve a new Future with *return_value* once *future* completes.

    Errors from *future* propagate to the chained Future; a successful
    result is replaced by the fixed *return_value*. The copy callback is
    marshalled onto *loop* via ``loop.add_callback``.
    """
    chained = concurrent.Future()
    def copy(_future):
        # Return early if the chained task was already cancelled.
        if chained.done():
            return
        if _future.exception() is not None:
            chained.set_exception(_future.exception())
        else:
            chained.set_result(return_value)
    future.add_done_callback(functools.partial(loop.add_callback, copy))
    return chained
def is_future(f):
    """Return True when *f* is a Future of the kind this framework uses."""
    return isinstance(f, concurrent.Future)
def call_soon(loop, callback, *args, **kwargs):
    """Schedule ``callback(*args, **kwargs)`` on the loop's next iteration."""
    if args or kwargs:
        loop.add_callback(functools.partial(callback, *args, **kwargs))
    else:
        loop.add_callback(callback)
def add_future(loop, future, callback, *args):
    """Invoke ``callback(*args, future)`` on *loop* once *future* resolves."""
    loop.add_future(future, functools.partial(callback, *args))
def pymongo_class_wrapper(f, pymongo_class):
    """Await coroutine *f*; wrap a result of exactly *pymongo_class* via self.wrap."""
    @functools.wraps(f)
    async def _wrapper(self, *args, **kwargs):
        result = await f(self, *args, **kwargs)
        # Exact class comparison on purpose — subclasses are not wrapped.
        if result.__class__ == pymongo_class:
            # Delegate to the current object to wrap the result.
            return self.wrap(result)
        else:
            return result
    return _wrapper
def yieldable(future):
    """Deprecated pass-through kept only for backward compatibility."""
    warnings.warn(
        "The yieldable function is deprecated and will be removed in "
        "Motor 3.0", DeprecationWarning, stacklevel=2)
    return future
def platform_info():
    """Return a 'Tornado <version>' string identifying this framework."""
    return 'Tornado %s' % (tornado_version,)
| true | true |
7902014a279ae8fdb91383784f4c8104db26782c | 1,206 | py | Python | rhasspy_skills_cli/tests/test_app.py | razzo04/rhasspy-skills-cli | 01cff9b3f1eee07902fbc04783aeb237fe98768e | [
"MIT"
] | null | null | null | rhasspy_skills_cli/tests/test_app.py | razzo04/rhasspy-skills-cli | 01cff9b3f1eee07902fbc04783aeb237fe98768e | [
"MIT"
] | null | null | null | rhasspy_skills_cli/tests/test_app.py | razzo04/rhasspy-skills-cli | 01cff9b3f1eee07902fbc04783aeb237fe98768e | [
"MIT"
] | null | null | null | import click
from typer.testing import CliRunner
import pytest
import os
from pathlib import Path
from ..main import install
from pytest_httpx import HTTPXMock
runner = CliRunner()
def get_test_resource(name: str) -> Path:
return Path(os.path.join(os.path.dirname(__file__), "testresources", name))
def test_install_invalid_archive(tmp_path):
data = b"data"
file_path = tmp_path / "test.tar"
with open(file_path, "wb") as f:
f.write(data)
with pytest.raises(click.exceptions.Exit):
install(
file_path,
["https://example.com"],
cache=False,
force=False,
start_on_boot=False,
)
assert os.listdir(tmp_path) == ["test.tar"]
def test_install(tmp_path, httpx_mock: HTTPXMock):
    """Installing a valid skill should POST it and, if it exits, exit cleanly."""
    httpx_mock.add_response(
        method="POST", json={"state": "success", "detail": "installed"}
    )
    skill_dir = get_test_resource("time_example")
    try:
        install(
            skill_dir.as_posix(),
            ["https://example.com"],
            cache=False,
            force=False,
            start_on_boot=False,
        )
    except click.exceptions.Exit as exit_info:
        # A deliberate exit is fine only with status 0.
        assert exit_info.exit_code == 0
| 25.659574 | 79 | 0.619403 | import click
from typer.testing import CliRunner
import pytest
import os
from pathlib import Path
from ..main import install
from pytest_httpx import HTTPXMock
runner = CliRunner()
def get_test_resource(name: str) -> Path:
    """Return the path of *name* inside this package's testresources folder."""
    return Path(os.path.join(os.path.dirname(__file__), "testresources", name))
def test_install_invalid_archive(tmp_path):
    """install() should raise Exit for a file that is not a valid archive."""
    data = b"data"
    file_path = tmp_path / "test.tar"
    with open(file_path, "wb") as f:
        f.write(data)
    with pytest.raises(click.exceptions.Exit):
        install(
            file_path,
            ["https://example.com"],
            cache=False,
            force=False,
            start_on_boot=False,
        )
    # The temp dir must still contain only the bogus archive.
    assert os.listdir(tmp_path) == ["test.tar"]
def test_install(tmp_path, httpx_mock: HTTPXMock):
    """Installing a valid skill should POST it and, if it exits, exit with 0."""
    httpx_mock.add_response(
        method="POST", json={"state": "success", "detail": "installed"}
    )
    time_skill = get_test_resource("time_example")
    try:
        install(
            time_skill.as_posix(),
            ["https://example.com"],
            cache=False,
            force=False,
            start_on_boot=False,
        )
    except click.exceptions.Exit as e:
        # A deliberate exit is fine only with status 0.
        assert e.exit_code == 0
| true | true |
7902027bd17bd5449f1cb27614e7291f76386885 | 1,791 | py | Python | zeeguu_core/crowd_translations/__init__.py | C0DK/Zeeguu-Core | 55b6a7ce1223f368614fb7e5dd9e53d4e46ae69e | [
"MIT"
] | null | null | null | zeeguu_core/crowd_translations/__init__.py | C0DK/Zeeguu-Core | 55b6a7ce1223f368614fb7e5dd9e53d4e46ae69e | [
"MIT"
] | null | null | null | zeeguu_core/crowd_translations/__init__.py | C0DK/Zeeguu-Core | 55b6a7ce1223f368614fb7e5dd9e53d4e46ae69e | [
"MIT"
] | null | null | null | from sqlalchemy.orm.exc import NoResultFound
from zeeguu_core.model import User, Language, UserWord, Text, Bookmark
def own_or_crowdsourced_translation(user, word: str, from_lang_code: str, context: str):
    """Best stored translation for *word* in *context*.

    Prefers the user's own previous translation, then any crowdsourced one.
    Returns a single-entry list of translation dicts, or None if neither
    lookup finds anything. Lookups are lazy: the crowdsourced query only
    runs when the user's own lookup comes back empty.
    """
    lookups = (
        (lambda: get_own_past_translation(user, word, from_lang_code, context),
         'Own Last Translation'),
        (lambda: get_others_past_translation(word, from_lang_code, context),
         'Contributed Translation'),
    )
    for lookup, service_name in lookups:
        found = lookup()
        if found:
            return [{'translation': found,
                     'service_name': service_name,
                     'quality': 100}]
    return None
def get_others_past_translation(word: str, from_lang_code: str, context: str):
    """Translation any user previously saved for *word* in *context*, or None."""
    return _get_past_translation(word, from_lang_code, context, user=None)
def get_own_past_translation(user, word: str, from_lang_code: str, context: str):
    """Translation *user* previously saved for *word* in *context*, or None."""
    return _get_past_translation(word, from_lang_code, context, user=user)
def _get_past_translation(word: str, from_lang_code: str, context: str, user: User = None):
    """Look up a bookmarked translation of *word* in the exact *context*.

    Restricted to *user*'s bookmarks when given, otherwise any user's.
    Returns the translated word, or None when nothing matches (or any part
    of the lookup fails — this is a best-effort helper).
    """
    try:
        from_language = Language.find(from_lang_code)
        origin_word = UserWord.find(word, from_language)
        text = Text.query.filter_by(content=context).one()

        query = Bookmark.query.filter_by(origin_id=origin_word.id, text_id=text.id)
        if user:
            query = query.filter_by(user_id=user.id)

        # Prioritize older users. Bug fix: Query.order_by() returns a NEW
        # query; the previous code discarded it, so ordering never applied.
        query = query.order_by(Bookmark.user_id.asc())

        return query.first().translation.word
    except Exception as e:
        # Best-effort: no match (NoResultFound, AttributeError on .first()
        # returning None, ...) simply yields None.
        print(e)
        return None
| 31.982143 | 91 | 0.685092 | from sqlalchemy.orm.exc import NoResultFound
from zeeguu_core.model import User, Language, UserWord, Text, Bookmark
def own_or_crowdsourced_translation(user, word: str, from_lang_code: str, context: str):
    """Best stored translation for *word* in *context*: the user's own first,
    then any crowdsourced one, as a single-entry list of dicts; None if neither."""
    own_past_translation = get_own_past_translation(user, word, from_lang_code, context)
    if own_past_translation:
        translations = [{'translation': own_past_translation,
                         'service_name': 'Own Last Translation',
                         'quality': 100}]
        return translations
    # Fall back to a translation contributed by any other user.
    others_past_translation = get_others_past_translation(word, from_lang_code, context)
    if others_past_translation:
        translations = [{'translation': others_past_translation,
                         'service_name': 'Contributed Translation',
                         'quality': 100}]
        return translations
    return None
def get_others_past_translation(word: str, from_lang_code: str, context: str):
    """Translation any user previously saved for *word* in *context*, or None."""
    return _get_past_translation(word, from_lang_code, context)
def get_own_past_translation(user, word: str, from_lang_code: str, context: str):
    """Translation *user* previously saved for *word* in *context*, or None."""
    return _get_past_translation(word, from_lang_code, context, user)
def _get_past_translation(word: str, from_lang_code: str, context: str, user: User = None):
    """Look up a bookmarked translation of *word* in the exact *context*.

    Restricted to *user*'s bookmarks when given, otherwise any user's.
    Returns the translated word, or None when nothing matches (or any part
    of the lookup fails — this is a best-effort helper).
    """
    try:
        from_language = Language.find(from_lang_code)
        origin_word = UserWord.find(word, from_language)
        text = Text.query.filter_by(content=context).one()

        query = Bookmark.query.filter_by(origin_id=origin_word.id, text_id=text.id)
        if user:
            query = query.filter_by(user_id=user.id)

        # Prioritize older users. Bug fix: Query.order_by() returns a NEW
        # query; the previous code discarded it, so ordering never applied.
        query = query.order_by(Bookmark.user_id.asc())

        return query.first().translation.word
    except Exception as e:
        # Best-effort: no match (NoResultFound, AttributeError on .first()
        # returning None, ...) simply yields None.
        print(e)
        return None
790202c43ee119e0b0769b912e21e567661b64b9 | 25,739 | py | Python | tools/run_tests/performance/massage_qps_stats.py | 4con/grpc-win-xp | 26e73cad8721030ada9b5765bea627376ccaef9e | [
"Apache-2.0"
] | 91 | 2018-11-24T05:33:58.000Z | 2022-03-16T05:58:05.000Z | tools/run_tests/performance/massage_qps_stats.py | 4con/grpc-win-xp | 26e73cad8721030ada9b5765bea627376ccaef9e | [
"Apache-2.0"
] | 11 | 2019-06-02T23:50:17.000Z | 2022-02-04T23:58:56.000Z | tools/run_tests/performance/massage_qps_stats.py | 4con/grpc-win-xp | 26e73cad8721030ada9b5765bea627376ccaef9e | [
"Apache-2.0"
] | 18 | 2018-11-24T10:35:29.000Z | 2021-04-22T07:22:10.000Z | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Autogenerated by tools/codegen/core/gen_stats_data.py
import massage_qps_stats_helpers
# Counter metrics exported by gRPC core; each becomes a "core_<name>" field.
# Order matters only for dict key ordering and mirrors the generated code.
_COUNTERS = (
    "client_calls_created",
    "server_calls_created",
    "cqs_created",
    "client_channels_created",
    "client_subchannels_created",
    "server_channels_created",
    "syscall_poll",
    "syscall_wait",
    "pollset_kick",
    "pollset_kicked_without_poller",
    "pollset_kicked_again",
    "pollset_kick_wakeup_fd",
    "pollset_kick_wakeup_cv",
    "pollset_kick_own_thread",
    "histogram_slow_lookups",
    "syscall_write",
    "syscall_read",
    "tcp_backup_pollers_created",
    "tcp_backup_poller_polls",
    "http2_op_batches",
    "http2_op_cancel",
    "http2_op_send_initial_metadata",
    "http2_op_send_message",
    "http2_op_send_trailing_metadata",
    "http2_op_recv_initial_metadata",
    "http2_op_recv_message",
    "http2_op_recv_trailing_metadata",
    "http2_settings_writes",
    "http2_pings_sent",
    "http2_writes_begun",
    "http2_writes_offloaded",
    "http2_writes_continued",
    "http2_partial_writes",
    "http2_initiate_write_due_to_initial_write",
    "http2_initiate_write_due_to_start_new_stream",
    "http2_initiate_write_due_to_send_message",
    "http2_initiate_write_due_to_send_initial_metadata",
    "http2_initiate_write_due_to_send_trailing_metadata",
    "http2_initiate_write_due_to_retry_send_ping",
    "http2_initiate_write_due_to_continue_pings",
    "http2_initiate_write_due_to_goaway_sent",
    "http2_initiate_write_due_to_rst_stream",
    "http2_initiate_write_due_to_close_from_api",
    "http2_initiate_write_due_to_stream_flow_control",
    "http2_initiate_write_due_to_transport_flow_control",
    "http2_initiate_write_due_to_send_settings",
    "http2_initiate_write_due_to_bdp_estimator_ping",
    "http2_initiate_write_due_to_flow_control_unstalled_by_setting",
    "http2_initiate_write_due_to_flow_control_unstalled_by_update",
    "http2_initiate_write_due_to_application_ping",
    "http2_initiate_write_due_to_keepalive_ping",
    "http2_initiate_write_due_to_transport_flow_control_unstalled",
    "http2_initiate_write_due_to_ping_response",
    "http2_initiate_write_due_to_force_rst_stream",
    "http2_spurious_writes_begun",
    "hpack_recv_indexed",
    "hpack_recv_lithdr_incidx",
    "hpack_recv_lithdr_incidx_v",
    "hpack_recv_lithdr_notidx",
    "hpack_recv_lithdr_notidx_v",
    "hpack_recv_lithdr_nvridx",
    "hpack_recv_lithdr_nvridx_v",
    "hpack_recv_uncompressed",
    "hpack_recv_huffman",
    "hpack_recv_binary",
    "hpack_recv_binary_base64",
    "hpack_send_indexed",
    "hpack_send_lithdr_incidx",
    "hpack_send_lithdr_incidx_v",
    "hpack_send_lithdr_notidx",
    "hpack_send_lithdr_notidx_v",
    "hpack_send_lithdr_nvridx",
    "hpack_send_lithdr_nvridx_v",
    "hpack_send_uncompressed",
    "hpack_send_huffman",
    "hpack_send_binary",
    "hpack_send_binary_base64",
    "combiner_locks_initiated",
    "combiner_locks_scheduled_items",
    "combiner_locks_scheduled_final_items",
    "combiner_locks_offloaded",
    "call_combiner_locks_initiated",
    "call_combiner_locks_scheduled_items",
    "call_combiner_set_notify_on_cancel",
    "call_combiner_cancelled",
    "executor_scheduled_short_items",
    "executor_scheduled_long_items",
    "executor_scheduled_to_self",
    "executor_wakeup_initiated",
    "executor_queue_drained",
    "executor_push_retries",
    "server_requested_calls",
    "server_slowpath_requests_queued",
    "cq_ev_queue_trylock_failures",
    "cq_ev_queue_trylock_successes",
    "cq_ev_queue_transient_pop_failures",
)

# Histogram metrics; each expands to "core_<name>" (bucket counts),
# "core_<name>_bkts" (bucket boundaries) and "core_<name>_{50,95,99}p".
_HISTOGRAMS = (
    "call_initial_size",
    "poll_events_returned",
    "tcp_write_size",
    "tcp_write_iov_size",
    "tcp_read_size",
    "tcp_read_offer",
    "tcp_read_offer_iov_size",
    "http2_send_message_size",
    "http2_send_initial_metadata_per_write",
    "http2_send_message_per_write",
    "http2_send_trailing_metadata_per_write",
    "http2_send_flowctl_per_write",
    "server_cqs_checked",
)


def massage_qps_stats(scenario_result):
    """Flatten each stats dict's "coreStats" blob into top-level "core_*" fields.

    Mutates *scenario_result* in place: for every server/client stats entry,
    the "coreStats" key is removed and replaced by one flat field per counter
    plus buckets/boundaries/percentile fields per histogram.
    """
    for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
        # NOTE: mirrors the original control flow — an entry without
        # "coreStats" aborts processing of ALL remaining entries.
        if "coreStats" not in stats:
            return
        core_stats = stats.pop("coreStats")
        for name in _COUNTERS:
            stats["core_" + name] = massage_qps_stats_helpers.counter(
                core_stats, name)
        for name in _HISTOGRAMS:
            h = massage_qps_stats_helpers.histogram(core_stats, name)
            stats["core_" + name] = ",".join("%f" % x for x in h.buckets)
            stats["core_" + name + "_bkts"] = ",".join(
                "%f" % x for x in h.boundaries)
            for pct in (50, 95, 99):
                stats["core_%s_%dp" % (name, pct)] = (
                    massage_qps_stats_helpers.percentile(
                        h.buckets, pct, h.boundaries))
| 54.76383 | 118 | 0.661875 |
import massage_qps_stats_helpers
def massage_qps_stats(scenario_result):
    """Flatten each stats dict's "coreStats" sub-dict into top-level "core_*" keys.

    Every counter metric becomes a single "core_<name>" entry.  Every histogram
    metric becomes three kinds of entries: "core_<name>" (comma-joined bucket
    counts), "core_<name>_bkts" (comma-joined bucket boundaries) and
    "core_<name>_{50,95,99}p" (percentile estimates).  "coreStats" is removed
    from each stats dict in place.

    Quirk preserved from the original: the function returns as soon as it
    encounters one stats dict without a "coreStats" key.
    """
    # Counter metric names, copied through one-to-one as "core_<name>".
    counter_names = (
        "client_calls_created",
        "server_calls_created",
        "cqs_created",
        "client_channels_created",
        "client_subchannels_created",
        "server_channels_created",
        "syscall_poll",
        "syscall_wait",
        "pollset_kick",
        "pollset_kicked_without_poller",
        "pollset_kicked_again",
        "pollset_kick_wakeup_fd",
        "pollset_kick_wakeup_cv",
        "pollset_kick_own_thread",
        "histogram_slow_lookups",
        "syscall_write",
        "syscall_read",
        "tcp_backup_pollers_created",
        "tcp_backup_poller_polls",
        "http2_op_batches",
        "http2_op_cancel",
        "http2_op_send_initial_metadata",
        "http2_op_send_message",
        "http2_op_send_trailing_metadata",
        "http2_op_recv_initial_metadata",
        "http2_op_recv_message",
        "http2_op_recv_trailing_metadata",
        "http2_settings_writes",
        "http2_pings_sent",
        "http2_writes_begun",
        "http2_writes_offloaded",
        "http2_writes_continued",
        "http2_partial_writes",
        "http2_initiate_write_due_to_initial_write",
        "http2_initiate_write_due_to_start_new_stream",
        "http2_initiate_write_due_to_send_message",
        "http2_initiate_write_due_to_send_initial_metadata",
        "http2_initiate_write_due_to_send_trailing_metadata",
        "http2_initiate_write_due_to_retry_send_ping",
        "http2_initiate_write_due_to_continue_pings",
        "http2_initiate_write_due_to_goaway_sent",
        "http2_initiate_write_due_to_rst_stream",
        "http2_initiate_write_due_to_close_from_api",
        "http2_initiate_write_due_to_stream_flow_control",
        "http2_initiate_write_due_to_transport_flow_control",
        "http2_initiate_write_due_to_send_settings",
        "http2_initiate_write_due_to_bdp_estimator_ping",
        "http2_initiate_write_due_to_flow_control_unstalled_by_setting",
        "http2_initiate_write_due_to_flow_control_unstalled_by_update",
        "http2_initiate_write_due_to_application_ping",
        "http2_initiate_write_due_to_keepalive_ping",
        "http2_initiate_write_due_to_transport_flow_control_unstalled",
        "http2_initiate_write_due_to_ping_response",
        "http2_initiate_write_due_to_force_rst_stream",
        "http2_spurious_writes_begun",
        "hpack_recv_indexed",
        "hpack_recv_lithdr_incidx",
        "hpack_recv_lithdr_incidx_v",
        "hpack_recv_lithdr_notidx",
        "hpack_recv_lithdr_notidx_v",
        "hpack_recv_lithdr_nvridx",
        "hpack_recv_lithdr_nvridx_v",
        "hpack_recv_uncompressed",
        "hpack_recv_huffman",
        "hpack_recv_binary",
        "hpack_recv_binary_base64",
        "hpack_send_indexed",
        "hpack_send_lithdr_incidx",
        "hpack_send_lithdr_incidx_v",
        "hpack_send_lithdr_notidx",
        "hpack_send_lithdr_notidx_v",
        "hpack_send_lithdr_nvridx",
        "hpack_send_lithdr_nvridx_v",
        "hpack_send_uncompressed",
        "hpack_send_huffman",
        "hpack_send_binary",
        "hpack_send_binary_base64",
        "combiner_locks_initiated",
        "combiner_locks_scheduled_items",
        "combiner_locks_scheduled_final_items",
        "combiner_locks_offloaded",
        "call_combiner_locks_initiated",
        "call_combiner_locks_scheduled_items",
        "call_combiner_set_notify_on_cancel",
        "call_combiner_cancelled",
        "executor_scheduled_short_items",
        "executor_scheduled_long_items",
        "executor_scheduled_to_self",
        "executor_wakeup_initiated",
        "executor_queue_drained",
        "executor_push_retries",
        "server_requested_calls",
        "server_slowpath_requests_queued",
        "cq_ev_queue_trylock_failures",
        "cq_ev_queue_trylock_successes",
        "cq_ev_queue_transient_pop_failures",
    )
    # Histogram metric names, expanded into buckets/boundaries/percentiles.
    histogram_names = (
        "call_initial_size",
        "poll_events_returned",
        "tcp_write_size",
        "tcp_write_iov_size",
        "tcp_read_size",
        "tcp_read_offer",
        "tcp_read_offer_iov_size",
        "http2_send_message_size",
        "http2_send_initial_metadata_per_write",
        "http2_send_message_per_write",
        "http2_send_trailing_metadata_per_write",
        "http2_send_flowctl_per_write",
        "server_cqs_checked",
    )
    for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
        if "coreStats" not in stats:
            return
        core_stats = stats["coreStats"]
        del stats["coreStats"]
        for name in counter_names:
            stats["core_" + name] = massage_qps_stats_helpers.counter(
                core_stats, name)
        for name in histogram_names:
            h = massage_qps_stats_helpers.histogram(core_stats, name)
            stats["core_" + name] = ",".join("%f" % x for x in h.buckets)
            stats["core_" + name + "_bkts"] = ",".join(
                "%f" % x for x in h.boundaries)
            for pct in (50, 95, 99):
                stats["core_%s_%dp" % (name, pct)] = (
                    massage_qps_stats_helpers.percentile(
                        h.buckets, pct, h.boundaries))
| true | true |
790202e73857055b473af492c794cada47a6f534 | 283 | py | Python | tests/test.py | monkeyusage/duplicates | 73635259342c2d6ee66de6197a9be2fc8175b67f | [
"MIT"
] | null | null | null | tests/test.py | monkeyusage/duplicates | 73635259342c2d6ee66de6197a9be2fc8175b67f | [
"MIT"
] | null | null | null | tests/test.py | monkeyusage/duplicates | 73635259342c2d6ee66de6197a9be2fc8175b67f | [
"MIT"
] | null | null | null | import sys
sys.path.append('../scripts')
from detect_duplicates import df
def test_nan_names():
    """No record may be missing a value in the name column."""
    null_name_count = df.name.isnull().sum()
    assert null_name_count == 0
def test_dup_pid():
    """Patient ids must be unique across the dataset."""
    duplicate_pid_count = df.patient_id.duplicated().sum()
    assert duplicate_pid_count == 0
def test_phone_dup():
assert df.phone_number.duplicated().sum() == 0 | 23.583333 | 50 | 0.689046 | import sys
sys.path.append('../scripts')
from detect_duplicates import df
def test_nan_names():
    """No record may be missing a value in the name column."""
    null_name_count = df.name.isnull().sum()
    assert null_name_count == 0
def test_dup_pid():
    """Patient ids must be unique across the dataset."""
    duplicate_pid_count = df.patient_id.duplicated().sum()
    assert duplicate_pid_count == 0
def test_phone_dup():
assert df.phone_number.duplicated().sum() == 0 | true | true |
79020397743da07961d1fcabb837c88c78e08d89 | 1,558 | py | Python | scraper/apis/wikipedia.py | antimike/citation-scraper | f9c9749cac683394e1401731a31579bf1756c130 | [
"MIT"
] | null | null | null | scraper/apis/wikipedia.py | antimike/citation-scraper | f9c9749cac683394e1401731a31579bf1756c130 | [
"MIT"
] | null | null | null | scraper/apis/wikipedia.py | antimike/citation-scraper | f9c9749cac683394e1401731a31579bf1756c130 | [
"MIT"
] | null | null | null | import wikipedia as wiki
from ..parsing import get_wiki_page_id, get_wiki_lines, get_wiki_sections
def get_wiki_references(url, outfile=None):
    """Extract references from predefined sections of a wiki page.

    Uses `urlscan`, `refextract`, `doi`, `wikipedia`, and a regex for ArXiv URLs.

    :param url: URL of wiki article to scrape
    :param outfile: File to write extracted references to
    :returns: result of ``_serialize`` applied to the per-line summary
    """
    def _check(line_info):
        # A line is consistent when any doi/arxiv id detected in the raw text
        # agrees with the last reference refextract pulled out of that line.
        # Guard against empty 'refs': the original indexed refs[-1]
        # unconditionally, raising IndexError for lines that contain an id but
        # yielded no extracted reference.
        refs = line_info['refs']
        doi_ok = not line_info['doi'] or (
            bool(refs) and line_info['doi'] == refs[-1]['doi'])
        arxiv_ok = not line_info['arxiv'] or (
            bool(refs) and line_info['arxiv'] == refs[-1]['arxiv'])
        return doi_ok and arxiv_ok
    page = wiki.page(get_wiki_page_id(url))
    sections = get_wiki_sections(page.content)
    # NOTE: dead code removed -- the original also built unused `lines` and
    # `links` lists (the latter via `wikiparse`, which is not imported here).
    summary = sum([
        [
            {
                'raw': l,
                'links': urlscan.parse_text_urls(l),
                'refs': refextract.extract_references_from_string(l),
                'doi': doi.find_doi_in_text(l),
                # NOTE(review): if arxiv_url_regex is an re.Pattern this should
                # be .search()/.match() rather than .matches() -- confirm.
                'arxiv': m.group(1) if (m := arxiv_url_regex.matches(l)) is not None else None
            } for l in get_wiki_lines(s, predicate=any)
        ] for s in sections.values()
    ], [])  # BUG FIX: sum() needs [] as start value to concatenate lists
    failed = [ld for ld in summary if not _check(ld)]
    if any(failed):
        logger.warning('Consistency check failed for the following lines: {}'.format(failed))
    return _serialize(summary, outfile)
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 41 | 94 | 0.622593 | import wikipedia as wiki
from ..parsing import get_wiki_page_id, get_wiki_lines, get_wiki_sections
def get_wiki_references(url, outfile=None):
    """Extract references from predefined sections of a wiki page.

    Uses `urlscan`, `refextract`, `doi`, `wikipedia`, and a regex for ArXiv URLs.

    :param url: URL of wiki article to scrape
    :param outfile: File to write extracted references to
    :returns: result of ``_serialize`` applied to the per-line summary
    """
    def _check(line_info):
        # A line is consistent when any doi/arxiv id detected in the raw text
        # agrees with the last reference refextract pulled out of that line.
        # Guard against empty 'refs': the original indexed refs[-1]
        # unconditionally, raising IndexError for lines that contain an id but
        # yielded no extracted reference.
        refs = line_info['refs']
        doi_ok = not line_info['doi'] or (
            bool(refs) and line_info['doi'] == refs[-1]['doi'])
        arxiv_ok = not line_info['arxiv'] or (
            bool(refs) and line_info['arxiv'] == refs[-1]['arxiv'])
        return doi_ok and arxiv_ok
    page = wiki.page(get_wiki_page_id(url))
    sections = get_wiki_sections(page.content)
    # NOTE: dead code removed -- the original also built unused `lines` and
    # `links` lists (the latter via `wikiparse`, which is not imported here).
    summary = sum([
        [
            {
                'raw': l,
                'links': urlscan.parse_text_urls(l),
                'refs': refextract.extract_references_from_string(l),
                'doi': doi.find_doi_in_text(l),
                # NOTE(review): if arxiv_url_regex is an re.Pattern this should
                # be .search()/.match() rather than .matches() -- confirm.
                'arxiv': m.group(1) if (m := arxiv_url_regex.matches(l)) is not None else None
            } for l in get_wiki_lines(s, predicate=any)
        ] for s in sections.values()
    ], [])  # BUG FIX: sum() needs [] as start value to concatenate lists
    failed = [ld for ld in summary if not _check(ld)]
    if any(failed):
        logger.warning('Consistency check failed for the following lines: {}'.format(failed))
    return _serialize(summary, outfile)
# Run this module's doctests when it is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| true | true |
790203d7f5131f002db428b0f81e71d0bcd5b012 | 10,315 | py | Python | d2go/setup.py | ananthsub/d2go | 8c3618d9e73518d32350ab4e6d0fb6509c9e08b6 | [
"Apache-2.0"
] | null | null | null | d2go/setup.py | ananthsub/d2go | 8c3618d9e73518d32350ab4e6d0fb6509c9e08b6 | [
"Apache-2.0"
] | null | null | null | d2go/setup.py | ananthsub/d2go | 8c3618d9e73518d32350ab4e6d0fb6509c9e08b6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import os
import time
import detectron2.utils.comm as comm
import torch
from d2go.config import (
CfgNode as CN,
auto_scale_world_size,
reroute_config_path,
temp_defrost,
)
from d2go.distributed import get_local_rank, get_num_processes_per_machine
from d2go.runner import GeneralizedRCNNRunner, create_runner
from d2go.utils.launch_environment import get_launch_environment
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.logger import setup_logger
from detectron2.utils.serialize import PicklableWrapper
from d2go.utils.helper import run_once
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.py import FolderLock, MultiprocessingPdb, post_mortem_if_fail
logger = logging.getLogger(__name__)
def basic_argument_parser(
    distributed=True,
    requires_config_file=True,
    requires_output_dir=True,
):
    """Basic cli tool parser for Detectron2Go binaries.

    Args:
        distributed: add DDP-related flags (--num-processes, --num-machines,
            --machine-rank, --dist-url, --dist-backend).
        requires_config_file: when True, --config-file is mandatory; when
            False, required dataset/size options (--datasets, --min_size,
            --max_size) are added instead so the config can be built from CLI.
        requires_output_dir: whether --output-dir is mandatory.

    Returns:
        argparse.ArgumentParser: the configured parser.
    """
    parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
    parser.add_argument(
        "--runner",
        type=str,
        default="d2go.runner.GeneralizedRCNNRunner",
        help="Full class name, i.e. (package.)module.class",
    )
    parser.add_argument(
        "--config-file",
        help="path to config file",
        default="",
        required=requires_config_file,
        metavar="FILE",
    )
    parser.add_argument(
        "--output-dir",
        help="When given, this will override the OUTPUT_DIR in the config-file",
        required=requires_output_dir,
        default=None,
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    if distributed:
        parser.add_argument(
            "--num-processes", type=int, default=1, help="number of gpus per machine"
        )
        parser.add_argument("--num-machines", type=int, default=1)
        parser.add_argument(
            "--machine-rank",
            type=int,
            default=0,
            help="the rank of this machine (unique per machine)",
        )
        parser.add_argument(
            "--dist-url", default="file:///tmp/d2go_dist_file_{}".format(time.time())
        )
        parser.add_argument("--dist-backend", type=str, default="NCCL")
    if not requires_config_file:
        # NOTE if not passing yaml file, user should explicitly set the
        # following args, and use `opts` for non-common usecase.
        parser.add_argument(
            "--datasets",
            type=str,
            nargs="+",
            required=True,
            help="cfg.DATASETS.TEST",
        )
        parser.add_argument(
            "--min_size",
            type=int,
            required=True,
            help="cfg.INPUT.MIN_SIZE_TEST",
        )
        parser.add_argument(
            "--max_size",
            type=int,
            required=True,
            help="cfg.INPUT.MAX_SIZE_TEST",
        )
    # Single exit point; the original had a redundant duplicate
    # `return parser` inside the branch above.
    return parser
def create_cfg_from_cli_args(args, default_cfg):
    """
    Instead of loading from defaults.py, this binary only includes necessary
    configs building from scratch, and overrides them from args. There're two
    levels of config:
        schema: the config system used by this binary, which is a sub-set of
            training config, overridden by `overrides`. It can also be
            overridden by args.opts for convenience.
        overrides: common configs that the user explicitly specifies in args.
    """
    schema = CN()
    # Copy over the sub-trees of the full training config that this binary uses.
    schema.INPUT = default_cfg.INPUT
    schema.DATASETS = default_cfg.DATASETS
    schema.DATALOADER = default_cfg.DATALOADER
    schema.TEST = default_cfg.TEST
    if hasattr(default_cfg, "D2GO_DATA"):
        schema.D2GO_DATA = default_cfg.D2GO_DATA
    if hasattr(default_cfg, "TENSORBOARD"):
        schema.TENSORBOARD = default_cfg.TENSORBOARD
    # NOTE configs below might not be necessary, but must add to make code work
    schema.MODEL = CN()
    schema.MODEL.META_ARCHITECTURE = default_cfg.MODEL.META_ARCHITECTURE
    schema.MODEL.MASK_ON = default_cfg.MODEL.MASK_ON
    schema.MODEL.KEYPOINT_ON = default_cfg.MODEL.KEYPOINT_ON
    schema.MODEL.LOAD_PROPOSALS = default_cfg.MODEL.LOAD_PROPOSALS
    assert schema.MODEL.LOAD_PROPOSALS is False, "caffe2 model doesn't support"
    schema.OUTPUT_DIR = args.output_dir
    # Key/value pairs explicitly supplied on the command line.
    overrides = [
        "DATASETS.TEST", args.datasets,
        "INPUT.MIN_SIZE_TEST", args.min_size,
        "INPUT.MAX_SIZE_TEST", args.max_size,
    ]
    cfg = schema.clone()
    cfg.merge_from_list(overrides)
    # args.opts (free-form KEY VALUE pairs) take precedence last.
    cfg.merge_from_list(args.opts)
    return cfg
def prepare_for_launch(args):
    """
    Load config, figure out working directory, create runner.
        - when args.config_file is empty, returned cfg will be the default one
        - returned output_dir will always be non empty, args.output_dir has higher
            priority than cfg.OUTPUT_DIR.
    """
    print(args)
    # Resolve the runner class from its fully-qualified name (args.runner).
    runner = create_runner(args.runner)
    cfg = runner.get_default_cfg()
    if args.config_file:
        # reroute_config_path presumably maps the path to an accessible copy
        # -- TODO confirm; the raw file content is echoed for reproducibility.
        with PathManager.open(reroute_config_path(args.config_file), "r") as f:
            print("Loaded config file {}:\n{}".format(args.config_file, f.read()))
        cfg.merge_from_file(args.config_file)
        # Free-form command-line "opts" override values from the yaml file.
        cfg.merge_from_list(args.opts)
    else:
        # No yaml given: build a minimal config purely from the CLI arguments.
        cfg = create_cfg_from_cli_args(args, default_cfg=cfg)
    cfg.freeze()
    # At least one source for the output directory must be present.
    assert args.output_dir or args.config_file
    output_dir = args.output_dir or cfg.OUTPUT_DIR
    return cfg, output_dir, runner
def setup_after_launch(cfg, output_dir, runner):
    """
    Set things up after entering DDP, including
        - creating working directory
        - setting up logger
        - logging environment
        - initializing runner
    """
    # Only the global main process creates the directory; all processes then
    # synchronize so the directory exists before anyone logs into it.
    create_dir_on_global_main_process(output_dir)
    comm.synchronize()
    setup_loggers(output_dir)
    cfg.freeze()
    if cfg.OUTPUT_DIR != output_dir:
        # Keep cfg consistent with the directory actually used for this run;
        # cfg is frozen, so temporarily defrost it for the override.
        with temp_defrost(cfg):
            logger.warning(
                "Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}".format(
                    cfg.OUTPUT_DIR, output_dir
                )
            )
            cfg.OUTPUT_DIR = output_dir
    logger.info("Initializing runner ...")
    runner = initialize_runner(runner, cfg)

    log_info(cfg, runner)
    # Persist the final (possibly overridden) config next to the run outputs.
    dump_cfg(cfg, os.path.join(output_dir, "config.yaml"))

    # Rescale world-size-dependent config values to the actual world size.
    auto_scale_world_size(cfg, new_world_size=comm.get_world_size())
@run_once()
def setup_loggers(output_dir, color=None):
    """Configure the detectron2/fvcore/d2go/mobile_cv loggers, once per process.

    When *color* is not given, colored output is enabled only for local runs.
    """
    if not color:
        color = get_launch_environment() == "local"
    # NOTE: every logger below gets a FileHandler pointing to the same file as
    # the detectron2 logger. Those files are opened upon creation, but it seems
    # fine in 'a' mode.
    # NOTE: the root logger might have been configured by other applications;
    # since these are already sub-top level, just don't propagate to root.
    logger_d2 = setup_logger(
        output_dir,
        distributed_rank=comm.get_rank(),
        color=color,
        name="detectron2",
        abbrev_name="d2",
    )
    logger_d2.propagate = False
    logger_fvcore = setup_logger(
        output_dir,
        distributed_rank=comm.get_rank(),
        color=color,
        name="fvcore",
    )
    logger_fvcore.propagate = False
    logger_d2go = setup_logger(
        output_dir,
        distributed_rank=comm.get_rank(),
        color=color,
        name="d2go",
        abbrev_name="d2go",
    )
    logger_d2go.propagate = False
    logger_mobile_cv = setup_logger(
        output_dir,
        distributed_rank=comm.get_rank(),
        color=color,
        name="mobile_cv",
        abbrev_name="mobile_cv",
    )
    logger_mobile_cv.propagate = False
def log_info(cfg, runner):
    """Log process topology, environment info, the full config and the runner."""
    procs_per_machine = get_num_processes_per_machine()
    logger.info(
        "Using {} processes per machine. Rank of current process: {}".format(
            procs_per_machine, comm.get_rank()
        )
    )
    env_info = collect_env_info()
    logger.info("Environment info:\n" + env_info)
    logger.info("Running with full config:\n{}".format(cfg))
    logger.info("Running with runner: {}".format(runner))
def dump_cfg(cfg, path):
    """Write the full config to *path*; no-op on non-main processes."""
    if not comm.is_main_process():
        return
    with PathManager.open(path, "w") as f:
        f.write(cfg.dump())
    logger.info("Full config saved to {}".format(path))
def create_dir_on_local_main_process(dir):
    """Create *dir* (recursively), but only on the local main process."""
    if get_local_rank() == 0:
        if dir:
            PathManager.mkdirs(dir)
def create_dir_on_global_main_process(dir):
    """Create *dir* (recursively), but only on the global main process."""
    if comm.get_rank() == 0:
        if dir:
            PathManager.mkdirs(dir)
def initialize_runner(runner, cfg):
    """Initialize *runner* with *cfg*, defaulting to GeneralizedRCNNRunner."""
    if not runner:
        runner = GeneralizedRCNNRunner()
    runner._initialize(cfg)
    return runner
def caffe2_global_init(logging_print_net_summary=0, num_threads=None):
    """Run caffe2's workspace.GlobalInit with logging and thread-count flags.

    Args:
        logging_print_net_summary: forwarded to caffe2; forced to 0 on every
            process except the local main one to avoid duplicated output.
        num_threads: OMP/MKL thread count. Defaults to 1 under multi-process
            DDP; otherwise keeps PyTorch's current default (GlobalInit would
            reset PyTorch's num_threads to 1, so it is captured beforehand).
    """
    if num_threads is None:
        if get_num_processes_per_machine() > 1:
            # by default use single thread when DDP with multiple processes
            num_threads = 1
        else:
            num_threads = torch.get_num_threads()

    if get_local_rank() != 0:
        logging_print_net_summary = 0  # only enable for local main process

    from caffe2.python import workspace

    workspace.GlobalInit(
        [
            "caffe2",
            "--caffe2_log_level=2",
            "--caffe2_logging_print_net_summary={}".format(logging_print_net_summary),
            "--caffe2_omp_num_threads={}".format(num_threads),
            "--caffe2_mkl_num_threads={}".format(num_threads),
        ]
    )
    logger.info("Using {} threads after GlobalInit".format(torch.get_num_threads()))
def post_mortem_if_fail_for_main(main_func):
def new_main_func(cfg, output_dir, *args, **kwargs):
pdb_ = (
MultiprocessingPdb(FolderLock(output_dir))
if comm.get_world_size() > 1
else None # fallback to use normal pdb for single process
)
return post_mortem_if_fail(pdb_)(main_func)(cfg, output_dir, *args, **kwargs)
return PicklableWrapper(new_main_func)
| 31.544343 | 88 | 0.654678 |
import argparse
import logging
import os
import time
import detectron2.utils.comm as comm
import torch
from d2go.config import (
CfgNode as CN,
auto_scale_world_size,
reroute_config_path,
temp_defrost,
)
from d2go.distributed import get_local_rank, get_num_processes_per_machine
from d2go.runner import GeneralizedRCNNRunner, create_runner
from d2go.utils.launch_environment import get_launch_environment
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.logger import setup_logger
from detectron2.utils.serialize import PicklableWrapper
from d2go.utils.helper import run_once
from detectron2.utils.file_io import PathManager
from mobile_cv.common.misc.py import FolderLock, MultiprocessingPdb, post_mortem_if_fail
logger = logging.getLogger(__name__)
def basic_argument_parser(
distributed=True,
requires_config_file=True,
requires_output_dir=True,
):
parser = argparse.ArgumentParser(description="PyTorch Object Detection Training")
parser.add_argument(
"--runner",
type=str,
default="d2go.runner.GeneralizedRCNNRunner",
help="Full class name, i.e. (package.)module.class",
)
parser.add_argument(
"--config-file",
help="path to config file",
default="",
required=requires_config_file,
metavar="FILE",
)
parser.add_argument(
"--output-dir",
help="When given, this will override the OUTPUT_DIR in the config-file",
required=requires_output_dir,
default=None,
type=str,
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
if distributed:
parser.add_argument(
"--num-processes", type=int, default=1, help="number of gpus per machine"
)
parser.add_argument("--num-machines", type=int, default=1)
parser.add_argument(
"--machine-rank",
type=int,
default=0,
help="the rank of this machine (unique per machine)",
)
parser.add_argument(
"--dist-url", default="file:///tmp/d2go_dist_file_{}".format(time.time())
)
parser.add_argument("--dist-backend", type=str, default="NCCL")
if not requires_config_file:
parser.add_argument(
"--datasets",
type=str,
nargs="+",
required=True,
help="cfg.DATASETS.TEST",
)
parser.add_argument(
"--min_size",
type=int,
required=True,
help="cfg.INPUT.MIN_SIZE_TEST",
)
parser.add_argument(
"--max_size",
type=int,
required=True,
help="cfg.INPUT.MAX_SIZE_TEST",
)
return parser
return parser
def create_cfg_from_cli_args(args, default_cfg):
_C = CN()
_C.INPUT = default_cfg.INPUT
_C.DATASETS = default_cfg.DATASETS
_C.DATALOADER = default_cfg.DATALOADER
_C.TEST = default_cfg.TEST
if hasattr(default_cfg, "D2GO_DATA"):
_C.D2GO_DATA = default_cfg.D2GO_DATA
if hasattr(default_cfg, "TENSORBOARD"):
_C.TENSORBOARD = default_cfg.TENSORBOARD
_C.MODEL = CN()
_C.MODEL.META_ARCHITECTURE = default_cfg.MODEL.META_ARCHITECTURE
_C.MODEL.MASK_ON = default_cfg.MODEL.MASK_ON
_C.MODEL.KEYPOINT_ON = default_cfg.MODEL.KEYPOINT_ON
_C.MODEL.LOAD_PROPOSALS = default_cfg.MODEL.LOAD_PROPOSALS
assert _C.MODEL.LOAD_PROPOSALS is False, "caffe2 model doesn't support"
_C.OUTPUT_DIR = args.output_dir
configurable_cfg = [
"DATASETS.TEST",
args.datasets,
"INPUT.MIN_SIZE_TEST",
args.min_size,
"INPUT.MAX_SIZE_TEST",
args.max_size,
]
cfg = _C.clone()
cfg.merge_from_list(configurable_cfg)
cfg.merge_from_list(args.opts)
return cfg
def prepare_for_launch(args):
print(args)
runner = create_runner(args.runner)
cfg = runner.get_default_cfg()
if args.config_file:
with PathManager.open(reroute_config_path(args.config_file), "r") as f:
print("Loaded config file {}:\n{}".format(args.config_file, f.read()))
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
else:
cfg = create_cfg_from_cli_args(args, default_cfg=cfg)
cfg.freeze()
assert args.output_dir or args.config_file
output_dir = args.output_dir or cfg.OUTPUT_DIR
return cfg, output_dir, runner
def setup_after_launch(cfg, output_dir, runner):
create_dir_on_global_main_process(output_dir)
comm.synchronize()
setup_loggers(output_dir)
cfg.freeze()
if cfg.OUTPUT_DIR != output_dir:
with temp_defrost(cfg):
logger.warning(
"Override cfg.OUTPUT_DIR ({}) to be the same as output_dir {}".format(
cfg.OUTPUT_DIR, output_dir
)
)
cfg.OUTPUT_DIR = output_dir
logger.info("Initializing runner ...")
runner = initialize_runner(runner, cfg)
log_info(cfg, runner)
dump_cfg(cfg, os.path.join(output_dir, "config.yaml"))
auto_scale_world_size(cfg, new_world_size=comm.get_world_size())
@run_once()
def setup_loggers(output_dir, color=None):
if not color:
color = get_launch_environment() == "local"
d2_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="detectron2",
abbrev_name="d2",
)
fvcore_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="fvcore",
)
d2go_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="d2go",
abbrev_name="d2go",
)
mobile_cv_logger = setup_logger(
output_dir,
distributed_rank=comm.get_rank(),
color=color,
name="mobile_cv",
abbrev_name="mobile_cv",
)
# NOTE: all above loggers have FileHandler pointing to the same file as d2_logger.
# Those files are opened upon creation, but it seems fine in 'a' mode.
# NOTE: the root logger might has been configured by other applications,
# since this already sub-top level, just don't propagate to root.
d2_logger.propagate = False
fvcore_logger.propagate = False
d2go_logger.propagate = False
mobile_cv_logger.propagate = False
def log_info(cfg, runner):
num_processes = get_num_processes_per_machine()
logger.info(
"Using {} processes per machine. Rank of current process: {}".format(
num_processes, comm.get_rank()
)
)
logger.info("Environment info:\n" + collect_env_info())
logger.info("Running with full config:\n{}".format(cfg))
logger.info("Running with runner: {}".format(runner))
def dump_cfg(cfg, path):
if comm.is_main_process():
with PathManager.open(path, "w") as f:
f.write(cfg.dump())
logger.info("Full config saved to {}".format(path))
def create_dir_on_local_main_process(dir):
if get_local_rank() == 0 and dir:
PathManager.mkdirs(dir)
def create_dir_on_global_main_process(dir):
if comm.get_rank() == 0 and dir:
PathManager.mkdirs(dir)
def initialize_runner(runner, cfg):
runner = runner or GeneralizedRCNNRunner()
runner._initialize(cfg)
return runner
def caffe2_global_init(logging_print_net_summary=0, num_threads=None):
if num_threads is None:
if get_num_processes_per_machine() > 1:
num_threads = 1
else:
# thus keep PyTorch's default value to make it truly default.
num_threads = torch.get_num_threads()
if not get_local_rank() == 0:
logging_print_net_summary = 0
from caffe2.python import workspace
workspace.GlobalInit(
[
"caffe2",
"--caffe2_log_level=2",
"--caffe2_logging_print_net_summary={}".format(logging_print_net_summary),
"--caffe2_omp_num_threads={}".format(num_threads),
"--caffe2_mkl_num_threads={}".format(num_threads),
]
)
logger.info("Using {} threads after GlobalInit".format(torch.get_num_threads()))
def post_mortem_if_fail_for_main(main_func):
def new_main_func(cfg, output_dir, *args, **kwargs):
pdb_ = (
MultiprocessingPdb(FolderLock(output_dir))
if comm.get_world_size() > 1
else None
)
return post_mortem_if_fail(pdb_)(main_func)(cfg, output_dir, *args, **kwargs)
return PicklableWrapper(new_main_func)
| true | true |
7902045a8bcac920a9e2f9d298a662706cdcfa87 | 7,289 | py | Python | train.py | petersvenningsson/radar-Bayesian-human-motion | 728db0f39c107faccf9d711670177aac74456e3f | [
"MIT"
] | 1 | 2022-02-01T20:42:24.000Z | 2022-02-01T20:42:24.000Z | train.py | petersvenningsson/radar-Bayesian-human-motion | 728db0f39c107faccf9d711670177aac74456e3f | [
"MIT"
] | null | null | null | train.py | petersvenningsson/radar-Bayesian-human-motion | 728db0f39c107faccf9d711670177aac74456e3f | [
"MIT"
] | null | null | null | import argparse
import numpy as np
from sklearn.metrics import accuracy_score, jaccard_score, balanced_accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import dataloader
import track
from classifiers import ObservationsConditionsClassifier
from classifiers import ClassifierComposition
np.seterr(all='ignore')
class_set = 9
n_pca_components = 20
def train():
global parsed_args
test_sequence = 'Mix'
measurement_costs = [0.1*i for i in range(0,15)]
measurement_costs.extend([0.01*i for i in range(1, 15)])
loader = dataloader.DataLoaderSpectrogram()
features = [f'PC_{i}' for i in range(n_pca_components)]
classifiers = [
(ObservationsConditionsClassifier(features, discriminant_model='calibrated_Gaussian', n_angle_bins=8), 'Conditioned on $\phi$', 'Isotonic calibration'),
(ObservationsConditionsClassifier(features, discriminant_model='Gaussian', n_angle_bins=8), 'Conditioned on $\phi$','Uncalibrated'),
(ClassifierComposition(features, discriminant_model='Gaussian'), 'Not conditioned on $\phi$', 'Uncalibrated'),
(ClassifierComposition(features, discriminant_model='calibrated_Gaussian'), 'Not conditioned on $\phi$', 'Isotonic calibration'),
]
rows = []
for cost in measurement_costs:
for i_model, (classifier, observation_condition, discriminant_model) in enumerate(classifiers):
if parsed_args.rebuild:
track.state_estimation(load_directory = './data/dataset/RD')
dataset_path = r'C:\Users\peter\Documents\pulseON'
loader = dataloader.DataLoaderSpectrogram()
loader.build(dataset_path,'PCA')
else:
loader.load('./data/dataset_df')
result_df = evaluate_classifier(classifier, loader.df, test_persons = loader.df.person.unique(), test_sequence = test_sequence, measurement_cost = cost)
predictions = result_df.loc[result_df['sequence_type'] == test_sequence]['prediction'].to_numpy()
lables = result_df.loc[result_df['sequence_type'] == test_sequence]['lable'].to_numpy()
accuracy = accuracy_score(lables, predictions)
rows.append({
'Accuracy': accuracy, 'Balanced accuracy': balanced_accuracy_score(lables, predictions), 'Macro-averaged Jaccard index': jaccard_score(lables, predictions, average='macro'),
'Observation conditions': observation_condition, 'Calibration': discriminant_model, 'Cost': cost,
'result_df': result_df, 'model_index': i_model,
})
sns.lineplot(data = pd.DataFrame(rows), x = 'Cost', y = 'Accuracy', style = 'Observation conditions', hue = 'Calibration')
plt.tight_layout()
plt.show()
def evaluate_classifier(model, df, test_persons, measurement_cost, test_sequence = 'Mix', prior = [1/class_set for i in range(class_set)], render_seq=False):
df['prediction'] = -6666
for test_person in test_persons:
training_df = df.loc[df['person'] != test_person]
test_df = df.loc[(df['person'] == test_person) & (df['sequence_type'] == test_sequence)].copy()
transition_matrix = estimate_transition_matrix(
training_df.loc[training_df['sequence_type'] == 'Mix']
)
model.fit(training_df)
for j, file in enumerate(test_df.file_index.unique()):
print(f'File {j}/{len(test_df.file_index.unique())}')
seq_df = test_df.loc[test_df['file_index'] == file].copy()
seq_df = predict_sequence(model, seq_df, transition_matrix, measurement_cost)
if render_seq:
render.render_classification_sequence(seq_df)
df.loc[seq_df.index, 'belief'] = seq_df['belief']
df.loc[seq_df.index, 'prediction'] = seq_df['prediction']
df.loc[seq_df.index, 'Selected'] = seq_df['Selected']
return df
def predict_sequence(model, df, transition_matrix, measurement_cost, prior=[1/class_set for _ in range(class_set)]):
belief = np.reshape(prior, (class_set, 1))
for time in np.sort(df.time.unique()):
df_step = df[df['time'] == time].copy()
if measurement_cost:
selected_sensors = information_selection(df_step, model, belief, measurement_cost)
else:
selected_sensors = df_step.index
df.loc[selected_sensors, 'Selected'] = True
for i, row in df_step.loc[selected_sensors].iterrows():
row = row.to_frame().transpose()
prop_likelihood = model.predict_proba(row)
posterior = prop_likelihood[0, :, np.newaxis] * belief
posterior = posterior/(posterior.sum())
belief = posterior
# save prediction
df['belief'] = np.nan
df['belief'] = df['belief'].astype(object)
for index in df_step.index:
df.loc[index, 'belief'] = [belief]
df.loc[index ,'prediction'] = belief.argmax() + 1
# Transition step
belief = transition_matrix @ np.reshape(belief, (class_set,1))
return df
def information_selection(df, model, belief, measurement_cost):
# Calculate information and sort indices by information
df['information'] = df.apply(lambda row: model.information(belief, [row['predicted_angle']]), axis=1)
potential_sensors = df.sort_values('information').index.to_list()
selected_sensors = []
sensor_utility = {0:[]}
while potential_sensors:
selected_sensors.append(potential_sensors.pop())
information = model.information(belief, sensors=df.loc[selected_sensors]['predicted_angle'].to_list())
utility = information - measurement_cost*len(selected_sensors)
sensor_utility[utility] = selected_sensors[:]
return sensor_utility[np.max(list(sensor_utility.keys()))]
def estimate_transition_matrix(df):
transition_count = np.zeros((class_set,class_set))
df = df.loc[df['radar'] == df.radar.unique()[0]]
sequences = df['file_index'].unique()
for sequence_index in sequences:
df_seq = df.loc[df['file_index'] == sequence_index].sort_values('time').reset_index(drop=True)
previous_state = None
for i, row in df_seq.iterrows():
state = row['lable']
if not previous_state:
previous_state = state
continue
transition_count[state - 1, previous_state - 1] += 1
previous_state = state
transition_matrix = transition_count/transition_count.sum(axis=0,keepdims=1)
transition_matrix = transition_matrix/transition_matrix.sum(axis=0,keepdims=1)
return transition_matrix
def load_options():
global parsed_args
parser = argparse.ArgumentParser(description='Entry point to fit and evaluate\
a Bayesian model of human motion',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--rebuild', dest='rebuild', action='store_true')
parser.add_argument('--no-rebuild', dest='rebuild', action='store_false')
parser.set_defaults(rebuild=False)
parsed_args = parser.parse_args()
if __name__ == '__main__':
load_options()
train() | 40.949438 | 189 | 0.664563 | import argparse
import numpy as np
from sklearn.metrics import accuracy_score, jaccard_score, balanced_accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import dataloader
import track
from classifiers import ObservationsConditionsClassifier
from classifiers import ClassifierComposition
np.seterr(all='ignore')
class_set = 9
n_pca_components = 20
def train():
global parsed_args
test_sequence = 'Mix'
measurement_costs = [0.1*i for i in range(0,15)]
measurement_costs.extend([0.01*i for i in range(1, 15)])
loader = dataloader.DataLoaderSpectrogram()
features = [f'PC_{i}' for i in range(n_pca_components)]
classifiers = [
(ObservationsConditionsClassifier(features, discriminant_model='calibrated_Gaussian', n_angle_bins=8), 'Conditioned on $\phi$', 'Isotonic calibration'),
(ObservationsConditionsClassifier(features, discriminant_model='Gaussian', n_angle_bins=8), 'Conditioned on $\phi$','Uncalibrated'),
(ClassifierComposition(features, discriminant_model='Gaussian'), 'Not conditioned on $\phi$', 'Uncalibrated'),
(ClassifierComposition(features, discriminant_model='calibrated_Gaussian'), 'Not conditioned on $\phi$', 'Isotonic calibration'),
]
rows = []
for cost in measurement_costs:
for i_model, (classifier, observation_condition, discriminant_model) in enumerate(classifiers):
if parsed_args.rebuild:
track.state_estimation(load_directory = './data/dataset/RD')
dataset_path = r'C:\Users\peter\Documents\pulseON'
loader = dataloader.DataLoaderSpectrogram()
loader.build(dataset_path,'PCA')
else:
loader.load('./data/dataset_df')
result_df = evaluate_classifier(classifier, loader.df, test_persons = loader.df.person.unique(), test_sequence = test_sequence, measurement_cost = cost)
predictions = result_df.loc[result_df['sequence_type'] == test_sequence]['prediction'].to_numpy()
lables = result_df.loc[result_df['sequence_type'] == test_sequence]['lable'].to_numpy()
accuracy = accuracy_score(lables, predictions)
rows.append({
'Accuracy': accuracy, 'Balanced accuracy': balanced_accuracy_score(lables, predictions), 'Macro-averaged Jaccard index': jaccard_score(lables, predictions, average='macro'),
'Observation conditions': observation_condition, 'Calibration': discriminant_model, 'Cost': cost,
'result_df': result_df, 'model_index': i_model,
})
sns.lineplot(data = pd.DataFrame(rows), x = 'Cost', y = 'Accuracy', style = 'Observation conditions', hue = 'Calibration')
plt.tight_layout()
plt.show()
def evaluate_classifier(model, df, test_persons, measurement_cost, test_sequence = 'Mix', prior = [1/class_set for i in range(class_set)], render_seq=False):
df['prediction'] = -6666
for test_person in test_persons:
training_df = df.loc[df['person'] != test_person]
test_df = df.loc[(df['person'] == test_person) & (df['sequence_type'] == test_sequence)].copy()
transition_matrix = estimate_transition_matrix(
training_df.loc[training_df['sequence_type'] == 'Mix']
)
model.fit(training_df)
for j, file in enumerate(test_df.file_index.unique()):
print(f'File {j}/{len(test_df.file_index.unique())}')
seq_df = test_df.loc[test_df['file_index'] == file].copy()
seq_df = predict_sequence(model, seq_df, transition_matrix, measurement_cost)
if render_seq:
render.render_classification_sequence(seq_df)
df.loc[seq_df.index, 'belief'] = seq_df['belief']
df.loc[seq_df.index, 'prediction'] = seq_df['prediction']
df.loc[seq_df.index, 'Selected'] = seq_df['Selected']
return df
def predict_sequence(model, df, transition_matrix, measurement_cost, prior=[1/class_set for _ in range(class_set)]):
belief = np.reshape(prior, (class_set, 1))
for time in np.sort(df.time.unique()):
df_step = df[df['time'] == time].copy()
if measurement_cost:
selected_sensors = information_selection(df_step, model, belief, measurement_cost)
else:
selected_sensors = df_step.index
df.loc[selected_sensors, 'Selected'] = True
for i, row in df_step.loc[selected_sensors].iterrows():
row = row.to_frame().transpose()
prop_likelihood = model.predict_proba(row)
posterior = prop_likelihood[0, :, np.newaxis] * belief
posterior = posterior/(posterior.sum())
belief = posterior
df['belief'] = np.nan
df['belief'] = df['belief'].astype(object)
for index in df_step.index:
df.loc[index, 'belief'] = [belief]
df.loc[index ,'prediction'] = belief.argmax() + 1
belief = transition_matrix @ np.reshape(belief, (class_set,1))
return df
def information_selection(df, model, belief, measurement_cost):
df['information'] = df.apply(lambda row: model.information(belief, [row['predicted_angle']]), axis=1)
potential_sensors = df.sort_values('information').index.to_list()
selected_sensors = []
sensor_utility = {0:[]}
while potential_sensors:
selected_sensors.append(potential_sensors.pop())
information = model.information(belief, sensors=df.loc[selected_sensors]['predicted_angle'].to_list())
utility = information - measurement_cost*len(selected_sensors)
sensor_utility[utility] = selected_sensors[:]
return sensor_utility[np.max(list(sensor_utility.keys()))]
def estimate_transition_matrix(df):
transition_count = np.zeros((class_set,class_set))
df = df.loc[df['radar'] == df.radar.unique()[0]]
sequences = df['file_index'].unique()
for sequence_index in sequences:
df_seq = df.loc[df['file_index'] == sequence_index].sort_values('time').reset_index(drop=True)
previous_state = None
for i, row in df_seq.iterrows():
state = row['lable']
if not previous_state:
previous_state = state
continue
transition_count[state - 1, previous_state - 1] += 1
previous_state = state
transition_matrix = transition_count/transition_count.sum(axis=0,keepdims=1)
transition_matrix = transition_matrix/transition_matrix.sum(axis=0,keepdims=1)
return transition_matrix
def load_options():
global parsed_args
parser = argparse.ArgumentParser(description='Entry point to fit and evaluate\
a Bayesian model of human motion',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--rebuild', dest='rebuild', action='store_true')
parser.add_argument('--no-rebuild', dest='rebuild', action='store_false')
parser.set_defaults(rebuild=False)
parsed_args = parser.parse_args()
if __name__ == '__main__':
load_options()
train() | true | true |
790204c38aa70d72bd8331ea5ca48df4c3df59c3 | 1,214 | py | Python | apps/user/admin.py | guoxianru/newcoder | be3354c8be7c0202c0c587633a9d766fd95fb682 | [
"Apache-2.0"
] | 5 | 2019-03-19T06:41:54.000Z | 2020-04-07T17:16:11.000Z | apps/user/admin.py | guoxianru/newcoder2.0 | 9e46156fa68d98d3c23762227a5ee1d84db3f322 | [
"Apache-2.0"
] | 3 | 2020-06-05T20:41:45.000Z | 2021-06-11T02:45:32.000Z | apps/user/admin.py | guoxianru/newcoder | be3354c8be7c0202c0c587633a9d766fd95fb682 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from user.models import *
# 此处设置页面头部标题
admin.site.site_title = '新码农站点后台'
# 此处设置页面显示标题
admin.site.site_header = '新码农后台管理系统'
@admin.register(User)
class Useradmin(admin.ModelAdmin):
list_display = ['id', 'username', 'password', 'nickname', 'birthday', 'gender', 'photo', 'phone', 'email', 'desc',
'addtime']
# list_display_links 设置其他字段也可以点击链接进入编辑界面
list_display_links = ['id', 'username']
list_per_page = 50
list_filter = ['gender', 'birthday']
search_fields = ['username', 'nickname', 'phone']
# list_editable 设置默认可编辑字段
list_editable = ['nickname', 'birthday', 'gender', 'phone', 'email', 'desc']
ordering = ['-addtime']
# date_hierarchy 详细时间分层筛选
date_hierarchy = 'addtime'
@admin.register(Leavemsg)
class Leavemsgadmin(admin.ModelAdmin):
list_display = ['id', 'content', 'user', 'addtime']
# list_display_links 设置其他字段也可以点击链接进入编辑界面
list_display_links = ['id', 'user']
list_per_page = 50
list_filter = ['user']
search_fields = ['user']
# list_editable 设置默认可编辑字段
list_editable = ['content']
ordering = ['-addtime']
# date_hierarchy 详细时间分层筛选
date_hierarchy = 'addtime'
| 30.35 | 118 | 0.666392 | from django.contrib import admin
from user.models import *
admin.site.site_title = '新码农站点后台'
admin.site.site_header = '新码农后台管理系统'
@admin.register(User)
class Useradmin(admin.ModelAdmin):
list_display = ['id', 'username', 'password', 'nickname', 'birthday', 'gender', 'photo', 'phone', 'email', 'desc',
'addtime']
list_display_links = ['id', 'username']
list_per_page = 50
list_filter = ['gender', 'birthday']
search_fields = ['username', 'nickname', 'phone']
list_editable = ['nickname', 'birthday', 'gender', 'phone', 'email', 'desc']
ordering = ['-addtime']
date_hierarchy = 'addtime'
@admin.register(Leavemsg)
class Leavemsgadmin(admin.ModelAdmin):
list_display = ['id', 'content', 'user', 'addtime']
list_display_links = ['id', 'user']
list_per_page = 50
list_filter = ['user']
search_fields = ['user']
list_editable = ['content']
ordering = ['-addtime']
date_hierarchy = 'addtime'
| true | true |
790204ded8ffd476a486cba293fe42182d914606 | 199,114 | py | Python | pycdf/__init__.py | cpiker/condaCDF | 58f0c15fa387798f49c0372cc33d3639d997112d | [
"Unlicense"
] | null | null | null | pycdf/__init__.py | cpiker/condaCDF | 58f0c15fa387798f49c0372cc33d3639d997112d | [
"Unlicense"
] | 2 | 2020-04-07T21:20:11.000Z | 2021-03-26T16:29:19.000Z | pycdf/__init__.py | das-developers/condaCDF | 58f0c15fa387798f49c0372cc33d3639d997112d | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
das developers note:
This a is modification of the original SpacePy pycdf package. All
refereneces to the greater spacepy package have been removed to create
a small standalone module.
--cwp 2018-10-18
The libcdf.so location code has been changed to find the version installed
in anaconda.
--cwp 2020-04-06
This package provides a Python interface to the Common Data Format (CDF)
library used for many NASA missions, available at http://cdf.gsfc.nasa.gov/.
It is targeted at Python 2.6+ and should work without change on either
Python 2 or Python 3.
The interface is intended to be 'pythonic' rather than reproducing the
C interface. To open or close a CDF and access its variables, see the :class:`CDF`
class. Accessing data within the variables is via the :class:`Var`
class. The :data:`lib` object provides access to some routines
that affect the functionality of the library in general. The
:mod:`~pycdf.const` module contains constants useful for accessing
the underlying library.
Authors: Jon Niehof
Institution: University of New Hampshire
Contact: Jonathan.Niehof@unh.edu
Copyright 2010-2015 Los Alamos National Security, LLC.
"""
__contact__ = 'Jon Niehof, Jonathan.Niehof@unh.edu'
try:
from collections.abc import MutableMapping, MutableSequence
except ImportError:
from collections import MutableMapping, MutableSequence
import ctypes
import ctypes.util
import datetime
import operator
import os
import os.path
import shutil
import sys
import tempfile
import warnings
import weakref
import numpy
import numpy.ma
#Import const AFTER library loaded, so failed load doesn't leave half-imported
#from . import const
try:
str_classes = (str, bytes, unicode)
except NameError:
str_classes = (str, bytes)
class Library(object):
"""
Abstraction of the base CDF C library and its state.
Not normally intended for end-user use. An instance of this class
is created at package load time as the :data:`~pycdf.lib` variable, providing
access to the underlying C library if necessary. The CDF library itself
is described in section 2.1 of the CDF user's guide, as well as the CDF
C reference manual.
Calling the C library directly requires knowledge of
:mod:`ctypes`.
Instantiating this object loads the C library, see :doc:`/pycdf` docs
for details.
.. autosummary::
~Library.call
~Library.check_status
~Library.datetime_to_epoch
~Library.datetime_to_epoch16
~Library.datetime_to_tt2000
~Library.epoch_to_datetime
~Library.epoch_to_epoch16
~Library.epoch_to_num
~Library.epoch_to_tt2000
~Library.epoch16_to_datetime
~Library.epoch16_to_epoch
~Library.epoch16_to_tt2000
~Library.set_backward
supports_int8
~Library.tt2000_to_datetime
~Library.tt2000_to_epoch
~Library.tt2000_to_epoch16
v_datetime_to_epoch
v_datetime_to_epoch16
v_datetime_to_tt2000
v_epoch_to_datetime
v_epoch_to_tt2000
v_epoch16_to_datetime
v_epoch16_to_tt2000
v_tt2000_to_datetime
v_tt2000_to_epoch
v_tt2000_to_epoch16
libpath
version
.. automethod:: call
.. automethod:: check_status
.. automethod:: datetime_to_epoch
.. automethod:: datetime_to_epoch16
.. automethod:: datetime_to_tt2000
.. automethod:: epoch_to_datetime
.. automethod:: epoch_to_epoch16
.. automethod:: epoch_to_num
.. automethod:: epoch_to_tt2000
.. automethod:: epoch16_to_datetime
.. automethod:: epoch16_to_epoch
.. automethod:: epoch16_to_tt2000
.. automethod:: set_backward
.. attribute:: supports_int8
True if this library supports INT8 and TIME_TT2000 types; else False.
.. automethod:: tt2000_to_datetime
.. automethod:: tt2000_to_epoch
.. automethod:: tt2000_to_epoch16
.. method:: v_datetime_to_epoch(datetime)
A vectorized version of :meth:`datetime_to_epoch` which takes a
numpy array of datetimes as input and returns an array of epochs.
.. method:: v_datetime_to_epoch16(datetime)
A vectorized version of :meth:`datetime_to_epoch16` which takes a
numpy array of datetimes as input and returns an array of epoch16.
.. method:: v_datetime_to_tt2000(datetime)
A vectorized version of :meth:`datetime_to_tt2000` which takes a
numpy array of datetimes as input and returns an array of TT2000.
.. method:: v_epoch_to_datetime(epoch)
A vectorized version of :meth:`epoch_to_datetime` which takes a
numpy array of epochs as input and returns an array of datetimes.
.. method:: v_epoch_to_tt2000(epoch)
A vectorized version of :meth:`epoch_to_tt2000` which takes a
numpy array of epochs as input and returns an array of tt2000s.
.. method:: v_epoch16_to_datetime(epoch0, epoch1)
A vectorized version of :meth:`epoch16_to_datetime` which takes
a numpy array of epoch16 as input and returns an array of datetimes.
An epoch16 is a pair of doubles; the input array's last dimension
must be two (and the returned array will have one fewer dimension).
.. method:: v_epoch16_to_tt2000(epoch16)
A vectorized version of :meth:`epoch16_to_tt2000` which takes
a numpy array of epoch16 as input and returns an array of tt2000s.
An epoch16 is a pair of doubles; the input array's last dimension
must be two (and the returned array will have one fewer dimension).
.. method:: v_tt2000_to_datetime(tt2000)
A vectorized version of :meth:`tt2000_to_datetime` which takes
a numpy array of tt2000 as input and returns an array of datetimes.
.. method:: v_tt2000_to_epoch(tt2000)
A vectorized version of :meth:`tt2000_to_epoch` which takes
a numpy array of tt2000 as input and returns an array of epochs.
.. method:: v_tt2000_to_epoch16(tt2000)
A vectorized version of :meth:`tt2000_to_epoch16` which takes
a numpy array of tt2000 as input and returns an array of epoch16.
.. attribute:: libpath
The path where pycdf found the CDF C library, potentially useful in
debugging. If this contains just the name of a file (with no path
information), then the system linker found the library for pycdf.
On Linux, ``ldconfig -p`` may be useful for displaying the system's
library resolution.
.. attribute:: version
Version of the CDF library, (version, release, increment, subincrement)
"""
def __init__(self, libpath=None, library=None):
"""Load the CDF C library.
Searches for the library in the order:
1. Appropriately-named file in CDF_LIB
2. Appropriately-named file in CDF_BASE
3. Standard library search path
@raise CDFError: BAD_DATA_TYPE if can't map types properly
"""
if not 'CDF_TMP' in os.environ:
os.environ['CDF_TMP'] = tempfile.gettempdir()
if not library:
if not libpath:
self.libpath, self._library = self._find_lib()
if self._library is None:
raise Exception((
'Cannot load CDF C library; checked {0}. '
'Try \'os.environ["CDF_LIB"] = library_directory\' '
'before import.').format(', '.join(self.libpath)))
else:
self._library = ctypes.CDLL(libpath)
self.libpath = libpath
else:
self._library = library
self.libpath = libpath
self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here
self._library.EPOCHbreakdown.restype = ctypes.c_long
self._library.computeEPOCH.restype = ctypes.c_double
self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7
self._library.computeEPOCH16.restype = ctypes.c_double
self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \
[ctypes.POINTER(ctypes.c_double * 2)]
if hasattr(self._library, 'CDFsetFileBackward'):
self._library.CDFsetFileBackward.restype = None
self._library.CDFsetFileBackward.argtypes = [ctypes.c_long]
#Map old name to the 3.7.1+ name
if not hasattr(self._library, 'computeTT2000') \
and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'):
self._library.computeTT2000 \
= self._library.CDF_TT2000_from_UTC_parts
if hasattr(self._library, 'computeTT2000'):
self._library.computeTT2000.restype = ctypes.c_longlong
self._library.computeTT2000.argtypes = \
[ctypes.c_double] *9
#Map old name to the 3.7.1+ name
if not hasattr(self._library, 'breakdownTT2000') \
and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'):
self._library.breakdownTT2000 \
= self._library.CDF_TT2000_to_UTC_parts
if hasattr(self._library, 'breakdownTT2000'):
self._library.breakdownTT2000.restype = None
self._library.breakdownTT2000.argtypes = \
[ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9
if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'):
self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double
self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong]
if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'):
self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong
self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double]
if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'):
self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double
self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \
[ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)]
if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'):
self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \
ctypes.c_longlong
self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \
[ctypes.POINTER(ctypes.c_double * 2)]
#Get CDF version information
ver = ctypes.c_long(0)
rel = ctypes.c_long(0)
inc = ctypes.c_long(0)
sub = ctypes.c_char(b' ')
self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver),
const.GET_, const.LIB_RELEASE_, ctypes.byref(rel),
const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc),
const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub))
ver = ver.value
rel = rel.value
inc = inc.value
sub = sub.value
self.version = (ver, rel, inc, sub)
self._del_middle_rec_bug = ver < 3 or (ver == 3 and
(rel < 4 or
(rel == 4 and inc < 1)))
self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4))
self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE',
const.CDF_CHAR.value: 'CDF_CHAR',
const.CDF_INT1.value: 'CDF_INT1',
const.CDF_UCHAR.value: 'CDF_UCHAR',
const.CDF_UINT1.value: 'CDF_UINT1',
const.CDF_INT2.value: 'CDF_INT2',
const.CDF_UINT2.value: 'CDF_UINT2',
const.CDF_INT4.value: 'CDF_INT4',
const.CDF_UINT4.value: 'CDF_UINT4',
const.CDF_INT8.value: 'CDF_INT8',
const.CDF_FLOAT.value: 'CDF_FLOAT',
const.CDF_REAL4.value: 'CDF_REAL4',
const.CDF_DOUBLE.value: 'CDF_DOUBLE',
const.CDF_REAL8.value: 'CDF_REAL8',
const.CDF_EPOCH.value: 'CDF_EPOCH',
const.CDF_EPOCH16.value: 'CDF_EPOCH16',
const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000',
}
self.numpytypedict = {const.CDF_BYTE.value: numpy.int8,
const.CDF_CHAR.value: numpy.int8,
const.CDF_INT1.value: numpy.int8,
const.CDF_UCHAR.value: numpy.uint8,
const.CDF_UINT1.value: numpy.uint8,
const.CDF_INT2.value: numpy.int16,
const.CDF_UINT2.value: numpy.uint16,
const.CDF_INT4.value: numpy.int32,
const.CDF_UINT4.value: numpy.uint32,
const.CDF_INT8.value: numpy.int64,
const.CDF_FLOAT.value: numpy.float32,
const.CDF_REAL4.value: numpy.float32,
const.CDF_DOUBLE.value: numpy.float64,
const.CDF_REAL8.value: numpy.float64,
const.CDF_EPOCH.value: numpy.float64,
const.CDF_EPOCH16.value:
numpy.dtype((numpy.float64, 2)),
const.CDF_TIME_TT2000.value: numpy.int64,
}
self.timetypes = [const.CDF_EPOCH.value,
const.CDF_EPOCH16.value,
const.CDF_TIME_TT2000.value]
if not self.supports_int8:
del self.cdftypenames[const.CDF_INT8.value]
del self.numpytypedict[const.CDF_INT8.value]
del self.cdftypenames[const.CDF_TIME_TT2000.value]
del self.numpytypedict[const.CDF_TIME_TT2000.value]
elif sys.platform.startswith('linux') \
and os.uname()[4].startswith('arm') \
and hasattr(self._library, 'computeTT2000') \
and self._library.computeTT2000(
2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000:
#TT2000 call failed, so probably need to type-pun
#double arguments to variadic functions.
#Calling convention for non-variadic functions with floats
#is unique, but convention for ints is same as variadic.
#So type-pun arguments to integers to force that calling
#convention.
if ctypes.sizeof(ctypes.c_longlong) != \
ctypes.sizeof(ctypes.c_double):
warnings.warn('ARM with unknown type sizes; '
'TT2000 functions will not work.')
else:
self._library.computeTT2000.argtypes = \
[ctypes.c_longlong] * 9
c_ll_p = ctypes.POINTER(ctypes.c_longlong)
if self._library.computeTT2000(
ctypes.cast(ctypes.pointer(ctypes.c_double(
2010)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
1)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
1)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents) != 315576066184000000:
warnings.warn('ARM with unknown calling convention; '
'TT2000 functions will not work.')
self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned
v_epoch16_to_datetime = numpy.frompyfunc(
self.epoch16_to_datetime, 2, 1)
self.v_epoch16_to_datetime = \
lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1])
self.v_epoch_to_datetime = numpy.frompyfunc(
self.epoch_to_datetime, 1, 1)
self.v_tt2000_to_datetime = numpy.frompyfunc(
self.tt2000_to_datetime, 1, 1)
self.v_datetime_to_epoch = numpy.vectorize(
self.datetime_to_epoch, otypes=[numpy.float64])
v_datetime_to_epoch16 = numpy.frompyfunc(
self.datetime_to_epoch16, 1, 2)
#frompyfunc returns a TUPLE of the returned values,
#implicitly the 0th dimension. We want everything from one
#call paired, so this rolls the 0th dimension to the last
#(via the second-to-last)
def _v_datetime_to_epoch16(x):
retval = numpy.require(v_datetime_to_epoch16(x),
dtype=numpy.float64)
if len(retval.shape) > 1:
return numpy.rollaxis(
numpy.rollaxis(retval, 0, -1),
-1, -2)
else:
return retval
self.v_datetime_to_epoch16 = _v_datetime_to_epoch16
self.v_datetime_to_tt2000 = numpy.vectorize(
self.datetime_to_tt2000, otypes=[numpy.int64])
self.v_epoch_to_tt2000 = numpy.vectorize(
self.epoch_to_tt2000, otypes=[numpy.int64])
self.v_tt2000_to_epoch = numpy.vectorize(
self.tt2000_to_epoch, otypes=[numpy.float64])
v_epoch16_to_tt2000 = numpy.frompyfunc(
self.epoch16_to_tt2000, 2, 1)
self.v_epoch16_to_tt2000 = \
lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1])
v_tt2000_to_epoch16 = numpy.frompyfunc(
self.tt2000_to_epoch16, 1, 2)
#frompyfunc returns a TUPLE of the returned values,
#implicitly the 0th dimension. We want everything from one
#call paired, so this rolls the 0th dimension to the last
#(via the second-to-last)
def _v_tt2000_to_epoch16(x):
retval = numpy.require(v_tt2000_to_epoch16(x),
dtype=numpy.float64)
if len(retval.shape) > 1:
return numpy.rollaxis(
numpy.rollaxis(retval, 0, -1),
-1, -2)
else:
return retval
self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16
if not self.supports_int8:
self.datetime_to_tt2000 = self._bad_tt2000
self.tt2000_to_datetime = self._bad_tt2000
self.v_datetime_to_tt2000 = self._bad_tt2000
self.v_tt2000_to_datetime = self._bad_tt2000
self.epoch_to_tt2000 = self._bad_tt2000
self.v_epoch_to_tt2000 = self._bad_tt2000
self.tt2000_to_epoch = self._bad_tt2000
self.v_tt2000_to_epoch = self._bad_tt2000
self.epoch_16_to_tt2000 = self._bad_tt2000
self.v_epoch16_to_tt2000 = self._bad_tt2000
self.tt2000_to_epoch16 = self._bad_tt2000
self.v_tt2000_to_epoch16 = self._bad_tt2000
#Default to V2 CDF
self.set_backward(True)
@staticmethod
def _find_lib():
    """
    Search for the CDF library

    Searches in likely locations for CDF libraries and attempts to load
    them. Stops at first successful load and, if fails, reports all
    the files that were tried as libraries.

    Returns
    =======
    out : tuple
        This is either (path to library, loaded library)
        or, in the event of failure, (list of library paths tried, None)
    """
    failed = []
    for libpath in Library._lib_paths():
        try:
            lib = ctypes.CDLL(libpath)
        except:
            #Load can fail for many reasons (wrong architecture, missing
            #dependencies); remember the path and try the next candidate.
            failed.append(libpath)
        else:
            #First successful load wins.
            return libpath, lib
    return (failed, None)
@staticmethod
def _lib_paths():
    """Find candidate paths for the CDF library

    Does not check that the library is actually in any particular directory,
    just returns a list of possible locations, in priority order.

    Returns
    =======
    out : generator of str
        paths that look like the CDF library
    """
    #Candidate file names, by platform
    libnames = {'win32': ['cdf.dll'],
                'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'],
                'linux2': ['libcdf.so'],
                'linux': ['libcdf.so'],
                }.get(sys.platform, ['libcdf.so'])

    def existing_in(directory):
        #Yield every CDF-library-like file that exists in directory
        for libname in libnames:
            candidate = os.path.join(directory, libname)
            if os.path.exists(candidate):
                yield candidate

    #Only use anaconda locations: PREFIX is defined during builds,
    #CONDA_PREFIX when an environment is activated.
    for envvar in ('PREFIX', 'CONDA_PREFIX'):
        if envvar not in os.environ:
            continue
        if sys.platform == 'win32':
            libdir = os.path.join(os.environ[envvar], 'Library', 'bin')
        else:
            libdir = os.path.join(os.environ[envvar], 'lib')
        for candidate in existing_in(libdir):
            yield candidate
    #Special subdirectory for anaconda unix packages on windows
    if 'LIBRARY_BIN' in os.environ:
        for candidate in existing_in(os.environ['LIBRARY_BIN']):
            yield candidate
    #Fall back to the system loader's idea of where the library lives
    ctypespath = ctypes.util.find_library(
        'cdf.dll' if sys.platform == 'win32' else 'cdf')
    if ctypespath:
        yield ctypespath
def check_status(self, status, ignore=()):
    """
    Raise exception or warning based on return status of CDF call

    Parameters
    ==========
    status : int
        status returned by the C library

    Other Parameters
    ================
    ignore : sequence of ctypes.c_long
        CDF statuses to ignore. If any of these is returned by CDF library,
        any related warnings or exceptions will *not* be raised.
        (Default none).

    Raises
    ======
    CDFError : if status < CDF_WARN, indicating an error

    Warns
    =====
    CDFWarning : if CDF_WARN <= status < CDF_OK, indicating a warning.

    Returns
    =======
    out : int
        status (unchanged)
    """
    if status != const.CDF_OK and status not in ignore:
        #Below CDF_WARN means a hard error; otherwise just warn.
        if status < const.CDF_WARN:
            raise CDFError(status)
        CDFWarning(status).warn()
    return status
def call(self, *args, **kwargs):
    """
    Call the CDF internal interface

    Passes all parameters directly through to the CDFlib routine of the
    CDF library's C internal interface. Checks the return value with
    :meth:`check_status`.

    Terminal NULL is automatically added to args.

    Parameters
    ==========
    args : various, see :mod:`ctypes`
        Passed directly to the CDF library interface. Useful
        constants are defined in the :mod:`~pycdf.const` module.

    Other Parameters
    ================
    ignore : sequence of CDF statuses
        sequence of CDF statuses to ignore. If any of these
        is returned by CDF library, any related warnings or
        exceptions will *not* be raised.

    Returns
    =======
    out : int
        CDF status from the library

    Raises
    ======
    CDFError : if CDF library reports an error

    Warns
    =====
    CDFWarning : if CDF library reports a warning
    """
    #CDFlib requires a terminal NULL_ marker after the operation list.
    status = self._library.CDFlib(*(args + (const.NULL_, )))
    return self.check_status(status, kwargs.get('ignore', ()))
def set_backward(self, backward=True):
    """
    Set backward compatibility mode for new CDFs

    Unless backward compatible mode is set, CDF files created by
    the version 3 library can not be read by V2.

    Parameters
    ==========
    backward : boolean
        Set backward compatible mode if True; clear it if False.

    Raises
    ======
    ValueError : if backward=False and underlying CDF library is V2
    """
    #A V2 library always writes V2 files; nothing to do (and nothing
    #that can be done about it).
    if self.version[0] < 3:
        if backward:
            return
        raise ValueError(
            'Cannot disable backward-compatible mode for CDF version 2.')
    mode = const.BACKWARDFILEon if backward else const.BACKWARDFILEoff
    self._library.CDFsetFileBackward(mode)
def epoch_to_datetime(self, epoch):
    """
    Converts a CDF epoch value to a datetime

    Parameters
    ==========
    epoch : float
        epoch value from CDF

    Returns
    =======
    out : :class:`datetime.datetime`
        date and time corresponding to epoch. Invalid values are set to
        usual epoch invalid value, i.e. last moment of year 9999.

    See Also
    ========
    v_epoch_to_datetime
    """
    yyyy = ctypes.c_long(0)
    mm = ctypes.c_long(0)
    dd = ctypes.c_long(0)
    hh = ctypes.c_long(0)
    min = ctypes.c_long(0)
    sec = ctypes.c_long(0)
    msec = ctypes.c_long(0)
    self._library.EPOCHbreakdown(ctypes.c_double(epoch),
                                 ctypes.byref(yyyy), ctypes.byref(mm),
                                 ctypes.byref(dd),
                                 ctypes.byref(hh), ctypes.byref(min),
                                 ctypes.byref(sec), ctypes.byref(msec))
    if yyyy.value <= 0:
        #Bug fix: sentinel was December 13; docstring (and the other
        #conversion routines' intent) promise the LAST moment of year
        #9999, i.e. December 31.
        return datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)
    else:
        #EPOCH carries milliseconds; datetime wants microseconds.
        return datetime.datetime(yyyy.value, mm.value, dd.value,
                                 hh.value, min.value, sec.value,
                                 msec.value * 1000)
def datetime_to_epoch(self, dt):
    """
    Converts a Python datetime to a CDF Epoch value

    Parameters
    ==========
    dt : :class:`datetime.datetime`
        date and time to convert; timezone-aware values are converted
        to UTC first.

    Returns
    =======
    out : float
        epoch corresponding to dt

    See Also
    ========
    v_datetime_to_epoch
    """
    if dt.tzinfo != None and dt.utcoffset() != None:
        dt = dt - dt.utcoffset()
    #Bug fix: replace() returns a new datetime; the result was previously
    #discarded. Assign it, matching datetime_to_tt2000.
    dt = dt.replace(tzinfo=None)
    #EPOCH has only millisecond resolution; round the sub-millisecond
    #part to nearest (unless that would overflow year 9999).
    micro = dt.microsecond % 1000
    if micro >= 500 and dt.year < 9999:
        dt += datetime.timedelta(0, 0, 1000)
    return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour,
                                      dt.minute, dt.second,
                                      int(dt.microsecond / 1000))
def epoch16_to_datetime(self, epoch0, epoch1):
    """
    Converts a CDF epoch16 value to a datetime

    .. note::
        The call signature has changed since SpacePy 0.1.2. Formerly
        this method took a single argument with two values; now it
        requires two arguments (one for each value). To convert existing
        code, replace ``epoch16_to_datetime(epoch)`` with
        ``epoch16_to_datetime(*epoch)``.

    Parameters
    ==========
    epoch0 : float
        epoch16 value from CDF, first half
    epoch1 : float
        epoch16 value from CDF, second half

    Raises
    ======
    EpochError : if input invalid

    Returns
    =======
    out : :class:`datetime.datetime`
        date and time corresponding to epoch. Invalid values are set to
        usual epoch invalid value, i.e. last moment of year 9999.

    See Also
    ========
    v_epoch16_to_datetime
    """
    yyyy = ctypes.c_long(0)
    mm = ctypes.c_long(0)
    dd = ctypes.c_long(0)
    hh = ctypes.c_long(0)
    min = ctypes.c_long(0)
    sec = ctypes.c_long(0)
    msec = ctypes.c_long(0)
    usec = ctypes.c_long(0)
    nsec = ctypes.c_long(0)
    psec = ctypes.c_long(0)
    self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1),
                                   ctypes.byref(yyyy), ctypes.byref(mm),
                                   ctypes.byref(dd),
                                   ctypes.byref(hh), ctypes.byref(min),
                                   ctypes.byref(sec), ctypes.byref(msec),
                                   ctypes.byref(usec), ctypes.byref(nsec),
                                   ctypes.byref(psec))
    if yyyy.value <= 0:
        #Bug fix: sentinel was December 13; docstring promises the LAST
        #moment of year 9999, i.e. December 31.
        return datetime.datetime(9999, 12, 31, 23, 59, 59, 999999)
    #Fold milli/micro/nano/picoseconds into microseconds, rounding to
    #nearest (datetime resolution is microseconds).
    micro = int(float(msec.value) * 1000 + float(usec.value) +
                float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5)
    if micro < 1000000:
        return datetime.datetime(yyyy.value, mm.value, dd.value,
                                 hh.value, min.value, sec.value,
                                 micro)
    else:
        #Rounding carried into the next second; let timedelta arithmetic
        #propagate the carry through minutes/hours/days.
        add_sec = int(micro / 1000000)
        try:
            return datetime.datetime(yyyy.value, mm.value, dd.value,
                                     hh.value, min.value, sec.value,
                                     micro - add_sec * 1000000) + \
                datetime.timedelta(seconds=add_sec)
        except OverflowError:
            #Carry past year 9999: clamp to datetime's maximum.
            return datetime.datetime(datetime.MAXYEAR, 12, 31,
                                     23, 59, 59,
                                     999999)
def datetime_to_epoch16(self, dt):
    """
    Converts a Python datetime to a CDF Epoch16 value

    Parameters
    ==========
    dt : :class:`datetime.datetime`
        date and time to convert; timezone-aware values are converted
        to UTC first.

    Returns
    =======
    out : list of float
        epoch16 corresponding to dt

    See Also
    ========
    v_datetime_to_epoch16
    """
    if dt.tzinfo != None and dt.utcoffset() != None:
        dt = dt - dt.utcoffset()
    #Bug fix: replace() returns a new datetime; the result was previously
    #discarded. Assign it, matching datetime_to_tt2000.
    dt = dt.replace(tzinfo=None)
    #Default to "illegal epoch"
    epoch16 = (ctypes.c_double * 2)(-1., -1.)
    if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour,
                                    dt.minute, dt.second,
                                    int(dt.microsecond / 1000),
                                    dt.microsecond % 1000, 0, 0,
                                    epoch16):
        return (-1., -1.) #Failure, so illegal epoch
    return (epoch16[0], epoch16[1])
def epoch_to_epoch16(self, epoch):
    """
    Converts a CDF EPOCH to a CDF EPOCH16 value

    Parameters
    ==========
    epoch : double
        EPOCH to convert. Lists and numpy arrays are acceptable.

    Returns
    =======
    out : (double, double)
        EPOCH16 corresponding to epoch
    """
    ep = numpy.require(epoch, numpy.float64)
    #Split into whole seconds and the remainder in picoseconds
    whole_sec = numpy.trunc(ep / 1000.0)
    pico = (ep - whole_sec * 1000.0) * 1e9
    stacked = numpy.hstack((whole_sec, pico))
    if len(stacked) <= 2:
        #Scalar input: already a (seconds, picoseconds) pair
        return stacked
    #Array input: pair up each epoch's seconds with its picoseconds
    #along a new trailing dimension of length 2.
    paired_shape = list(stacked.shape[0:-2]) \
        + [stacked.shape[-1] // 2, 2]
    return numpy.rollaxis(stacked.reshape(paired_shape), -1, -2)
def epoch_to_num(self, epoch):
    """
    Convert CDF EPOCH to matplotlib number.

    Same output as :func:`~matplotlib.dates.date2num` and useful for
    plotting large data sets without converting the times through datetime.

    Parameters
    ==========
    epoch : double
        EPOCH to convert. Lists and numpy arrays are acceptable.

    Returns
    =======
    out : double
        Floating point number representing days since 0001-01-01.
    """
    #date2num day 1 is 0001-01-01 00UT, which is EPOCH 31622400000.0
    #(EPOCH is in milliseconds).
    msec_per_day = 24 * 60 * 60 * 1000.0
    return (epoch - 31622400000.0) / msec_per_day + 1.0
def epoch16_to_epoch(self, epoch16):
    """
    Converts a CDF EPOCH16 to a CDF EPOCH value

    Parameters
    ==========
    epoch16 : (double, double)
        EPOCH16 to convert. Lists and numpy arrays are acceptable.
        LAST dimension should be 2: the two pairs of EPOCH16

    Returns
    =======
    out : double
        EPOCH corresponding to epoch16
    """
    ep = numpy.require(epoch16, numpy.float64)
    #Seconds become milliseconds; picoseconds round to nearest millisecond
    milliseconds = numpy.round(ep[..., 1] / 1e9)
    return ep[..., 0] * 1000.0 + milliseconds
def tt2000_to_datetime(self, tt2000):
    """
    Converts a CDF TT2000 value to a datetime

    .. note::
        Although TT2000 values support leapseconds, Python's datetime
        object does not. Any times after 23:59:59.999999 will
        be truncated to 23:59:59.999999.

    Parameters
    ==========
    tt2000 : int
        TT2000 value from CDF

    Raises
    ======
    EpochError : if input invalid

    Returns
    =======
    out : :class:`datetime.datetime`
        date and time corresponding to epoch. Invalid values are set to
        usual epoch invalid value, i.e. last moment of year 9999.

    See Also
    ========
    v_tt2000_to_datetime
    """
    #breakdownTT2000 argtypes are doubles here (see library setup).
    yyyy = ctypes.c_double(0)
    mm = ctypes.c_double(0)
    dd = ctypes.c_double(0)
    hh = ctypes.c_double(0)
    min = ctypes.c_double(0)
    sec = ctypes.c_double(0)
    msec = ctypes.c_double(0)
    usec = ctypes.c_double(0)
    nsec = ctypes.c_double(0)
    self._library.breakdownTT2000(
        ctypes.c_longlong(tt2000),
        ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd),
        ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec),
        ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec))
    if yyyy.value <= 0:
        #Bug fix: sentinel was December 13; docstring promises the LAST
        #moment of year 9999, i.e. December 31.
        return datetime.datetime(9999, 12, 31, 23, 59, 59, 999999)
    sec = int(sec.value)
    if sec >= 60:
        #Leapsecond: datetime cannot represent :60, truncate to end of
        #the previous second.
        return datetime.datetime(
            int(yyyy.value), int(mm.value), int(dd.value),
            int(hh.value), int(min.value), 59, 999999)
    #Fold milli/micro/nanoseconds into microseconds, rounding to nearest
    micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5)
    if micro < 1000000:
        return datetime.datetime(
            int(yyyy.value), int(mm.value), int(dd.value),
            int(hh.value), int(min.value), sec, micro)
    else:
        #Rounding carried into the next second; let timedelta arithmetic
        #propagate the carry.
        add_sec = int(micro / 1000000)
        try:
            return datetime.datetime(
                int(yyyy.value), int(mm.value), int(dd.value),
                int(hh.value), int(min.value), sec,
                micro - add_sec * 1000000) + \
                datetime.timedelta(seconds=add_sec)
        except OverflowError:
            #Carry past year 9999: clamp to datetime's maximum.
            return datetime.datetime(datetime.MAXYEAR, 12, 31,
                                     23, 59, 59, 999999)
def datetime_to_tt2000(self, dt):
    """
    Converts a Python datetime to a CDF TT2000 value

    Parameters
    ==========
    dt : :class:`datetime.datetime`
        date and time to convert

    Returns
    =======
    out : int
        tt2000 corresponding to dt

    See Also
    ========
    v_datetime_to_tt2000
    """
    #Normalize timezone-aware input to naive UTC
    offset = dt.utcoffset() if dt.tzinfo is not None else None
    if offset is not None:
        dt = dt - offset
    dt = dt.replace(tzinfo=None)
    if dt == datetime.datetime.max:
        #datetime's maximum maps to the TT2000 fill value
        return -2 ** 63
    millisec, microsec = divmod(dt.microsecond, 1000)
    return self._library.computeTT2000(
        dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
        millisec, microsec, 0)
def _datetime_to_tt2000_typepunned(self, dt):
    """
    Converts a Python datetime to a CDF TT2000 value

    Typepunned version that passes doubles as longlongs, to get around
    ARM calling convention oddness.

    Parameters
    ==========
    dt : :class:`datetime.datetime`
        date and time to convert

    Returns
    =======
    out : int
        tt2000 corresponding to dt

    See Also
    ========
    v_datetime_to_tt2000
    """
    c_ll_p = ctypes.POINTER(ctypes.c_longlong)
    #Convert timezone-aware input to naive UTC
    if dt.tzinfo != None and dt.utcoffset() != None:
        dt = dt - dt.utcoffset()
    dt = dt.replace(tzinfo=None)
    if dt == datetime.datetime.max:
        #datetime's maximum maps to the TT2000 fill value
        return -2**63
    #Each field is stored in a c_double, then the same BITS are
    #reinterpreted as a long long (no numeric conversion) so the
    #variadic calling convention passes them in integer registers.
    return self._library.computeTT2000(
        ctypes.cast(ctypes.pointer(ctypes.c_double(
            dt.year)), c_ll_p).contents,
        ctypes.cast(ctypes.pointer(ctypes.c_double(
            dt.month)), c_ll_p).contents,
        ctypes.cast(ctypes.pointer(ctypes.c_double(
            dt.day)), c_ll_p).contents,
        ctypes.cast(ctypes.pointer(ctypes.c_double(
            dt.hour)), c_ll_p).contents,
        ctypes.cast(ctypes.pointer(ctypes.c_double(
            dt.minute)), c_ll_p).contents,
        ctypes.cast(ctypes.pointer(ctypes.c_double(
            dt.second)), c_ll_p).contents,
        ctypes.cast(ctypes.pointer(ctypes.c_double(
            dt.microsecond // 1000)), c_ll_p).contents,
        ctypes.cast(ctypes.pointer(ctypes.c_double(
            dt.microsecond % 1000)), c_ll_p).contents,
        ctypes.cast(ctypes.pointer(ctypes.c_double(
            0)), c_ll_p).contents)
def epoch_to_tt2000(self, epoch):
    """
    Converts a CDF EPOCH to a CDF TT2000 value

    Parameters
    ==========
    epoch : double
        EPOCH to convert

    Returns
    =======
    out : int
        tt2000 corresponding to epoch

    See Also
    ========
    v_epoch_to_tt2000
    """
    #Thin wrapper; the C library does the conversion.
    return self._library.CDF_TT2000_from_UTC_EPOCH(epoch)
def tt2000_to_epoch(self, tt2000):
    """
    Converts a CDF TT2000 value to a CDF EPOCH

    .. note::
        Although TT2000 values support leapseconds, CDF EPOCH values
        do not. Times during leapseconds are rounded up to beginning
        of the next day.

    Parameters
    ==========
    tt2000 : int
        TT2000 value from CDF

    Raises
    ======
    EpochError : if input invalid

    Returns
    =======
    out : double
        EPOCH corresponding to the TT2000 input time

    See Also
    ========
    v_tt2000_to_epoch
    """
    #Thin wrapper; the C library does the conversion.
    return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000)
def epoch16_to_tt2000(self, epoch0, epoch1):
    """
    Converts a CDF epoch16 value to TT2000

    .. note::
        Because TT2000 does not support picoseconds, the picoseconds
        value in epoch is ignored (i.e., truncated.)

    Parameters
    ==========
    epoch0 : float
        epoch16 value from CDF, first half
    epoch1 : float
        epoch16 value from CDF, second half

    Raises
    ======
    EpochError : if input invalid

    Returns
    =======
    out : long
        TT2000 corresponding to epoch.

    See Also
    ========
    v_epoch16_to_tt2000
    """
    #Pack the two EPOCH16 halves into the 2-double array the C API wants.
    return self._library.CDF_TT2000_from_UTC_EPOCH16(
        (ctypes.c_double * 2)(epoch0, epoch1))
def tt2000_to_epoch16(self, tt2000):
    """
    Converts a CDF TT2000 value to a CDF EPOCH16

    .. note::
        Although TT2000 values support leapseconds, CDF EPOCH16 values
        do not. Times during leapseconds are rounded up to beginning
        of the next day.

    Parameters
    ==========
    tt2000 : int
        TT2000 value from CDF

    Raises
    ======
    EpochError : if input invalid

    Returns
    =======
    out : double, double
        EPOCH16 corresponding to the TT2000 input time

    See Also
    ========
    v_tt2000_to_epoch16
    """
    #Buffer preloaded with the "illegal epoch" sentinel in case the
    #library fails without populating it.
    buf = (ctypes.c_double * 2)(-1., -1.)
    failed = self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, buf)
    return (-1., -1.) if failed else (buf[0], buf[1])
def _bad_tt2000(*args, **kwargs):
"""Convenience function for complaining that TT2000 not supported"""
raise NotImplementedError(
'TT2000 functions require CDF library 3.4.0 or later')
def download_library():
    """Download and install the CDF library

    Scrapes the NASA SPDF distribution listing, picks the newest CDF 3.x
    release, downloads the Windows installer matching this Python's
    pointer size, and runs it quietly. Windows only.
    """
    if sys.platform != 'win32':
        raise NotImplementedError(
            'CDF library install only supported on Windows')
    try:
        import html.parser as HTMLParser
    except ImportError:
        #Python 2 fallback
        import HTMLParser
    #https://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj
    class LinkParser(HTMLParser.HTMLParser, object):
        #Collects every href target seen in the page into links_found
        def __init__(self, *args, **kwargs):
            self.links_found = []
            super(LinkParser, self).__init__(*args, **kwargs)
        def handle_starttag(self, tag, attrs):
            if tag != 'a' or attrs[0][0] != 'href':
                return
            self.links_found.append(attrs[0][1])
    import re
    import subprocess
    try:
        import urllib.request as u
    except ImportError:
        #Python 2 fallback
        import urllib as u
    # Removed reference to spacepy
    #import spacepy
    #if spacepy.config.get('user_agent', None):
    #    class AppURLopener(u.FancyURLopener):
    #        version = spacepy.config['user_agent']
    #    u._urlopener = AppURLopener()
    baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/'
    url = u.urlopen(baseurl)
    listing = url.read()
    url.close()
    p = LinkParser()
    p.feed(listing)
    #Links that look like CDF 3.x release directories, e.g. cdf37_1/
    cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)]
    if not cdfdist:
        raise RuntimeError(
            "Couldn't find CDF distribution directory to download")
    #Sort by version components; last element is the newest release
    cdfdist.sort(key=lambda x: x.rstrip('/').split('_'))
    cdfverbase = cdfdist[-1].rstrip('/')
    #Installer name embeds the bitness: 32 or 64 from sys.maxsize width
    instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \
        '-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4)
    insturl = baseurl + cdfverbase + '/windows/' + instfname
    tmpdir = tempfile.mkdtemp()
    try:
        fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname))
        #Quiet install; shell=False since args are a list
        subprocess.check_call([fname, '/install', '/q1'], shell=False)
    finally:
        #Always clean up the downloaded installer
        shutil.rmtree(tmpdir)
#Load the CDF C library at import time; fail loudly if none can be found.
_libpath, _library = Library._find_lib()
if _library is None:
    #_libpath holds the list of candidate paths that failed to load
    raise Exception(('Cannot load CDF C library; checked {0}. '
                     'Try \'os.environ["CDF_LIB"] = library_directory\' '
                     'before import.').format(', '.join(_libpath)))
from . import const
lib = Library(_libpath, _library)
"""Module global library object.

Initalized at module load time so all classes have ready
access to the CDF library and a common state. E.g:
    >>> import pycdf
    >>> pycdf.lib.version
        (3, 3, 0, ' ')
"""
class CDFException(Exception):
    """
    Base class for errors or warnings in the CDF library.

    Not normally used directly, but in subclasses :class:`CDFError`
    and :class:`CDFWarning`.

    Error messages provided by this class are looked up from the underlying
    C library.
    """
    def __init__(self, status):
        """
        Create a CDF Exception

        Uses CDF C library to look up an appropriate error message.

        Parameters
        ==========
        status : ctypes.c_long
            CDF status
        """
        self.status = status
        #Fallback message in case the library lookup below fails
        self.string = 'CDF error ' + repr(status) + ', unable to get details.'
        message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1)
        try:
            #Ask the C library for the human-readable text of this status
            retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_,
                                         ctypes.c_long(status),
                                         const.GET_, const.STATUS_TEXT_, message,
                                         const.NULL_)
            if retval == const.CDF_OK:
                #Python 2 returns str, Python 3 returns bytes
                if isinstance(message.value, str):
                    self.string = message.value
                elif isinstance(message.value, bytes):
                    self.string = message.value.decode()
        except:
            #NOTE(review): bare except deliberately keeps the fallback
            #message if the lookup fails for any reason.
            pass

    def __str__(self):
        """
        Error string associated with the library error.

        Returns
        =======
        out : str
            Error message from the CDF library.
        """
        return self.string
class CDFError(CDFException):
    """Raised for an error in the CDF library."""
class CDFWarning(CDFException, UserWarning):
    """Used for a warning in the CDF library."""

    def warn(self, level=4):
        """
        Issues a warning based on the information stored in my exception

        Intended for use in check_status or similar wrapper function.

        Other Parameters
        ================
        level : int
            optional (default 4), how far up the stack the warning should
            be reported. Passed directly to :class:`warnings.warn` as
            its stacklevel.
        """
        warnings.warn(self, self.__class__, level)
class EpochError(Exception):
    """Raised for errors in the epoch conversion routines."""
def _compress(obj, comptype=None, param=None):
    """Set or check the compression of a :py:class:`pycdf.CDF` or :py:class:`pycdf.Var`

    @param obj: object on which to set or check compression
    @type obj: :py:class:`pycdf.CDF` or :py:class:`pycdf.Var`
    @param comptype: type of compression to change to, see CDF C reference
                     manual section 4.10. Constants for this parameter
                     are in :py:mod:`pycdf.const`. If not specified, will not change
                     compression.
    @type comptype: ctypes.c_long
    @param param: Compression parameter, see CDF CRM 4.10 and :py:mod:`pycdf.const`.
                  If not specified, will choose reasonable default (5 for
                  gzip; other types have only one possible parameter.)
    @type param: ctypes.c_long
    @return: (comptype, param) currently in effect
    @rtype: tuple
    """
    #CDF files and zVariables use different item codes for compression
    if isinstance(obj, CDF):
        COMPRESSION_ = const.CDF_COMPRESSION_
    elif isinstance(obj, Var):
        COMPRESSION_ = const.zVAR_COMPRESSION_
    else:
        raise ValueError('Must specify a CDF or Var type.')

    #Acceptable parameter values for each compression type; the first
    #entry is the default used when param is not given.
    validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)],
                   const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs],
                   const.HUFF_COMPRESSION.value:
                       [const.OPTIMAL_ENCODING_TREES],
                   const.AHUFF_COMPRESSION.value:
                       [const.OPTIMAL_ENCODING_TREES],
                   const.GZIP_COMPRESSION.value: [ctypes.c_long(5),
                                                  ctypes.c_long(1),
                                                  ctypes.c_long(2),
                                                  ctypes.c_long(3),
                                                  ctypes.c_long(4),
                                                  ctypes.c_long(6),
                                                  ctypes.c_long(7),
                                                  ctypes.c_long(8),
                                                  ctypes.c_long(9),
                                                  ],
                   }
    comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION,
                 const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION,
                 const.GZIP_COMPRESSION]
    comptypevalues = [i.value for i in comptypes]

    if comptype != None:
        #Setting compression: normalize to ctypes and apply
        if not hasattr(comptype, 'value'):
            comptype = ctypes.c_long(comptype)
        if param is None:
            if not comptype.value in validparams:
                raise CDFError(const.BAD_COMPRESSION)
            param = validparams[comptype.value][0]
        paramlist = (ctypes.c_long * 1)(param)
        obj._call(const.PUT_, COMPRESSION_,
                  comptype, paramlist)
    #Read back the compression currently in effect
    params = (ctypes.c_long *
              const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS))
    comptype = ctypes.c_long(0)
    percent = ctypes.c_long(0)
    obj._call(const.GET_, COMPRESSION_,
              ctypes.byref(comptype), ctypes.byref(params),
              ctypes.byref(percent))
    param = params[0]
    if not comptype.value in comptypevalues:
        raise CDFError(const.BAD_COMPRESSION)
    validparamvalues = [i.value for i in validparams[comptype.value]]
    if not param in validparamvalues:
        raise CDFError(const.BAD_COMPRESSION_PARM)
    #Translate raw values back to the canonical const objects
    comptype = comptypes[comptypevalues.index(comptype.value)]
    if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION,
                    const.AHUFF_COMPRESSION):
        param = validparams[comptype.value][validparamvalues.index(param)]
    return (comptype, param)
class CDF(MutableMapping):
"""
Python object representing a CDF file.
Open or create a CDF file by creating an object of this class.
Parameters
==========
pathname : string
name of the file to open or create
masterpath : string
name of the master CDF file to use in creating
a new file. If not provided, an existing file is
opened; if provided but evaluates to ``False``
(e.g., ``''``), an empty new CDF is created.
create : bool
Create a new CDF even if masterpath isn't provided
readonly : bool
Open the CDF read-only. Default True if opening an
existing CDF; False if creating a new one. A readonly
CDF with many variables may be slow to close. See
:meth:`readonly`.
Raises
======
CDFError
if CDF library reports an error
Warns
=====
CDFWarning
if CDF library reports a warning and interpreter
is set to error on warnings.
Examples
========
Open a CDF by creating a CDF object, e.g.:
>>> cdffile = pycdf.CDF('cdf_filename.cdf')
Be sure to :meth:`close` or :meth:`save` when
done.
.. note::
Existing CDF files are opened read-only by default, see
:meth:`readonly` to change.
CDF supports the `with
<http://docs.python.org/tutorial/inputoutput.html#methods-of-file-objects>`_
keyword, like other file objects, so:
>>> with pycdf.CDF('cdf_filename.cdf') as cdffile:
... #do brilliant things with the CDF
will open the CDF, execute the indented statements, and close the CDF when
finished or when an error occurs. The `python docs
<http://docs.python.org/reference/compound_stmts.html#with>`_ include more
detail on this 'context manager' ability.
CDF objects behave like a python `dictionary
<http://docs.python.org/tutorial/datastructures.html#dictionaries>`_,
where the keys are names of variables in the CDF, and the values,
:class:`Var` objects. As a dictionary, they are also `iterable
<http://docs.python.org/tutorial/classes.html#iterators>`_ and it is easy
to loop over all of the variables in a file. Some examples:
#. List the names of all variables in the open CDF ``cdffile``:
>>> cdffile.keys()
>>> for k in cdffile: #Alternate
... print(k)
#. Get a :class:`Var` object for the variable named ``Epoch``:
>>> epoch = cdffile['Epoch']
#. Determine if a CDF contains a variable named ``B_GSE``:
>>> if 'B_GSE' in cdffile:
... print('B_GSE is in the file')
... else:
... print('B_GSE is not in the file')
#. Find how many variables are in the file:
>>> print(len(cdffile))
#. Delete the variable ``Epoch`` from the open CDF file ``cdffile``:
>>> del cdffile['Epoch']
#. Display a summary of variables and types in open CDF file ``cdffile``:
>>> print(cdffile)
#. Open the CDF named ``cdf_filename.cdf``, read *all* the data from
all variables into dictionary ``data``, and close it when done or
if an error occurs:
>>> with pycdf.CDF('cdf_filename.cdf') as cdffile:
... data = cdffile.copy()
This last example can be very inefficient as it reads the entire CDF.
Normally it's better to treat the CDF as a dictionary and access only
the data needed, which will be pulled transparently from disc. See
:class:`Var` for more subtle examples.
Potentially useful dictionary methods and related functions:
- `in <http://docs.python.org/reference/expressions.html#in>`_
- `keys <http://docs.python.org/tutorial/datastructures.html#dictionaries>`_
- :py:func:`len`
- `list comprehensions
<http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_
- :py:func:`sorted`
- :py:func:`~spacepy.toolbox.dictree`
The CDF user's guide section 2.2 has more background information on CDF
files.
The :attr:`~CDF.attrs` Python attribute acts as a dictionary
referencing CDF attributes (do not confuse the two); all the
dictionary methods above also work on the attribute dictionary.
See :class:`gAttrList` for more on the dictionary of global
attributes.
Creating a new CDF from a master (skeleton) CDF has similar syntax to
opening one:
>>> cdffile = pycdf.CDF('cdf_filename.cdf', 'master_cdf_filename.cdf')
This creates and opens ``cdf_filename.cdf`` as a copy of
``master_cdf_filename.cdf``.
Using a skeleton CDF is recommended over making a CDF entirely from
scratch, but this is possible by specifying a blank master:
>>> cdffile = pycdf.CDF('cdf_filename.cdf', '')
When CDFs are created in this way, they are opened read-write, see
:py:meth:`readonly` to change.
By default, new CDFs (without a master) are created in version 2
(backward-compatible) format. To create a version 3 CDF, use
:meth:`Library.set_backward`:
>>> pycdf.lib.set_backward(False)
>>> cdffile = pycdf.CDF('cdf_filename.cdf', '')
Add variables by direct assignment, which will automatically set type
and dimension based on the data provided:
>>> cdffile['new_variable_name'] = [1, 2, 3, 4]
or, if more control is needed over the type and dimensions, use
:py:meth:`new`.
Although it is supported to assign Var objects to Python variables
for convenience, there are some minor pitfalls that can arise when
changing a CDF that will not affect most users. This is only a
concern when assigning a zVar object to a Python variable, changing the
CDF through some other variable, and then trying to use the zVar
object via the originally assigned variable.
Deleting a variable:
>>> var = cdffile['Var1']
>>> del cdffile['Var1']
>>> var[0] #fail, no such variable
Renaming a variable:
>>> var = cdffile['Var1']
>>> cdffile['Var1'].rename('Var2')
>>> var[0] #fail, no such variable
Renaming via the same variable works:
>>> var = cdffile['Var1']
>>> var.rename('Var2')
>>> var[0] #succeeds, aware of new name
Deleting a variable and then creating another variable with the same name
may lead to some surprises:
>>> var = cdffile['Var1']
>>> var[...] = [1, 2, 3, 4]
>>> del cdffile['Var1']
    >>> cdffile.new('Var1', data=[5, 6, 7, 8])
>>> var[...]
[5, 6, 7, 8]
.. autosummary::
~CDF.attr_num
~CDF.attrs
~CDF.add_attr_to_cache
~CDF.add_to_cache
~CDF.backward
~CDF.checksum
~CDF.clear_attr_from_cache
~CDF.clear_from_cache
~CDF.clone
~CDF.close
~CDF.col_major
~CDF.compress
~CDF.copy
~CDF.from_data
~CDF.new
~CDF.raw_var
~CDF.readonly
~CDF.save
~CDF.var_num
~CDF.version
.. attribute:: CDF.attrs
Global attributes for this CDF in a dict-like format.
See :class:`gAttrList` for details.
.. attribute:: CDF.backward
True if this CDF was created in backward-compatible mode
(for opening with CDF library before 3.x)
.. automethod:: add_to_cache
.. automethod:: add_attr_to_cache
.. automethod:: attr_num
.. automethod:: checksum
.. automethod:: clear_from_cache
.. automethod:: clear_attr_from_cache
.. automethod:: clone
.. automethod:: close
.. automethod:: col_major
.. automethod:: compress
.. automethod:: copy
.. automethod:: from_data
.. automethod:: new
.. automethod:: raw_var
.. automethod:: readonly
.. automethod:: save
.. automethod:: var_num
.. automethod:: version
"""
    def __init__(self, pathname, masterpath=None, create=None, readonly=None):
        """Open or create a CDF file.
        Parameters
        ==========
        pathname : string
            name of the file to open or create
        masterpath : string
            name of the master CDF file to use in creating
            a new file. If not provided, an existing file is
            opened; if provided but evaluates to ``False``
            (e.g., ``''``), an empty new CDF is created.
        create : bool
            Create a new CDF even if masterpath isn't provided
        readonly : bool
            Open the CDF read-only. Default True if opening an
            existing CDF; False if creating a new one.
        Raises
        ======
        CDFError
            if CDF library reports an error
        CDFWarning
            if CDF library reports a warning and interpreter
            is set to error on warnings.
        Examples
        ========
        Open a CDF by creating a CDF object, e.g.:
        >>> cdffile = pycdf.CDF('cdf_filename.cdf')
        Be sure to :py:meth:`pycdf.CDF.close` or :py:meth:`pycdf.CDF.save`
        when done.
        """
        #Reject argument combinations that contradict each other before
        #touching the filesystem.
        if masterpath is not None: #Looks like we want to create
            if create is False:
                raise ValueError('Cannot specify a master CDF without creating a CDF')
            if readonly is True:
                raise ValueError('Cannot create a CDF in readonly mode')
        if create and readonly:
            raise ValueError('Cannot create a CDF in readonly mode')
        #The C library wants bytes; str.encode() raises AttributeError for
        #non-string-likes, which is converted to a friendlier ValueError.
        try:
            self.pathname = pathname.encode()
        except AttributeError:
            raise ValueError(
                'pathname must be string-like: {0}'.format(pathname))
        #Opaque handle the CDF library fills in on open/create.
        self._handle = ctypes.c_void_p(None)
        self._opened = False
        #Three ways in: open existing, copy a master, or create empty.
        if masterpath is None and not create:
            self._open(True if readonly is None else readonly)
        elif masterpath:
            self._from_master(masterpath.encode())
        else:
            self._create()
        #Select zMODE 2 for this CDF (presumably so rVariables are
        #presented as zVariables -- confirm against CDF library docs).
        lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2))
        #Weak ref: the attribute list holds a strong back-reference to us.
        self._attrlistref = weakref.ref(gAttrList(self))
        #Backward-compatible (pre-3.x) format is detected from the version
        #of the library that wrote the file.
        self.backward = self.version()[0] < 3
        self._var_nums = {}
        """Cache of name-to-number mappings for variables in this CDF"""
        self._attr_info = {}
        """Cache of name-to-(number, global) mappings for attributes
        in this CDF"""
def __del__(self):
"""Destructor; called when CDF object is destroyed.
Close CDF file if there is still a valid handle.
.. note::
To avoid data loss, explicitly call :py:meth:`pycdf.CDF.close`
or :py:meth:`pycdf.CDF.save`.
"""
if self._opened:
self.close()
def __delitem__(self, name):
"""Delete a zVariable in this CDF, by name or number
Parameters
==========
name : string or int
Name or number of the CDF variable
.. note:
Variable numbers may change if variables are added or removed.
Examples
========
Delete the variable ``Epoch`` from the open CDF file ``cdffile``.
>>> del cdffile['Epoch']
"""
self[name]._delete()
def __enter__(self):
"""Context manager entrance function."""
return self
def __exit__(self, type, value, traceback):
"""Context manager exit function.
Close CDF file.
"""
self.close()
def __getitem__(self, name):
"""Gets a zVariable in this CDF, by name or number
The CDF acts like a dict
@param name: Name or number of the CDF variable
@type name: string or int
@return: CDF variable named or numbered L{name}
@rtype: :py:class:`pycdf.Var`
@raise KeyError: for pretty much any problem in lookup
@note: variable numbers may change if variables are added or removed.
"""
try:
return Var(self, name)
except CDFException as e:
raise KeyError('{0}: {1}'.format(name, e))
def __setitem__(self, name, data):
"""Writes data to a zVariable in this CDF
If the zVariable does not exist, will create one matching
L{data}. If it does exist, will attempt to write L{data}
to it without changing the type or dimensions.
@param name: name or number of the variable to write
@type name: str or int
@param data: data to write, or a :py:class:`pycdf.Var` to copy
"""
if isinstance(data, Var):
self.clone(data, name)
elif name in self:
self[name][...] = data
if hasattr(data, 'attrs'):
self[name].attrs.clone(data.attrs)
else:
self.new(name, data)
    def __iter__(self, current = 0):
        """Iterates over zVars in CDF
        Iterators for dicts return keys
        @note: Returned in variable-number order
        """
        while current < self.__len__():
            name = self[current].name()
            #Generator send() protocol: a caller may send() a variable
            #name to jump the iteration to just after that variable.
            value = (yield name)
            if value is None:
                #Plain next(): advance to the following variable number.
                current += 1
            else:
                #Sent a name: resume after that variable's number.
                current = self[value]._num()
                current += 1
def __len__(self):
"""Implements 'length' of CDF (number of zVars)
@return: number of zVars in the CDF
@rtype: int
"""
count = ctypes.c_long(0)
self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count))
return count.value
def __contains__(self, key):
"""Determines whether a particular variable name is in the CDF
@note: Essentially an efficiency function; L{__iter__} is called
if this isn't defined
@param key: key/variable name to check
@type key: string
@return: True if L{key} is the name of a variable in CDF, else False
@rtype: Boolean
"""
try:
foo = self[key]
return True
except KeyError as e:
expected = str(key) + \
": NO_SUCH_VAR: Named variable not found in this CDF."
if expected in e.args:
return False
raise
def __repr__(self):
"""Returns representation of CDF
Cannot return anything that can be eval'd to create a copy of the
CDF, so just wrap the informal representation in angle brackets.
@return: all the data in this list of attributes
@rtype: str
"""
return '<CDF:\n' + str(self) + '\n>'
def __str__(self):
"""Returns a string representation of the CDF
This is an 'informal' representation in that it cannot be evaluated
directly to create a :py:class:`pycdf.CDF`, just the names, types, and sizes of all
variables. (Attributes are not listed.)
@return: description of the variables in the CDF
@rtype: str
"""
if self._opened:
return '\n'.join([key + ': ' + str(value)
for (key, value) in sorted(self.items())])
#can get away with this sort because second value in tuple isn't
#compared unless first are different, and variable name is unique.
else:
if isinstance(self.pathname, str):
return 'Closed CDF {0}'.format(self.pathname)
else:
return 'Closed CDF {0}'.format(self.pathname.decode('ascii'))
def _open(self, readonly=True):
"""Opens the CDF file (called on init)
Will open an existing CDF file read/write.
Raises
======
CDFError : if CDF library reports an error
CDFWarning : if CDF library reports a warning and interpreter
is set to error on warnings.
.. note:
Not intended for direct call; pass parameters to
:py:class:`pycdf.CDF` constructor.
"""
lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle))
self._opened = True
if readonly: #Default is RW
self.readonly(readonly)
def _create(self):
"""Creates (and opens) a new CDF file
Created at ``pathname``.
Assumes zero-dimension r variables
Raises
======
CDFError : if CDF library reports an error
CDFWarning : if CDF library reports a warning and interpreter
is set to error on warnings.
.. note:
Not intended for direct call; pass parameters to
:py:class:`pycdf.CDF` constructor.
"""
lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0),
(ctypes.c_long * 1)(0), ctypes.byref(self._handle))
self._opened = True
def _from_master(self, master_path):
"""Creates a new CDF from a master CDF file
``master_path`` is copied to ``pathname`` and opened.
Parameters
==========
master_path : string
location of the master CDF file
Raises
======
CDFError : if CDF library reports an error
CDFWarning : if CDF library reports a warning and interpreter
is set to error on warnings.
.. note:
Not intended for direct call; pass parameters to
:py:class:`pycdf.CDF` constructor.
"""
if os.path.exists(self.pathname):
raise CDFError(const.CDF_EXISTS)
shutil.copy2(master_path, self.pathname)
self._open(False)
def _call(self, *args, **kwargs):
"""Select this CDF as current and call the CDF internal interface
Adds call to select this CDF to L{args} and passes all parameters
directly through to the CDFlib routine of the CDF library's C internal
interface. Checks the return value with L{Library.check_status}.
Parameters
==========
args : various, see :py:mod:`ctypes`.
Passed directly to the CDF library interface. Useful
constants are defined in the :doc:`const <pycdf_const>`
module of this package.
Returns
=======
out : ctypes.c_long
CDF status from the library
.. note:
Terminal NULL_ is automatically added to ``args``.
Raises
======
CDFError : if CDF library reports an error
CDFWarning : if CDF library reports a warning and interpreter
is set to error on warnings.
"""
return lib.call(const.SELECT_, const.CDF_, self._handle,
*args, **kwargs)
def clone(self, zVar, name=None, data=True):
"""
Clone a zVariable (from another CDF or this) into this CDF
Parameters
==========
zVar : :py:class:`Var`
variable to clone
Other Parameters
================
name : str
Name of the new variable (default: name of the original)
data : boolean (optional)
Copy data, or only type, dimensions, variance, attributes?
(default: True, copy data as well)
Returns
=======
out : :py:class:`Var`
The newly-created zVar in this CDF
"""
if name is None:
name = zVar.name()
if name in self:
del self[name]
self.new(name, type=zVar.type(), recVary=zVar.rv(),
dimVarys=zVar.dv(), dims=zVar._dim_sizes(),
n_elements=zVar._nelems())
self[name].compress(*zVar.compress())
self[name].attrs.clone(zVar.attrs)
if data:
r = zVar._raw
zVar._raw = True
self.raw_var(name)[...] = zVar[...]
zVar._raw = r
return zVar
def col_major(self, new_col=None):
"""
Finds the majority of this CDF file
Other Parameters
================
new_col : boolean
Specify True to change to column-major, False to change to
row major, or do not specify to check the majority
rather than changing it.
(default is check only)
Returns
=======
out : boolean
True if column-major, false if row-major
"""
if new_col != None:
new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR
self._call(const.PUT_, const.CDF_MAJORITY_, new_maj)
maj = ctypes.c_long(0)
self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj))
if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value):
raise CDFError(const.BAD_MAJORITY)
return maj.value == const.COLUMN_MAJOR.value
def readonly(self, ro=None):
"""
Sets or check the readonly status of this CDF
If the CDF has been changed since opening, setting readonly mode
will have no effect.
.. note::
Closing a CDF that has been opened readonly, or setting readonly
False, may take a substantial amount of time if there are many
variables in the CDF, as a (potentially large) cache needs to
be cleared. Consider specifying ``readonly=False`` when opening
the file if this is an issue. However, this may make some reading
operations slower.
Other Parameters
================
ro : Boolean
True to set the CDF readonly, False to set it read/write,
or leave out to check only.
Returns
=======
out : Boolean
True if CDF is read-only, else False
Raises
======
CDFError : if bad mode is set
"""
if ro == True:
self._call(const.SELECT_, const.CDF_READONLY_MODE_,
const.READONLYon)
elif ro == False:
self._call(const.SELECT_, const.CDF_READONLY_MODE_,
const.READONLYoff)
mode = ctypes.c_long(0)
self._call(const.CONFIRM_, const.CDF_READONLY_MODE_,
ctypes.byref(mode))
if mode.value == const.READONLYon.value:
return True
elif mode.value == const.READONLYoff.value:
return False
else:
raise CDFError(const.BAD_READONLY_MODE.value)
def checksum(self, new_val=None):
"""
Set or check the checksum status of this CDF. If checksums
are enabled, the checksum will be verified every time the file
is opened.
Other Parameters
================
new_val : boolean
True to enable checksum, False to disable, or leave out
to simply check.
Returns
=======
out : boolean
True if the checksum is enabled or False if disabled
"""
if new_val != None:
self._call(const.PUT_, const.CDF_CHECKSUM_,
const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM)
chk = ctypes.c_long(0)
self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk))
if not chk.value in (const.MD5_CHECKSUM.value,
const.NO_CHECKSUM.value):
raise CDFError(const.BAD_CHECKSUM)
return chk.value == const.MD5_CHECKSUM.value
def close(self):
"""
Closes the CDF file
Although called on object destruction (:meth:`~CDF.__del__`),
to ensure all data are saved, the user should explicitly call
:meth:`~CDF.close` or :meth:`~CDF.save`.
Raises
======
CDFError : if CDF library reports an error
Warns
=====
CDFWarning : if CDF library reports a warning
"""
self._call(const.CLOSE_, const.CDF_)
self._opened = False
def compress(self, comptype=None, param=None):
"""
Set or check the compression of this CDF
Sets compression on entire *file*, not per-variable.
See section 2.6 of the CDF user's guide for more information on
compression.
Other Parameters
================
comptype : ctypes.c_long
type of compression to change to, see CDF C reference manual
section 4.10. Constants for this parameter are in
:mod:`~pycdf.const`. If not specified, will not change
compression.
param : ctypes.c_long
Compression parameter, see CDF CRM 4.10 and
:mod:`~pycdf.const`.
If not specified, will choose reasonable default (5 for gzip;
other types have only one possible parameter.)
Returns
=======
out : tuple
(comptype, param) currently in effect
See Also
========
:meth:`Var.compress`
Examples
========
Set file ``cdffile`` to gzip compression, compression level 9:
>>> cdffile.compress(pycdf.const.GZIP_COMPRESSION, 9)
"""
return _compress(self, comptype, param)
    def new(self, name, data=None, type=None, recVary=True, dimVarys=None,
            dims=None, n_elements=None, compress=None, compress_param=None):
        """
        Create a new zVariable in this CDF
        .. note::
            Either ``data`` or ``type`` must be specified. If type is not
            specified, it is guessed from ``data``.
        Parameters
        ==========
        name : str
            name of the new variable
        Other Parameters
        ================
        data
            data to store in the new variable. If this has a an ``attrs``
            attribute (e.g., :class:`~spacepy.datamodel.dmarray`), it
            will be used to populate attributes of the new variable.
        type : ctypes.c_long
            CDF type of the variable, from :mod:`~pycdf.const`.
            See section 2.5 of the CDF user's guide for more information on
            CDF data types.
        recVary : boolean
            record variance of the variable (default True)
        dimVarys : list of boolean
            dimension variance of each dimension, default True for all
            dimensions.
        dims : list of int
            size of each dimension of this variable, default zero-dimensional.
            Note this is the dimensionality as defined by CDF, i.e., for
            record-varying variables it excludes the leading record dimension.
            See :py:class:`Var`.
        n_elements : int
            number of elements, should be 1 except for CDF_CHAR,
            for which it's the length of the string.
        compress : ctypes.c_long
            Compression to apply to this variable, default None.
            See :py:meth:`Var.compress`.
        compress_param : ctypes.c_long
            Compression parameter if compression used; reasonable default
            is chosen. See :py:meth:`Var.compress`.
        Returns
        =======
        out : :py:class:`Var`
            the newly-created zVariable
        Raises
        ======
        ValueError : if neither data nor sufficient typing information
            is provided.
        Notes
        =====
        Any given data may be representable by a range of CDF types; if
        the type is not specified, pycdf will guess which
        the CDF types which can represent this data. This breaks down to:
        #. If input data is a numpy array, match the type of that array
        #. Proper kind (numerical, string, time)
        #. Proper range (stores highest and lowest number provided)
        #. Sufficient resolution (EPOCH16 required if datetime has
           microseconds or below.)
        If more than one value satisfies the requirements, types are returned
        in preferred order:
        #. Type that matches precision of data first, then
        #. integer type before float type, then
        #. Smallest type first, then
        #. signed type first, then
        #. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1)
        So for example, EPOCH_16 is preferred over EPOCH if ``data`` specifies
        below the millisecond level (rule 1), but otherwise EPOCH is preferred
        (rule 2).
        For floats, four-byte is preferred unless eight-byte is required:
        #. absolute values between 0 and 3e-39
        #. absolute values greater than 1.7e38
        This will switch to an eight-byte double in some cases where four bytes
        would be sufficient for IEEE 754 encoding, but where DEC formats would
        require eight.
        """
        #Reject explicitly-requested types that this file/library can't hold.
        #(This identity-based check only fires when the caller passed the
        #actual const objects; raw ints are re-checked by value below.)
        if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \
                and self.backward:
            raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 '
                             'in backward-compatible CDF')
        if not lib.supports_int8 and \
                type in (const.CDF_INT8, const.CDF_TIME_TT2000):
            raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0')
        if data is None:
            #No data: caller must fully specify the variable.
            if type is None:
                raise ValueError('Must provide either data or a CDF type.')
            if dims is None:
                dims = []
            if n_elements is None:
                n_elements = 1
        else:
            #Guess dims/type/elements from the data itself.
            (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data)
            if dims is None:
                if recVary:
                    if guess_dims == ():
                        raise ValueError(
                            'Record-varying data cannot be scalar. '
                            'Specify NRV with CDF.new() or put data in array.')
                    #First guessed dimension is the record dimension.
                    dims = guess_dims[1:]
                else:
                    dims = guess_dims
            if type is None:
                type = guess_types[0]
                #Downgrade EPOCH16 for backward-compatible files.
                if type == const.CDF_EPOCH16.value and self.backward:
                    type = const.CDF_EPOCH
            if n_elements is None:
                n_elements = guess_elements
        if dimVarys is None:
            #Default: all dimensions vary.
            dimVarys = [True for i in dims]
        recVary = const.VARY if recVary else const.NOVARY
        dimVarys = [const.VARY if dimVary else const.NOVARY
                    for dimVary in dimVarys]
        #Normalize to a ctypes constant so .value is always available.
        if not hasattr(type, 'value'):
            type = ctypes.c_long(type)
        #Value-based re-checks; these also catch raw-int type arguments
        #that slipped past the identity checks above.
        if type.value == const.CDF_INT8.value and not lib.supports_int8:
            raise ValueError(
                '64-bit integer support require CDF library 3.4.0')
        if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value,
                          const.CDF_TIME_TT2000.value) \
                and self.backward:
            raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; '
                             'incompatible with backward-compatible CDF')
        new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys)
        if compress != None:
            new_var.compress(compress, compress_param)
        if data is not None:
            new_var[...] = data
            if hasattr(data, 'attrs'):
                new_var.attrs.clone(data.attrs)
        return new_var
def raw_var(self, name):
"""
Get a "raw" :class:`Var` object.
Normally a :class:`Var` will perform translation of values for
certain types (to/from Unicode for CHAR variables on Py3k,
and to/from datetime for all time types). A "raw" object
does not perform this translation, on read or write.
This does *not* affect the data on disk, and in fact it
is possible to maintain multiple Python objects with access
to the same zVariable.
Parameters
==========
name : str
name or number of the zVariable
"""
v = self[name]
v._raw = True
return v
def save(self):
"""
Saves the CDF file but leaves it open.
If closing the CDF, :meth:`close` is sufficient;
there is no need to call
:meth:`save` before :meth:`close`.
.. note::
Relies on an undocumented call of the CDF C library, which is
also used in the Java interface.
Raises
======
CDFError : if CDF library reports an error
Warns
=====
CDFWarning : if CDF library reports a warning
"""
self._call(const.SAVE_, const.CDF_)
def copy(self):
"""
Make a copy of all data and attributes in this CDF
Returns
=======
out : :py:class:`CDFCopy`
:class:`~spacepy.datamodel.SpaceData`-like object of all data
"""
return CDFCopy(self)
def version(self):
"""
Get version of library that created this CDF
Returns
=======
out : tuple
version of CDF library, in form (version, release, increment)
"""
ver = ctypes.c_long(0)
rel = ctypes.c_long(0)
inc = ctypes.c_long(0)
self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver),
const.GET_, const.CDF_RELEASE_, ctypes.byref(rel),
const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc))
return (ver.value, rel.value, inc.value)
def _get_attrs(self):
"""Get attribute list
Provide access to the CDF's attribute list without holding a
strong reference, as the attribute list has a (strong)
back-reference to its parent.
Either deref a weak reference (to try and keep the object the same),
or make a new AttrList instance and assign it to the weak reference
for next time.
"""
al = self._attrlistref()
if al is None:
al = gAttrList(self)
self._attrlistref = weakref.ref(al)
return al
def _set_attrs(self, value):
"""Assign to the attribute list
Clears all elements of the attribute list and copies from value
"""
self.attrs.clone(value)
attrs = property(
_get_attrs, _set_attrs, None,
"""Global attributes for this CDF in a dict-like format.
See :class:`gAttrList` for details.
""")
def var_num(self, varname):
"""Get the variable number of a particular variable name
This maintains a cache of name-to-number mappings for zVariables
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
varname : bytes
name of the zVariable. Not this is NOT a string in Python 3!
Raises
======
CDFError : if variable is not found
Returns
=======
out : int
Variable number of this zvariable.
"""
num = self._var_nums.get(varname, None)
if num is None: #Copied from Var._get, which can hopefully be thinned
varNum = ctypes.c_long(0)
self._call(const.GET_, const.zVAR_NUMBER_, varname,
ctypes.byref(varNum))
num = varNum.value
self._var_nums[varname] = num
return num
def attr_num(self, attrname):
"""Get the attribute number and scope by attribute name
This maintains a cache of name-to-number mappings for attributes
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
attrname : bytes
name of the zVariable. Not this is NOT a string in Python 3!
Raises
======
CDFError : if variable is not found
Returns
=======
out : tuple
attribute number, scope (True for global) of this attribute
"""
res = self._attr_info.get(attrname, None)
if res is None: #Copied from Var._get, which can hopefully be thinned
attrNum = ctypes.c_long(0)
self._call(const.GET_, const.ATTR_NUMBER_, attrname,
ctypes.byref(attrNum))
scope = ctypes.c_long(0)
self._call(const.SELECT_, const.ATTR_, attrNum,
const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope))
if scope.value == const.GLOBAL_SCOPE.value:
scope = True
elif scope.value == const.VARIABLE_SCOPE.value:
scope = False
else:
raise CDFError(const.BAD_SCOPE)
res = (attrNum.value, scope)
self._attr_info[attrname] = res
return res
def clear_attr_from_cache(self, attrname):
"""Mark an attribute deleted in the name-to-number cache
Will remove an attribute, and all attributes with higher numbers,
from the attribute cache.
Does NOT delete the variable!
This maintains a cache of name-to-number mappings for attributes
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
attrname : bytes
name of the attribute. Not this is NOT a string in Python 3!
"""
num, scope = self.attr_num(attrname)
#All numbers higher than this are renumbered
for a, n in list(self._attr_info.items()):
if n[0] >= num:
del self._attr_info[a]
def clear_from_cache(self, varname):
"""Mark a variable deleted in the name-to-number cache
Will remove a variable, and all variables with higher numbers,
from the variable cache.
Does NOT delete the variable!
This maintains a cache of name-to-number mappings for zVariables
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
varname : bytes
name of the zVariable. Not this is NOT a string in Python 3!
"""
num = self.var_num(varname)
#All numbers higher than this are renumbered
for v, n in list(self._var_nums.items()):
if n >= num:
del self._var_nums[v]
def add_attr_to_cache(self, attrname, num, scope):
"""Add an attribute to the name-to-number cache
This maintains a cache of name-to-number mappings for attributes
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
varname : bytes
name of the zVariable. Not this is NOT a string in Python 3!
num : int
number of the variable
scope : bool
True if global scope; False if variable scope.
"""
self._attr_info[attrname] = (num, scope)
def add_to_cache(self, varname, num):
"""Add a variable to the name-to-number cache
This maintains a cache of name-to-number mappings for zVariables
to keep from having to query the CDF library constantly. It's mostly
an internal function.
Parameters
==========
varname : bytes
name of the zVariable. Not this is NOT a string in Python 3!
num : int
number of the variable
"""
self._var_nums[varname] = num
#Note there is no function for delete, currently handled in Var.rename
#and Attr.rename by just deleting from the dict directly. Maybe this
#should be differen (maybe should be possible to follow a variable across
#a rename...)
class Var(MutableSequence):
"""
A CDF variable.
This object does not directly store the data from the CDF; rather,
it provides access to the data in a format that much like a Python
list or numpy :class:`~numpy.ndarray`.
General list information is available in the python docs:
`1 <http://docs.python.org/tutorial/introduction.html#lists>`_,
`2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_,
`3 <http://docs.python.org/library/stdtypes.html#typesseq>`_.
The CDF user's guide, section 2.3, provides background on variables.
.. note::
Not intended to be created directly; use methods of :class:`CDF` to gain access to a variable.
A record-varying variable's data are viewed as a hypercube of dimensions
n_dims+1 (the extra dimension is the record number). They are indexed in
row-major fashion, i.e. the last index changes most frequently / is
contiguous in memory. If the CDF is column-major, the data are
transformed to row-major before return.
Non record-varying variables are similar, but do not have the extra
dimension of record number.
Variables can be subscripted by a multidimensional index to return the
data. Indices are in row-major order with the first dimension
representing the record number. If the CDF is column major,
the data are reordered to row major. Each dimension is specified
by standard Python
`slice <http://docs.python.org/tutorial/introduction.html#strings>`_
notation, with dimensions separated by commas. The ellipsis fills in
any missing dimensions with full slices. The returned data are
lists; Python represents multidimensional arrays as nested lists.
The innermost set of lists represents contiguous data.
.. note::
numpy 'fancy indexing' is *not* supported.
Degenerate dimensions are 'collapsed', i.e. no list of only one
element will be returned if a single subscript is specified
instead of a range. (To avoid this, specify a slice like 1:2,
which starts with 1 and ends before 2).
Two special cases:
1. requesting a single-dimension slice for a
record-varying variable will return all data for that
record number (or those record numbers) for that variable.
2. Requests for multi-dimensional variables may skip the record-number
dimension and simply specify the slice on the array itself. In that
case, the slice of the array will be returned for all records.
In the event of ambiguity (e.g., single-dimension slice on a one-dimensional
variable), case 1 takes priority.
Otherwise, mismatch between the number of dimensions specified in
the slice and the number of dimensions in the variable will cause
an :exc:`~exceptions.IndexError` to be thrown.
This all sounds very complicated but it is essentially attempting
to do the 'right thing' for a range of slices.
An unusual case is scalar (zero-dimensional) non-record-varying variables.
Clearly they cannot be subscripted normally. In this case, use the
``[...]`` syntax meaning 'access all data.':
>>> import pycdf
>>> testcdf = pycdf.CDF('test.cdf', '')
>>> variable = testcdf.new('variable', recVary=False,
... type=pycdf.const.CDF_INT4)
>>> variable[...] = 10
>>> variable
<Var:
CDF_INT4 [] NRV
>
>>> variable[...]
10
Reading any empty non-record-varying variable will return an empty
with the same *number* of dimensions, but all dimensions will be
of zero length. The scalar is, again, a special case: due to the
inability to have a numpy array which is both zero-dimensional and empty,
reading an NRV scalar variable with no data will return an empty
one-dimensional array. This is really not recommended.
As a list type, variables are also `iterable
<http://docs.python.org/tutorial/classes.html#iterators>`_; iterating
over a variable returns a single complete record at a time.
This is all clearer with examples. Consider a variable ``B_GSM``, with
three elements per record (x, y, z components) and fifty records in
the CDF. Then:
1. ``B_GSM[0, 1]`` is the y component of the first record.
2. ``B_GSM[10, :]`` is a three-element list, containing x, y, and z
components of the 11th record. As a shortcut, if only one dimension
is specified, it is assumed to be the record number, so this
could also be written ``B_GSM[10]``.
3. ``B_GSM[...]`` reads all data for ``B_GSM`` and returns it as a
fifty-element list, each element itself being a three-element
list of x, y, z components.
Multidimensional example: consider fluxes stored as a function of
pitch angle and energy. Such a variable may be called Flux and
stored as a two-dimensional array, with the first dimension
representing (say) ten energy steps and the second, eighteen
pitch angle bins (ten degrees wide, centered from 5 to 175 degrees).
Assume 100 records stored in the CDF (i.e. 100 different times).
1. ``Flux[4]`` is a list of ten elements, one per energy step,
each element being a list of 18 fluxes, one per pitch bin.
All are taken from the fifth record in the CDF.
2. ``Flux[4, :, 0:4]`` is the same record, all energies, but
only the first four pitch bins (roughly, field-aligned).
3. ``Flux[..., 0:4]`` is a 100-element list (one per record),
each element being a ten-element list (one per energy step),
each containing fluxes for the first four pitch bins.
This slicing notation is very flexible and allows reading
specifically the desired data from the CDF.
All data are, on read, converted to appropriate Python data
types; EPOCH, EPOCH16, and TIME_TT2000 types are converted to
:class:`~datetime.datetime`. Data are returned in numpy arrays.
.. note::
Although pycdf supports TIME_TT2000 variables, the Python
:class:`~datetime.datetime` object does not support leap
seconds. Thus, on read, any seconds past 59 are truncated
to 59.999999 (59 seconds, 999 milliseconds, 999 microseconds).
Potentially useful list methods and related functions:
- `count <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_
- `in <http://docs.python.org/reference/expressions.html#in>`_
- `index <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_
- `len <http://docs.python.org/library/functions.html#len>`_
- `list comprehensions
<http://docs.python.org/tutorial/datastructures.html#list-comprehensions>`_
- `sorted <http://docs.python.org/library/functions.html#sorted>`_
The topic of array majority can be very confusing; good background material
is available at `IDL Array Storage and Indexing
<http://www.idlcoyote.com/misc_tips/colrow_major.html>`_. In brief,
*regardless of the majority stored in the CDF*, pycdf will always present
the data in the native Python majority, row-major order, also known as
C order. This is the default order in `NumPy
<http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html
#internal-memory-layout-of-an-ndarray>`_.
However, packages that render image data may expect it in column-major
order. If the axes seem 'swapped' this is likely the reason.
The :attr:`~Var.attrs` Python attribute acts as a dictionary referencing
zAttributes (do not confuse the two); all the dictionary methods above
also work on the attribute dictionary. See :class:`zAttrList` for more on
the dictionary of attributes.
With writing, as with reading, every attempt has been made to match the
behavior of Python lists. You can write one record, many records, or even
certain elements of all records. There is one restriction: only the record
dimension (i.e. dimension 0) can be resized by write, as all records
in a variable must have the same dimensions. Similarly, only whole
records can be deleted.
.. note::
Unusual error messages on writing data usually mean that pycdf is
unable to interpret the data as a regular array of a single type
matching the type and shape of the variable being written.
A 5x4 array is supported; an irregular array where one row has
five columns and a different row has six columns is not. Error messages
of this type include:
- ``Data must be well-formed, regular array of number, string, or datetime``
- ``setting an array element with a sequence.``
- ``shape mismatch: objects cannot be broadcast to a
single shape``
For these examples, assume Flux has 100 records and dimensions [2, 3].
Rewrite the first record without changing the rest:
>>> Flux[0] = [[1, 2, 3], [4, 5, 6]]
Writes a new first record and delete all the rest:
>>> Flux[...] = [[1, 2, 3], [4, 5, 6]]
Write a new record in the last position and add a new record after:
>>> Flux[99:] = [[[1, 2, 3], [4, 5, 6]],
... [[11, 12, 13], [14, 15, 16]]]
Insert two new records between the current number 5 and 6:
>>> Flux[5:6] = [[[1, 2, 3], [4, 5, 6]], [[11, 12, 13],
... [14, 15, 16]]]
This operation can be quite slow, as it requires reading and
rewriting the entire variable. (CDF does not directly support
record insertion.)
Change the first element of the first two records but leave other
elements alone:
>>> Flux[0:2, 0, 0] = [1, 2]
Remove the first record:
>>> del Flux[0]
Removes record 5 (the sixth):
>>> del Flux[5]
Due to the need to work around a bug in the CDF library, this operation
can be quite slow.
Delete *all data* from ``Flux``, but leave the variable definition intact:
>>> del Flux[...]
.. note::
Although this interface only directly supports zVariables, zMode is
set on opening the CDF so rVars appear as zVars. See p.24 of the
CDF user's guide; pyCDF uses zMode 2.
.. autosummary::
~Var.attrs
~Var.compress
~Var.copy
~Var.dtype
~Var.dv
~Var.insert
~Var.name
~Var.rename
~Var.rv
~Var.shape
~Var.type
.. attribute:: Var.attrs
zAttributes for this zVariable in a dict-like format.
See :class:`zAttrList` for details.
.. automethod:: compress
.. automethod:: copy
.. autoattribute:: dtype
.. automethod:: dv
.. automethod:: insert
.. automethod:: name
.. automethod:: rename
.. automethod:: rv
.. autoattribute:: shape
.. automethod:: type
"""
    def __init__(self, cdf_file, var_name, *args):
        """Create or locate a variable

        Parameters
        ==========
        cdf_file : :py:class:`pycdf.CDF`
            CDF file containing this variable
        var_name : string
            name of this variable

        Other Parameters
        ================
        args
            additional arguments passed to :py:meth:`_create`. If none,
            opens an existing variable. If provided, creates a
            new one.

        Raises
        ======
        CDFError
            if CDF library reports an error

        Warns
        =====
        CDFWarning
            if CDF library reports a warning
        """
        self.cdf_file = cdf_file
        #This is the definitive "identity" of the variable; lookups go
        #through the name (numbers can change when variables are deleted)
        self._name = None
        self._type = None #CDF type (long); cached, see type()
        self._raw = False #Raw access (skip all conversions)
        #No extra args: open existing variable; otherwise create new
        if len(args) == 0:
            self._get(var_name)
        else:
            self._create(var_name, *args)
        #Weak reference to attribute list (use attrs instead)
        #This avoids a reference loop
        self._attrlistref = weakref.ref(zAttrList(self))
    def __getitem__(self, key):
        """Returns a slice from the data array. Details under :py:class:`pycdf.Var`.

        @return: The data from this variable
        @rtype: list-of-lists of appropriate type.
        @raise IndexError: if L{key} is out of range, mismatches dimensions,
                           or simply unparseable.
        @raise CDFError: for errors from the CDF library
        """
        hslice = _Hyperslice(self, key)
        #Hyperslice mostly catches this sort of thing, but
        #an empty variable is a special case, since we might want to
        #WRITE to 0th record (which Hyperslice also supports) but
        #can't READ from it, and iterating over tries to read from it.
        if hslice.rv:
            #RV variable with no records: explicit request for record 0
            #(degenerate index, start 0) cannot be satisfied on read
            if hslice.dimsizes[0] == 0 and hslice.degen[0] and \
               hslice.starts[0] == 0:
                raise IndexError('record index out of range')
        #For NRV, again hslice will assume 0th record exists since we might
        #want to write. So ANY degenerate dim other than the glued-on 0th
        #suggests an explicit index that should fail. None degenerate suggests
        #make an empty array.
        #Note this is pulling a lot of hyperslice stuff into getitem!
        elif hslice.dimsizes[0] == 0:
            if len(hslice.degen) > 1 and max(hslice.degen[1:]):
                raise IndexError('record index out of range')
            else:
                #The zero-length dimension is degenerate so it gets chopped,
                #and you can't have a zero-length numpy array that still
                #maintains the size of all other dimensions. So just force
                #a zero-dim array and the rest will follow
                hslice.counts[...] = 0
                #If this is a scalar, need to make a single non-degenerate
                #dimension so it can be empty.
                if len(hslice.counts) == 1:
                    hslice.degen[0] = False
        result = hslice.create_array()
        #Only ask the CDF library for data if there is any to read
        if hslice.counts[0] != 0:
            hslice.select()
            lib.call(const.GET_, const.zVAR_HYPERDATA_,
                     result.ctypes.data_as(ctypes.c_void_p))
        #Fix majority/reversed dims and convert epochs to datetime
        return hslice.convert_input_array(result)
    def __delitem__(self, key):
        """Removes a record (or set of records) from the CDF

        Only whole records can be deleted, so the del call must either specify
        only one dimension or it must specify all elements of the non-record
        dimensions. This is *not* a way to resize a variable!

        Deleting records from the middle of a variable may be very slow in
        some circumstances. To work around a bug in CDF library versions
        3.4.0 and before, all the data must be read in, the requested deletions
        done, and then all written back out.

        @param key: index or slice to delete
        @type key: int or slice
        @raise TypeError: if an attempt is made to delete from a non
                          record-varying variable, or to delete below
                          the record level
        """
        if not self.rv():
            raise TypeError('Cannot delete records from non-record-varying '
                            'variable.')
        hslice = _Hyperslice(self, key)
        #Any non-record dimension not fully covered means a partial record
        if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any():
            raise TypeError('Can only delete entire records.')
        if hslice.counts[0] == 0:
            return
        start = hslice.starts[0]
        count = hslice.counts[0]
        interval = hslice.intervals[0]
        dimsize = hslice.dimsizes[0]
        self._call()
        dangerous_delete = False
        if lib._del_middle_rec_bug and \
           (interval != 1 or (start != 0 and start + count < dimsize)):
            #delete from middle is dangerous if only have one index entry
            entries = ctypes.c_long(0)
            lib.call(const.GET_, const.zVAR_nINDEXENTRIES_,
                     ctypes.byref(entries))
            dangerous_delete = (entries.value == 1)
        if dangerous_delete:
            #Library bug workaround: read everything, delete in numpy,
            #rewrite the survivors, then truncate the now-unused tail
            data = self[...]
            data = numpy.delete(
                data,
                numpy.arange(start, start + count * interval, interval),
                0)
            self[0:dimsize - count] = data
            first_rec = dimsize - count
            last_rec = dimsize - 1
            lib.call(const.DELETE_, const.zVAR_RECORDS_,
                     ctypes.c_long(first_rec), ctypes.c_long(last_rec))
        elif interval == 1:
            #Contiguous range: a single library delete call suffices
            first_rec = ctypes.c_long(start)
            last_rec = ctypes.c_long(start + count - 1)
            lib.call(const.DELETE_, const.zVAR_RECORDS_,
                     first_rec, last_rec)
        else:
            self._call()
            #delete from end to avoid renumbering of records
            for recno in range(start + (count - 1) * interval,
                               start - 1, -1 * interval):
                lib.call(const.DELETE_, const.zVAR_RECORDS_,
                         ctypes.c_long(recno), ctypes.c_long(recno))
    def __setitem__(self, key, data):
        """Puts a slice into the data array. Details under :py:class:`pycdf.Var`.

        @param key: index or slice to store
        @type key: int or slice
        @param data: data to store
        @type data: numpy.array
        @raise IndexError: if L{key} is out of range, mismatches dimensions,
                           or simply unparseable. IndexError will
        @raise CDFError: for errors from the CDF library
        """
        hslice = _Hyperslice(self, key)
        n_recs = hslice.counts[0]
        #Grow the record dimension of the slice to cover all input data
        hslice.expand(data)
        cdf_type = self.type()
        #Epoch types: convert datetimes to raw numeric form unless raw mode;
        #AttributeError means data was already numeric, so pass through
        if cdf_type == const.CDF_EPOCH16.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_epoch16(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.float64)
        elif cdf_type == const.CDF_EPOCH.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_epoch(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.float64)
        elif cdf_type == const.CDF_TIME_TT2000.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_tt2000(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.int64)
        else:
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=self._np_type())
        #EPOCH16 carries a trailing length-2 dimension not part of the slice
        if cdf_type == const.CDF_EPOCH16.value:
            datashape = data.shape[:-1]
        else:
            datashape = data.shape
        #Check data sizes
        if datashape != tuple(hslice.expected_dims()):
            raise ValueError('attempt to assign data of dimensions ' +
                             str(datashape) + ' to slice of dimensions ' +
                             str(tuple(hslice.expected_dims())))
        #Flip majority and reversed dimensions, see convert_input_array
        data = hslice.convert_output_array(data)
        #Handle insertions and similar weirdness
        if hslice.counts[0] > n_recs and \
           hslice.starts[0] + n_recs < hslice.dimsizes[0]:
            #Specified slice ends before last record, so insert in middle
            saved_data = self[hslice.starts[0] + n_recs:]
        if hslice.counts[0] > 0:
            hslice.select()
            lib.call(const.PUT_, const.zVAR_HYPERDATA_,
                     data.ctypes.data_as(ctypes.c_void_p))
        if hslice.counts[0] < n_recs:
            #Fewer records written than the slice covered: truncate the rest
            first_rec = hslice.starts[0] + hslice.counts[0]
            last_rec = hslice.dimsizes[0] - 1
            lib.call(const.DELETE_, const.zVAR_RECORDS_,
                     ctypes.c_long(first_rec), ctypes.c_long(last_rec))
        elif hslice.counts[0] > n_recs and \
             hslice.starts[0] + n_recs < hslice.dimsizes[0]:
            #Put saved data in after inserted data
            self[hslice.starts[0] + hslice.counts[0]:] = saved_data
def extend(self, data):
"""
Append multiple values to the end of this variable
This is an efficiency function which overrides the base implementation
in MutableSequence.
Parameters
----------
data :
the data to append
"""
self[len(self):] = data
def insert(self, index, data):
"""
Inserts a *single* record before an index
Parameters
----------
index : int
index before which to insert the new record
data :
the record to insert
"""
self[index:index] = [data]
    def _create(self, var_name, datatype, n_elements = 1, dims = (),
                recVary = const.VARY, dimVarys = None):
        """Creates a new zVariable

        @param var_name: name of this variable
        @type var_name: string
        @param datatype: CDF data type
        @type datatype: ctypes.c_long
        @param n_elements: number of elements (should be 1 except for
                           CDF_CHAR variables).
        @type n_elements: long
        @param dims: size of each dimension for multi-dimensional variable,
                     or empty for a zero-dimensional
        @type dims: sequence of long
        @param recVary: record variance for this variable (VARY/NOVARY)
        @type recVary: long
        @param dimVarys: array of VARY or NOVARY, variance for each dimension
        @type dimVarys: sequence of long
        @return: new variable with this name
        @rtype: :py:class:`pycdf.Var`
        @raise CDFError: if CDF library reports an error
        @raise CDFWarning: if CDF library reports a warning and interpreter
                           is set to error on warnings.
        @note: Not intended to be used directly; use L{CDF.new}.
        """
        dim_array = (ctypes.c_long * len(dims))(*dims)
        enc_name = var_name.encode('ascii')
        if dimVarys is None:
            #NOTE(review): only element 0 of this array is initialized to
            #VARY; ctypes zero-fills the rest (i.e. NOVARY) for dims > 1.
            #Confirm this matches the intended "all dimensions vary" default.
            dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY)
        else:
            dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys)
        varNum = ctypes.c_long(0)
        self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype,
                            ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array,
                            recVary, dim_vary_array, ctypes.byref(varNum))
        self._name = enc_name
        #Record the new variable's number so later lookups skip the library
        self.cdf_file.add_to_cache(enc_name, varNum.value)
    def _delete(self):
        """Removes this zVariable from the CDF

        @raise CDFError: if CDF library reports an error
        @raise CDFWarning: if CDF library reports a warning and interpreter
                           is set to error on warnings.
        """
        self._call(const.DELETE_, const.zVAR_)
        #Deletion renumbers later variables, so flush cache entries at or
        #above this variable's number
        self.cdf_file.clear_from_cache(self._name)
        self._name = None
    def _get(self, var_name):
        """Gets an existing zVariable

        @param var_name: name of this variable
        @type var_name: string
        @return: variable with this name
        @rtype: :py:class:`pycdf.Var`
        @raise CDFError: if CDF library reports an error
        @raise CDFWarning: if CDF library reports a warning and interpreter
                           is set to error on warnings.
        @note: Not intended to be used directly; use L{CDF.__getitem__}.
        """
        if isinstance(var_name, str_classes):
            try:
                enc_name = var_name.encode('ascii').rstrip()
            except AttributeError:
                enc_name = var_name.rstrip() #already in ASCII
            #'touch' CDF to cause an error if the name isn't there; get number
            varNum = ctypes.c_long(0)
            self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum))
            self._name = enc_name
            self.cdf_file.add_to_cache(enc_name, varNum.value)
        else: #Looking up by number
            name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1)
            self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name),
                                const.GET_, const.zVAR_NAME_, name)
            self._name = name.value.rstrip()
            self.cdf_file.add_to_cache(self._name, var_name)
def _num(self):
"""Returns the zVar number for this variable
@return: number of this zVar
@rtype: int
"""
return self.cdf_file.var_num(self._name)
    def __len__(self):
        """Get number of records for this variable in this file

        @return: Number of records
        @rtype: long
        @raise CDFError: if CDF library reports an error
        @raise CDFWarning: if CDF library reports a warning and interpreter
                           is set to error on warnings.
        """
        count = ctypes.c_long(0)
        self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count))
        #MAXREC is the highest 0-based record number, so add one for count
        return (count.value + 1)
def __repr__(self):
"""Returns representation of the variable
Cannot return anything that can be eval'd to create a copy,
so just wrap the informal representation in angle brackets.
@return: info on this zVar
@rtype: str
"""
return '<Var:\n' + str(self) + '\n>'
    def __str__(self):
        """Returns a string representation of the variable

        This is an 'informal' representation in that it cannot be evaluated
        directly to create a :py:class:`pycdf.Var`.

        @return: info on this zVar, CDFTYPE [dimensions] NRV
                 (if not record-varying)
        @rtype: str
        """
        if self.cdf_file._opened:
            cdftype = self.type()
            chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value)
            rv = self.rv()
            #String types show element count, e.g. CDF_CHAR*20
            typestr = lib.cdftypenames[cdftype] + \
                      ('*' + str(self._nelems()) if cdftype in chartypes else '' )
            #RV variables include the record count as the first dimension
            if rv:
                sizestr = str([len(self)] + self._dim_sizes())
            else:
                sizestr = str(self._dim_sizes())
            return typestr + ' ' + sizestr + ('' if rv else ' NRV')
        else:
            #Closed file: can't query the library, report name only.
            #_name may be str or bytes depending on how it was set.
            if isinstance(self._name, str):
                return 'zVar "{0}" in closed CDF {1}'.format(
                    self._name, self.cdf_file.pathname)
            else:
                return 'zVar "{0}" in closed CDF {1}'.format(
                    self._name.decode('ascii'),
                    self.cdf_file.pathname.decode('ascii'))
    def _n_dims(self):
        """Get number of dimensions for this variable

        @return: the number of dimensions
        @rtype: long
        """
        n_dims = ctypes.c_long(0)
        self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims))
        return n_dims.value
    def _dim_sizes(self):
        """Get the dimension sizes for this variable

        @return: sequence of sizes
        @rtype: sequence of long
        @note: This will always be in Python order (i.e. row major, last index
               iterates most quickly), *regardless* of the majority of the CDF.
        """
        #Library fills a fixed-size buffer; only the first n_dims are valid
        sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0)
        self._call(const.GET_, const.zVAR_DIMSIZES_, sizes)
        sizes = sizes[0:self._n_dims()]
        return sizes
def rv(self, new_rv=None):
"""
Gets or sets whether this variable has record variance
If the variance is unknown, True is assumed
(this replicates the apparent behavior of the CDF library on
variable creation).
Other Parameters
================
new_rv : boolean
True to change to record variance, False to change to NRV,
unspecified to simply check variance.
Returns
=======
out : Boolean
True if record varying, False if NRV
"""
if new_rv != None:
self._call(const.PUT_, const.zVAR_RECVARY_,
const.VARY if new_rv else const.NOVARY)
vary = ctypes.c_long(0)
self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary))
return vary.value != const.NOVARY.value
def dv(self, new_dv=None):
"""
Gets or sets dimension variance of each dimension of variable.
If the variance is unknown, True is assumed
(this replicates the apparent behavior of the
CDF library on variable creation).
Parameters
==========
new_dv : list of boolean
Each element True to change that dimension to dimension
variance, False to change to not dimension variance.
(Unspecified to simply check variance.)
Returns
=======
out : list of boolean
True if that dimension has variance, else false.
"""
ndims = self._n_dims()
if new_dv != None:
if len(new_dv) != ndims:
raise ValueError('Must specify variance for ' +
str(ndims) + 'dimensions.')
varies = (ctypes.c_long * ndims)(
*[const.VARY if dv else const.NOVARY for dv in new_dv])
self._call(const.PUT_, const.zVAR_DIMVARYS_,
varies)
if ndims == 0:
return []
varies = (ctypes.c_long * const.CDF_MAX_DIMS)()
self._call(const.GET_, const.zVAR_DIMVARYS_, varies)
return [dv != const.NOVARY.value for dv in varies[0:ndims]]
    def _call(self, *args, **kwargs):
        """Select this CDF and variable and call the CDF internal interface

        Adds call to select this CDF to L{args} and passes all parameters
        directly through to the CDFlib routine of the CDF library's C internal
        interface. Checks the return value with L{Library.check_status}.

        @param args: Passed directly to the CDF library interface. Useful
                     constants are defined in the :py:mod:`pycdf.const` module of this package.
        @type args: various, see :py:mod:`ctypes`.
        @return: CDF status from the library
        @rtype: ctypes.c_long
        @note: Terminal NULL_ is automatically added to L{args}.
        @raise CDFError: if CDF library reports an error
        @raise CDFWarning: if CDF library reports a warning and interpreter
                           is set to error on warnings.
        """
        #Look up the number fresh each call: deletions elsewhere may have
        #renumbered this variable (var_num consults the cache)
        return self.cdf_file._call(
            const.SELECT_, const.zVAR_,
            ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs)
    def _np_type(self):
        """Returns the numpy type of this variable

        This is the numpy type that will come directly out of the CDF;
        see :meth:`dtype` for the representation post-conversion.

        Raises
        ======
        CDFError : for library-reported error or failure to find numpy type

        Returns
        =======
        out : dtype
            numpy dtype that will hold value from this variable
        """
        cdftype = self.type()
        #String types map to fixed-width bytes, sized by element count
        if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value:
            return numpy.dtype('S' + str(self._nelems()))
        try:
            return lib.numpytypedict[cdftype]
        except KeyError:
            #No numpy equivalent registered for this CDF type
            raise CDFError(const.BAD_DATA_TYPE)
def type(self, new_type=None):
"""
Returns or sets the CDF type of this variable
Parameters
==========
new_type : ctypes.c_long
the new type from :mod:`~pycdf.const`
Returns
=======
out : int
CDF type
"""
if new_type != None:
if not hasattr(new_type, 'value'):
new_type = ctypes.c_long(new_type)
n_elements = ctypes.c_long(self._nelems())
self._call(const.PUT_, const.zVAR_DATASPEC_,
new_type, n_elements)
self._type = None
if self._type is None:
cdftype = ctypes.c_long(0)
self._call(const.GET_, const.zVAR_DATATYPE_,
ctypes.byref(cdftype))
self._type = cdftype.value
return self._type
    def _nelems(self):
        """Number of elements for each value in this variable

        This is the length of strings for CHAR and UCHAR,
        should be 1 otherwise.

        @return: length of strings
        @rtype: int
        """
        nelems = ctypes.c_long(0)
        self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems))
        return nelems.value
def name(self):
"""
Returns the name of this variable
Returns
=======
out : str
variable's name
"""
if isinstance(self._name, str):
return self._name
elif isinstance(self._name, bytes):
return self._name.decode()
    def compress(self, comptype=None, param=None):
        """
        Set or check the compression of this variable

        Compression may not be changeable on variables with data already
        written; even deleting the data may not permit the change.

        See section 2.6 of the CDF user's guide for more information on
        compression.

        Other Parameters
        ================
        comptype : ctypes.c_long
            type of compression to change to, see CDF C reference
            manual section 4.10. Constants for this parameter
            are in :mod:`~pycdf.const`. If not specified, will not
            change compression.
        param : ctypes.c_long
            Compression parameter, see CDF CRM 4.10 and
            :mod:`~pycdf.const`.
            If not specified, will choose reasonable default (5 for
            gzip; other types have only one possible parameter.)

        Returns
        =======
        out : tuple
            the (comptype, param) currently in effect
        """
        #Shared helper handles both CDF-level and zVar-level compression
        return _compress(self, comptype, param)
    def copy(self):
        """
        Copies all data and attributes from this variable

        Returns
        =======
        out : :class:`VarCopy`
            list of all data in record order
        """
        return VarCopy(self)
    def rename(self, new_name):
        """
        Renames this variable

        Parameters
        ==========
        new_name : str
            the new name for this variable

        Raises
        ======
        CDFError
            if the encoded name exceeds the CDF name-length limit
        """
        try:
            enc_name = new_name.encode('ascii')
        except AttributeError:
            enc_name = new_name #already bytes
        if len(enc_name) > const.CDF_VAR_NAME_LEN256:
            raise CDFError(const.BAD_VAR_NAME)
        self._call(const.PUT_, const.zVAR_NAME_, enc_name)
        #Cache the new name under the same number, then drop the old entry
        self.cdf_file.add_to_cache(
            enc_name,
            self.cdf_file.var_num(self._name)) #Still in cache
        del self.cdf_file._var_nums[self._name]
        self._name = enc_name
    @property
    def shape(self):
        """
        Provides the numpy array-like shape of this variable.

        Returns a tuple; first element is number of records (RV variable
        only) And the rest provide the dimensionality of the variable.

        .. note::
            Assigning to this attribute will not change the shape.
        """
        #RV variables prepend the record count to the dimension sizes
        if self.rv():
            return tuple([len(self)] + self._dim_sizes())
        else:
            return tuple(self._dim_sizes())
    @property
    def dtype(self):
        """
        Provide the numpy dtype equivalent to the CDF type of this variable.

        Data from this variable will be returned in numpy arrays of this type.

        See Also
        --------
        type
        """
        cdftype = self.type()
        #Py3 non-raw mode: strings come back as unicode, not bytes
        if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \
           str is not bytes and not self._raw:
            return numpy.dtype('U' + str(self._nelems()))
        #Epoch types convert to datetime objects unless raw mode
        if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value,
                       const.CDF_TIME_TT2000.value) and not self._raw:
            return numpy.dtype('O')
        return self._np_type()
    def _get_attrs(self):
        """Get attribute list

        Provide access to the zVar's attribute list without holding a
        strong reference, as the attribute list has a (strong)
        back-reference to its parent.

        Either deref a weak reference (to try and keep the object the same),
        or make a new AttrList instance and assign it to the weak reference
        for next time.
        """
        al = self._attrlistref()
        #Weakref target may have been collected; rebuild if so
        if al is None:
            al = zAttrList(self)
        self._attrlistref = weakref.ref(al)
        return al
    def _set_attrs(self, value):
        """Assign to the attribute list

        Clears all elements of the attribute list and copies from value
        """
        self.attrs.clone(value)
    #Property so attribute access goes through the weakref machinery above
    attrs = property(
        _get_attrs, _set_attrs, None,
        """zAttributes for this zVariable in a dict-like format.
        See :class:`zAttrList` for details.
        """)
class _Hyperslice(object):
"""Represents a CDF 'slice' used for the hyper CDF functions
For internal module use only.
@ivar dims: number of dimensions to this slice, usually
number of dimensions to the variable plus one
for the record, which represents the 0th
(least rapidly varying) dimension.
@type dims: int
@ivar dimsizes: size of each dimension (0th is number of records)
@type dimsizes: list of int
@ivar starts: index of the start value for each dimension
('dimension indices' in CDF speak)
@type starts: list of int
@ivar counts: number of values to get from each dimension.
Final result will be the product of everything
in counts.
('dimension counts' in CDF speak)
@type counts: numpy.array
@ivar intervals: interval between successive indices
to use for each dimension.
('dimension invervals' in CDF speak)
@type intervals: list of int
@ivar degen: is this dimension degenerate, i.e. should be
removed in the returned dataset. A 3D array
with one dimension degenerate will be returned
as a 2D array (i.e. list-of-lists.)
@type degen: numpy.array
@ivar rev: should this dimension be returned in reverse order?
@type rev: numpy.array
@ivar column: is this slice in column-major mode (if false, row-major)
@type column: boolean
@ivar zvar: what CDF variable this object slices on
@type zvar: :py:class:`pycdf.Var`
@ivar expanded_key: fully-expanded version of the key passed to the
constructor (all dimensions filled in)
@type expanded_key: tuple
@note: All dimension-related variables are stored row-major
(Python order)
"""
def __init__(self, zvar, key):
"""Create a Hyperslice
@param zvar: zVariable that this slices
@type zvar: :py:class:`pycdf.Var`
@param key: Python multi-dimensional slice as passed to
__getitem__
@type key: tuple of slice and/or int
@raise IndexError: if slice is out of range, mismatches dimensions, or
otherwise unparsable.
@raise ValueError: if slice has invalid values
"""
self.zvar = zvar
self.rv = self.zvar.rv()
#dim of records, + 1 record dim (NRV always is record 0)
self.dims = zvar._n_dims() + 1
self.dimsizes = [len(zvar)] + \
zvar._dim_sizes()
self.starts = [0] * self.dims
self.counts = numpy.empty((self.dims,), dtype=numpy.int32)
self.counts.fill(1)
self.intervals = [1] * self.dims
self.degen = numpy.zeros(self.dims, dtype=numpy.bool)
self.rev = numpy.zeros(self.dims, dtype=numpy.bool)
#key is:
#1. a single value (integer or slice object) if called 1D
#2. a tuple (of integers and/or slice objects) if called nD
#3. Each item is either a single value (degenerate dim)
# or a slice object.
if not hasattr(key, '__len__'): #Not a container object, pack in tuple
key = (key, )
if not self.rv:
key = (0, ) + key #NRV, so always get 0th record (degenerate)
key = self.expand_ellipsis(key, self.dims)
if self.rv: #special-cases for RV variables
if len(key) == 1: #get all data for this record(s)
key = self.expand_ellipsis(key + (Ellipsis, ), self.dims)
elif len(key) == self.dims - 1: #get same slice from each record
key = (slice(None, None, None), ) + key
if len(key) == self.dims:
self.expanded_key = key
for i in range(self.dims):
idx = key[i]
if hasattr(idx, 'start'): #slice
(self.starts[i], self.counts[i],
self.intervals[i], self.rev[i]) = \
self.convert_range(idx.start, idx.stop,
idx.step, self.dimsizes[i])
else: #Single degenerate value
if idx < 0:
idx += self.dimsizes[i]
if idx != 0 and (idx >= self.dimsizes[i] or idx < 0):
raise IndexError('list index out of range')
self.starts[i] = idx
self.degen[i] = True
else:
raise IndexError('Slice does not match dimensions for zVar ' +
str(zvar._name))
self.column = zvar.cdf_file.col_major()
def expected_dims(self, data=None):
"""Calculate size of non-degenerate dimensions
Figures out size, in each dimension, of expected input data
@return: size of each dimension for this slice, excluding degenerate
@rtype: list of int
"""
return [self.counts[i] for i in range(self.dims) if not self.degen[i]]
def expand(self, data):
"""Expands the record dimension of this slice to hold a set of data
If the length of data (outermost dimension) is larger than the record
count (counts[0]) for this slice, expand the slice to hold all the data.
This requires that the record dimension of the slice not be degenerate,
and also that it not have been completely specified when the hyperslice
was created (i.e. record dimension either ellipsis or no specified
stop.)
Does *not* expand any other dimension, since that's Very Hard in CDF.
@param data: the data which are intended to be stored in this slice
@type data: list
"""
rec_slice = self.expanded_key[0]
if not self.rv or isinstance(data, str_classes) or self.degen[0] or \
not hasattr(rec_slice, 'stop'):
return
if len(data) < self.counts[0]: #Truncate to fit data
if rec_slice.stop is None and rec_slice.step in (None, 1):
self.counts[0] = len(data)
elif len(data) > self.counts[0]: #Expand to fit data
if rec_slice.step in (None, 1):
self.counts[0] = len(data)
def create_array(self):
"""Creates a numpy array to hold the data from this slice
Returns
=======
out : numpy.array
array sized, typed, and dimensioned to hold data from
this slice
"""
counts = self.counts
degen = self.degen
if self.column:
counts = self.reorder(counts)
degen = self.reorder(degen)
#TODO: Forcing C order for now, revert to using self.column later
array = numpy.empty(
[counts[i] for i in range(len(counts)) if not degen[i]],
self.zvar._np_type(), order='C')
return numpy.require(array, requirements=('C', 'A', 'W'))
    def convert_input_array(self, buffer):
        """Converts a buffer of raw data from this slice

        EPOCH(16) variables always need to be converted.
        CHAR need converted to Unicode if py3k

        Parameters
        ==========
        buffer : numpy.array
            data as read from the CDF file

        Returns
        =======
        out : numpy.array
            converted data
        """
        #Handle majority flip / dimension reversal (shared with output path)
        result = self._flip_array(buffer)
        #Convert to derived types
        cdftype = self.zvar.type()
        #Raw variables skip all conversion and return the on-disk values
        if not self.zvar._raw:
            #CHAR/UCHAR: on Python 3 (str != bytes) decode bytes to unicode
            if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \
                    str != bytes:
                dt = numpy.dtype('U{0}'.format(result.dtype.itemsize))
                result = numpy.require(numpy.char.array(result).decode(),
                                       dtype=dt)
            #Time types: convert raw epoch values to datetime objects
            elif cdftype == const.CDF_EPOCH.value:
                result = lib.v_epoch_to_datetime(result)
            elif cdftype == const.CDF_EPOCH16.value:
                result = lib.v_epoch16_to_datetime(result)
            elif cdftype == const.CDF_TIME_TT2000.value:
                result = lib.v_tt2000_to_datetime(result)
        return result
def convert_output_array(self, buffer):
"""Convert a buffer of data that will go into this slice
Parameters
==========
buffer : numpy.array
data to go into the CDF file
Returns
=======
out : numpy.array
input with majority flipped and dimensions reversed to be
suitable to pass directly to CDF library.
"""
buffer = self._flip_array(buffer)
return numpy.require(buffer, requirements=('C', 'A', 'W'))
def _flip_array(self, data):
"""
Operations for majority, etc. common between convert_input and _output
"""
cdftype = self.zvar.type()
#Flip majority if any non-degenerate dimensions exist
if self.column and not min(self.degen):
#Record-number dim degen, swap whole thing
if self.degen[0]:
if cdftype == const.CDF_EPOCH16.value:
#Maintain last dimension
data = data.transpose(
list(range(len(data.shape) - 2, 0, -1)) +
[len(data.shape) - 1]
)
else:
data = data.transpose()
#Record-number dimension is not degenerate, so keep it first
else:
if cdftype == const.CDF_EPOCH16.value:
data = data.transpose(
[0] + list(range(len(data.shape) - 2, 0, -1)) +
[len(data.shape) - 1]
)
else:
data = data.transpose(
[0] + list(range(len(data.shape) - 1, 0, -1)))
#Reverse non-degenerate dimensions in rev
#Remember that the degenerate indices are already gone!
if self.rev.any():
sliced = [(slice(None, None, -1) if self.rev[i] else slice(None))
for i in range(self.dims) if not self.degen[i]]
if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim
sliced.extend(slice(None))
data = operator.getitem(data, tuple(sliced))
return data
    def select(self):
        """Selects this hyperslice in the CDF

        Calls the CDF library to select the CDF, variable, records, and
        array elements corresponding to this slice.
        """
        #Record-dimension selection (start, count, interval) always applies
        args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]),
                const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]),
                const.SELECT_, const.zVAR_RECINTERVAL_,
                ctypes.c_long(self.intervals[0]))
        if self.dims > 1:
            #Non-record dimensions: pass starts/counts/intervals as C arrays,
            #skipping element 0 (the record dimension handled above)
            dims = self.dims - 1
            args += (const.SELECT_, const.zVAR_DIMINDICES_,
                     (ctypes.c_long * dims)(*self.starts[1:]),
                     const.SELECT_, const.zVAR_DIMCOUNTS_,
                     (ctypes.c_long * dims)(*self.counts[1:]),
                     const.SELECT_, const.zVAR_DIMINTERVALS_,
                     (ctypes.c_long * dims)(*self.intervals[1:]))
        self.zvar._call(*args)
@staticmethod
def expand_ellipsis(slices, n_dims):
"""Expands any ellipses into correct number of full-size slices
@param slices: tuple of slices, integers, or ellipse objects
@type slices: tuple
@param n_dims: number of dimensions this slice is over
@type n_dims: int
@return: L{slices} with ellipses replaced by appropriate number of
full-dimension slices
@rtype: tuple
@raise IndexError: if ellipses specified when already have enough
dimensions
"""
if slices is Ellipsis:
return tuple([slice(None, None, None)
for i in range(n_dims)])
#Elements might be numpy arrays, so can't use in/index
idx = [i for i, v in enumerate(slices) if v is Ellipsis]
if not idx: #no ellipsis
return slices
if len(idx) > 1: #multiples!
raise IndexError('Ellipses can only be used once per slice.')
idx = idx[0]
#how many dims to expand ellipsis to
#remember the ellipsis is in len(slices) and must be replaced!
extra = n_dims - len(slices) + 1
if extra < 0:
raise IndexError('too many indices')
result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:]
return result
@staticmethod
def check_well_formed(data):
"""Checks if input data is well-formed, regular array"""
d = numpy.asanyarray(data)
if d.dtype == numpy.object: #this is probably going to be bad
try:
len(d.flat[0])
except TypeError: #at least it's not a list
pass
else:
raise ValueError(
'Data must be well-formed, regular array of number, '
'string, or datetime')
@staticmethod
def dimensions(data):
"""Finds the dimensions of a nested list-of-lists
@param data: data of which dimensions are desired
@type data: list (of lists)
@return: dimensions of L{data}, in order outside-in
@rtype: list of int
@raise ValueError: if L{data} has irregular dimensions
"""
d = numpy.asanyarray(data)
_Hyperslice.check_well_formed(d)
return d.shape
@staticmethod
def types(data, backward=False):
"""Find dimensions and valid types of a nested list-of-lists
Any given data may be representable by a range of CDF types; infer
the CDF types which can represent this data. This breaks down to:
1. Proper kind (numerical, string, time)
2. Proper range (stores highest and lowest number)
3. Sufficient resolution (EPOCH16 required if datetime has
microseconds or below.)
If more than one value satisfies the requirements, types are returned
in preferred order:
1. Type that matches precision of data first, then
2. integer type before float type, then
3. Smallest type first, then
4. signed type first, then
5. specifically-named (CDF_BYTE) vs. generically named (CDF_INT1)
So for example, EPOCH_16 is preferred over EPOCH if L{data} specifies
below the millisecond level (rule 1), but otherwise EPOCH is preferred
(rule 2).
For floats, four-byte is preferred unless eight-byte is required:
1. absolute values between 0 and 3e-39
2. absolute values greater than 1.7e38
This will switch to an eight-byte double in some cases where four bytes
would be sufficient for IEEE 754 encoding, but where DEC formats would
require eight.
@param data: data for which dimensions and CDF types are desired
@type data: list (of lists)
@param backward: limit to pre-CDF3 types
@type backward: bool
@return: dimensions of L{data}, in order outside-in;
CDF types which can represent this data;
number of elements required (i.e. length of longest string)
@rtype: 3-tuple of lists ([int], [ctypes.c_long], [int])
@raise ValueError: if L{data} has irregular dimensions
"""
d = numpy.asanyarray(data)
dims = d.shape
elements = 1
types = []
_Hyperslice.check_well_formed(d)
if d.dtype.kind in ('S', 'U'): #it's a string
types = [const.CDF_CHAR, const.CDF_UCHAR]
elements = d.dtype.itemsize
if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per
elements //= 4
elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'):
if max((dt.microsecond % 1000 for dt in d.flat)) > 0:
types = [const.CDF_EPOCH16, const.CDF_EPOCH,
const.CDF_TIME_TT2000]
else:
types = [const.CDF_EPOCH, const.CDF_EPOCH16,
const.CDF_TIME_TT2000]
if backward:
del types[types.index(const.CDF_EPOCH16)]
del types[-1]
elif not lib.supports_int8:
del types[-1]
elif d is data or isinstance(data, numpy.generic):
#numpy array came in, use its type (or byte-swapped)
types = [k for k in lib.numpytypedict
if (lib.numpytypedict[k] == d.dtype
or lib.numpytypedict[k] == d.dtype.newbyteorder())
and not k in lib.timetypes]
if (not lib.supports_int8 or backward) \
and const.CDF_INT8.value in types:
del types[types.index(const.CDF_INT8.value)]
#Maintain priority to match the ordered lists below:
#float/double (44, 45) before real (21/22), and
#byte (41) before int (1) before char (51). So hack.
#Consider making typedict an ordered dict once 2.6 is dead.
types.sort(key=lambda x: x % 50, reverse=True)
if not types: #not a numpy array, or can't parse its type
if d.dtype.kind == 'O': #Object. Try to make it numeric
#Can't do safe casting from Object, so try and compare
#Basically try most restrictive to least restrictive
trytypes = (numpy.uint64, numpy.int64, numpy.float64)
for t in trytypes:
try:
newd = d.astype(dtype=t)
except: #Failure to cast, try next type
continue
if (newd == d).all(): #Values preserved, use this type
d = newd
#Continue with normal guessing, as if a list
break
else:
#fell through without a match
raise ValueError(
'Cannot convert generic objects to CDF type.')
if d.dtype.kind in ('i', 'u'): #integer
minval = numpy.min(d)
maxval = numpy.max(d)
if minval < 0:
types = [const.CDF_BYTE, const.CDF_INT1,
const.CDF_INT2, const.CDF_INT4, const.CDF_INT8,
const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63,
1.7e38, 1.7e38, 8e307, 8e307]
else:
types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1,
const.CDF_INT2, const.CDF_UINT2,
const.CDF_INT4, const.CDF_UINT4,
const.CDF_INT8,
const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
cutoffs = [2 ** 7, 2 ** 7, 2 ** 8,
2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63,
1.7e38, 1.7e38, 8e307, 8e307]
types = [t for (t, c) in zip(types, cutoffs) if c > maxval
and (minval >= 0 or minval >= -c)]
if (not lib.supports_int8 or backward) \
and const.CDF_INT8 in types:
del types[types.index(const.CDF_INT8)]
else: #float
if dims is ():
if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39):
types = [const.CDF_DOUBLE, const.CDF_REAL8]
else:
types = [const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
else:
absolutes = numpy.abs(d[d != 0])
if len(absolutes) > 0 and \
(numpy.max(absolutes) > 1.7e38 or
numpy.min(absolutes) < 3e-39):
types = [const.CDF_DOUBLE, const.CDF_REAL8]
else:
types = [const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
types = [t.value if hasattr(t, 'value') else t for t in types]
return (dims, types, elements)
@staticmethod
def reorder(seq):
"""Reorders seq to switch array majority
Used to take an array of subscripts between row
and column majority. First element is not touched,
being the record number.
@param seq: a sequence of *subscripts*
@type seq: sequence of integers
@return: seq with all but element 0 reversed in order
@rtype: sequence of integers
"""
return numpy.concatenate((seq[0:1],
numpy.flipud(seq)[:-1]))
@staticmethod
def convert_range(start, stop, step, size):
"""Converts a start/stop/step range to start/count/interval
(i.e. changes from Python-style slice to CDF-style)
@param start: index to start a slice at, may be none or negative
@type start: int
@param stop: index at end of slice (one-past, standard Python),
may be none or negative
@type stop: int
@param step: interval for stepping through stlice
@type step: int
@param size: size of list to slice
@type size: int
@return: (start, count, interval, rev) where:
1. start is the start index, normalized to be within
the size of the list and negatives handled
2. count is the number of records in the slice,
guaranteed to stop before the end
3. interval is the skip between records
4. rev indicates whether the sequence should be reversed
@rtype: (int, int, int, boolean)
"""
(start, stop, step) = slice(start, stop, step).indices(size)
if step < 0:
step *= -1
count = int((start - stop + step - 1) / step)
start = start - (count - 1) * step
rev = True
else:
count = int((stop - start + step - 1) / step)
rev = False
if count < 0:
count = 0
start = 0
return (start, count, step, rev)
class Attr(MutableSequence):
"""An attribute, g or z, for a CDF
.. warning::
This class should not be used directly, but only in its
subclasses, :class:`gAttr` and :class:`zAttr`. The methods
listed here are safe to use in the subclasses.
Represents a CDF attribute, providing access to the Entries in a format
that looks like a Python
list. General list information is available in the python docs:
`1 <http://docs.python.org/tutorial/introduction.html#lists>`_,
`2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_,
`3 <http://docs.python.org/library/stdtypes.html#typesseq>`_.
An introduction to CDF attributes can be found in section 2.4 of
the CDF user's guide.
Each element of the list is a single Entry of the appropriate type.
The index to the elements is the Entry number.
Multi-dimensional slicing is *not* supported; an Entry with multiple
elements will have all elements returned (and can thus be sliced itself).
Example:
>>> first_three = attribute[5, 0:3] #will fail
>>> first_three = attribute[5][0:3] #first three elements of 5th Entry
.. autosummary::
~Attr.append
~Attr.has_entry
~Attr.insert
~Attr.max_idx
~Attr.new
~Attr.number
~Attr.rename
~Attr.type
.. automethod:: append
.. automethod:: has_entry
.. automethod:: insert
.. automethod:: max_idx
.. automethod:: new
.. automethod:: number
.. automethod:: rename
.. automethod:: type
"""
def __init__(self, cdf_file, attr_name, create=False):
"""Initialize this attribute
@param cdf_file: CDF file containing this attribute
@type cdf_file: :py:class:`pycdf.CDF`
@param attr_name: Name of this attribute
@type attr_name: str
@param create: True to create attribute, False to look up existing.
@type create: bool
"""
self._cdf_file = cdf_file
self._raw = False
if isinstance(attr_name, str_classes):
try:
self._name = attr_name.encode('ascii')
except AttributeError:
self._name = attr_name
attrno = ctypes.c_long()
if create:
self._cdf_file._call(const.CREATE_, const.ATTR_,
self._name, self.SCOPE,
ctypes.byref(attrno))
self._cdf_file.add_attr_to_cache(
self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE)
else: #Ensure exists, and populate cache. See scope note below
attrno, scope = self._cdf_file.attr_num(self._name)
else:
name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1)
scope = ctypes.c_long(0)
self._cdf_file._call(const.SELECT_, const.ATTR_,
ctypes.c_long(attr_name))
#Because it's possible to create a gAttr Python objecting
#referencing an Attribute with variable scope, and vice-versa,
#do NOT assume the scope matches
#(Higher level code checks for that being a bad thing.)
self._cdf_file._call(
const.GET_, const.ATTR_NAME_, name,
const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope))
self._name = name.value.rstrip()
if scope.value == const.GLOBAL_SCOPE.value:
scope = True
elif scope.value == const.VARIABLE_SCOPE.value:
scope = False
else:
raise CDFError(const.BAD_SCOPE)
self._cdf_file.add_attr_to_cache(self._name, attr_name, scope)
def __getitem__(self, key):
"""Return a slice of Entries.
Because Attributes may be sparse, a multi-element slice will return
None for those elements which do not have associated Entries.
@param key: index or range of Entry number to return
@type key: slice or int
@return: a list of entries, appropriate type.
@raise IndexError: if L{key} is an int and that Entry number does not
exist.
"""
if key is Ellipsis:
key = slice(None, None, None)
if hasattr(key, 'indices'):
idx = range(*key.indices(self.max_idx() + 1))
return [self._get_entry(i) if self.has_entry(i) else None
for i in idx]
else:
if self.has_entry(key):
return self._get_entry(key)
else:
raise IndexError('list index ' + str(key) + ' out of range.')
def _check_other_entries(self, types):
"""Try to get the type of this entry from others in the Attribute
For zAttrs, checks if all other Entries are the same type, and at
least one doesn't match its zVar, i.e. Entry type dominates (otherwise
assumption is the Var type dominates).
For gAttrs, checks all other Entries, and gives priority to the
one that's earliest in the possible type list and exists in other
Entries.
This is only one component of Entry type guessing!
:param list types: CDF types that are candidates (match the data)
:return: The type discerned from other Entries, or None
"""
if self.ENTRY_ == const.zENTRY_:
#If everything else is the same entry type,
#and one is not the same as its var, probably
#all entries should be of that type
cand_et = None #The Entry type that might work
one_var_diff = False #One Var has a type different from Entry
for num in range(self.max_idx() + 1):
if not self.has_entry(num):
continue
vartype = self._cdf_file[num].type()
entrytype = self.type(num)
if vartype != entrytype:
one_var_diff = True
if cand_et is None:
if not entrytype in types:
return None #One var has Entry with "impossible" type
cand_et = entrytype
elif cand_et != entrytype:
return None #Two vars have Entries with different types
if one_var_diff and cand_et is not None:
return cand_et
else:
# Of those types which exist in other entries,
# find the one which is earliest
# in types, i.e. the preferred type
entrytypes = [self.type(num) for num in
range(self.max_idx() + 1)
if self.has_entry(num)]
entrytypes = [et for et in entrytypes if et in types]
if entrytypes:
return types[
min([types.index(et) for et in entrytypes])]
return None
def __setitem__(self, key, data):
"""Set a slice of Entries.
@param key: index or range of Entry numbers to set
@type key: slice or int
@param data: the data to set these entries to. Normally each entry should
be a sequence; if a scalar is provided, it is treated
as a single-element list.
@type data: scalar or list
@raise ValueError: if size of {data} does not match size of L{key}
@note: Attributes do not 'grow' or 'shrink' as entries are added
or removed. Indexes of entries never change and there is no
way to 'insert'.
"""
if key is Ellipsis:
key = slice(None, None, None)
if not hasattr(key, 'indices'):
#Single value, promote everything a dimension
idx = (key, key + 1, 1)
data = [data]
else:
idx = key.indices(self.max_idx() + 1)
if key.step is None or key.step > 0:
#Iterating forward, extend slice to match data
if len(data) > len(range(*idx)):
idx = (idx[0], idx[0] + idx[2] * len(data), idx[2])
#get, and check, types and sizes for all data
#checks first so don't have error after changing half the Entries
data_idx = -1
typelist = []
for i in range(*idx):
data_idx += 1
if data_idx >= len(data):
continue
datum = data[data_idx]
if datum is None:
typelist[i] = (None, None, None)
continue
(dims, types, elements) = _Hyperslice.types(
datum, backward=self._cdf_file.backward)
if len(types) <= 0:
raise ValueError('Cannot find a matching CDF type.')
if len(dims) > 1:
raise ValueError('Entries must be scalar or 1D.')
elif len(dims) == 1 and isinstance(datum[0], str_classes):
raise ValueError('Entry strings must be scalar.')
entry_type = None
if self.has_entry(i): #If the entry already exists, match its type
entry_type = self.type(i)
if not entry_type in types:
entry_type = None
if entry_type is None: #Check other entries for this attribute
entry_type = self._check_other_entries(types)
if entry_type is None and self.ENTRY_ == const.zENTRY_:
#Fall back to zVar type
vartype = self._cdf_file[i].type()
if vartype in types:
entry_type = vartype
else:
entry_type = types[0]
elif entry_type is None:
entry_type = types[0]
if not entry_type in lib.numpytypedict:
raise ValueError('Cannot find a matching numpy type.')
typelist.append((dims, entry_type, elements))
data_idx = -1
for i in range(*idx):
data_idx += 1
if data_idx >= len(data) or data[data_idx] is None:
if self.has_entry(i):
del self[i]
continue
datum = data[data_idx]
(dims, entry_type, elements) = typelist[data_idx]
self._write_entry(i, datum, entry_type, dims, elements)
def __delitem__(self, key):
"""Delete a slice of Entries.
@param key: index or range of Entry numbers to delete
@type key: slice or int
@note: Attributes do not 'grow' or 'shrink' as entries are added
or removed. Indexes of entries never change and there is no
way to 'insert'.
"""
if key is Ellipsis:
key = slice(None, None, None)
if not hasattr(key, 'indices'):
idx = (key, key + 1, 1)
else:
idx = key.indices(self.max_idx() + 1)
for i in range(*idx):
self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i),
const.DELETE_, self.ENTRY_)
def __iter__(self, current=0):
"""Iterates over all entries in this Attribute
Returns data from one entry at a time until reaches the end.
@note: Returned in entry-number order.
"""
while current <= self.max_idx():
if self.has_entry(current):
value = yield(self._get_entry(current))
if value != None:
current = value
current += 1
def __reversed__(self, current=None):
"""Iterates over all entries in this Attribute
Returns data from one entry at a time, starting at end and going
to beginning.
@note: Returned in entry-number order.
"""
if current is None:
current = self.max_idx()
while current >= 0:
if self.has_entry(current):
value = yield(self._get_entry(current))
if value != None:
current = value
current -= 1
def __len__(self):
"""Number of Entries for this Attr. NOT same as max Entry number.
@return: Number of Entries
@rtype: int
"""
count = ctypes.c_long(0)
self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count))
return count.value
def __repr__(self):
"""Returns representation of an attribute
Cannot return anything that can be eval'd to create a copy of the
attribtute, so just wrap the informal representation in angle brackets.
@return: all the data in this attribute
@rtype: str
"""
return '<\n' + str(self) + '\n>'
def __str__(self):
"""Returns a string representation of the attribute
This is an 'informal' representation in that it cannot be evaluated
directly to create an L{Attr}.
@return: all the data in this attribute
@rtype: str
"""
if self._cdf_file._opened:
return '\n'.join([str(item) for item in self])
else:
if isinstance(self._name, str):
return 'Attribute "{0}" in closed CDF {1}'.format(
self._name, self._cdf_file.pathname)
else:
return 'Attribute "{0}" in closed CDF {1}'.format(
self._name.decode('ascii'),
self._cdf_file.pathname.decode('ascii'))
def insert(self, index, data):
"""Insert an entry at a particular number
Inserts entry at particular number while moving all subsequent
entries to one entry number later. Does not close gaps.
Parameters
==========
index : int
index where to put the new entry
data :
data for the new entry
"""
max_entry = self.max_idx()
if index > max_entry: #Easy case
self[index] = data
return
for i in range(max_entry, index - 1, -1):
if self.has_entry(i+1):
self.__delitem__(i+1)
if self.has_entry(i):
self.new(self.__getitem__(i), type=self.type(i), number=i+1)
self[index] = data
def append(self, data):
"""Add an entry to end of attribute
Puts entry after last defined entry (does not fill gaps)
Parameters
==========
data :
data for the new entry
"""
self[self.max_idx() + 1] = data
def _call(self, *args, **kwargs):
"""Select this CDF and Attr and call the CDF internal interface
@param args: Passed directly to the CDF library interface.
@type args: various, see :py:mod:`ctypes`.
@return: CDF status from the library
@rtype: ctypes.c_long
@note: Terminal NULL_ is automatically added to L{args}.
@raise CDFError: if CDF library reports an error
@raise CDFWarning: if CDF library reports a warning and interpreter
is set to error on warnings.
"""
return self._cdf_file._call(
const.SELECT_, const.ATTR_,
ctypes.c_long(self._cdf_file.attr_num(self._name)[0]),
*args, **kwargs)
def _entry_len(self, number):
"""Number of elements in an Entry
@param number: number of Entry
@type number: int
@return: number of elements
@rtype: int
"""
if not self.has_entry(number):
raise IndexError('list index ' + str(number) + ' out of range.')
count = ctypes.c_long(0)
self._call(
const.SELECT_, self.ENTRY_, ctypes.c_long(number),
const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count))
return count.value
def type(self, number, new_type=None):
"""Find or change the CDF type of a particular Entry number
Parameters
==========
number : int
number of Entry to check or change
Other Parameters
================
new_type
type to change this Entry to, from :mod:`~pycdf.const`.
Omit to only check type.
Returns
=======
out : int
CDF variable type, see :mod:`~pycdf.const`
Notes
=====
If changing types, old and new must be equivalent, see CDF
User's Guide section 2.5.5 pg. 57
"""
if new_type != None:
if not hasattr(new_type, 'value'):
new_type = ctypes.c_long(new_type)
size = ctypes.c_long(self._entry_len(number))
status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
const.PUT_, self.ENTRY_DATASPEC_, new_type, size,
ignore=(const.NO_SUCH_ENTRY,))
if status == const.NO_SUCH_ENTRY:
raise IndexError('list index ' + str(number) + ' out of range.')
cdftype = ctypes.c_long(0)
status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype),
ignore=(const.NO_SUCH_ENTRY,))
if status == const.NO_SUCH_ENTRY:
raise IndexError('list index ' + str(number) + ' out of range.')
return cdftype.value
def has_entry(self, number):
"""Check if this attribute has a particular Entry number
Parameters
==========
number : int
number of Entry to check or change
Returns
=======
out : bool
True if ``number`` is a valid entry number; False if not
"""
status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_,
ctypes.c_long(number),
ignore=(const.NO_SUCH_ENTRY, ))
return not status == const.NO_SUCH_ENTRY
def max_idx(self):
"""Maximum index of Entries for this Attr
Returns
=======
out : int
maximum Entry number
"""
count = ctypes.c_long(0)
self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count))
return count.value
def new(self, data, type=None, number=None):
"""Create a new Entry in this Attribute
.. note:: If ``number`` is provided and an Entry with that number
already exists, it will be overwritten.
Parameters
==========
data
data to put in the Entry
Other Parameters
================
type : int
type of the new Entry, from :mod:`~pycdf.const`
(otherwise guessed from ``data``)
number : int
Entry number to write, default is lowest available number.
"""
if number is None:
number = 0
while self.has_entry(number):
number += 1
(dims, types, elements) = _Hyperslice.types(
data, backward=self._cdf_file.backward)
if type is None:
#Guess based on other entries
type = self._check_other_entries(types)
if type is None and self.ENTRY_ == const.zENTRY_:
#Try to match variable type
vartype = self._cdf_file[number].type()
if vartype in types:
type = vartype
if type is None:
type = types[0]
elif hasattr(type, 'value'):
type = type.value
self._write_entry(number, data, type, dims, elements)
def number(self):
"""Find the attribute number for this attribute
Returns
=======
out : int
attribute number
"""
no = ctypes.c_long(0)
self._cdf_file._call(const.GET_, const.ATTR_NUMBER_,
self._name, ctypes.byref(no))
return no.value
def global_scope(self):
"""Determine scope of this attribute.
Returns
=======
out : bool
True if global (i.e. gAttr), False if zAttr
"""
return self._cdf_file.attr_num(self._name)[1]
def rename(self, new_name):
"""Rename this attribute
Renaming a zAttribute renames it for *all* zVariables in this CDF!
Parameters
==========
new_name : str
the new name of the attribute
"""
try:
enc_name = new_name.encode('ascii')
except AttributeError:
enc_name = new_name
if len(enc_name) > const.CDF_ATTR_NAME_LEN256:
raise CDFError(const.BAD_ATTR_NAME)
self._call(const.PUT_, const.ATTR_NAME_, enc_name)
self._cdf_file.add_attr_to_cache(
enc_name,
*self._cdf_file.attr_num(self._name)) #still in cache
del self._cdf_file._attr_info[self._name]
self._name = enc_name
def _get_entry(self, number):
"""Read an Entry associated with this L{Attr}
@param number: number of Entry to return
@type number: int
@return: data from entry numbered L{number}
@rtype: list or str
"""
if not self.has_entry(number):
raise IndexError('list index ' + str(number) + ' out of range.')
#Make a big enough buffer
length = self._entry_len(number)
cdftype = self.type(number)
if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
buff = numpy.empty((), 'S{0}'.format(length), order='C')
else:
if not cdftype in lib.numpytypedict:
raise CDFError(const.BAD_DATA_TYPE)
buff = numpy.empty((length,), lib.numpytypedict[cdftype],
order='C')
buff = numpy.require(buff, requirements=('C', 'A', 'W'))
self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
const.GET_, self.ENTRY_DATA_,
buff.ctypes.data_as(ctypes.c_void_p))
#decode
if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
if str == bytes or self._raw: #Py2k, leave as bytes
result = bytes(buff)
else: #Py3k, make unicode
result = str(numpy.char.array(buff).decode())
else:
if not self._raw:
if cdftype == const.CDF_EPOCH.value:
result = lib.v_epoch_to_datetime(buff)
elif cdftype == const.CDF_EPOCH16.value:
result = lib.v_epoch16_to_datetime(buff)
elif cdftype == const.CDF_TIME_TT2000.value:
result = lib.v_tt2000_to_datetime(buff)
else:
result = buff
else:
result = buff
if length == 1:
result = result[0]
return result
def _write_entry(self, number, data, cdf_type, dims, elements):
"""Write an Entry to this Attr.
@param number: number of Entry to write
@type number: int
@param data: data to write
@param cdf_type: the CDF type to write, from :py:mod:`pycdf.const`
@param dims: dimensions of L{data}
@type dims: list
@param elements: number of elements in L{data}, 1 unless it is a string
@type elements: int
"""
if len(dims) == 0:
n_write = 1
else:
n_write = dims[0]
if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.dtype('S' + str(elements)))
n_write = elements
elif cdf_type == const.CDF_EPOCH16.value:
if not self._raw:
try:
data = lib.v_datetime_to_epoch16(data)
except AttributeError:
pass
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.float64)
elif cdf_type == const.CDF_EPOCH.value:
if not self._raw:
try:
data = lib.v_datetime_to_epoch(data),
except AttributeError:
pass
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.float64)
elif cdf_type == const.CDF_TIME_TT2000.value:
if not self._raw:
try:
data = lib.v_datetime_to_tt2000(data)
except AttributeError:
pass
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=numpy.int64)
elif cdf_type in lib.numpytypedict:
data = numpy.require(data, requirements=('C', 'A', 'W'),
dtype=lib.numpytypedict[cdf_type])
else:
raise CDFError(const.BAD_DATA_TYPE)
self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type),
ctypes.c_long(n_write),
data.ctypes.data_as(ctypes.c_void_p))
    def _delete(self):
        """Delete this Attribute
        Also deletes all Entries associated with it.
        """
        #_call selects this attribute as "current" before issuing DELETE_
        self._call(const.DELETE_, const.ATTR_)
        #Drop the cached name/number mapping; this object is unusable after
        self._cdf_file.clear_attr_from_cache(self._name)
        self._name = None
class zAttr(Attr):
    """zAttribute for zVariables within a CDF.
    .. warning::
        A zAttribute is shared by every variable in the CDF, so
        manipulating one directly can affect other variables in
        surprising ways; prefer working through :class:`zAttrList`.
    .. note::
        pyCDF only ever exposes the single zEntry belonging to the
        zVariable through which a zAttr is accessed.
    See Also
    ========
    :class:`Attr`
    """
    #Constants selecting zEntry (rather than gEntry) operations in the C API
    ENTRY_ = const.zENTRY_
    ENTRY_DATA_ = const.zENTRY_DATA_
    SCOPE = const.VARIABLE_SCOPE
    ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_
    ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_
    ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_
    ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_
    ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_
    ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_
    def insert(self, index, data):
        """Insert entry at particular index number
        A zAttr holds exactly one zEntry per zVariable, so insertion
        makes no sense and is not supported.
        Raises
        ======
        NotImplementedError : always
        """
        raise NotImplementedError
    def append(self, index, data):
        """Add entry to end of attribute list
        A zAttr holds exactly one zEntry per zVariable, so appending
        makes no sense and is not supported.
        Raises
        ======
        NotImplementedError : always
        """
        raise NotImplementedError
class gAttr(Attr):
    """Global Attribute for a CDF
    Wraps one CDF gAttribute and presents its gEntries as a Python
    list, indexed by gEntry number (general list docs:
    `1 <http://docs.python.org/tutorial/introduction.html#lists>`_,
    `2 <http://docs.python.org/tutorial/datastructures.html#more-on-lists>`_,
    `3 <http://docs.python.org/library/stdtypes.html#typesseq>`_).
    Normally obtained by indexing a :class:`gAttrList` by name:
    >>> attribute = cdffile.attrs['attribute_name']
    >>> first_gentry = attribute[0]
    Each element is one gEntry: either a string or a 1D numeric array.
    Numeric entries with one element come back as scalars; those with
    several elements come back as a list. Indexing stops at the entry
    level -- the whole entry is returned at once, and Python slicing on
    the *returned* value extracts individual items:
    >>> first_three = attribute[5, 0:3] #will fail
    >>> first_three = attribute[5][0:3] #first three elements of 5th Entry
    gEntry numbers need not be contiguous: entry 0 and entry 2 may exist
    with no entry 1. :meth:`~Attr.len` counts existing gEntries;
    :meth:`~Attr.max_idx` gives the highest defined number; and
    :meth:`~Attr.has_entry` tests a particular number. Iteration visits
    every existing entry::
    >>> entrylist = [entry for entry in attribute]
    Deleting a gEntry leaves a "hole":
    >>> attribute[0:3] = [1, 2, 3]
    >>> del attribute[1]
    >>> attribute.has_entry(1)
    False
    >>> attribute.has_entry(2)
    True
    >>> print attribute[0:3]
    [1, None, 3]
    A multi-element slice yields ``None`` where no entry exists; a
    single index with no entry raises ``IndexError``; assigning ``None``
    deletes the entry.
    On assignment the CDF type is chosen to match the data, preferring
    (in order):
    #. existing gEntry of the same number in this gAttribute
    #. other gEntries in this gAttribute
    #. data-matching constraints described in :meth:`CDF.new`.
    See Also
    ========
    :class:`Attr`
    """
    #Constants selecting gEntry (rather than zEntry) operations in the C API
    ENTRY_ = const.gENTRY_
    ENTRY_DATA_ = const.gENTRY_DATA_
    SCOPE = const.GLOBAL_SCOPE
    ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_
    ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_
    ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_
    ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_
    ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_
    ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_
class AttrList(MutableMapping):
    """Object representing a list of attributes.
    .. warning::
        This class should not be used directly, but only via its
        subclasses, :class:`gAttrList` and :class:`zAttrList`.
        Methods listed here are safe to use from the subclasses.
    .. autosummary::
        ~AttrList.clone
        ~AttrList.copy
        ~AttrList.from_dict
        ~AttrList.new
        ~AttrList.rename
    .. automethod:: clone
    .. automethod:: copy
    .. automethod:: from_dict
    .. automethod:: new
    .. automethod:: rename
    """
    #Subclasses must define class attributes: AttrType (the Attr subclass
    #to instantiate), attr_name (human-readable kind name), and
    #global_scope (bool) -- see gAttrList / zAttrList below.
    def __init__(self, cdf_file, special_entry=None):
        """Initialize the attribute collection
        @param cdf_file: CDF these attributes are in
        @type cdf_file: :py:class:`pycdf.CDF`
        @param special_entry: callable which returns a "special" entry number,
        used to limit results for zAttrs to those which match the zVar
        (i.e. the var number)
        @type special_entry: callable
        """
        self._cdf_file = cdf_file
        self.special_entry = special_entry
    def __getitem__(self, name):
        """Find an Attribute by name
        @param name: name of the Attribute to return
        @type name: str
        @return: attribute named L{name}
        @rtype: L{Attr}
        @raise KeyError: if there is no attribute named L{name}
        @raise CDFError: other errors in CDF library
        """
        try:
            attrib = self.AttrType(self._cdf_file, name)
        except CDFError:
            (t, v, tb) = sys.exc_info()
            #Translate "no such attribute" into the dict-like KeyError;
            #re-raise anything else
            if v.status == const.NO_SUCH_ATTR:
                raise KeyError(name + ': ' + str(v))
            else:
                raise
        #Attribute exists but has the wrong scope (g vs. z) for this list
        if attrib.global_scope() != self.global_scope:
            raise KeyError(name + ': no ' + self.attr_name + ' by that name.')
        return attrib
    def __setitem__(self, name, data):
        """Create an Attribute or change its entries
        @param name: name of Attribute to change
        @type name: str
        @param data: Entries to populate this Attribute with.
        Any existing Entries will be deleted!
        Another C{Attr} may be specified, in which
        case all its entries are copied.
        @type data: scalar, list, or L{Attr}
        """
        #NOTE(review): this tests AttrList but the docstring says an Attr
        #may be passed (Attr is not an AttrList subclass, and AttrList does
        #not define max_idx/has_entry) -- confirm which type is intended.
        if isinstance(data, AttrList):
            if name in self:
                del self[name]
            attr = self._get_or_create(name)
            #NOTE(review): range(data.max_idx()) excludes the entry at
            #max_idx itself; __str__ below uses max_idx() + 1. Confirm
            #whether the last entry should be copied.
            for entryno in range(data.max_idx()):
                if data.has_entry(entryno):
                    attr.new(data[entryno], data.type(entryno), entryno)
        else:
            attr = self._get_or_create(name)
            #Wrap scalars (including strings) in a list so they become
            #a single entry rather than one entry per element
            if isinstance(data, str_classes):
                data = [data]
            else:
                try:
                    junk = len(data)
                except TypeError:
                    data = [data]
            #Assign new entries, then delete any leftovers past the end
            attr[:] = data
            del attr[len(data):]
    def __delitem__(self, name):
        """Delete an Attribute (and all its entries)
        @param name: name of Attribute to delete
        @type name: str
        @raise KeyError: if there is no L{name} attribute of this scope
        """
        try:
            attr = self.AttrType(self._cdf_file, name)
        except CDFError:
            (t, v, tb) = sys.exc_info()
            if v.status == const.NO_SUCH_ATTR:
                raise KeyError(name + ': ' + str(v))
            else:
                raise
        if attr.global_scope() != self.global_scope:
            raise KeyError(name + ': not ' + self.attr_name)
        attr._delete()
    def __iter__(self, current=0):
        """Iterates over all Attr in this CDF or variable
        Returns name of one L{Attr} at a time until reaches the end.
        @note: Returned in number order.
        @note: This generator also supports ``send(name)``: iteration
        resumes just after the attribute called ``name``.
        """
        count = ctypes.c_long(0)
        self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_,
                             ctypes.byref(count))
        while current < count.value:
            candidate = self.AttrType(self._cdf_file, current)
            #Only yield attributes of this list's scope, and (for zAttrs)
            #only those with an entry for the associated variable
            if candidate.global_scope() == self.global_scope:
                if self.special_entry is None or \
                        candidate.has_entry(self.special_entry()):
                    if str == bytes:
                        value = yield(candidate._name)
                    else:
                        value = yield(candidate._name.decode())
                    #A value sent in jumps iteration to that attribute
                    if value != None:
                        current = self[value].number()
            current += 1
    def __repr__(self):
        """Returns representation of attribute list
        Cannot return anything that can be eval'd to create a copy of the
        list, so just wrap the informal representation in angle brackets.
        @return: all the data in this list of attributes
        @rtype: str
        """
        return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>'
    def __str__(self):
        """Returns a string representation of the attribute list
        This is an 'informal' representation in that it cannot be evaluated
        directly to create an L{AttrList}.
        @return: all the data in this list of attributes
        @rtype: str
        """
        if self._cdf_file._opened:
            #One line per attribute: "name: entry [CDF_TYPE]", with
            #multi-entry attributes continued on aligned lines
            return '\n'.join([key + ': ' + (
                ('\n' + ' ' * (len(key) + 2)).join(
                    [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']'
                     for i in range(value.max_idx() + 1) if value.has_entry(i)])
                if isinstance(value, Attr)
                else str(value) +
                ' [' + lib.cdftypenames[self.type(key)] + ']'
                )
                for (key, value) in sorted(self.items())])
        else:
            if isinstance(self._cdf_file.pathname, str):
                return 'Attribute list in closed CDF {0}'.format(
                    self._cdf_file.pathname)
            else:
                return 'Attribute list in closed CDF {0}'.format(
                    self._cdf_file.pathname.decode('ascii'))
    def clone(self, master, name=None, new_name=None):
        """
        Clones another attribute list, or one attribute from it, into this
        list.
        Parameters
        ==========
        master : AttrList
            the attribute list to copy from. This can be any dict-like object.
        Other Parameters
        ================
        name : str (optional)
            name of attribute to clone (default: clone entire list)
        new_name : str (optional)
            name of the new attribute, default ``name``
        """
        if name is None:
            self._clone_list(master)
        else:
            self._clone_attr(master, name, new_name)
    def copy(self):
        """
        Create a copy of this attribute list
        Returns
        =======
        out : dict
            copy of the entries for all attributes in this list
        """
        #value[:] forces an Attr to expand to its full list of entries
        return dict((key, value[:] if isinstance(value, Attr) else value)
                    for (key, value) in self.items())
    def new(self, name, data=None, type=None):
        """
        Create a new Attr in this AttrList
        Parameters
        ==========
        name : str
            name of the new Attribute
        Other Parameters
        ================
        data
            data to put into the first entry in the new Attribute
        type
            CDF type of the first entry from :mod:`~pycdf.const`.
            Only used if data are specified.
        Raises
        ======
        KeyError : if the name already exists in this list
        """
        if name in self:
            raise KeyError(name + ' already exists.')
        #A zAttr without an Entry in this zVar will be a "get" not "create"
        attr = self._get_or_create(name)
        if data is not None:
            if self.special_entry is None:
                attr.new(data, type)
            else:
                attr.new(data, type, self.special_entry())
    def rename(self, old_name, new_name):
        """
        Rename an attribute in this list
        Renaming a zAttribute renames it for *all* zVariables in this CDF!
        Parameters
        ==========
        old_name : str
            the current name of the attribute
        new_name : str
            the new name of the attribute
        """
        #Use AttrList.__getitem__ explicitly: zAttrList.__getitem__ returns
        #the entry's data, not the Attr object itself
        AttrList.__getitem__(self, old_name).rename(new_name)
    def from_dict(self, in_dict):
        """
        Fill this list of attributes from a dictionary
        .. deprecated:: 0.1.5
        Use :meth:`~pycdf.AttrList.clone` instead; it supports
        cloning from dictionaries.
        Parameters
        ==========
        in_dict : dict
            Attribute list is populated entirely from this dictionary;
            all existing attributes are deleted.
        """
        warnings.warn("from_dict is deprecated and will be removed. Use clone.",
                      DeprecationWarning)
        for k in in_dict:
            self[k] = in_dict[k]
        #Remove anything not in the input dictionary
        for k in list(self):
            if not k in in_dict:
                del self[k]
    def _clone_attr(self, master, name, new_name=None):
        """Clones a single attribute from one in this list or another
        Copies data and types from the master attribute to the new one
        @param master: attribute list to copy attribute from
        @type master: L{AttrList}
        @param name: name of attribute to copy
        @type name: str
        @param new_name: name of the new attribute, default L{name}
        @type new_name: str
        """
        if new_name is None:
            new_name = name
        self[new_name] = master[name]
    def _clone_list(self, master):
        """Clones this attribute list from another
        @param master: the attribute list to copy from
        @type master: L{AttrList}
        """
        for name in master:
            self._clone_attr(master, name)
        for name in list(self): #Can't iterate over a list we're changing
            if not name in master:
                del self[name]
    def _get_or_create(self, name):
        """Retrieve L{Attr} or create it if it doesn't exist
        @param name: name of the attribute to look up or create
        @type name: str
        @return: attribute with this name
        @rtype: L{Attr}
        @raise KeyError: if an attribute of this name exists with the
        wrong scope
        """
        attr = None
        try:
            attr = self.AttrType(self._cdf_file, name)
        except CDFError:
            (t, v, tb) = sys.exc_info()
            #Only "no such attribute" is expected; re-raise anything else
            if v.status != const.NO_SUCH_ATTR:
                raise
        if attr is None:
            #Third argument True requests creation
            attr = self.AttrType(self._cdf_file, name, True)
        elif attr.global_scope() != self.global_scope:
            raise KeyError(name + ': not ' + self.attr_name)
        return attr
class gAttrList(AttrList):
    """
    Object representing *all* the gAttributes in a CDF.
    Usually reached through the ``attrs`` attribute of an open
    :class:`CDF`:
    >>> global_attribs = cdffile.attrs
    Behaves like a dictionary whose keys are attribute names and whose
    values are :class:`gAttr` objects, e.g. for the global attribute
    TEXT:
    >>> text_attr = cdffile.attrs['TEXT']
    See Also
    ========
    :class:`AttrList`
    """
    #Configuration consumed by the AttrList machinery
    AttrType = gAttr
    attr_name = 'gAttribute'
    global_scope = True
    def __len__(self):
        """
        Number of gAttributes in this CDF
        Returns
        =======
        out : int
            number of gAttributes in the CDF
        """
        n_attrs = ctypes.c_long(0)
        self._cdf_file._call(
            const.GET_, const.CDF_NUMgATTRS_, ctypes.byref(n_attrs))
        return n_attrs.value
class zAttrList(AttrList):
    """Object representing *all* the zAttributes in a zVariable.
    Normally accessed as an attribute of a :class:`Var` in an open
    CDF:
    >>> epoch_attribs = cdffile['Epoch'].attrs
    Appears as a dictionary: keys are attribute names, values are
    the value of the zEntry associated with the appropriate zVariable.
    Each vAttribute in a CDF may only have a *single* entry associated
    with each variable. The entry may be a string, a single numerical value,
    or a series of numerical values. Entries with multiple values are returned
    as an entire list; direct access to the individual elements is not
    possible.
    Example: finding the first dependency of (ISTP-compliant) variable
    ``Flux``:
    >>> print cdffile['Flux'].attrs['DEPEND_0']
    zAttributes are shared among zVariables, one zEntry allowed per zVariable.
    (pyCDF hides this detail.) Deleting the last zEntry for a zAttribute will
    delete the underlying zAttribute.
    zEntries are created and destroyed by the usual dict methods on the
    zAttrlist:
    >>> epoch_attribs['new_entry'] = [1, 2, 4] #assign a list to new zEntry
    >>> del epoch_attribs['new_entry'] #delete the zEntry
    The type of the zEntry is guessed from data provided. The type is chosen to
    match the data; subject to that constraint, it will try to match
    (in order):
    #. existing zEntry corresponding to this zVar
    #. other zEntries in this zAttribute
    #. the type of this zVar
    #. data-matching constraints described in :py:meth:`CDF.new`
    See Also
    ========
    :class:`AttrList`
    """
    #Configuration consumed by the AttrList machinery
    AttrType = zAttr
    attr_name = 'zAttribute'
    global_scope = False
    def __init__(self, zvar):
        """Initialize the attribute collection
        @param zvar: zVariable these attributes are in
        @param zvar: :py:class:`pycdf.Var`
        """
        #zvar._num is passed as the special_entry callable: restricts
        #this list to zEntries belonging to this variable's number
        super(zAttrList, self).__init__(zvar.cdf_file, zvar._num)
        self._zvar = zvar
    def __getitem__(self, name):
        """Find an zEntry by name
        @param name: name of the zAttribute to return
        @type name: str
        @return: the data of the entry for this zVariable in attribute
        L{name} (NOT the L{zAttr} object itself)
        @raise KeyError: if there is no attribute named L{name} associated
        with this zVariable
        @raise CDFError: other errors in CDF library
        """
        attrib = super(zAttrList, self).__getitem__(name)
        zvar_num = self._zvar._num()
        if attrib.has_entry(zvar_num):
            #Propagate the variable's raw-output setting to the attribute
            attrib._raw = self._zvar._raw
            return attrib[zvar_num]
        else:
            raise KeyError(name + ': no such attribute for variable ' +
                           self._zvar.name())
    def __delitem__(self, name):
        """Delete an zEntry by name
        @param name: name of the zEntry to delete
        @type name: str
        @raise KeyError: if there is no attribute named L{name} associated
        with this zVariable
        @raise CDFError: other errors in CDF library
        @note: If this is the only remaining entry, the Attribute will be
        deleted.
        """
        attrib = super(zAttrList, self).__getitem__(name)
        zvar_num = self._zvar._num()
        if not attrib.has_entry(zvar_num):
            raise KeyError(str(name) + ': no such attribute for variable ' +
                           str(self._zvar._name))
        del attrib[zvar_num]
        #Last entry gone: remove the now-empty zAttribute entirely
        if len(attrib) == 0:
            attrib._delete()
    def __setitem__(self, name, data):
        """Sets a zEntry by name
        The type of the zEntry is guessed from L{data}. The type is chosen to
        match the data; subject to that constraint, it will try to match
        (in order):
        1. existing zEntry corresponding to this zVar
        2. other zEntries in this zAttribute
        3. the type of this zVar
        4. data-matching constraints described in L{_Hyperslice.types}
        @param name: name of zAttribute; zEntry for this zVariable will be set
        in zAttribute by this name
        @type name: str
        @raise CDFError: errors in CDF library
        @raise ValueError: if unable to find a valid CDF type matching L{data},
        or if L{data} is the wrong dimensions.
        """
        try:
            attr = super(zAttrList, self).__getitem__(name)
        except KeyError:
            #No such attribute yet: create it (third argument True)
            attr = zAttr(self._cdf_file, name, True)
        attr._raw = self._zvar._raw
        attr[self._zvar._num()] = data
    def __len__(self):
        """Number of zAttributes in this variable
        @return: number of zAttributes in the CDF
        which have entries for this variable.
        @rtype: int
        """
        #Scans every attribute in the CDF -- O(total attribute count)
        length = 0
        count = ctypes.c_long(0)
        self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_,
                             ctypes.byref(count))
        current = 0
        while current < count.value:
            candidate = zAttr(self._cdf_file, current)
            if not candidate.global_scope():
                if candidate.has_entry(self._zvar._num()):
                    length += 1
            current += 1
        return length
    def type(self, name, new_type=None):
        """Find or change the CDF type of a zEntry in this zVar
        @param name: name of the zAttr to check or change
        @type name: str
        @param new_type: type to change it to, see :py:mod:`pycdf.const`
        @type new_type: ctypes.c_long
        @return: CDF variable type, see :py:mod:`pycdf.const`
        @rtype: int
        @raise KeyError: if attribute L{name} has no entry for this variable
        @note: If changing types, old and new must be equivalent, see CDF
        User's Guide section 2.5.5 pg. 57
        """
        attrib = super(zAttrList, self).__getitem__(name)
        zvar_num = self._zvar._num()
        if not attrib.has_entry(zvar_num):
            raise KeyError(name + ': no such attribute for variable ' +
                           self._zvar.name())
        return attrib.type(zvar_num, new_type)
    def _clone_attr(self, master, name, new_name=None):
        """Clones a single attribute from one in this list or another
        Copies data and types from the master attribute to the new one
        @param master: attribute list to copy attribute from
        @type master: L{zAttrList}
        @param name: name of attribute to copy
        @type name: str
        @param new_name: name of the new attribute, default L{name}
        @type new_name: str
        """
        if new_name is None:
            new_name = name
        if new_name in self:
            del self[new_name]
        #Preserve the CDF type when the master can report one
        self.new(new_name, master[name],
                 master.type(name) if hasattr(master, 'type') else None)
| 37.378262 | 118 | 0.572838 |
#Module maintainer contact information, exposed as package metadata
__contact__ = 'Jon Niehof, Jonathan.Niehof@unh.edu'
try:
from collections.abc import MutableMapping, MutableSequence
except ImportError:
from collections import MutableMapping, MutableSequence
import ctypes
import ctypes.util
import datetime
import operator
import os
import os.path
import shutil
import sys
import tempfile
import warnings
import weakref
import numpy
import numpy.ma
#from . import const
#Tuple of the string-like types for isinstance checks; Python 2 has a
#separate 'unicode' type, Python 3 does not.
try:
    unicode
except NameError: #Python 3: str is already unicode
    str_classes = (str, bytes)
else: #Python 2
    str_classes = (str, bytes, unicode)
class Library(object):
def __init__(self, libpath=None, library=None):
if not 'CDF_TMP' in os.environ:
os.environ['CDF_TMP'] = tempfile.gettempdir()
if not library:
if not libpath:
self.libpath, self._library = self._find_lib()
if self._library is None:
raise Exception((
'Cannot load CDF C library; checked {0}. '
'Try \'os.environ["CDF_LIB"] = library_directory\' '
'before import.').format(', '.join(self.libpath)))
else:
self._library = ctypes.CDLL(libpath)
self.libpath = libpath
else:
self._library = library
self.libpath = libpath
self._library.CDFlib.restype = ctypes.c_long #commonly used, so set it up here
self._library.EPOCHbreakdown.restype = ctypes.c_long
self._library.computeEPOCH.restype = ctypes.c_double
self._library.computeEPOCH.argtypes = [ctypes.c_long] * 7
self._library.computeEPOCH16.restype = ctypes.c_double
self._library.computeEPOCH16.argtypes = [ctypes.c_long] * 10 + \
[ctypes.POINTER(ctypes.c_double * 2)]
if hasattr(self._library, 'CDFsetFileBackward'):
self._library.CDFsetFileBackward.restype = None
self._library.CDFsetFileBackward.argtypes = [ctypes.c_long]
#Map old name to the 3.7.1+ name
if not hasattr(self._library, 'computeTT2000') \
and hasattr(self._library, 'CDF_TT2000_from_UTC_parts'):
self._library.computeTT2000 \
= self._library.CDF_TT2000_from_UTC_parts
if hasattr(self._library, 'computeTT2000'):
self._library.computeTT2000.restype = ctypes.c_longlong
self._library.computeTT2000.argtypes = \
[ctypes.c_double] *9
#Map old name to the 3.7.1+ name
if not hasattr(self._library, 'breakdownTT2000') \
and hasattr(self._library, 'CDF_TT2000_to_UTC_parts'):
self._library.breakdownTT2000 \
= self._library.CDF_TT2000_to_UTC_parts
if hasattr(self._library, 'breakdownTT2000'):
self._library.breakdownTT2000.restype = None
self._library.breakdownTT2000.argtypes = \
[ctypes.c_longlong] + [ctypes.POINTER(ctypes.c_double)] * 9
if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH'):
self._library.CDF_TT2000_to_UTC_EPOCH.restype = ctypes.c_double
self._library.CDF_TT2000_to_UTC_EPOCH.argtypes = [ctypes.c_longlong]
if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH'):
self._library.CDF_TT2000_from_UTC_EPOCH.restype = ctypes.c_longlong
self._library.CDF_TT2000_from_UTC_EPOCH.argtypes = [ctypes.c_double]
if hasattr(self._library, 'CDF_TT2000_to_UTC_EPOCH16'):
self._library.CDF_TT2000_to_UTC_EPOCH16.restype = ctypes.c_double
self._library.CDF_TT2000_to_UTC_EPOCH16.argtypes = \
[ctypes.c_longlong, ctypes.POINTER(ctypes.c_double * 2)]
if hasattr(self._library, 'CDF_TT2000_from_UTC_EPOCH16'):
self._library.CDF_TT2000_from_UTC_EPOCH16.restype = \
ctypes.c_longlong
self._library.CDF_TT2000_from_UTC_EPOCH16.argtypes = \
[ctypes.POINTER(ctypes.c_double * 2)]
#Get CDF version information
ver = ctypes.c_long(0)
rel = ctypes.c_long(0)
inc = ctypes.c_long(0)
sub = ctypes.c_char(b' ')
self.call(const.GET_, const.LIB_VERSION_, ctypes.byref(ver),
const.GET_, const.LIB_RELEASE_, ctypes.byref(rel),
const.GET_, const.LIB_INCREMENT_, ctypes.byref(inc),
const.GET_, const.LIB_subINCREMENT_, ctypes.byref(sub))
ver = ver.value
rel = rel.value
inc = inc.value
sub = sub.value
self.version = (ver, rel, inc, sub)
self._del_middle_rec_bug = ver < 3 or (ver == 3 and
(rel < 4 or
(rel == 4 and inc < 1)))
self.supports_int8 = (ver > 3 or (ver == 3 and rel >=4))
self.cdftypenames = {const.CDF_BYTE.value: 'CDF_BYTE',
const.CDF_CHAR.value: 'CDF_CHAR',
const.CDF_INT1.value: 'CDF_INT1',
const.CDF_UCHAR.value: 'CDF_UCHAR',
const.CDF_UINT1.value: 'CDF_UINT1',
const.CDF_INT2.value: 'CDF_INT2',
const.CDF_UINT2.value: 'CDF_UINT2',
const.CDF_INT4.value: 'CDF_INT4',
const.CDF_UINT4.value: 'CDF_UINT4',
const.CDF_INT8.value: 'CDF_INT8',
const.CDF_FLOAT.value: 'CDF_FLOAT',
const.CDF_REAL4.value: 'CDF_REAL4',
const.CDF_DOUBLE.value: 'CDF_DOUBLE',
const.CDF_REAL8.value: 'CDF_REAL8',
const.CDF_EPOCH.value: 'CDF_EPOCH',
const.CDF_EPOCH16.value: 'CDF_EPOCH16',
const.CDF_TIME_TT2000.value: 'CDF_TIME_TT2000',
}
self.numpytypedict = {const.CDF_BYTE.value: numpy.int8,
const.CDF_CHAR.value: numpy.int8,
const.CDF_INT1.value: numpy.int8,
const.CDF_UCHAR.value: numpy.uint8,
const.CDF_UINT1.value: numpy.uint8,
const.CDF_INT2.value: numpy.int16,
const.CDF_UINT2.value: numpy.uint16,
const.CDF_INT4.value: numpy.int32,
const.CDF_UINT4.value: numpy.uint32,
const.CDF_INT8.value: numpy.int64,
const.CDF_FLOAT.value: numpy.float32,
const.CDF_REAL4.value: numpy.float32,
const.CDF_DOUBLE.value: numpy.float64,
const.CDF_REAL8.value: numpy.float64,
const.CDF_EPOCH.value: numpy.float64,
const.CDF_EPOCH16.value:
numpy.dtype((numpy.float64, 2)),
const.CDF_TIME_TT2000.value: numpy.int64,
}
self.timetypes = [const.CDF_EPOCH.value,
const.CDF_EPOCH16.value,
const.CDF_TIME_TT2000.value]
if not self.supports_int8:
del self.cdftypenames[const.CDF_INT8.value]
del self.numpytypedict[const.CDF_INT8.value]
del self.cdftypenames[const.CDF_TIME_TT2000.value]
del self.numpytypedict[const.CDF_TIME_TT2000.value]
elif sys.platform.startswith('linux') \
and os.uname()[4].startswith('arm') \
and hasattr(self._library, 'computeTT2000') \
and self._library.computeTT2000(
2010, 1, 1, 0, 0, 0, 0, 0, 0) != 315576066184000000:
#TT2000 call failed, so probably need to type-pun
#double arguments to variadic functions.
#Calling convention for non-variadic functions with floats
#is unique, but convention for ints is same as variadic.
#So type-pun arguments to integers to force that calling
#convention.
if ctypes.sizeof(ctypes.c_longlong) != \
ctypes.sizeof(ctypes.c_double):
warnings.warn('ARM with unknown type sizes; '
'TT2000 functions will not work.')
else:
self._library.computeTT2000.argtypes = \
[ctypes.c_longlong] * 9
c_ll_p = ctypes.POINTER(ctypes.c_longlong)
if self._library.computeTT2000(
ctypes.cast(ctypes.pointer(ctypes.c_double(
2010)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
1)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
1)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents,
ctypes.cast(ctypes.pointer(ctypes.c_double(
0)), c_ll_p).contents) != 315576066184000000:
warnings.warn('ARM with unknown calling convention; '
'TT2000 functions will not work.')
self.datetime_to_tt2000 = self._datetime_to_tt2000_typepunned
v_epoch16_to_datetime = numpy.frompyfunc(
self.epoch16_to_datetime, 2, 1)
self.v_epoch16_to_datetime = \
lambda x: v_epoch16_to_datetime(x[..., 0], x[..., 1])
self.v_epoch_to_datetime = numpy.frompyfunc(
self.epoch_to_datetime, 1, 1)
self.v_tt2000_to_datetime = numpy.frompyfunc(
self.tt2000_to_datetime, 1, 1)
self.v_datetime_to_epoch = numpy.vectorize(
self.datetime_to_epoch, otypes=[numpy.float64])
v_datetime_to_epoch16 = numpy.frompyfunc(
self.datetime_to_epoch16, 1, 2)
#frompyfunc returns a TUPLE of the returned values,
#implicitly the 0th dimension. We want everything from one
#call paired, so this rolls the 0th dimension to the last
#(via the second-to-last)
def _v_datetime_to_epoch16(x):
retval = numpy.require(v_datetime_to_epoch16(x),
dtype=numpy.float64)
if len(retval.shape) > 1:
return numpy.rollaxis(
numpy.rollaxis(retval, 0, -1),
-1, -2)
else:
return retval
self.v_datetime_to_epoch16 = _v_datetime_to_epoch16
self.v_datetime_to_tt2000 = numpy.vectorize(
self.datetime_to_tt2000, otypes=[numpy.int64])
self.v_epoch_to_tt2000 = numpy.vectorize(
self.epoch_to_tt2000, otypes=[numpy.int64])
self.v_tt2000_to_epoch = numpy.vectorize(
self.tt2000_to_epoch, otypes=[numpy.float64])
v_epoch16_to_tt2000 = numpy.frompyfunc(
self.epoch16_to_tt2000, 2, 1)
self.v_epoch16_to_tt2000 = \
lambda x: v_epoch16_to_tt2000(x[..., 0], x[..., 1])
v_tt2000_to_epoch16 = numpy.frompyfunc(
self.tt2000_to_epoch16, 1, 2)
#frompyfunc returns a TUPLE of the returned values,
#implicitly the 0th dimension. We want everything from one
#call paired, so this rolls the 0th dimension to the last
#(via the second-to-last)
def _v_tt2000_to_epoch16(x):
retval = numpy.require(v_tt2000_to_epoch16(x),
dtype=numpy.float64)
if len(retval.shape) > 1:
return numpy.rollaxis(
numpy.rollaxis(retval, 0, -1),
-1, -2)
else:
return retval
self.v_tt2000_to_epoch16 = _v_tt2000_to_epoch16
if not self.supports_int8:
self.datetime_to_tt2000 = self._bad_tt2000
self.tt2000_to_datetime = self._bad_tt2000
self.v_datetime_to_tt2000 = self._bad_tt2000
self.v_tt2000_to_datetime = self._bad_tt2000
self.epoch_to_tt2000 = self._bad_tt2000
self.v_epoch_to_tt2000 = self._bad_tt2000
self.tt2000_to_epoch = self._bad_tt2000
self.v_tt2000_to_epoch = self._bad_tt2000
self.epoch_16_to_tt2000 = self._bad_tt2000
self.v_epoch16_to_tt2000 = self._bad_tt2000
self.tt2000_to_epoch16 = self._bad_tt2000
self.v_tt2000_to_epoch16 = self._bad_tt2000
#Default to V2 CDF
self.set_backward(True)
@staticmethod
def _find_lib():
failed = []
for libpath in Library._lib_paths():
try:
lib = ctypes.CDLL(libpath)
except:
failed.append(libpath)
else:
return libpath, lib
return (failed, None)
@staticmethod
def _lib_paths():
#What the library might be named
names = { 'win32': ['cdf.dll'],
'darwin': ['libcdf.dylib', 'cdf.dylib', 'libcdf.so'],
'linux2': ['libcdf.so'],
'linux': ['libcdf.so'],
}
names = names.get(sys.platform, ['libcdf.so'])
#All existing CDF-library-like paths within a directory
search_dir = lambda x: \
[os.path.join(x, fname) for fname in names
if os.path.exists(os.path.join(x, fname))]
# Only use anaconda locations...
# Defined during builds ...
if 'PREFIX' in os.environ:
if sys.platform == 'win32':
for p in search_dir(os.path.join(os.environ['PREFIX'], 'Library', 'bin')):
yield p
else:
for p in search_dir(os.path.join(os.environ['PREFIX'], 'lib')):
yield p
# defined when conda is activated ...
if 'CONDA_PREFIX' in os.environ:
if sys.platform == 'win32':
for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'Library', 'bin')):
yield p
else:
for p in search_dir(os.path.join(os.environ['CONDA_PREFIX'], 'lib')):
yield p
# Special subdirectory for anaconda unix packages on windows
if 'LIBRARY_BIN' in os.environ:
for p in search_dir(os.environ['LIBRARY_BIN']):
yield p
ctypespath = ctypes.util.find_library(
'cdf.dll' if sys.platform == 'win32' else 'cdf')
if ctypespath:
yield ctypespath
def check_status(self, status, ignore=()):
if status == const.CDF_OK or status in ignore:
return status
if status < const.CDF_WARN:
raise CDFError(status)
else:
warning = CDFWarning(status)
warning.warn()
return status
def call(self, *args, **kwargs):
if 'ignore' in kwargs:
return self.check_status(self._library.CDFlib(
*(args + (const.NULL_, ))
), kwargs['ignore'])
else:
return self.check_status(self._library.CDFlib(
*(args + (const.NULL_, ))
))
def set_backward(self, backward=True):
if self.version[0] < 3:
if not backward:
raise ValueError(
'Cannot disable backward-compatible mode for CDF version 2.')
else:
return
self._library.CDFsetFileBackward(const.BACKWARDFILEon if backward
else const.BACKWARDFILEoff)
def epoch_to_datetime(self, epoch):
yyyy = ctypes.c_long(0)
mm = ctypes.c_long(0)
dd = ctypes.c_long(0)
hh = ctypes.c_long(0)
min = ctypes.c_long(0)
sec = ctypes.c_long(0)
msec = ctypes.c_long(0)
self._library.EPOCHbreakdown(ctypes.c_double(epoch),
ctypes.byref(yyyy), ctypes.byref(mm),
ctypes.byref(dd),
ctypes.byref(hh), ctypes.byref(min),
ctypes.byref(sec), ctypes.byref(msec))
if yyyy.value <= 0:
return datetime.datetime(9999, 12, 13, 23, 59, 59, 999000)
else:
return datetime.datetime(yyyy.value, mm.value, dd.value,
hh.value, min.value, sec.value,
msec.value * 1000)
def datetime_to_epoch(self, dt):
if dt.tzinfo != None and dt.utcoffset() != None:
dt = dt - dt.utcoffset()
dt.replace(tzinfo=None)
micro = dt.microsecond % 1000
if micro >= 500 and dt.year < 9999:
dt += datetime.timedelta(0, 0, 1000)
return self._library.computeEPOCH(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second,
int(dt.microsecond / 1000))
    def epoch16_to_datetime(self, epoch0, epoch1):
        """Convert one CDF EPOCH16 (seconds, picoseconds) pair to a datetime.

        Sub-second parts are rounded to the nearest microsecond; the
        rounding may carry into the seconds (handled below). Invalid
        epochs (year <= 0) map to a maximal sentinel datetime.
        """
        yyyy = ctypes.c_long(0)
        mm = ctypes.c_long(0)
        dd = ctypes.c_long(0)
        hh = ctypes.c_long(0)
        min = ctypes.c_long(0)
        sec = ctypes.c_long(0)
        msec = ctypes.c_long(0)
        usec = ctypes.c_long(0)
        nsec = ctypes.c_long(0)
        psec = ctypes.c_long(0)
        self._library.EPOCH16breakdown((ctypes.c_double * 2)(epoch0, epoch1),
                                       ctypes.byref(yyyy), ctypes.byref(mm),
                                       ctypes.byref(dd),
                                       ctypes.byref(hh), ctypes.byref(min),
                                       ctypes.byref(sec), ctypes.byref(msec),
                                       ctypes.byref(usec), ctypes.byref(nsec),
                                       ctypes.byref(psec))
        if yyyy.value <= 0:
            #NOTE(review): day 13 looks like a typo for 31 -- confirm.
            return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999)
        #Collapse milli/micro/nano/pico into rounded integer microseconds
        micro = int(float(msec.value) * 1000 + float(usec.value) +
                    float(nsec.value) / 1000 + float(psec.value) / 1e6 + 0.5)
        if micro < 1000000:
            return datetime.datetime(yyyy.value, mm.value, dd.value,
                                     hh.value, min.value, sec.value,
                                     micro)
        else:
            #Rounding carried over into the next second(s)
            add_sec = int(micro / 1000000)
            try:
                return datetime.datetime(yyyy.value, mm.value, dd.value,
                                         hh.value, min.value, sec.value,
                                         micro - add_sec * 1000000) + \
                    datetime.timedelta(seconds=add_sec)
            except OverflowError:
                #Past the end of datetime's range; clamp to maximum
                return datetime.datetime(datetime.MAXYEAR, 12, 31,
                                         23, 59, 59,
                                         999999)
def datetime_to_epoch16(self, dt):
if dt.tzinfo != None and dt.utcoffset() != None:
dt = dt - dt.utcoffset()
dt.replace(tzinfo=None)
#Default to "illegal epoch"
epoch16 = (ctypes.c_double * 2)(-1., -1.)
if self._library.computeEPOCH16(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second,
int(dt.microsecond / 1000),
dt.microsecond % 1000, 0, 0,
epoch16):
return (-1., -1.) #Failure, so illegal epoch
return (epoch16[0], epoch16[1])
    def epoch_to_epoch16(self, epoch):
        """Convert CDF EPOCH (ms) to EPOCH16 (seconds, picoseconds).

        A scalar epoch yields a length-2 array (seconds, picoseconds).
        NOTE(review): for 1-D inputs of more than two epochs the
        reshape/rollaxis pairing below looks suspect (seconds and
        picoseconds can interleave incorrectly) -- verify with array
        input before relying on it.
        """
        e = numpy.require(epoch, numpy.float64)
        s = numpy.trunc(e / 1000.0)
        #ugly numpy stuff, probably a better way....
        res = numpy.hstack((s, (e - s * 1000.0) * 1e9))
        if len(res) <= 2:
            return res
        newshape = list(res.shape[0:-2])
        newshape.append(res.shape[-1] // 2)
        newshape.append(2)
        return numpy.rollaxis(res.reshape(newshape), -1, -2)
def epoch_to_num(self, epoch):
#date2num day 1 is 1/1/1 00UT
#epoch 1/1/1 00UT is 31622400000.0 (millisecond)
return (epoch - 31622400000.0) / (24 * 60 * 60 * 1000.0) + 1.0
def epoch16_to_epoch(self, epoch16):
e = numpy.require(epoch16, numpy.float64)
return e[..., 0] * 1000.0 + numpy.round(e[..., 1] / 1e9)
    def tt2000_to_datetime(self, tt2000):
        """Convert a TT2000 value (ns from J2000, with leap seconds) to datetime.

        A leap second (second == 60), which datetime cannot represent, is
        clamped to 59.999999 of the same minute. Invalid values (year <= 0)
        map to a maximal sentinel datetime.
        """
        #NOTE(review): fields are c_double, not c_long, presumably to match
        #the punned breakdownTT2000 prototype -- confirm against argtypes.
        yyyy = ctypes.c_double(0)
        mm = ctypes.c_double(0)
        dd = ctypes.c_double(0)
        hh = ctypes.c_double(0)
        min = ctypes.c_double(0)
        sec = ctypes.c_double(0)
        msec = ctypes.c_double(0)
        usec = ctypes.c_double(0)
        nsec = ctypes.c_double(0)
        self._library.breakdownTT2000(
            ctypes.c_longlong(tt2000),
            ctypes.byref(yyyy), ctypes.byref(mm), ctypes.byref(dd),
            ctypes.byref(hh), ctypes.byref(min), ctypes.byref(sec),
            ctypes.byref(msec), ctypes.byref(usec), ctypes.byref(nsec))
        if yyyy.value <= 0:
            #NOTE(review): day 13 looks like a typo for 31 -- confirm.
            return datetime.datetime(9999, 12, 13, 23, 59, 59, 999999)
        sec = int(sec.value)
        if sec >= 60:
            #Leap second: clamp to the last microsecond of the minute
            return datetime.datetime(
                int(yyyy.value), int(mm.value), int(dd.value),
                int(hh.value), int(min.value), 59, 999999)
        #Collapse milli/micro/nano into rounded integer microseconds
        micro = int(msec.value * 1000 + usec.value + nsec.value / 1000 + 0.5)
        if micro < 1000000:
            return datetime.datetime(
                int(yyyy.value), int(mm.value), int(dd.value),
                int(hh.value), int(min.value), sec, micro)
        else:
            #Rounding carried over into the next second(s)
            add_sec = int(micro / 1000000)
            try:
                return datetime.datetime(
                    int(yyyy.value), int(mm.value), int(dd.value),
                    int(hh.value), int(min.value), sec,
                    micro - add_sec * 1000000) + \
                    datetime.timedelta(seconds=add_sec)
            except OverflowError:
                #Past the end of datetime's range; clamp to maximum
                return datetime.datetime(datetime.MAXYEAR, 12, 31,
                                         23, 59, 59, 999999)
def datetime_to_tt2000(self, dt):
if dt.tzinfo != None and dt.utcoffset() != None:
dt = dt - dt.utcoffset()
dt = dt.replace(tzinfo=None)
if dt == datetime.datetime.max:
return -2**63
return self._library.computeTT2000(
dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second,
int(dt.microsecond / 1000),
dt.microsecond % 1000, 0)
    def _datetime_to_tt2000_typepunned(self, dt):
        """datetime-to-TT2000 for libraries needing type-punned arguments.

        NOTE(review): each field is stored in a C double and its bit
        pattern reinterpreted as a long long before the call -- apparently
        a workaround for CDF library builds whose computeTT2000 prototype
        disagrees with the declared ctypes argtypes. Confirm against the
        library version before modifying; the punning is load-bearing.
        """
        c_ll_p = ctypes.POINTER(ctypes.c_longlong)
        if dt.tzinfo != None and dt.utcoffset() != None:
            dt = dt - dt.utcoffset()
            dt = dt.replace(tzinfo=None)
        if dt == datetime.datetime.max:
            #TT2000 fill value for the unrepresentable maximum
            return -2**63
        return self._library.computeTT2000(
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.year)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.month)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.day)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.hour)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.minute)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.second)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.microsecond // 1000)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                dt.microsecond % 1000)), c_ll_p).contents,
            ctypes.cast(ctypes.pointer(ctypes.c_double(
                0)), c_ll_p).contents)
    def epoch_to_tt2000(self, epoch):
        """Convert a CDF EPOCH value to TT2000, via the C library."""
        return self._library.CDF_TT2000_from_UTC_EPOCH(epoch)

    def tt2000_to_epoch(self, tt2000):
        """Convert a TT2000 value to CDF EPOCH, via the C library."""
        return self._library.CDF_TT2000_to_UTC_EPOCH(tt2000)

    def epoch16_to_tt2000(self, epoch0, epoch1):
        """Convert one CDF EPOCH16 (seconds, picoseconds) pair to TT2000."""
        return self._library.CDF_TT2000_from_UTC_EPOCH16(
            (ctypes.c_double * 2)(epoch0, epoch1))

    def tt2000_to_epoch16(self, tt2000):
        """Convert a TT2000 value to a CDF EPOCH16 (seconds, picoseconds) pair.

        Returns (-1., -1.), the illegal epoch, if the conversion fails.
        """
        #Default to "illegal epoch" if isn't populated
        epoch16 = (ctypes.c_double * 2)(-1., -1.)
        if self._library.CDF_TT2000_to_UTC_EPOCH16(tt2000, epoch16):
            return (-1., -1.)
        return (epoch16[0], epoch16[1])
    def _bad_tt2000(*args, **kwargs):
        """Placeholder used instead of the TT2000 conversions when the loaded
        CDF library is too old to provide them; always raises.

        Deliberately has no explicit ``self``: when called as a bound
        method the instance is simply absorbed into ``*args``.
        """
        raise NotImplementedError(
            'TT2000 functions require CDF library 3.4.0 or later')
def download_library():
if sys.platform != 'win32':
raise NotImplementedError(
'CDF library install only supported on Windows')
try:
import html.parser as HTMLParser
except ImportError:
import HTMLParser
class LinkParser(HTMLParser.HTMLParser, object):
def __init__(self, *args, **kwargs):
self.links_found = []
super(LinkParser, self).__init__(*args, **kwargs)
def handle_starttag(self, tag, attrs):
if tag != 'a' or attrs[0][0] != 'href':
return
self.links_found.append(attrs[0][1])
import re
import subprocess
try:
import urllib.request as u
except ImportError:
import urllib as u
baseurl = 'https://spdf.sci.gsfc.nasa.gov/pub/software/cdf/dist/'
url = u.urlopen(baseurl)
listing = url.read()
url.close()
p = LinkParser()
p.feed(listing)
cdfdist = [l for l in p.links_found if re.match('^cdf3\d_\d(?:_\d)?/$', l)]
if not cdfdist:
raise RuntimeError(
"Couldn't find CDF distribution directory to download")
cdfdist.sort(key=lambda x: x.rstrip('/').split('_'))
cdfverbase = cdfdist[-1].rstrip('/')
instfname = cdfverbase + ('_0' if cdfverbase.count('_') == 1 else '') + \
'-setup-{0}.exe'.format(len('%x' % sys.maxsize)*4)
insturl = baseurl + cdfverbase + '/windows/' + instfname
tmpdir = tempfile.mkdtemp()
try:
fname, status = u.urlretrieve(insturl, os.path.join(tmpdir, instfname))
subprocess.check_call([fname, '/install', '/q1'], shell=False)
finally:
shutil.rmtree(tmpdir)
#Locate and load the CDF C library at import time; the module is unusable
#without it, so failure is fatal and points the user at CDF_LIB.
_libpath, _library = Library._find_lib()
if _library is None:
    raise Exception(('Cannot load CDF C library; checked {0}. '
                     'Try \'os.environ["CDF_LIB"] = library_directory\' '
                     'before import.').format(', '.join(_libpath)))
from . import const
#Module-global Library instance used by all CDF/Var operations below.
lib = Library(_libpath, _library)
class CDFException(Exception):
    """Base class for errors and warnings reported by the CDF C library.

    Looks up the human-readable status text for the numeric status code
    at construction time; falls back to a generic message if that
    lookup itself fails.
    """
    def __init__(self, status):
        """Capture ``status`` and fetch its description from the library."""
        self.status = status
        self.string = 'CDF error ' + repr(status) + ', unable to get details.'
        message = ctypes.create_string_buffer(const.CDF_STATUSTEXT_LEN + 1)
        try:
            retval = lib._library.CDFlib(const.SELECT_, const.CDF_STATUS_,
                                         ctypes.c_long(status),
                                         const.GET_, const.STATUS_TEXT_, message,
                                         const.NULL_)
            if retval == const.CDF_OK:
                if isinstance(message.value, str):
                    self.string = message.value
                elif isinstance(message.value, bytes):
                    self.string = message.value.decode()
        except Exception:
            #BUG FIX: was a bare except, which also swallowed SystemExit
            #and KeyboardInterrupt. The lookup is best-effort; keep the
            #generic message on any ordinary failure.
            pass

    def __str__(self):
        """Return the CDF library's message for this status code."""
        return self.string
class CDFError(CDFException):
    """Error-level status (below CDF_WARN) raised from the CDF library."""
    pass
class CDFWarning(CDFException, UserWarning):
    """Warning-level status reported by the CDF library."""
    def warn(self, level=4):
        """Issue this warning through the warnings machinery.

        level is the stacklevel, chosen so the warning points at user
        code rather than at pycdf internals.
        """
        warnings.warn(self, self.__class__, level)
class EpochError(Exception):
    """Raised for errors in the epoch conversion routines."""
    pass
def _compress(obj, comptype=None, param=None):
    """Get or set the compression of a CDF file or zVariable.

    If comptype is given (optionally with param), that compression is
    applied first. Always returns the current (comptype, param) pair,
    mapped back to constants from :mod:`const`. Raises CDFError for
    unrecognized compression types or parameters, ValueError for an
    unsupported ``obj``.
    """
    if isinstance(obj, CDF):
        COMPRESSION_ = const.CDF_COMPRESSION_
    elif isinstance(obj, Var):
        COMPRESSION_ = const.zVAR_COMPRESSION_
    else:
        raise ValueError('Must specify a CDF or Var type.')

    #Legal parameter values per compression type; first entry is the default
    validparams = {const.NO_COMPRESSION.value: [ctypes.c_long(0)],
                   const.RLE_COMPRESSION.value: [const.RLE_OF_ZEROs],
                   const.HUFF_COMPRESSION.value:
                   [const.OPTIMAL_ENCODING_TREES],
                   const.AHUFF_COMPRESSION.value:
                   [const.OPTIMAL_ENCODING_TREES],
                   const.GZIP_COMPRESSION.value: [ctypes.c_long(5),
                                                  ctypes.c_long(1),
                                                  ctypes.c_long(2),
                                                  ctypes.c_long(3),
                                                  ctypes.c_long(4),
                                                  ctypes.c_long(6),
                                                  ctypes.c_long(7),
                                                  ctypes.c_long(8),
                                                  ctypes.c_long(9),
                                                  ],
                   }
    comptypes = [const.NO_COMPRESSION, const.RLE_COMPRESSION,
                 const.HUFF_COMPRESSION, const.AHUFF_COMPRESSION,
                 const.GZIP_COMPRESSION]
    comptypevalues = [i.value for i in comptypes]
    if comptype != None:
        if not hasattr(comptype, 'value'):
            comptype = ctypes.c_long(comptype)
        if param is None:
            if not comptype.value in validparams:
                raise CDFError(const.BAD_COMPRESSION)
            param = validparams[comptype.value][0] #default parameter
        paramlist = (ctypes.c_long * 1)(param)
        obj._call(const.PUT_, COMPRESSION_,
                  comptype, paramlist)
    #Read back whatever is now in effect
    params = (ctypes.c_long *
              const.CDF_MAX_PARMS)(*([0] * const.CDF_MAX_PARMS))
    comptype = ctypes.c_long(0)
    percent = ctypes.c_long(0)
    obj._call(const.GET_, COMPRESSION_,
              ctypes.byref(comptype), ctypes.byref(params),
              ctypes.byref(percent))
    param = params[0]
    if not comptype.value in comptypevalues:
        raise CDFError(const.BAD_COMPRESSION)
    validparamvalues = [i.value for i in validparams[comptype.value]]
    if not param in validparamvalues:
        raise CDFError(const.BAD_COMPRESSION_PARM)
    comptype = comptypes[comptypevalues.index(comptype.value)]
    if comptype in (const.RLE_COMPRESSION, const.HUFF_COMPRESSION,
                    const.AHUFF_COMPRESSION):
        #Return the const object rather than the bare number
        param = validparams[comptype.value][validparamvalues.index(param)]
    return (comptype, param)
class CDF(MutableMapping):
    """dict-like interface to a single CDF file; keys are zVariable names.

    Opens an existing file by default; ``create``/``masterpath`` switch
    to creation mode. Close explicitly with :meth:`close` or use the
    object as a context manager.
    """
    def __init__(self, pathname, masterpath=None, create=None, readonly=None):
        """Open or create the CDF at ``pathname``.

        masterpath: copy this existing CDF as a skeleton (implies create).
        create: force creation of a new, empty CDF.
        readonly: open read-only (the default when simply opening).
        """
        if masterpath is not None: #Looks like we want to create
            if create is False:
                raise ValueError('Cannot specify a master CDF without creating a CDF')
            if readonly is True:
                raise ValueError('Cannot create a CDF in readonly mode')
        if create and readonly:
            raise ValueError('Cannot create a CDF in readonly mode')
        try:
            self.pathname = pathname.encode()
        except AttributeError:
            raise ValueError(
                'pathname must be string-like: {0}'.format(pathname))
        self._handle = ctypes.c_void_p(None)
        self._opened = False
        if masterpath is None and not create:
            self._open(True if readonly is None else readonly)
        elif masterpath:
            self._from_master(masterpath.encode())
        else:
            self._create()
        #zMODE 2: present everything as zVariables
        lib.call(const.SELECT_, const.CDF_zMODE_, ctypes.c_long(2))
        self._attrlistref = weakref.ref(gAttrList(self))
        #True if this file must stay CDF-2 compatible (no INT8/TT2000/EPOCH16)
        self.backward = self.version()[0] < 3
        self._var_nums = {} #Cache of variable name -> number
        self._attr_info = {} #Cache of attribute name -> (number, is_global)

    def __del__(self):
        """Close the file when the object is garbage-collected."""
        if self._opened:
            self.close()

    def __delitem__(self, name):
        """Delete zVariable ``name`` from the file."""
        self[name]._delete()

    def __enter__(self):
        """Context-manager entry; returns self."""
        return self

    def __exit__(self, type, value, traceback):
        """Context-manager exit; closes the file."""
        self.close()

    def __getitem__(self, name):
        """Return the zVariable ``name`` (by name or number)."""
        try:
            return Var(self, name)
        except CDFException as e:
            raise KeyError('{0}: {1}'.format(name, e))

    def __setitem__(self, name, data):
        """Write ``data`` to zVariable ``name``, creating it if necessary."""
        if isinstance(data, Var):
            self.clone(data, name)
        elif name in self:
            self[name][...] = data
            if hasattr(data, 'attrs'):
                self[name].attrs.clone(data.attrs)
        else:
            self.new(name, data)

    def __iter__(self, current = 0):
        """Iterate over variable names; send() a name to jump past it."""
        while current < self.__len__():
            name = self[current].name()
            value = (yield name)
            if value is None:
                current += 1
            else:
                #Caller sent a variable name: resume iterating after it
                current = self[value]._num()
                current += 1

    def __len__(self):
        """Number of zVariables in the CDF."""
        count = ctypes.c_long(0)
        self._call(const.GET_, const.CDF_NUMzVARS_, ctypes.byref(count))
        return count.value

    def __contains__(self, key):
        """True if a zVariable named ``key`` exists."""
        try:
            foo = self[key]
            return True
        except KeyError as e:
            #Distinguish "no such variable" from other lookup failures
            expected = str(key) + \
                ": NO_SUCH_VAR: Named variable not found in this CDF."
            if expected in e.args:
                return False
            raise

    def __repr__(self):
        return '<CDF:\n' + str(self) + '\n>'

    def __str__(self):
        """One line per variable: name, type, and shape."""
        if self._opened:
            return '\n'.join([key + ': ' + str(value)
                              for (key, value) in sorted(self.items())])
            #can get away with this sort because the second element of each
            #tuple is never compared: variable names are unique
        else:
            if isinstance(self.pathname, str):
                return 'Closed CDF {0}'.format(self.pathname)
            else:
                return 'Closed CDF {0}'.format(self.pathname.decode('ascii'))

    def _open(self, readonly=True):
        """Open the existing file on disk, optionally read-only."""
        lib.call(const.OPEN_, const.CDF_, self.pathname, ctypes.byref(self._handle))
        self._opened = True
        if readonly:
            self.readonly(readonly)

    def _create(self):
        """Create a brand-new, empty CDF on disk."""
        lib.call(const.CREATE_, const.CDF_, self.pathname, ctypes.c_long(0),
                 (ctypes.c_long * 1)(0), ctypes.byref(self._handle))
        self._opened = True

    def _from_master(self, master_path):
        """Create this CDF as an on-disk copy of the master at ``master_path``."""
        if os.path.exists(self.pathname):
            raise CDFError(const.CDF_EXISTS)
        shutil.copy2(master_path, self.pathname)
        self._open(False)

    def _call(self, *args, **kwargs):
        """Select this CDF in the library, then run a CDFlib operation."""
        return lib.call(const.SELECT_, const.CDF_, self._handle,
                        *args, **kwargs)

    def clone(self, zVar, name=None, data=True):
        """Clone zVariable ``zVar`` (possibly from another CDF) into this one.

        NOTE(review): returns the *source* variable, not the new copy --
        looks surprising; confirm intended before relying on the return.
        """
        if name is None:
            name = zVar.name()
        if name in self:
            del self[name]
        self.new(name, type=zVar.type(), recVary=zVar.rv(),
                 dimVarys=zVar.dv(), dims=zVar._dim_sizes(),
                 n_elements=zVar._nelems())
        self[name].compress(*zVar.compress())
        self[name].attrs.clone(zVar.attrs)
        if data:
            #Copy raw values so epoch/char types round-trip bit-exactly
            r = zVar._raw
            zVar._raw = True
            self.raw_var(name)[...] = zVar[...]
            zVar._raw = r
        return zVar

    def col_major(self, new_col=None):
        """Get (and optionally set) column-major storage; returns bool."""
        if new_col != None:
            new_maj = const.COLUMN_MAJOR if new_col else const.ROW_MAJOR
            self._call(const.PUT_, const.CDF_MAJORITY_, new_maj)
        maj = ctypes.c_long(0)
        self._call(const.GET_, const.CDF_MAJORITY_, ctypes.byref(maj))
        if not maj.value in (const.ROW_MAJOR.value, const.COLUMN_MAJOR.value):
            raise CDFError(const.BAD_MAJORITY)
        return maj.value == const.COLUMN_MAJOR.value

    def readonly(self, ro=None):
        """Get (and optionally set) read-only mode; returns current state."""
        if ro == True:
            self._call(const.SELECT_, const.CDF_READONLY_MODE_,
                       const.READONLYon)
        elif ro == False:
            self._call(const.SELECT_, const.CDF_READONLY_MODE_,
                       const.READONLYoff)
        mode = ctypes.c_long(0)
        self._call(const.CONFIRM_, const.CDF_READONLY_MODE_,
                   ctypes.byref(mode))
        if mode.value == const.READONLYon.value:
            return True
        elif mode.value == const.READONLYoff.value:
            return False
        else:
            raise CDFError(const.BAD_READONLY_MODE.value)

    def checksum(self, new_val=None):
        """Get (and optionally set) whether an MD5 checksum is stored."""
        if new_val != None:
            self._call(const.PUT_, const.CDF_CHECKSUM_,
                       const.MD5_CHECKSUM if new_val else const.NO_CHECKSUM)
        chk = ctypes.c_long(0)
        self._call(const.GET_, const.CDF_CHECKSUM_, ctypes.byref(chk))
        if not chk.value in (const.MD5_CHECKSUM.value,
                             const.NO_CHECKSUM.value):
            raise CDFError(const.BAD_CHECKSUM)
        return chk.value == const.MD5_CHECKSUM.value

    def close(self):
        """Close the file; further access through this object is an error."""
        self._call(const.CLOSE_, const.CDF_)
        self._opened = False

    def compress(self, comptype=None, param=None):
        """Get (and optionally set) whole-file compression."""
        return _compress(self, comptype, param)

    def new(self, name, data=None, type=None, recVary=True, dimVarys=None,
            dims=None, n_elements=None, compress=None, compress_param=None):
        """Create a new zVariable, optionally writing initial ``data``.

        Shape/type/element count are guessed from ``data`` where not
        given explicitly. Raises ValueError for types incompatible with
        a backward-compatible CDF or with the loaded library version.
        Returns the new :class:`Var`.
        """
        if type in (const.CDF_EPOCH16, const.CDF_INT8, const.CDF_TIME_TT2000) \
                and self.backward:
            raise ValueError('Cannot use EPOCH16, INT8, or TIME_TT2000 '
                             'in backward-compatible CDF')
        if not lib.supports_int8 and \
                type in (const.CDF_INT8, const.CDF_TIME_TT2000):
            raise ValueError('INT8 and TIME_TT2000 require CDF library 3.4.0')
        if data is None:
            if type is None:
                raise ValueError('Must provide either data or a CDF type.')
            if dims is None:
                dims = []
            if n_elements is None:
                n_elements = 1
        else:
            #Infer anything unspecified from the data itself
            (guess_dims, guess_types, guess_elements) = _Hyperslice.types(data)
            if dims is None:
                if recVary:
                    if guess_dims == ():
                        raise ValueError(
                            'Record-varying data cannot be scalar. '
                            'Specify NRV with CDF.new() or put data in array.')
                    dims = guess_dims[1:]
                else:
                    dims = guess_dims
            if type is None:
                type = guess_types[0]
                if type == const.CDF_EPOCH16.value and self.backward:
                    type = const.CDF_EPOCH
            if n_elements is None:
                n_elements = guess_elements
        if dimVarys is None:
            dimVarys = [True for i in dims]
        recVary = const.VARY if recVary else const.NOVARY
        dimVarys = [const.VARY if dimVary else const.NOVARY
                    for dimVary in dimVarys]
        if not hasattr(type, 'value'):
            type = ctypes.c_long(type)
        if type.value == const.CDF_INT8.value and not lib.supports_int8:
            raise ValueError(
                '64-bit integer support require CDF library 3.4.0')
        if type.value in (const.CDF_EPOCH16.value, const.CDF_INT8.value,
                          const.CDF_TIME_TT2000.value) \
                and self.backward:
            raise ValueError('Data requires EPOCH16, INT8, or TIME_TT2000; '
                             'incompatible with backward-compatible CDF')
        new_var = Var(self, name, type, n_elements, dims, recVary, dimVarys)
        if compress != None:
            new_var.compress(compress, compress_param)
        if data is not None:
            new_var[...] = data
            if hasattr(data, 'attrs'):
                new_var.attrs.clone(data.attrs)
        return new_var

    def raw_var(self, name):
        """Return a 'raw' view of variable ``name`` (no epoch/str conversion)."""
        v = self[name]
        v._raw = True
        return v

    def save(self):
        """Flush pending changes to disk without closing."""
        self._call(const.SAVE_, const.CDF_)

    def copy(self):
        """Return an in-memory copy of all data and attributes."""
        return CDFCopy(self)

    def version(self):
        """Return (version, release, increment) of the library that wrote this file."""
        ver = ctypes.c_long(0)
        rel = ctypes.c_long(0)
        inc = ctypes.c_long(0)
        self._call(const.GET_, const.CDF_VERSION_, ctypes.byref(ver),
                   const.GET_, const.CDF_RELEASE_, ctypes.byref(rel),
                   const.GET_, const.CDF_INCREMENT_, ctypes.byref(inc))
        return (ver.value, rel.value, inc.value)

    def _get_attrs(self):
        """Return the attribute list, recreating it if the weakref died."""
        al = self._attrlistref()
        if al is None:
            al = gAttrList(self)
        self._attrlistref = weakref.ref(al)
        return al

    def _set_attrs(self, value):
        """Replace all global attributes with copies of ``value``."""
        self.attrs.clone(value)

    attrs = property(
        _get_attrs, _set_attrs, None,
        """Global attributes for this CDF in a dict-like format.
        See :class:`gAttrList` for details.
        """)

    def var_num(self, varname):
        """Return (and cache) the variable number for ``varname``."""
        num = self._var_nums.get(varname, None)
        if num is None:
            varNum = ctypes.c_long(0)
            self._call(const.GET_, const.zVAR_NUMBER_, varname,
                       ctypes.byref(varNum))
            num = varNum.value
            self._var_nums[varname] = num
        return num

    def attr_num(self, attrname):
        """Return (and cache) (number, is_global) for attribute ``attrname``."""
        res = self._attr_info.get(attrname, None)
        if res is None:
            attrNum = ctypes.c_long(0)
            self._call(const.GET_, const.ATTR_NUMBER_, attrname,
                       ctypes.byref(attrNum))
            scope = ctypes.c_long(0)
            self._call(const.SELECT_, const.ATTR_, attrNum,
                       const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope))
            if scope.value == const.GLOBAL_SCOPE.value:
                scope = True
            elif scope.value == const.VARIABLE_SCOPE.value:
                scope = False
            else:
                raise CDFError(const.BAD_SCOPE)
            res = (attrNum.value, scope)
            self._attr_info[attrname] = res
        return res

    def clear_attr_from_cache(self, attrname):
        """Invalidate cached attribute numbers at/above ``attrname``'s
        (deleting an attribute shifts the numbers of those after it)."""
        num, scope = self.attr_num(attrname)
        for a, n in list(self._attr_info.items()):
            if n[0] >= num:
                del self._attr_info[a]

    def clear_from_cache(self, varname):
        """Invalidate cached variable numbers at/above ``varname``'s."""
        num = self.var_num(varname)
        for v, n in list(self._var_nums.items()):
            if n >= num:
                del self._var_nums[v]

    def add_attr_to_cache(self, attrname, num, scope):
        """Cache (number, is_global) for an attribute."""
        self._attr_info[attrname] = (num, scope)

    def add_to_cache(self, varname, num):
        """Cache the variable number for ``varname``."""
        self._var_nums[varname] = num
class Var(MutableSequence):
    """A single zVariable in a CDF, exposed as a sequence of records.

    Subscripting reads or writes hyperslices of the file; values are
    converted to/from datetimes and unicode unless ``_raw`` is set.
    """
    def __init__(self, cdf_file, var_name, *args):
        """Bind to an existing variable by name/number, or (with extra
        creation args) create a new one in ``cdf_file``."""
        self.cdf_file = cdf_file
        self._name = None
        self._type = None #Cached CDF type of this variable
        self._raw = False #True disables conversions on read/write
        if len(args) == 0:
            self._get(var_name)
        else:
            self._create(var_name, *args)
        self._attrlistref = weakref.ref(zAttrList(self))

    def __getitem__(self, key):
        """Read and return the hyperslice described by ``key``."""
        hslice = _Hyperslice(self, key)
        if hslice.rv:
            #Reading a single explicit record from an empty RV variable
            if hslice.dimsizes[0] == 0 and hslice.degen[0] and \
               hslice.starts[0] == 0:
                raise IndexError('record index out of range')
        #For NRV, again hslice will assume 0th record exists since we might
        #want to write. So ANY degenerate dim other than the glued-on 0th
        #suggests an explicit index that should fail. None degenerate suggests
        #make an empty array.
        #Note this is pulling a lot of hyperslice stuff into getitem!
        elif hslice.dimsizes[0] == 0:
            if len(hslice.degen) > 1 and max(hslice.degen[1:]):
                raise IndexError('record index out of range')
            else:
                #The zero-length dimension is degenerate so it gets chopped,
                #and you can't have a zero-length numpy array that still
                #has data in other dimensions, so zero all the counts.
                hslice.counts[...] = 0
                if len(hslice.counts) == 1:
                    hslice.degen[0] = False
        result = hslice.create_array()
        if hslice.counts[0] != 0:
            hslice.select()
            lib.call(const.GET_, const.zVAR_HYPERDATA_,
                     result.ctypes.data_as(ctypes.c_void_p))
        return hslice.convert_input_array(result)

    def __delitem__(self, key):
        """Delete the entire records addressed by ``key``."""
        if not self.rv():
            raise TypeError('Cannot delete records from non-record-varying '
                            'variable.')
        hslice = _Hyperslice(self, key)
        if hslice.dims > 1 and (hslice.counts[1:] != hslice.dimsizes[1:]).any():
            raise TypeError('Can only delete entire records.')
        if hslice.counts[0] == 0:
            return
        start = hslice.starts[0]
        count = hslice.counts[0]
        interval = hslice.intervals[0]
        dimsize = hslice.dimsizes[0]
        self._call()
        dangerous_delete = False
        if lib._del_middle_rec_bug and \
           (interval != 1 or (start != 0 and start + count < dimsize)):
            #Deleting from the middle of a variable with a single index
            #entry trips a known library bug; detect that case.
            entries = ctypes.c_long(0)
            lib.call(const.GET_, const.zVAR_nINDEXENTRIES_,
                     ctypes.byref(entries))
            dangerous_delete = (entries.value == 1)
        if dangerous_delete:
            #Work around the bug: rewrite the surviving records to the
            #front, then chop the now-duplicated tail.
            data = self[...]
            data = numpy.delete(
                data,
                numpy.arange(start, start + count * interval, interval),
                0)
            self[0:dimsize - count] = data
            first_rec = dimsize - count
            last_rec = dimsize - 1
            lib.call(const.DELETE_, const.zVAR_RECORDS_,
                     ctypes.c_long(first_rec), ctypes.c_long(last_rec))
        elif interval == 1:
            #Contiguous records go in a single library call
            first_rec = ctypes.c_long(start)
            last_rec = ctypes.c_long(start + count - 1)
            lib.call(const.DELETE_, const.zVAR_RECORDS_,
                     first_rec, last_rec)
        else:
            self._call()
            #Delete back-to-front so earlier record numbers stay valid
            for recno in range(start + (count - 1) * interval,
                               start - 1, -1 * interval):
                lib.call(const.DELETE_, const.zVAR_RECORDS_,
                         ctypes.c_long(recno), ctypes.c_long(recno))

    def __setitem__(self, key, data):
        """Write ``data`` to the hyperslice described by ``key``.

        The record count stretches/shrinks to fit ``data`` where the
        slice allows; records left over after a shrink are deleted.
        """
        hslice = _Hyperslice(self, key)
        n_recs = hslice.counts[0]
        hslice.expand(data)
        cdf_type = self.type()
        #Convert datetimes to the on-disk epoch representation (skipped
        #in raw mode; AttributeError means data was already numeric)
        if cdf_type == const.CDF_EPOCH16.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_epoch16(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.float64)
        elif cdf_type == const.CDF_EPOCH.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_epoch(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.float64)
        elif cdf_type == const.CDF_TIME_TT2000.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_tt2000(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.int64)
        else:
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=self._np_type())
        if cdf_type == const.CDF_EPOCH16.value:
            #EPOCH16 carries a trailing length-2 dim not part of the slice
            datashape = data.shape[:-1]
        else:
            datashape = data.shape
        if datashape != tuple(hslice.expected_dims()):
            raise ValueError('attempt to assign data of dimensions ' +
                             str(datashape) + ' to slice of dimensions ' +
                             str(tuple(hslice.expected_dims())))
        data = hslice.convert_output_array(data)
        #Slice grew to fit data: save the tail records to rewrite after
        if hslice.counts[0] > n_recs and \
           hslice.starts[0] + n_recs < hslice.dimsizes[0]:
            saved_data = self[hslice.starts[0] + n_recs:]
        if hslice.counts[0] > 0:
            hslice.select()
            lib.call(const.PUT_, const.zVAR_HYPERDATA_,
                     data.ctypes.data_as(ctypes.c_void_p))
        if hslice.counts[0] < n_recs:
            #Slice shrank to fit data: delete the now-stale tail records
            first_rec = hslice.starts[0] + hslice.counts[0]
            last_rec = hslice.dimsizes[0] - 1
            lib.call(const.DELETE_, const.zVAR_RECORDS_,
                     ctypes.c_long(first_rec), ctypes.c_long(last_rec))
        elif hslice.counts[0] > n_recs and \
             hslice.starts[0] + n_recs < hslice.dimsizes[0]:
            #Push the saved tail out past the newly-written data
            self[hslice.starts[0] + hslice.counts[0]:] = saved_data

    def extend(self, data):
        """Append ``data`` as new records at the end of the variable."""
        self[len(self):] = data

    def insert(self, index, data):
        """Insert a single record ``data`` before record ``index``."""
        self[index:index] = [data]

    def _create(self, var_name, datatype, n_elements = 1, dims = (),
                recVary = const.VARY, dimVarys = None):
        """Create a new zVariable in the file (internal; use CDF.new)."""
        dim_array = (ctypes.c_long * len(dims))(*dims)
        enc_name = var_name.encode('ascii')
        if dimVarys is None:
            #Everything varies by default; ctypes array must be len >= 1
            dim_vary_array = (ctypes.c_long * (len(dims) if len(dims) > 0 else 1))(const.VARY)
        else:
            dim_vary_array = (ctypes.c_long * len(dims))(*dimVarys)
        varNum = ctypes.c_long(0)
        self.cdf_file._call(const.CREATE_, const.zVAR_, enc_name, datatype,
                            ctypes.c_long(n_elements), ctypes.c_long(len(dims)), dim_array,
                            recVary, dim_vary_array, ctypes.byref(varNum))
        self._name = enc_name
        self.cdf_file.add_to_cache(enc_name, varNum.value)

    def _delete(self):
        """Delete this variable from the file and invalidate the cache."""
        self._call(const.DELETE_, const.zVAR_)
        self.cdf_file.clear_from_cache(self._name)
        self._name = None

    def _get(self, var_name):
        """Bind this object to an existing variable, by name or number."""
        if isinstance(var_name, str_classes):
            try:
                enc_name = var_name.encode('ascii').rstrip()
            except AttributeError:
                enc_name = var_name.rstrip() #already bytes
            varNum = ctypes.c_long(0)
            self.cdf_file._call(const.GET_, const.zVAR_NUMBER_, enc_name, ctypes.byref(varNum))
            self._name = enc_name
            self.cdf_file.add_to_cache(enc_name, varNum.value)
        else: #Looking up by number
            name = ctypes.create_string_buffer(const.CDF_VAR_NAME_LEN256+1)
            self.cdf_file._call(const.SELECT_, const.zVAR_, ctypes.c_long(var_name),
                                const.GET_, const.zVAR_NAME_, name)
            self._name = name.value.rstrip()
            self.cdf_file.add_to_cache(self._name, var_name)

    def _num(self):
        """Return this variable's number in the CDF."""
        return self.cdf_file.var_num(self._name)

    def __len__(self):
        """Number of records (maximum written record + 1)."""
        count = ctypes.c_long(0)
        self._call(const.GET_, const.zVAR_MAXREC_, ctypes.byref(count))
        return (count.value + 1)

    def __repr__(self):
        return '<Var:\n' + str(self) + '\n>'

    def __str__(self):
        """Short description: CDF type, dimensions, NRV flag."""
        if self.cdf_file._opened:
            cdftype = self.type()
            chartypes = (const.CDF_CHAR.value, const.CDF_UCHAR.value)
            rv = self.rv()
            typestr = lib.cdftypenames[cdftype] + \
                ('*' + str(self._nelems()) if cdftype in chartypes else '' )
            if rv:
                sizestr = str([len(self)] + self._dim_sizes())
            else:
                sizestr = str(self._dim_sizes())
            return typestr + ' ' + sizestr + ('' if rv else ' NRV')
        else:
            if isinstance(self._name, str):
                return 'zVar "{0}" in closed CDF {1}'.format(
                    self._name, self.cdf_file.pathname)
            else:
                return 'zVar "{0}" in closed CDF {1}'.format(
                    self._name.decode('ascii'),
                    self.cdf_file.pathname.decode('ascii'))

    def _n_dims(self):
        """Number of (non-record) dimensions of this variable."""
        n_dims = ctypes.c_long(0)
        self._call(const.GET_, const.zVAR_NUMDIMS_, ctypes.byref(n_dims))
        return n_dims.value

    def _dim_sizes(self):
        """Sizes of the non-record dimensions, as a list."""
        sizes = (ctypes.c_long * const.CDF_MAX_DIMS)(0)
        self._call(const.GET_, const.zVAR_DIMSIZES_, sizes)
        sizes = sizes[0:self._n_dims()]
        return sizes

    def rv(self, new_rv=None):
        """Get (and optionally set) whether this variable is record-varying."""
        if new_rv != None:
            self._call(const.PUT_, const.zVAR_RECVARY_,
                       const.VARY if new_rv else const.NOVARY)
        vary = ctypes.c_long(0)
        self._call(const.GET_, const.zVAR_RECVARY_, ctypes.byref(vary))
        return vary.value != const.NOVARY.value

    def dv(self, new_dv=None):
        """Get (and optionally set) per-dimension variance, a list of bool."""
        ndims = self._n_dims()
        if new_dv != None:
            if len(new_dv) != ndims:
                raise ValueError('Must specify variance for ' +
                                 str(ndims) + 'dimensions.')
            varies = (ctypes.c_long * ndims)(
                *[const.VARY if dv else const.NOVARY for dv in new_dv])
            self._call(const.PUT_, const.zVAR_DIMVARYS_,
                       varies)
        if ndims == 0:
            return []
        varies = (ctypes.c_long * const.CDF_MAX_DIMS)()
        self._call(const.GET_, const.zVAR_DIMVARYS_, varies)
        return [dv != const.NOVARY.value for dv in varies[0:ndims]]

    def _call(self, *args, **kwargs):
        """Select this CDF and variable, then run a CDFlib operation."""
        return self.cdf_file._call(
            const.SELECT_, const.zVAR_,
            ctypes.c_long(self.cdf_file.var_num(self._name)), *args, **kwargs)

    def _np_type(self):
        """The numpy dtype matching this variable's raw CDF type."""
        cdftype = self.type()
        if cdftype == const.CDF_CHAR.value or cdftype == const.CDF_UCHAR.value:
            return numpy.dtype('S' + str(self._nelems()))
        try:
            return lib.numpytypedict[cdftype]
        except KeyError:
            raise CDFError(const.BAD_DATA_TYPE)

    def type(self, new_type=None):
        """Get (and optionally set) the CDF type of this variable."""
        if new_type != None:
            if not hasattr(new_type, 'value'):
                new_type = ctypes.c_long(new_type)
            n_elements = ctypes.c_long(self._nelems())
            self._call(const.PUT_, const.zVAR_DATASPEC_,
                       new_type, n_elements)
            self._type = None #invalidate cached type
        if self._type is None:
            cdftype = ctypes.c_long(0)
            self._call(const.GET_, const.zVAR_DATATYPE_,
                       ctypes.byref(cdftype))
            self._type = cdftype.value
        return self._type

    def _nelems(self):
        """Number of elements per value (string length for CHAR types)."""
        nelems = ctypes.c_long(0)
        self._call(const.GET_, const.zVAR_NUMELEMS_, ctypes.byref(nelems))
        return nelems.value

    def name(self):
        """This variable's name, always as str."""
        if isinstance(self._name, str):
            return self._name
        elif isinstance(self._name, bytes):
            return self._name.decode()

    def compress(self, comptype=None, param=None):
        """Get (and optionally set) this variable's compression."""
        return _compress(self, comptype, param)

    def copy(self):
        """Return an in-memory copy of this variable's data and attributes."""
        return VarCopy(self)

    def rename(self, new_name):
        """Rename this variable, keeping the number cache coherent."""
        try:
            enc_name = new_name.encode('ascii')
        except AttributeError:
            enc_name = new_name
        if len(enc_name) > const.CDF_VAR_NAME_LEN256:
            raise CDFError(const.BAD_VAR_NAME)
        self._call(const.PUT_, const.zVAR_NAME_, enc_name)
        self.cdf_file.add_to_cache(
            enc_name,
            self.cdf_file.var_num(self._name)) #Still in cache
        del self.cdf_file._var_nums[self._name]
        self._name = enc_name

    @property
    def shape(self):
        """Shape of this variable, records first (RV) or dims only (NRV)."""
        if self.rv():
            return tuple([len(self)] + self._dim_sizes())
        else:
            return tuple(self._dim_sizes())

    @property
    def dtype(self):
        """numpy dtype returned by reads: unicode for CHAR, object
        (datetime) for epoch types -- unless reading raw."""
        cdftype = self.type()
        if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \
           str is not bytes and not self._raw:
            return numpy.dtype('U' + str(self._nelems()))
        if cdftype in (const.CDF_EPOCH.value, const.CDF_EPOCH16.value,
                       const.CDF_TIME_TT2000.value) and not self._raw:
            return numpy.dtype('O')
        return self._np_type()

    def _get_attrs(self):
        """Return the attribute list, recreating it if the weakref died."""
        al = self._attrlistref()
        if al is None:
            al = zAttrList(self)
        self._attrlistref = weakref.ref(al)
        return al

    def _set_attrs(self, value):
        """Replace all attributes of this variable with copies of ``value``."""
        self.attrs.clone(value)

    attrs = property(
        _get_attrs, _set_attrs, None,
        """zAttributes for this zVariable in a dict-like format.
        See :class:`zAttrList` for details.
        """)
class _Hyperslice(object):
def __init__(self, zvar, key):
self.zvar = zvar
self.rv = self.zvar.rv()
#dim of records, + 1 record dim (NRV always is record 0)
self.dims = zvar._n_dims() + 1
self.dimsizes = [len(zvar)] + \
zvar._dim_sizes()
self.starts = [0] * self.dims
self.counts = numpy.empty((self.dims,), dtype=numpy.int32)
self.counts.fill(1)
self.intervals = [1] * self.dims
self.degen = numpy.zeros(self.dims, dtype=numpy.bool)
self.rev = numpy.zeros(self.dims, dtype=numpy.bool)
#key is:
#1. a single value (integer or slice object) if called 1D
#2. a tuple (of integers and/or slice objects) if called nD
#3. Each item is either a single value (degenerate dim)
# or a slice object.
if not hasattr(key, '__len__'): #Not a container object, pack in tuple
key = (key, )
if not self.rv:
key = (0, ) + key #NRV, so always get 0th record (degenerate)
key = self.expand_ellipsis(key, self.dims)
if self.rv: #special-cases for RV variables
if len(key) == 1: #get all data for this record(s)
key = self.expand_ellipsis(key + (Ellipsis, ), self.dims)
elif len(key) == self.dims - 1: #get same slice from each record
key = (slice(None, None, None), ) + key
if len(key) == self.dims:
self.expanded_key = key
for i in range(self.dims):
idx = key[i]
if hasattr(idx, 'start'): #slice
(self.starts[i], self.counts[i],
self.intervals[i], self.rev[i]) = \
self.convert_range(idx.start, idx.stop,
idx.step, self.dimsizes[i])
else: #Single degenerate value
if idx < 0:
idx += self.dimsizes[i]
if idx != 0 and (idx >= self.dimsizes[i] or idx < 0):
raise IndexError('list index out of range')
self.starts[i] = idx
self.degen[i] = True
else:
raise IndexError('Slice does not match dimensions for zVar ' +
str(zvar._name))
self.column = zvar.cdf_file.col_major()
def expected_dims(self, data=None):
return [self.counts[i] for i in range(self.dims) if not self.degen[i]]
def expand(self, data):
rec_slice = self.expanded_key[0]
if not self.rv or isinstance(data, str_classes) or self.degen[0] or \
not hasattr(rec_slice, 'stop'):
return
if len(data) < self.counts[0]: #Truncate to fit data
if rec_slice.stop is None and rec_slice.step in (None, 1):
self.counts[0] = len(data)
elif len(data) > self.counts[0]: #Expand to fit data
if rec_slice.step in (None, 1):
self.counts[0] = len(data)
    def create_array(self):
        """Return an empty numpy array shaped to receive this slice's data.

        Degenerate dimensions are dropped; the array is guaranteed
        C-contiguous, aligned, and writeable for the ctypes hand-off.
        """
        counts = self.counts
        degen = self.degen
        if self.column:
            #Column-major file: reorder() (defined elsewhere) adjusts
            #the dimension order to match the on-disk layout
            counts = self.reorder(counts)
            degen = self.reorder(degen)
        #TODO: Forcing C order for now, revert to using self.column later
        array = numpy.empty(
            [counts[i] for i in range(len(counts)) if not degen[i]],
            self.zvar._np_type(), order='C')
        return numpy.require(array, requirements=('C', 'A', 'W'))
    def convert_input_array(self, buffer):
        """Convert a freshly-read raw array to its Python-facing form.

        Fixes majority and reversed dimensions, then -- unless reading
        raw -- decodes CHAR data to str and converts the epoch types to
        datetime objects.
        """
        result = self._flip_array(buffer)
        #Convert to derived types
        cdftype = self.zvar.type()
        if not self.zvar._raw:
            if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value) and \
               str != bytes:
                #Python 3: byte strings from the file become unicode
                dt = numpy.dtype('U{0}'.format(result.dtype.itemsize))
                result = numpy.require(numpy.char.array(result).decode(),
                                       dtype=dt)
            elif cdftype == const.CDF_EPOCH.value:
                result = lib.v_epoch_to_datetime(result)
            elif cdftype == const.CDF_EPOCH16.value:
                result = lib.v_epoch16_to_datetime(result)
            elif cdftype == const.CDF_TIME_TT2000.value:
                result = lib.v_tt2000_to_datetime(result)
        return result
def convert_output_array(self, buffer):
buffer = self._flip_array(buffer)
return numpy.require(buffer, requirements=('C', 'A', 'W'))
def _flip_array(self, data):
cdftype = self.zvar.type()
#Flip majority if any non-degenerate dimensions exist
if self.column and not min(self.degen):
#Record-number dim degen, swap whole thing
if self.degen[0]:
if cdftype == const.CDF_EPOCH16.value:
#Maintain last dimension
data = data.transpose(
list(range(len(data.shape) - 2, 0, -1)) +
[len(data.shape) - 1]
)
else:
data = data.transpose()
#Record-number dimension is not degenerate, so keep it first
else:
if cdftype == const.CDF_EPOCH16.value:
data = data.transpose(
[0] + list(range(len(data.shape) - 2, 0, -1)) +
[len(data.shape) - 1]
)
else:
data = data.transpose(
[0] + list(range(len(data.shape) - 1, 0, -1)))
#Reverse non-degenerate dimensions in rev
#Remember that the degenerate indices are already gone!
if self.rev.any():
sliced = [(slice(None, None, -1) if self.rev[i] else slice(None))
for i in range(self.dims) if not self.degen[i]]
if cdftype == const.CDF_EPOCH16.value: #don't reverse last dim
sliced.extend(slice(None))
data = operator.getitem(data, tuple(sliced))
return data
def select(self):
args = (const.SELECT_, const.zVAR_RECNUMBER_, ctypes.c_long(self.starts[0]),
const.SELECT_, const.zVAR_RECCOUNT_, ctypes.c_long(self.counts[0]),
const.SELECT_, const.zVAR_RECINTERVAL_,
ctypes.c_long(self.intervals[0]))
if self.dims > 1:
dims = self.dims - 1
args += (const.SELECT_, const.zVAR_DIMINDICES_,
(ctypes.c_long * dims)(*self.starts[1:]),
const.SELECT_, const.zVAR_DIMCOUNTS_,
(ctypes.c_long * dims)(*self.counts[1:]),
const.SELECT_, const.zVAR_DIMINTERVALS_,
(ctypes.c_long * dims)(*self.intervals[1:]))
self.zvar._call(*args)
@staticmethod
def expand_ellipsis(slices, n_dims):
if slices is Ellipsis:
return tuple([slice(None, None, None)
for i in range(n_dims)])
idx = [i for i, v in enumerate(slices) if v is Ellipsis]
if not idx: #no ellipsis
return slices
if len(idx) > 1: #multiples!
raise IndexError('Ellipses can only be used once per slice.')
idx = idx[0]
#how many dims to expand ellipsis to
#remember the ellipsis is in len(slices) and must be replaced!
extra = n_dims - len(slices) + 1
if extra < 0:
raise IndexError('too many indices')
result = slices[0:idx] + (slice(None), ) * extra + slices[idx+1:]
return result
@staticmethod
def check_well_formed(data):
d = numpy.asanyarray(data)
if d.dtype == numpy.object: #this is probably going to be bad
try:
len(d.flat[0])
except TypeError: #at least it's not a list
pass
else:
raise ValueError(
'Data must be well-formed, regular array of number, '
'string, or datetime')
@staticmethod
def dimensions(data):
d = numpy.asanyarray(data)
_Hyperslice.check_well_formed(d)
return d.shape
@staticmethod
def types(data, backward=False):
d = numpy.asanyarray(data)
dims = d.shape
elements = 1
types = []
_Hyperslice.check_well_formed(d)
if d.dtype.kind in ('S', 'U'):
types = [const.CDF_CHAR, const.CDF_UCHAR]
elements = d.dtype.itemsize
if d.dtype.kind == 'U': #UTF-8 uses 4 bytes per
elements //= 4
elif d.size and hasattr(numpy.ma.getdata(d).flat[0], 'microsecond'):
if max((dt.microsecond % 1000 for dt in d.flat)) > 0:
types = [const.CDF_EPOCH16, const.CDF_EPOCH,
const.CDF_TIME_TT2000]
else:
types = [const.CDF_EPOCH, const.CDF_EPOCH16,
const.CDF_TIME_TT2000]
if backward:
del types[types.index(const.CDF_EPOCH16)]
del types[-1]
elif not lib.supports_int8:
del types[-1]
elif d is data or isinstance(data, numpy.generic):
#numpy array came in, use its type (or byte-swapped)
types = [k for k in lib.numpytypedict
if (lib.numpytypedict[k] == d.dtype
or lib.numpytypedict[k] == d.dtype.newbyteorder())
and not k in lib.timetypes]
if (not lib.supports_int8 or backward) \
and const.CDF_INT8.value in types:
del types[types.index(const.CDF_INT8.value)]
#Maintain priority to match the ordered lists below:
#float/double (44, 45) before real (21/22), and
#byte (41) before int (1) before char (51). So hack.
#Consider making typedict an ordered dict once 2.6 is dead.
types.sort(key=lambda x: x % 50, reverse=True)
if not types: #not a numpy array, or can't parse its type
if d.dtype.kind == 'O':
#Basically try most restrictive to least restrictive
trytypes = (numpy.uint64, numpy.int64, numpy.float64)
for t in trytypes:
try:
newd = d.astype(dtype=t)
except: #Failure to cast, try next type
continue
if (newd == d).all(): #Values preserved, use this type
d = newd
#Continue with normal guessing, as if a list
break
else:
#fell through without a match
raise ValueError(
'Cannot convert generic objects to CDF type.')
if d.dtype.kind in ('i', 'u'): #integer
minval = numpy.min(d)
maxval = numpy.max(d)
if minval < 0:
types = [const.CDF_BYTE, const.CDF_INT1,
const.CDF_INT2, const.CDF_INT4, const.CDF_INT8,
const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
cutoffs = [2 ** 7, 2 ** 7, 2 ** 15, 2 ** 31, 2 ** 63,
1.7e38, 1.7e38, 8e307, 8e307]
else:
types = [const.CDF_BYTE, const.CDF_INT1, const.CDF_UINT1,
const.CDF_INT2, const.CDF_UINT2,
const.CDF_INT4, const.CDF_UINT4,
const.CDF_INT8,
const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
cutoffs = [2 ** 7, 2 ** 7, 2 ** 8,
2 ** 15, 2 ** 16, 2 ** 31, 2 ** 32, 2 ** 63,
1.7e38, 1.7e38, 8e307, 8e307]
types = [t for (t, c) in zip(types, cutoffs) if c > maxval
and (minval >= 0 or minval >= -c)]
if (not lib.supports_int8 or backward) \
and const.CDF_INT8 in types:
del types[types.index(const.CDF_INT8)]
else: #float
if dims is ():
if d != 0 and (abs(d) > 1.7e38 or abs(d) < 3e-39):
types = [const.CDF_DOUBLE, const.CDF_REAL8]
else:
types = [const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
else:
absolutes = numpy.abs(d[d != 0])
if len(absolutes) > 0 and \
(numpy.max(absolutes) > 1.7e38 or
numpy.min(absolutes) < 3e-39):
types = [const.CDF_DOUBLE, const.CDF_REAL8]
else:
types = [const.CDF_FLOAT, const.CDF_REAL4,
const.CDF_DOUBLE, const.CDF_REAL8]
types = [t.value if hasattr(t, 'value') else t for t in types]
return (dims, types, elements)
@staticmethod
def reorder(seq):
return numpy.concatenate((seq[0:1],
numpy.flipud(seq)[:-1]))
@staticmethod
def convert_range(start, stop, step, size):
(start, stop, step) = slice(start, stop, step).indices(size)
if step < 0:
step *= -1
count = int((start - stop + step - 1) / step)
start = start - (count - 1) * step
rev = True
else:
count = int((stop - start + step - 1) / step)
rev = False
if count < 0:
count = 0
start = 0
return (start, count, step, rev)
class Attr(MutableSequence):
    """A single CDF attribute, exposed as a list-like sequence of entries.

    Entry numbers index the sequence; entries that do not exist read as
    None. Subclasses (zAttr/gAttr) supply the scope-specific CDF library
    constants (ENTRY_, SCOPE, etc.).
    """
    def __init__(self, cdf_file, attr_name, create=False):
        """Open (or, if ``create``, make) attribute ``attr_name`` in ``cdf_file``.

        ``attr_name`` may be a name (str/bytes) or an attribute number.
        """
        self._cdf_file = cdf_file
        self._raw = False #if True, entries are returned without conversion
        if isinstance(attr_name, str_classes):
            try:
                self._name = attr_name.encode('ascii')
            except AttributeError:
                self._name = attr_name
            attrno = ctypes.c_long()
            if create:
                self._cdf_file._call(const.CREATE_, const.ATTR_,
                                     self._name, self.SCOPE,
                                     ctypes.byref(attrno))
                self._cdf_file.add_attr_to_cache(
                    self._name, attrno.value, self.SCOPE == const.GLOBAL_SCOPE)
            else: #Ensure exists, and populate cache. See scope note below
                attrno, scope = self._cdf_file.attr_num(self._name)
        else:
            #attr_name is a number: look up the name and scope
            name = ctypes.create_string_buffer(const.CDF_ATTR_NAME_LEN256 + 1)
            scope = ctypes.c_long(0)
            self._cdf_file._call(const.SELECT_, const.ATTR_,
                                 ctypes.c_long(attr_name))
            #Because it's possible to create a gAttr Python objecting
            self._cdf_file._call(
                const.GET_, const.ATTR_NAME_, name,
                const.GET_, const.ATTR_SCOPE_, ctypes.byref(scope))
            self._name = name.value.rstrip()
            if scope.value == const.GLOBAL_SCOPE.value:
                scope = True
            elif scope.value == const.VARIABLE_SCOPE.value:
                scope = False
            else:
                raise CDFError(const.BAD_SCOPE)
            self._cdf_file.add_attr_to_cache(self._name, attr_name, scope)

    def __getitem__(self, key):
        """Return entry (or list of entries) at ``key``; missing entries are None."""
        if key is Ellipsis:
            key = slice(None, None, None)
        if hasattr(key, 'indices'):
            idx = range(*key.indices(self.max_idx() + 1))
            return [self._get_entry(i) if self.has_entry(i) else None
                    for i in idx]
        else:
            if self.has_entry(key):
                return self._get_entry(key)
            else:
                raise IndexError('list index ' + str(key) + ' out of range.')

    def _check_other_entries(self, types):
        """Pick a CDF type from ``types`` consistent with this attribute's entries.

        Returns the agreed type or None if no single type can be inferred.
        """
        if self.ENTRY_ == const.zENTRY_:
            #zEntries may legitimately differ per-variable; only use a type
            #if every entry that differs from its zVar's type agrees
            cand_et = None
            one_var_diff = False
            for num in range(self.max_idx() + 1):
                if not self.has_entry(num):
                    continue
                vartype = self._cdf_file[num].type()
                entrytype = self.type(num)
                if vartype != entrytype:
                    one_var_diff = True
                if cand_et is None:
                    if entrytype not in types:
                        return None
                    cand_et = entrytype
                elif cand_et != entrytype:
                    return None
            if one_var_diff and cand_et is not None:
                return cand_et
        else:
            #gEntries: return the highest-priority type already in use
            entrytypes = [self.type(num) for num in
                          range(self.max_idx() + 1)
                          if self.has_entry(num)]
            entrytypes = [et for et in entrytypes if et in types]
            if entrytypes:
                return types[
                    min([types.index(et) for et in entrytypes])]
        return None

    def __setitem__(self, key, data):
        """Set entry/entries at ``key`` (int or slice); a None value deletes."""
        if key is Ellipsis:
            key = slice(None, None, None)
        if not hasattr(key, 'indices'):
            idx = (key, key + 1, 1)
            data = [data]
        else:
            idx = key.indices(self.max_idx() + 1)
            if key.step is None or key.step > 0:
                #Iterating forward: extend the slice to fit all the data
                if len(data) > len(range(*idx)):
                    idx = (idx[0], idx[0] + idx[2] * len(data), idx[2])

        #Type-check everything before writing anything, so a bad datum
        #does not leave the attribute half-written.
        data_idx = -1
        typelist = []
        for i in range(*idx):
            data_idx += 1
            if data_idx >= len(data):
                continue
            datum = data[data_idx]
            if datum is None:
                #Placeholder keeps typelist aligned with data_idx.
                #BUG FIX: was "typelist[i] = (None, None, None)", an
                #IndexError since typelist starts empty.
                typelist.append((None, None, None))
                continue
            (dims, types, elements) = _Hyperslice.types(
                datum, backward=self._cdf_file.backward)
            if len(types) <= 0:
                raise ValueError('Cannot find a matching CDF type.')
            if len(dims) > 1:
                raise ValueError('Entries must be scalar or 1D.')
            elif len(dims) == 1 and isinstance(datum[0], str_classes):
                raise ValueError('Entry strings must be scalar.')
            entry_type = None
            if self.has_entry(i): #If the entry already exists, match its type
                entry_type = self.type(i)
                if entry_type not in types:
                    entry_type = None
            if entry_type is None: #Check other entries for this attribute
                entry_type = self._check_other_entries(types)
            if entry_type is None and self.ENTRY_ == const.zENTRY_:
                #Fall back to zVar type
                vartype = self._cdf_file[i].type()
                if vartype in types:
                    entry_type = vartype
                else:
                    entry_type = types[0]
            elif entry_type is None:
                entry_type = types[0]
            if entry_type not in lib.numpytypedict:
                raise ValueError('Cannot find a matching numpy type.')
            typelist.append((dims, entry_type, elements))

        #Now write (or delete) each entry
        data_idx = -1
        for i in range(*idx):
            data_idx += 1
            if data_idx >= len(data) or data[data_idx] is None:
                if self.has_entry(i):
                    del self[i]
                continue
            datum = data[data_idx]
            (dims, entry_type, elements) = typelist[data_idx]
            self._write_entry(i, datum, entry_type, dims, elements)

    def __delitem__(self, key):
        """Delete entry/entries at ``key`` (int or slice)."""
        if key is Ellipsis:
            key = slice(None, None, None)
        if not hasattr(key, 'indices'):
            idx = (key, key + 1, 1)
        else:
            idx = key.indices(self.max_idx() + 1)
        for i in range(*idx):
            self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(i),
                       const.DELETE_, self.ENTRY_)

    def __iter__(self, current=0):
        """Iterate existing entries in order; supports send() to skip ahead."""
        while current <= self.max_idx():
            if self.has_entry(current):
                value = yield(self._get_entry(current))
                if value is not None:
                    current = value
            current += 1

    def __reversed__(self, current=None):
        """Iterate existing entries in reverse; supports send() to skip."""
        if current is None:
            current = self.max_idx()
        while current >= 0:
            if self.has_entry(current):
                value = yield(self._get_entry(current))
                if value is not None:
                    current = value
            current -= 1

    def __len__(self):
        """Return the number of existing entries (gaps not counted)."""
        count = ctypes.c_long(0)
        self._call(const.GET_, self.ATTR_NUMENTRIES_, ctypes.byref(count))
        return count.value

    def __repr__(self):
        return '<\n' + str(self) + '\n>'

    def __str__(self):
        """One existing entry per line; a notice if the CDF is closed."""
        if self._cdf_file._opened:
            return '\n'.join([str(item) for item in self])
        else:
            if isinstance(self._name, str):
                return 'Attribute "{0}" in closed CDF {1}'.format(
                    self._name, self._cdf_file.pathname)
            else:
                return 'Attribute "{0}" in closed CDF {1}'.format(
                    self._name.decode('ascii'),
                    self._cdf_file.pathname.decode('ascii'))

    def insert(self, index, data):
        """Insert ``data`` at ``index``, shifting later entries up by one."""
        max_entry = self.max_idx()
        if index > max_entry: #Easy case
            self[index] = data
            return
        #Shift entries up from the top down to make room at index
        for i in range(max_entry, index - 1, -1):
            if self.has_entry(i+1):
                self.__delitem__(i+1)
            if self.has_entry(i):
                self.new(self.__getitem__(i), type=self.type(i), number=i+1)
        self[index] = data

    def append(self, data):
        """Add ``data`` as a new entry just past the current maximum index."""
        self[self.max_idx() + 1] = data

    def _call(self, *args, **kwargs):
        """Select this attribute in the CDF library, then pass on ``args``."""
        return self._cdf_file._call(
            const.SELECT_, const.ATTR_,
            ctypes.c_long(self._cdf_file.attr_num(self._name)[0]),
            *args, **kwargs)

    def _entry_len(self, number):
        """Return the number of elements in entry ``number``."""
        if not self.has_entry(number):
            raise IndexError('list index ' + str(number) + ' out of range.')
        count = ctypes.c_long(0)
        self._call(
            const.SELECT_, self.ENTRY_, ctypes.c_long(number),
            const.GET_, self.ENTRY_NUMELEMS_, ctypes.byref(count))
        return count.value

    def type(self, number, new_type=None):
        """Return (and optionally change to ``new_type``) the CDF type of an entry."""
        if new_type is not None:
            if not hasattr(new_type, 'value'):
                new_type = ctypes.c_long(new_type)
            size = ctypes.c_long(self._entry_len(number))
            status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
                                const.PUT_, self.ENTRY_DATASPEC_, new_type, size,
                                ignore=(const.NO_SUCH_ENTRY,))
            if status == const.NO_SUCH_ENTRY:
                raise IndexError('list index ' + str(number) + ' out of range.')
        cdftype = ctypes.c_long(0)
        status = self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
                            const.GET_, self.ENTRY_DATATYPE_, ctypes.byref(cdftype),
                            ignore=(const.NO_SUCH_ENTRY,))
        if status == const.NO_SUCH_ENTRY:
            raise IndexError('list index ' + str(number) + ' out of range.')
        return cdftype.value

    def has_entry(self, number):
        """Return True if entry ``number`` exists."""
        status = self._call(const.CONFIRM_, self.ENTRY_EXISTENCE_,
                            ctypes.c_long(number),
                            ignore=(const.NO_SUCH_ENTRY, ))
        return not status == const.NO_SUCH_ENTRY

    def max_idx(self):
        """Return the highest entry number that exists in this attribute."""
        count = ctypes.c_long(0)
        self._call(const.GET_, self.ATTR_MAXENTRY_, ctypes.byref(count))
        return count.value

    def new(self, data, type=None, number=None):
        """Create a new entry holding ``data``.

        ``type`` is a CDF type (guessed if None); ``number`` is the entry
        number (first free entry if None).
        """
        if number is None:
            number = 0
            while self.has_entry(number):
                number += 1
        (dims, types, elements) = _Hyperslice.types(
            data, backward=self._cdf_file.backward)
        if type is None:
            #Guess based on other entries
            type = self._check_other_entries(types)
        if type is None and self.ENTRY_ == const.zENTRY_:
            #Try to match variable type
            vartype = self._cdf_file[number].type()
            if vartype in types:
                type = vartype
        if type is None:
            type = types[0]
        elif hasattr(type, 'value'):
            type = type.value
        self._write_entry(number, data, type, dims, elements)

    def number(self):
        """Return this attribute's number in the CDF."""
        no = ctypes.c_long(0)
        self._cdf_file._call(const.GET_, const.ATTR_NUMBER_,
                             self._name, ctypes.byref(no))
        return no.value

    def global_scope(self):
        """Return True if this attribute is global-scope, False if variable-scope."""
        return self._cdf_file.attr_num(self._name)[1]

    def rename(self, new_name):
        """Rename this attribute to ``new_name`` and update the name cache."""
        try:
            enc_name = new_name.encode('ascii')
        except AttributeError:
            enc_name = new_name
        if len(enc_name) > const.CDF_ATTR_NAME_LEN256:
            raise CDFError(const.BAD_ATTR_NAME)
        self._call(const.PUT_, const.ATTR_NAME_, enc_name)
        self._cdf_file.add_attr_to_cache(
            enc_name,
            *self._cdf_file.attr_num(self._name)) #still in cache
        del self._cdf_file._attr_info[self._name]
        self._name = enc_name

    def _get_entry(self, number):
        """Read entry ``number``, converting to str/datetime unless raw mode."""
        if not self.has_entry(number):
            raise IndexError('list index ' + str(number) + ' out of range.')
        #Make a big enough buffer
        length = self._entry_len(number)
        cdftype = self.type(number)
        if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
            buff = numpy.empty((), 'S{0}'.format(length), order='C')
        else:
            if cdftype not in lib.numpytypedict:
                raise CDFError(const.BAD_DATA_TYPE)
            buff = numpy.empty((length,), lib.numpytypedict[cdftype],
                               order='C')
        buff = numpy.require(buff, requirements=('C', 'A', 'W'))
        self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
                   const.GET_, self.ENTRY_DATA_,
                   buff.ctypes.data_as(ctypes.c_void_p))
        #decode
        if cdftype in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
            if str == bytes or self._raw: #Py2k, leave as bytes
                result = bytes(buff)
            else: #Py3k, make unicode
                result = str(numpy.char.array(buff).decode())
        else:
            if not self._raw:
                if cdftype == const.CDF_EPOCH.value:
                    result = lib.v_epoch_to_datetime(buff)
                elif cdftype == const.CDF_EPOCH16.value:
                    result = lib.v_epoch16_to_datetime(buff)
                elif cdftype == const.CDF_TIME_TT2000.value:
                    result = lib.v_tt2000_to_datetime(buff)
                else:
                    result = buff
            else:
                result = buff
            if length == 1: #unwrap single-element entries to a scalar
                result = result[0]
        return result

    def _write_entry(self, number, data, cdf_type, dims, elements):
        """Write ``data`` to entry ``number`` as ``cdf_type``, converting as needed."""
        if len(dims) == 0:
            n_write = 1
        else:
            n_write = dims[0]
        if cdf_type in (const.CDF_CHAR.value, const.CDF_UCHAR.value):
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.dtype('S' + str(elements)))
            n_write = elements
        elif cdf_type == const.CDF_EPOCH16.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_epoch16(data)
                except AttributeError: #not datetime input, write raw floats
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.float64)
        elif cdf_type == const.CDF_EPOCH.value:
            if not self._raw:
                try:
                    #BUG FIX: a stray trailing comma here wrapped the
                    #converted array in a 1-tuple, corrupting its shape.
                    data = lib.v_datetime_to_epoch(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.float64)
        elif cdf_type == const.CDF_TIME_TT2000.value:
            if not self._raw:
                try:
                    data = lib.v_datetime_to_tt2000(data)
                except AttributeError:
                    pass
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=numpy.int64)
        elif cdf_type in lib.numpytypedict:
            data = numpy.require(data, requirements=('C', 'A', 'W'),
                                 dtype=lib.numpytypedict[cdf_type])
        else:
            raise CDFError(const.BAD_DATA_TYPE)
        self._call(const.SELECT_, self.ENTRY_, ctypes.c_long(number),
                   const.PUT_, self.ENTRY_DATA_, ctypes.c_long(cdf_type),
                   ctypes.c_long(n_write),
                   data.ctypes.data_as(ctypes.c_void_p))

    def _delete(self):
        """Delete this attribute from the CDF and invalidate this object."""
        self._call(const.DELETE_, const.ATTR_)
        self._cdf_file.clear_attr_from_cache(self._name)
        self._name = None
class zAttr(Attr):
    """Attr for variable-scope (z) attributes.

    Entry numbers are zVariable numbers, so positional insert/append
    have no meaning and are disabled.
    """
    ENTRY_ = const.zENTRY_
    ENTRY_DATA_ = const.zENTRY_DATA_
    SCOPE = const.VARIABLE_SCOPE
    ENTRY_EXISTENCE_ = const.zENTRY_EXISTENCE_
    ATTR_NUMENTRIES_ = const.ATTR_NUMzENTRIES_
    ATTR_MAXENTRY_ = const.ATTR_MAXzENTRY_
    ENTRY_NUMELEMS_ = const.zENTRY_NUMELEMS_
    ENTRY_DATATYPE_ = const.zENTRY_DATATYPE_
    ENTRY_DATASPEC_ = const.zENTRY_DATASPEC_
    def insert(self, index, data):
        """Not supported: zEntries are keyed by zVariable, not position."""
        raise NotImplementedError
    def append(self, index, data):
        """Not supported: zEntries are keyed by zVariable, not position."""
        raise NotImplementedError
class gAttr(Attr):
    """Attr for global-scope (g) attributes; entries are free-standing."""
    ENTRY_ = const.gENTRY_
    ENTRY_DATA_ = const.gENTRY_DATA_
    SCOPE = const.GLOBAL_SCOPE
    ENTRY_EXISTENCE_ = const.gENTRY_EXISTENCE_
    ATTR_NUMENTRIES_ = const.ATTR_NUMgENTRIES_
    ATTR_MAXENTRY_ = const.ATTR_MAXgENTRY_
    ENTRY_NUMELEMS_ = const.gENTRY_NUMELEMS_
    ENTRY_DATATYPE_ = const.gENTRY_DATATYPE_
    ENTRY_DATASPEC_ = const.gENTRY_DATASPEC_
class AttrList(MutableMapping):
    """Dict-like collection of all attributes of one scope in a CDF.

    Keys are attribute names. Subclasses set ``AttrType`` (zAttr/gAttr),
    ``attr_name`` (for messages), and ``global_scope``.
    """
    def __init__(self, cdf_file, special_entry=None):
        """Initialize for ``cdf_file``.

        ``special_entry`` is a callable returning the entry number of
        interest (e.g. a zVariable's number), or None for all entries.
        """
        self._cdf_file = cdf_file
        self.special_entry = special_entry

    def __getitem__(self, name):
        """Return the Attr named ``name``; KeyError if absent or wrong scope."""
        try:
            attrib = self.AttrType(self._cdf_file, name)
        except CDFError:
            (t, v, tb) = sys.exc_info()
            if v.status == const.NO_SUCH_ATTR:
                raise KeyError(name + ': ' + str(v))
            else:
                raise
        if attrib.global_scope() != self.global_scope:
            raise KeyError(name + ': no ' + self.attr_name + ' by that name.')
        return attrib

    def __setitem__(self, name, data):
        """Create/replace attribute ``name`` from ``data``.

        An Attr is copied entry-by-entry, preserving entry numbers and
        per-entry types; anything else is treated as a list of entries.
        """
        #BUG FIX: this branch tested isinstance(data, AttrList), but
        #AttrList has no max_idx()/has_entry(), so it could never run;
        #the entry-preserving copy is meant for a single Attr.
        if isinstance(data, Attr):
            if name in self:
                del self[name]
            attr = self._get_or_create(name)
            #BUG FIX: was range(data.max_idx()); max_idx() is the highest
            #valid entry number, so the last entry was skipped.
            for entryno in range(data.max_idx() + 1):
                if data.has_entry(entryno):
                    attr.new(data[entryno], data.type(entryno), entryno)
        else:
            attr = self._get_or_create(name)
            if isinstance(data, str_classes):
                data = [data]
            else:
                try:
                    junk = len(data)
                except TypeError: #scalar, wrap in a one-entry list
                    data = [data]
            attr[:] = data
            del attr[len(data):] #trim any leftover entries

    def __delitem__(self, name):
        """Delete attribute ``name``; KeyError if absent or wrong scope."""
        try:
            attr = self.AttrType(self._cdf_file, name)
        except CDFError:
            (t, v, tb) = sys.exc_info()
            if v.status == const.NO_SUCH_ATTR:
                raise KeyError(name + ': ' + str(v))
            else:
                raise
        if attr.global_scope() != self.global_scope:
            raise KeyError(name + ': not ' + self.attr_name)
        attr._delete()

    def __iter__(self, current=0):
        """Iterate names of attributes in this scope; supports send() to skip."""
        count = ctypes.c_long(0)
        self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_,
                             ctypes.byref(count))
        while current < count.value:
            candidate = self.AttrType(self._cdf_file, current)
            if candidate.global_scope() == self.global_scope:
                if self.special_entry is None or \
                   candidate.has_entry(self.special_entry()):
                    if str == bytes: #Py2: names stay bytes
                        value = yield(candidate._name)
                    else:
                        value = yield(candidate._name.decode())
                    if value is not None:
                        current = self[value].number()
            current += 1

    def __repr__(self):
        return '<' + self.__class__.__name__ + ':\n' + str(self) + '\n>'

    def __str__(self):
        """One attribute per line, with entry values and CDF type names."""
        if self._cdf_file._opened:
            return '\n'.join([key + ': ' + (
                ('\n' + ' ' * (len(key) + 2)).join(
                    [str(value[i]) + ' [' + lib.cdftypenames[value.type(i)] + ']'
                     for i in range(value.max_idx() + 1) if value.has_entry(i)])
                if isinstance(value, Attr)
                else str(value) +
                ' [' + lib.cdftypenames[self.type(key)] + ']'
                )
                for (key, value) in sorted(self.items())])
        else:
            if isinstance(self._cdf_file.pathname, str):
                return 'Attribute list in closed CDF {0}'.format(
                    self._cdf_file.pathname)
            else:
                return 'Attribute list in closed CDF {0}'.format(
                    self._cdf_file.pathname.decode('ascii'))

    def clone(self, master, name=None, new_name=None):
        """Copy attributes from ``master`` (all, or just ``name`` as ``new_name``)."""
        if name is None:
            self._clone_list(master)
        else:
            self._clone_attr(master, name, new_name)

    def copy(self):
        """Return a plain dict snapshot: name -> list of entries (or value)."""
        return dict((key, value[:] if isinstance(value, Attr) else value)
                    for (key, value) in self.items())

    def new(self, name, data=None, type=None):
        """Create attribute ``name``; optionally populate with ``data`` of ``type``."""
        if name in self:
            raise KeyError(name + ' already exists.')
        #A zAttr without an Entry in this zVar will be a "get" not "create"
        attr = self._get_or_create(name)
        if data is not None:
            if self.special_entry is None:
                attr.new(data, type)
            else:
                attr.new(data, type, self.special_entry())

    def rename(self, old_name, new_name):
        """Rename attribute ``old_name`` to ``new_name``."""
        AttrList.__getitem__(self, old_name).rename(new_name)

    def from_dict(self, in_dict):
        """Deprecated: replace all attributes with the contents of ``in_dict``."""
        warnings.warn("from_dict is deprecated and will be removed. Use clone.",
                      DeprecationWarning)
        for k in in_dict:
            self[k] = in_dict[k]
        for k in list(self):
            if k not in in_dict:
                del self[k]

    def _clone_attr(self, master, name, new_name=None):
        """Copy attribute ``name`` from AttrList ``master`` (as ``new_name``)."""
        if new_name is None:
            new_name = name
        self[new_name] = master[name]

    def _clone_list(self, master):
        """Make this list identical to AttrList ``master``."""
        for name in master:
            self._clone_attr(master, name)
        for name in list(self): #Can't iterate over a list we're changing
            if name not in master:
                del self[name]

    def _get_or_create(self, name):
        """Return attribute ``name``, creating it in this scope if absent."""
        attr = None
        try:
            attr = self.AttrType(self._cdf_file, name)
        except CDFError:
            (t, v, tb) = sys.exc_info()
            if v.status != const.NO_SUCH_ATTR:
                raise
        if attr is None:
            attr = self.AttrType(self._cdf_file, name, True)
        elif attr.global_scope() != self.global_scope:
            raise KeyError(name + ': not ' + self.attr_name)
        return attr
class gAttrList(AttrList):
    """AttrList over the global-scope (g) attributes of a CDF."""
    AttrType = gAttr
    attr_name = 'gAttribute'
    global_scope = True
    def __len__(self):
        """Return the number of gAttributes in the CDF."""
        count = ctypes.c_long(0)
        self._cdf_file._call(const.GET_, const.CDF_NUMgATTRS_,
                             ctypes.byref(count))
        return count.value
class zAttrList(AttrList):
    """AttrList over the zAttributes of a single zVariable.

    Unlike the base class, indexing returns this variable's *entry* in
    the attribute, not the Attr object itself.
    """
    AttrType = zAttr
    attr_name = 'zAttribute'
    global_scope = False
    def __init__(self, zvar):
        """Create the list for zVariable ``zvar``."""
        super(zAttrList, self).__init__(zvar.cdf_file, zvar._num)
        self._zvar = zvar
    def __getitem__(self, name):
        """Return this variable's entry in attribute ``name``."""
        attrib = super(zAttrList, self).__getitem__(name)
        zvar_num = self._zvar._num()
        if attrib.has_entry(zvar_num):
            #propagate raw mode so entries skip datetime/str conversion
            attrib._raw = self._zvar._raw
            return attrib[zvar_num]
        else:
            raise KeyError(name + ': no such attribute for variable ' +
                           self._zvar.name())
    def __delitem__(self, name):
        """Delete this variable's entry in ``name``; drop the attribute if empty."""
        attrib = super(zAttrList, self).__getitem__(name)
        zvar_num = self._zvar._num()
        if not attrib.has_entry(zvar_num):
            raise KeyError(str(name) + ': no such attribute for variable ' +
                           str(self._zvar._name))
        del attrib[zvar_num]
        if len(attrib) == 0:
            attrib._delete()
    def __setitem__(self, name, data):
        """Set this variable's entry in ``name``, creating the attribute if needed."""
        try:
            attr = super(zAttrList, self).__getitem__(name)
        except KeyError:
            attr = zAttr(self._cdf_file, name, True)
        attr._raw = self._zvar._raw
        attr[self._zvar._num()] = data
    def __len__(self):
        """Return the number of zAttributes with an entry for this variable."""
        length = 0
        count = ctypes.c_long(0)
        self._cdf_file._call(const.GET_, const.CDF_NUMATTRS_,
                             ctypes.byref(count))
        current = 0
        while current < count.value:
            candidate = zAttr(self._cdf_file, current)
            if not candidate.global_scope():
                if candidate.has_entry(self._zvar._num()):
                    length += 1
            current += 1
        return length
    def type(self, name, new_type=None):
        """Return (optionally set) the CDF type of this variable's entry in ``name``."""
        attrib = super(zAttrList, self).__getitem__(name)
        zvar_num = self._zvar._num()
        if not attrib.has_entry(zvar_num):
            raise KeyError(name + ': no such attribute for variable ' +
                           self._zvar.name())
        return attrib.type(zvar_num, new_type)
    def _clone_attr(self, master, name, new_name=None):
        """Copy the entry for ``name`` from ``master``, keeping its type if known."""
        if new_name is None:
            new_name = name
        if new_name in self:
            del self[new_name]
        self.new(new_name, master[name],
                 master.type(name) if hasattr(master, 'type') else None)
| true | true |
79020526110a259e64dcd1691022d309d2e30800 | 776 | py | Python | queue-with-stacks/queue_with_stacks.py | brandonholderman/data-structures-and-algorithms | 9b9aa0eac1fe305d9655537c90a24dd263a42df9 | [
"MIT"
] | null | null | null | queue-with-stacks/queue_with_stacks.py | brandonholderman/data-structures-and-algorithms | 9b9aa0eac1fe305d9655537c90a24dd263a42df9 | [
"MIT"
] | null | null | null | queue-with-stacks/queue_with_stacks.py | brandonholderman/data-structures-and-algorithms | 9b9aa0eac1fe305d9655537c90a24dd263a42df9 | [
"MIT"
] | null | null | null | from stack import Stack as s
class Queue:
def __init__(self, iter=[]):
self.stack_one = s()
self.stack_two = s()
self._len = 0
for item in iter:
self.enqueue(item)
def enqueue(self, value):
if value:
self.stack_one.push(value)
self._len += 1
return self.stack_one
return False
def dequeue(self):
if self._len == 0:
return False
else:
for _ in range(self._len - 2):
self.stack_two.push(self.stack_two.pop())
last = self.stack_one.pop()
for _ in range(self._len - 2):
self.stack_one.push(self.stack_two.pop())
self._len -= 1
return last | 25.866667 | 57 | 0.501289 | from stack import Stack as s
class Queue:
def __init__(self, iter=[]):
self.stack_one = s()
self.stack_two = s()
self._len = 0
for item in iter:
self.enqueue(item)
def enqueue(self, value):
if value:
self.stack_one.push(value)
self._len += 1
return self.stack_one
return False
def dequeue(self):
if self._len == 0:
return False
else:
for _ in range(self._len - 2):
self.stack_two.push(self.stack_two.pop())
last = self.stack_one.pop()
for _ in range(self._len - 2):
self.stack_one.push(self.stack_two.pop())
self._len -= 1
return last | true | true |
790205a660a3e83ae739a120a62f77b28f028bee | 4,553 | py | Python | readers/myU3.py | dayne/mini-monitor | 7d6cc8af8877152a04a6deeb9940101f9dcade56 | [
"Apache-2.0"
] | null | null | null | readers/myU3.py | dayne/mini-monitor | 7d6cc8af8877152a04a6deeb9940101f9dcade56 | [
"Apache-2.0"
] | null | null | null | readers/myU3.py | dayne/mini-monitor | 7d6cc8af8877152a04a6deeb9940101f9dcade56 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python2.6
'''
Creates a MyU3 class that adds higher-level functionality to the base
LabJack U3 class.
'''
from __future__ import division
import u3
from time import sleep
import math
def getU3(**kargs):
    '''Returns an open MyU3 object but retries until successful if errors occur.'''
    while True:
        try:
            return MyU3(**kargs)
        except Exception:
            # Catch Exception, not a bare except: a bare except also
            # swallows KeyboardInterrupt, making this retry loop unkillable.
            sleep(2)
            print('Trying to Open U3...')
class MyU3(u3.U3):
    '''
    Class that adds some functionality to the base u3.U3 class, which
    operates a U3 data acquisition device.
    '''
    def __init__(self, **kargs):
        # call the constructor in the base class
        u3.U3.__init__(self, **kargs)
    def getRMS(self, ch, signalFreq=60, numCycles=4):
        '''
        Returns the RMS voltage of a stream of readings on a channel.
        'ch' is the channel to sample.
        'signalFreq' is the fundamental frequency of the signal being sampled.
        'numCycles' is the number of full cycles of the signal that you want to
        sample for purposes of calculating the RMS value.
        I found that for 60 Hz signals, sampling 4 cycles produces stable
        readings.
        NOTE: there are limits to the frequency calculation below.  Getting
        a packet from the U3 in streaming mode is limited to 1 second I think,
        and it will reduces the # of samples if the frequency is set so that
        less than 1200 samples arrive in 1 second.
        '''
        # There are 1200 samples in one streaming request of the U3.  Calculate
        # the required streaming frequency from that and the other input parameters.
        freq = int(signalFreq / numCycles * 1200.0)
        freq = min(50000, freq)   # cap at 50 kHz
        # the U3 must operate at lower resolution if the streaming is very fast.
        if freq < 2500:
            resolution = 0
        elif freq < 10000:
            resolution = 1
        elif freq < 20000:
            resolution = 2
        else:
            resolution = 3
        self.streamConfig( NumChannels = 1,
                           PChannels = [ ch ],
                           NChannels = [ 31 ],     # 31 indicates single-ended read
                           Resolution = resolution,
                           SampleFrequency = freq )
        try:
            self.streamStart()
            for r in self.streamData():
                # RMS = sqrt(mean(v**2)); 'reduce' is the Python 2 builtin
                # (this module targets Python 2, per the shebang).
                vals = r['AIN' + str(ch)]
                sum_sqr = reduce(lambda x,y: x + y*y, vals, 0.0)
                return math.sqrt(sum_sqr / len(vals))
        finally:
            self.streamStop()
    def getAvg(self, ch, reads=8, specialRange=False, longSettle=True):
        '''
        Returns an analog reading of channel 'ch', but samples
        multiple times = 'reads' and then averages.  If 'specialRange'
        is True, uses the higher voltage range for the channel.
        If 'longSettle' is True, a higher source impedance can be tolerated.
        '''
        # negative channel 32 selects the special (higher) range;
        # 31 is a normal single-ended read.
        if specialRange:
            negCh = 32
        else:
            negCh = 31
        tot = 0.0
        for i in range(reads):
            tot += self.getAIN(ch, negCh, longSettle=longSettle)
        return tot / reads
    # Could add a routine to average an analog reading across
    # 4 60 Hz cycles using the stream function as in getRMS().
    def getDutyCycle(self, timer, reads=8):
        '''
        Returns the duty cycle measured by a timer.  Assumes that the timer is already
        set to Mode = 4 for reading duty cycles.
        timer - the timer number, either 0 or 1.
        reads - the number of times to read the duty cycle and average
        '''
        tot = 0.0    # used to average the duty cycle readings
        for i in range(reads):
            val = self.getFeedback(u3.Timer(timer=timer))[0]
            # Split the 32-bit result into its low and high 16-bit words.
            # BUG FIX: "val / 2**16" was true division (this module does
            # "from __future__ import division"), so the high word came
            # back with the low word still present as a fraction; floor
            # division extracts it exactly.
            hi = float(val % 2**16)
            lo = float(val // 2**16)
            tot += hi / (lo + hi)
        return tot / reads
if __name__=='__main__':
    # Manual smoke test (Python 2 syntax): stream channel 6 and print
    # its RMS voltage twice a second until interrupted.
    # create the device object
    d = MyU3()
    # Set all possible inputs to Analog
    # Create a bit mask indicating which channels are analog:
    FIOEIOAnalog = ( 2 ** 16 ) - 1;
    fios = FIOEIOAnalog & (0xFF) # the bottom 8 bits
    eios = FIOEIOAnalog/256 # shift 8 places to get top 8 bits
    d.configIO( FIOAnalog = fios, EIOAnalog = int(eios) )
    try:
        while True:
            #print '%.3f' % d.getAvg(6)
            #print '%.2f' % ( (d.getAvg(30) - 273.15)*1.8 + 32.0 )
            print '%.3f' % d.getRMS(6)
            sleep(0.5)
    finally:
        # always release the USB device, even on Ctrl-C
        d.close() | 32.29078 | 84 | 0.597408 |
'''
Creates a MyU3 class that adds higher-level functionality to the base
LabJack U3 class.
'''
from __future__ import division
import u3
from time import sleep
import math
def getU3(**kargs):
    '''Returns an open MyU3 object but retries until successful if errors occur.'''
    while True:
        try:
            return MyU3(**kargs)
        except Exception:
            # Catch Exception, not a bare except: a bare except also
            # swallows KeyboardInterrupt, making this retry loop unkillable.
            sleep(2)
            print('Trying to Open U3...')
class MyU3(u3.U3):
    '''
    Class that adds some functionality to the base u3.U3 class, which
    operates a U3 data acquisition device.
    '''
    def __init__(self, **kargs):
        # call the constructor in the base class
        u3.U3.__init__(self, **kargs)
    def getRMS(self, ch, signalFreq=60, numCycles=4):
        '''
        Returns the RMS voltage of a stream of readings on a channel.
        'ch' is the channel to sample.
        'signalFreq' is the fundamental frequency of the signal being sampled.
        'numCycles' is the number of full cycles of the signal that you want to
        sample for purposes of calculating the RMS value.
        I found that for 60 Hz signals, sampling 4 cycles produces stable
        readings.
        NOTE: there are limits to the frequency calculation below.  Getting
        a packet from the U3 in streaming mode is limited to 1 second I think,
        and it will reduces the # of samples if the frequency is set so that
        less than 1200 samples arrive in 1 second.
        '''
        # There are 1200 samples in one streaming request of the U3.  Calculate
        # the required streaming frequency from that and the other input parameters.
        freq = int(signalFreq / numCycles * 1200.0)
        freq = min(50000, freq)   # cap at 50 kHz
        # the U3 must operate at lower resolution if the streaming is very fast.
        if freq < 2500:
            resolution = 0
        elif freq < 10000:
            resolution = 1
        elif freq < 20000:
            resolution = 2
        else:
            resolution = 3
        self.streamConfig( NumChannels = 1,
                           PChannels = [ ch ],
                           NChannels = [ 31 ],     # 31 indicates single-ended read
                           Resolution = resolution,
                           SampleFrequency = freq )
        try:
            self.streamStart()
            for r in self.streamData():
                # RMS = sqrt(mean(v**2)); 'reduce' is the Python 2 builtin
                # (this module targets Python 2, per the shebang).
                vals = r['AIN' + str(ch)]
                sum_sqr = reduce(lambda x,y: x + y*y, vals, 0.0)
                return math.sqrt(sum_sqr / len(vals))
        finally:
            self.streamStop()
    def getAvg(self, ch, reads=8, specialRange=False, longSettle=True):
        '''
        Returns an analog reading of channel 'ch', but samples
        multiple times = 'reads' and then averages.  If 'specialRange'
        is True, uses the higher voltage range for the channel.
        If 'longSettle' is True, a higher source impedance can be tolerated.
        '''
        # negative channel 32 selects the special (higher) range;
        # 31 is a normal single-ended read.
        if specialRange:
            negCh = 32
        else:
            negCh = 31
        tot = 0.0
        for i in range(reads):
            tot += self.getAIN(ch, negCh, longSettle=longSettle)
        return tot / reads
    def getDutyCycle(self, timer, reads=8):
        '''
        Returns the duty cycle measured by a timer.  Assumes that the timer is already
        set to Mode = 4 for reading duty cycles.
        timer - the timer number, either 0 or 1.
        reads - the number of times to read the duty cycle and average
        '''
        tot = 0.0    # used to average the duty cycle readings
        for i in range(reads):
            val = self.getFeedback(u3.Timer(timer=timer))[0]
            # Split the 32-bit result into its low and high 16-bit words.
            # BUG FIX: "val / 2**16" was true division (this module does
            # "from __future__ import division"), so the high word came
            # back with the low word still present as a fraction; floor
            # division extracts it exactly.
            hi = float(val % 2**16)
            lo = float(val // 2**16)
            tot += hi / (lo + hi)
        return tot / reads
if __name__=='__main__':
    # Demo: continuously print the RMS voltage measured on channel 6.
    d = MyU3()
    # bitmask with all 16 FIO/EIO lines set to analog
    FIOEIOAnalog = ( 2 ** 16 ) - 1;
    fios = FIOEIOAnalog & (0xFF)  # low byte -> FIO lines
    # high byte -> EIO lines; true division is in effect (see the
    # __future__ import at the top of this module), so this yields a
    # float -- hence the int() in the configIO() call below
    eios = FIOEIOAnalog/256
    d.configIO( FIOAnalog = fios, EIOAnalog = int(eios) )
    try:
        while True:
            print '%.3f' % d.getRMS(6)
            sleep(0.5)
    finally:
        # always release the device, even on Ctrl-C
        d.close()
7902063976a16a516504664ce8b3bf5438d73a18 | 32,476 | py | Python | contrib/cli_scripts/nodemeisterlib.py | coxmediagroup/nodemeister | ecfd2c04f516b1ea022c55ce4372976055a39e5f | [
"Apache-2.0"
] | null | null | null | contrib/cli_scripts/nodemeisterlib.py | coxmediagroup/nodemeister | ecfd2c04f516b1ea022c55ce4372976055a39e5f | [
"Apache-2.0"
] | null | null | null | contrib/cli_scripts/nodemeisterlib.py | coxmediagroup/nodemeister | ecfd2c04f516b1ea022c55ce4372976055a39e5f | [
"Apache-2.0"
] | null | null | null | """
Some common functions for interfacing with the
NodeMeister REST API.
"""
import requests
import anyjson
import re
import logging
MISSING_ITEM = '-'
DIFF_MARKER = ">"
try:
logger.debug("importing nodemeisterlib")
except NameError:
FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(level=logging.ERROR, format=FORMAT)
logger = logging.getLogger(__name__)
def red(text):
    """
    Wrap ``text`` in ANSI escape sequences so terminals render it red.

    Minimal local stand-in for the 'termcolor' package's colored() helper
    (SGR code 31 = red foreground, SGR 0 = reset), kept here to avoid
    the extra dependency.
    """
    return '\033[31m{0}\033[0m'.format(text)
def print_columns(lines, spacer=' ' * 3, onlydifferent=False):
    """
    Format a list of 3-column lines into an aligned, columnar string.

    :param lines: list of lines; each line is a list of 3 columns, with an
      optional 4th element that is True when the line differs (such lines
      are prefixed with DIFF_MARKER and colored red)
    :type lines: list of lists
    :param spacer: string placed between columns, default three spaces.
      BUGFIX: this parameter was previously accepted but ignored
      (the separator was hard-coded); the default now matches the old
      hard-coded value so default output is unchanged.
    :type spacer: string
    :param onlydifferent: only output differing lines
    :type onlydifferent: boolean
    :returns: the formatted output
    :rtype: string
    """
    s = ""
    # find the widest value in each of the three columns
    clen = [0, 0, 0]
    for l in lines:
        if onlydifferent and len(l) < 3:
            # NOTE(review): this looks like it was meant to skip
            # non-differing (len < 4) lines when onlydifferent -- confirm
            continue
        for c in range(3):
            if len(str(l[c])) > clen[c]:
                clen[c] = len(str(l[c]))
    line_spec = "{{0:<{1}s}}{0}{{1:<{2}s}}{0}{{2:<{3}s}}\n".format(spacer, clen[0], clen[1], clen[2])
    # emit the lines
    for l in lines:
        if len(l) > 3 and l[3] == True:
            # differing line: mark and colorize
            s += red(line_spec.format(DIFF_MARKER + l[0], str(l[1]), str(l[2])))
        else:
            if onlydifferent:
                continue
            s += line_spec.format(l[0], str(l[1]), str(l[2]))
    return s
def pretty_diff_list(title, oA, oB):
    """
    Build pretty-diff lines for two list-like objects.

    :param title: the title/heading for the line (unused for lists)
    :type title: string
    :param oA: first list
    :param oB: second list
    :returns: list of lines, each a 3-column list; lines where the item is
      missing on one side carry a 4th element of True
    :rtype: list of lists
    """
    result = []
    for item in sorted(set(oA) | set(oB)):
        present_a = item in oA
        present_b = item in oB
        if present_a and present_b:
            result.append(['', item, item])
        elif present_a:
            result.append(['', item, MISSING_ITEM, True])
        else:
            result.append(['', MISSING_ITEM, item, True])
    return result
def pretty_diff_str(title, oA, oB):
    """
    Build the single pretty-diff line for two scalar values.

    :param title: the title/heading for the line
    :type title: string
    :param oA: first value
    :param oB: second value
    :returns: one-element list containing a 3-column line, with a 4th
      element of True when the values differ
    :rtype: list of lists
    """
    if oA == oB:
        return [[title, oA, oB]]
    return [[title, oA, oB, True]]
def pretty_diff_dict(title, oA, oB):
    """
    Build pretty-diff lines for two dicts.

    :param title: the title/heading line emitted first
    :type title: string
    :param oA: first dict
    :param oB: second dict
    :returns: list of lines, each a 3-column list; differing or one-sided
      keys carry a 4th element of True
    :rtype: list of lists
    """
    lines = [[title, '', '']]
    for key in sorted(set(oA.keys()) | set(oB.keys())):
        if key not in oB:
            lines.append([key, oA[key], MISSING_ITEM, True])
        elif key not in oA:
            lines.append([key, MISSING_ITEM, oB[key], True])
        elif oA[key] != oB[key]:
            lines.append([key, oA[key], oB[key], True])
        else:
            lines.append([key, oA[key], oB[key]])
    return lines
def pretty_diff_obj(title, oA, oB):
    """
    Generate a pretty diff of two objects (dict, string or list), suitable
    for use in pretty_diff().

    Pass-through dispatcher to pretty_diff_(dict|str|list) based on the
    runtime type of either argument.  The old trailing 'return []' after
    the exhaustive if/else was unreachable and has been removed.

    :param title: the title/heading for the line
    :type title: string
    :param oA: first object
    :param oB: second object
    :returns: list of lines, each a list of 3 columns
    :rtype: list of lists
    """
    if type(oA) == type({}) or type(oB) == type({}):
        return pretty_diff_dict(title, oA, oB)
    # exact type checks (not isinstance) preserved from the original;
    # covers both str and unicode under Python 2
    if type(oA) == type("") or type(oB) == type("") or type(oA) == type(u"") or type(oB) == type(u""):
        return pretty_diff_str(title, oA, oB)
    return pretty_diff_list(title, oA, oB)
def pretty_diff(title, titleA, dictA, titleB, dictB, onlydifferent=False):
    """
    Generate a "pretty" printable diff of two Nodes or Groups
    containing arbitrarily deep dict, list or string items.

    Intended to be used for the "text" dicts in migrate_group()
    and migrate_node().

    :param title: overall title of the diff
    :type title: string
    :param titleA: title of the first dict
    :type titleA: string
    :param dictA: the first dict
    :type dictA: dict
    :param titleB: title of the second dict
    :type titleB: string
    :param dictB: the second dict
    :type dictB: dict
    :param onlydifferent: only output differing lines
    :type onlydifferent: boolean
    :returns: multi-line string, columnar diff of dicts
    :rtype: string
    """
    # BUGFIX: work on shallow copies -- the original pop()ed name/id/
    # description out of the caller's dicts as a side effect
    dictA = dict(dictA)
    dictB = dict(dictB)
    s = "Diff of %s\n" % title
    lines = []
    lines.append(['', titleA, titleB])
    lines.append(['', '-' * len(titleA), '-' * len(titleB)])
    # name/id/description always come first; pop them so the generic
    # per-key loop below does not repeat them
    for key in ('name', 'id', 'description'):
        lines.append([key, dictA.pop(key, '<none>'), dictB.pop(key, '<none>')])
    lines.append(['', '', ''])
    for p in sorted(set(dictA.keys()) | set(dictB.keys())):
        lines.append([p.capitalize() + ':', '', ''])
        lines.extend(pretty_diff_obj('', dictA.get(p), dictB.get(p)))
    s += print_columns(lines, onlydifferent=onlydifferent)
    return s
def get_nm_node_yaml(nm_host, node_name, ssl_verify=False, verbose=False):
    """
    Fetch the raw ENC YAML for a given node from NodeMeister.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param node_name: name of the node to get YAML for
    :type node_name: string
    :param ssl_verify: whether or not to verify the SSL certificate
    :type ssl_verify: boolean
    :returns: raw YAML string, or None on a non-200 response
    :rtype: string or None
    """
    nm_url = "http://%s/enc/puppet/%s" % (nm_host, node_name)
    resp = requests.get(nm_url, headers={'Accept': 'text/yaml'}, verify=ssl_verify)
    if resp.status_code != 200:
        logger.error("got status code {s} for {u}".format(s=resp.status_code, u=nm_url))
        return None
    return resp.content
def get_dashboard_node_yaml(url, ssl_verify=False, verbose=False):
    """
    Fetch the content of a Puppet Dashboard node YAML file.

    :param url: full URL to the Dashboard node yaml
    :type url: string
    :param ssl_verify: whether or not to verify the SSL certificate
    :type ssl_verify: boolean
    :returns: raw YAML string, or None on a non-200 response
    :rtype: string or None
    """
    resp = requests.get(url, headers={'Accept': 'text/yaml'}, verify=ssl_verify)
    if resp.status_code != 200:
        logger.error("got status code {s} for {u}".format(s=resp.status_code, u=url))
        return None
    return resp.content
def get_json(url):
    """
    GET a URL and return the deserialized JSON body.

    Uses Response.json() when the installed requests version provides it,
    otherwise falls back to anyjson.

    :param url: the URL to get
    :type url: string
    :returns: unserialized JSON, or None on deserialization failure
    :rtype: dict/mixed or None
    """
    r = requests.get(url)
    if 'json' in dir(r):
        return r.json()
    try:
        return anyjson.deserialize(r.content)
    except Exception:
        # narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # are not swallowed
        logger.error("could not deserialize JSON for {u} (got status code {s})".format(s=r.status_code, u=url))
        return None
def get_group_names(nm_host):
    """
    Return all groups in the NodeMeister instance as a dict of
    id => name.

    :param nm_host: NodeMeister hostname/IP
    :type nm_host: string
    :returns: NM groups, {id<int>: name<string>}
    :rtype: dict
    """
    groups = get_json("http://%s/enc/groups/" % nm_host)
    return dict((g['id'], g['name']) for g in groups)
def get_nm_group_classes(nm_host):
    """
    Return all group classes in NodeMeister, keyed on their id.

    :param nm_host: NodeMeister hostname/IP
    :type nm_host: string
    :returns: {id<int>: {'classname': <string>, 'classparams': <string or None>, 'group': <int>, 'id': <int>}}
    :rtype: dict
    """
    classes = get_json("http://%s/enc/classes/groups/" % nm_host)
    return dict((c['id'], c) for c in classes)
def get_nm_group_params(nm_host):
    """
    Return all group params in NodeMeister, keyed on their id.
    Non-None values are passed through clean_value().

    :param nm_host: NodeMeister hostname/IP
    :type nm_host: string
    :returns: {id<int>: {'paramkey': <string>, 'paramvalue': <string or None>, 'group': <int>, 'id': <int>}}
    :rtype: dict
    """
    result = {}
    for param in get_json("http://%s/enc/parameters/groups/" % nm_host):
        if param['paramvalue'] is not None:
            param['paramvalue'] = clean_value(param['paramvalue'])
        result[param['id']] = param
    return result
def get_nm_group(nm_host, gname=None, gid=None, groupnames=None):
    """
    Return a dict of information about a NodeMeister group, looked up by
    either name or ID (gid overrides gname; a gname is resolved to its id).

    :param nm_host: NodeMeister hostname/IP
    :type nm_host: string
    :param gname: name of group to get
    :type gname: string
    :param gid: ID of group to get, overrides gname
    :type gid: int
    :param groupnames: output of get_group_names(), to avoid re-fetching
    :type groupnames: dict
    :returns: unserialized JSON dict for the group, or {} if not found
    :rtype: dict
    :raises ValueError: if neither gname nor gid is given
    """
    if gid is None and gname is None:
        raise ValueError("get_nm_group called without gname or gid")
    if gid is None:
        # resolve the name to an id
        if groupnames is None:
            groupnames = get_group_names(nm_host)
        for group_id, name in groupnames.items():
            if name == gname:
                gid = group_id
        if gid is None:
            return {}
    return get_json("http://%s/enc/groups/%d/" % (nm_host, gid))
def interpolate_group(group, classes, params, group_names):
    """
    In the dict returned by get_nm_group(), replace class and parameter
    IDs, and other group IDs, with their string/dict representations.
    Mutates ``group`` in place and also returns it.

    :param group: the Group dict returned by get_nm_group()
    :type group: dict
    :param classes: the dict of classes returned by get_nm_group_classes()
    :type classes: dict
    :param params: the dict of parameters returned by get_nm_group_params()
    :type params: dict
    :param group_names: the dict of group IDs to names from get_group_names()
    :type group_names: dict
    :returns: the same group dict, with classes and params interpolated
    :rtype: dict
    """
    # parameter IDs -> {paramkey: paramvalue}
    group['parameters'] = dict(
        (params[pid]['paramkey'], params[pid]['paramvalue'])
        for pid in group.get('parameters', {})
    )
    # class IDs -> {classname: classparams}
    group['classes'] = dict(
        (classes[cid]['classname'], classes[cid]['classparams'])
        for cid in group.get('classes', {})
    )
    # group IDs -> group names
    group['parents'] = [group_names[gid] for gid in group.get('parents', {})]
    group['groups'] = [group_names[gid] for gid in group.get('groups', {})]
    return group
def add_group(nm_host, name, description, parents=None, groups=None, dry_run=False):
    """
    Add a group to NodeMeister.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param name: name of the new group
    :type name: string
    :param description: description of the new group
    :type description: string
    :param parents: parents of this group (int IDs)
    :type parents: list
    :param groups: child groups of this group (int IDs)
    :type groups: list
    :param dry_run: if True, only log what would be done
    :type dry_run: boolean
    :returns: int ID of the new group, or False on failure
    :rtype: int or False
    """
    payload = {'name': name, 'description': description}
    if parents is not None:
        payload['parents'] = parents
    if groups is not None:
        payload['groups'] = groups
    status_code = do_post("http://%s/enc/groups/" % nm_host, payload, dry_run=dry_run)
    if status_code != 201:
        logger.error("ERROR: add_group got status code %d" % status_code)
        return False
    # the POST response doesn't include the id; look it up by name
    return get_nm_group_id(nm_host, name, dry_run=dry_run)
def get_nm_group_id(nm_host, name, groups=None, dry_run=False):
    """
    Get the ID of a NodeMeister group specified by name.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param name: name of the group
    :type name: string
    :param groups: dict of groups as returned by get_group_names()
    :type groups: dict
    :param dry_run: if True, skip the lookup and return 0
    :type dry_run: boolean
    :returns: int ID of the group, or False if not found
    :rtype: int or False
    """
    if dry_run:
        return 0
    if groups is None:
        groups = get_group_names(nm_host)
    for gid, gname in groups.items():
        if gname == name:
            return gid
    return False
def add_param_to_group(nm_host, gid, pname, pval, dry_run=False):
    """
    Add a parameter to a group in NodeMeister.  Empty, whitespace-only
    or "''" string values are stored as None.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param gid: numeric ID of the group to add param to
    :type gid: int
    :param pname: parameter name
    :type pname: string
    :param pval: parameter value
    :type pval: string
    :param dry_run: if True, only log what would be done
    :type dry_run: boolean
    :returns: True on success or False on failure
    :rtype: boolean
    """
    # 'basestring' only exists on Python 2; this tuple is (str, unicode)
    # there and simply (str, str) on Python 3, so behavior is unchanged
    # under py2 while no longer raising NameError under py3
    string_types = ("".__class__, u"".__class__)
    if isinstance(pval, string_types) and (pval.strip() == "" or pval == "''"):
        pval = None
    payload = {'group': gid, 'paramkey': pname, 'paramvalue': pval}
    url = "http://%s/enc/parameters/groups/" % nm_host
    status_code = do_post(url, payload, dry_run=dry_run)
    if status_code == 201:
        return True
    logger.error("ERROR: add_param_to_group got status code %d" % status_code)
    return False
def add_class_to_group(nm_host, gid, classname, classparams=None, dry_run=False):
    """
    Add a class to a group in NodeMeister.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param gid: numeric ID of the group to add the class to
    :type gid: int
    :param classname: class name
    :type classname: string
    :param classparams: class parameters, default None
    :type classparams: string or None
    :param dry_run: if True, only log what would be done
    :type dry_run: boolean
    :returns: True on success or False on failure
    :rtype: boolean
    """
    payload = {'group': gid, 'classname': classname, 'classparams': classparams}
    status_code = do_post("http://%s/enc/classes/groups/" % nm_host, payload, dry_run=dry_run)
    if status_code != 201:
        logger.error("ERROR: add_class_to_group got status code %d" % status_code)
        return False
    return True
def get_node_names(nm_host):
    """
    Return all nodes in the NodeMeister instance as a dict of
    id => hostname.

    :param nm_host: NodeMeister hostname/IP
    :type nm_host: string
    :returns: NM nodes, {id<int>: hostname<string>}
    :rtype: dict
    """
    nodes = get_json("http://%s/enc/nodes/" % nm_host)
    return dict((n['id'], n['hostname']) for n in nodes)
def get_nm_node_id(nm_host, hostname, nodenames=None, dry_run=False):
    """
    Get the node ID of a node specified by hostname.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param hostname: hostname of the node
    :type hostname: string
    :param nodenames: dict of nodes as returned by get_node_names()
    :type nodenames: dict
    :param dry_run: if True, skip the lookup and return 0
    :type dry_run: boolean
    :returns: int ID of the node, or False if not found
    :rtype: int or False
    """
    if dry_run:
        return 0
    if nodenames is None:
        nodenames = get_node_names(nm_host)
    for node_id, name in nodenames.items():
        if name == hostname:
            return node_id
    logger.error("could not find node ID for {h}".format(h=hostname))
    return False
def get_nm_node(nm_host, hostname=None, node_id=None, nodenames=None):
    """
    Return a dict of information about a NodeMeister node, looked up by
    either hostname or ID (node_id overrides hostname).

    :param nm_host: NodeMeister hostname/IP
    :type nm_host: string
    :param hostname: hostname of node to get
    :type hostname: string
    :param node_id: ID of node to get, overrides hostname
    :type node_id: int
    :param nodenames: output of get_node_names(), to avoid re-fetching
    :type nodenames: dict
    :returns: unserialized JSON dict for the node, or {} if not found
    :rtype: dict
    :raises ValueError: if neither hostname nor node_id is given
    """
    if node_id is None and hostname is None:
        raise ValueError("get_nm_node called without hostname or node_id")
    if node_id is None:
        # resolve the hostname to an id
        if nodenames is None:
            nodenames = get_node_names(nm_host)
        for nid, name in nodenames.items():
            if name == hostname:
                node_id = nid
        if node_id is None:
            # (sic) "hode" typo preserved from the original log message
            logger.error("could not find hode with hostname {h}".format(h=hostname))
            return {}
    return get_json("http://%s/enc/nodes/%d/" % (nm_host, node_id))
def get_nm_node_classes(nm_host):
    """
    Return all node classes in NodeMeister, keyed on their id.

    :param nm_host: NodeMeister hostname/IP
    :type nm_host: string
    :returns: {id<int>: {'classname': <string>, 'classparams': <string or None>, 'node': <int>, 'id': <int>}}
    :rtype: dict
    """
    classes = get_json("http://%s/enc/classes/nodes/" % nm_host)
    return dict((c['id'], c) for c in classes)
def get_nm_node_params(nm_host):
    """
    Return all node params in NodeMeister, keyed on their id.
    (Unlike the group variant, values are NOT passed through clean_value().)

    :param nm_host: NodeMeister hostname/IP
    :type nm_host: string
    :returns: {id<int>: {'paramkey': <string>, 'paramvalue': <string or None>, 'node': <int>, 'id': <int>}}
    :rtype: dict
    """
    params = get_json("http://%s/enc/parameters/nodes/" % nm_host)
    return dict((p['id'], p) for p in params)
def add_node(nm_host, hostname, description, groups=None, dry_run=False):
    """
    Add a node to NodeMeister.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param hostname: hostname of the new node
    :type hostname: string
    :param description: description of the new node
    :type description: string
    :param groups: group IDs that this node is in
    :type groups: list
    :param dry_run: if True, only log what would be done
    :type dry_run: boolean
    :returns: int ID of the new node, or False on failure
    :rtype: int or False
    """
    payload = {'hostname': hostname, 'description': description}
    if groups is not None:
        payload['groups'] = groups
    logger.debug("adding node {h}".format(h=hostname))
    status_code = do_post("http://%s/enc/nodes/" % nm_host, payload, dry_run=dry_run)
    if status_code != 201:
        logger.error("ERROR: add_node got status code %d" % status_code)
        return False
    # the POST response doesn't include the id; look it up by hostname
    return get_nm_node_id(nm_host, hostname, dry_run=dry_run)
def add_param_to_node(nm_host, node_id, pname, pval, dry_run=False):
    """
    Add a parameter to a node in NodeMeister.  Empty, whitespace-only
    or "''" string values are stored as None.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param node_id: numeric ID of the node to add param to
    :type node_id: int
    :param pname: parameter name
    :type pname: string
    :param pval: parameter value
    :type pval: string
    :param dry_run: if True, only log what would be done
    :type dry_run: boolean
    :returns: True on success or False on failure
    :rtype: boolean
    """
    # BUGFIX: only strings can be empty-checked; previously a None or
    # numeric pval raised AttributeError on .strip().  This mirrors the
    # guard in add_param_to_group (py2/py3-safe str+unicode tuple).
    string_types = ("".__class__, u"".__class__)
    if isinstance(pval, string_types) and (pval.strip() == "" or pval == "''"):
        pval = None
    payload = {'node': node_id, 'paramkey': pname, 'paramvalue': pval}
    url = "http://%s/enc/parameters/nodes/" % nm_host
    logger.debug("adding param '{pname}' to node {n} with val: {pval}".format(n=node_id, pname=pname, pval=pval))
    status_code = do_post(url, payload, dry_run=dry_run)
    if status_code == 201:
        return True
    logger.error("ERROR: add_param_to_node got status code %d" % status_code)
    return False
def add_class_to_node(nm_host, node_id, classname, classparams=None, dry_run=False):
    """
    Add a class to a node in NodeMeister.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param node_id: numeric ID of the node to add the class to
    :type node_id: int
    :param classname: class name
    :type classname: string
    :param classparams: class parameters, default None
    :type classparams: string or None
    :param dry_run: if True, only log what would be done
    :type dry_run: boolean
    :returns: True on success or False on failure
    :rtype: boolean
    """
    payload = {'node': node_id, 'classname': classname, 'classparams': classparams}
    logger.debug("adding class '{cn}' to node {n} with params: {cp}".format(n=node_id, cn=classname, cp=classparams))
    status_code = do_post("http://%s/enc/classes/nodes/" % nm_host, payload, dry_run=dry_run)
    if status_code != 201:
        logger.error("ERROR: add_class_to_node got status code %d" % status_code)
        return False
    return True
def get_name_for_class_exclusion(nm_host, class_exclusion_id, verbose):
    """
    Get the excluded class name for a given ClassExclusion ID.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param class_exclusion_id: numeric ID of the class exclusion
    :type class_exclusion_id: int
    :param verbose: unused; kept for interface compatibility
    :returns: string name of the excluded class, or False on failure
    :rtype: string or False
    """
    # (removed an unused 'r = {}' local from the original)
    exclusions = get_json("http://%s/enc/exclusions/classes/" % nm_host)
    if exclusions is None:
        return False
    for exc in exclusions:
        if exc['id'] == class_exclusion_id:
            return exc['exclusion']
    return False
def add_node_class_exclusion(nm_host, node_id, classname, dry_run=False, verbose=False):
    """
    Add a class exclusion to a node in NodeMeister.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param node_id: numeric ID of the node to add the exclusion to
    :type node_id: int
    :param classname: class name to exclude
    :type classname: string
    :param dry_run: if True, only log what would be done
    :type dry_run: boolean
    :returns: True on success or False on failure
    :rtype: boolean
    """
    payload = {'node': node_id, 'exclusion': classname}
    logger.debug("adding class exclusion for '{cn}' to node {n}".format(n=node_id, cn=classname))
    status_code = do_post("http://%s/enc/exclusions/classes/" % nm_host, payload, dry_run=dry_run)
    if status_code != 201:
        logger.error("ERROR: add_node_class_exclusion got status code %d" % status_code)
        return False
    return True
def clean_value(v, debug=False):
    """
    Strip surrounding double-quote and backslash characters from string
    values; non-string values are returned unchanged.
    """
    if debug:
        print("clean_value '%s'" % v)
    # exact type check (str or unicode under py2), as in the original
    if type(v) in (type(""), type(u"")):
        return v.strip('"\\')
    return v
def do_post(url, payload, dry_run=False):
    """
    POST a JSON-encoded payload with Requests and return the status code.
    In dry_run mode nothing is sent and 201 (created) is returned.

    :param url: URL to POST to
    :type url: string
    :param payload: the payload data, to be JSON encoded
    :type payload: dict
    :param dry_run: if True, only log what would be done
    :type dry_run: boolean
    :returns: HTTP status code from the request
    :rtype: int
    """
    if dry_run:
        logger.warning("DRY RUN: do_post to url %s - payload:\n\t%s\n" % (url, payload))
        return 201
    resp = requests.post(url, data=anyjson.serialize(payload),
                         headers={'content-type': 'application/json'})
    return resp.status_code
def clone_nodemeister_node(nm_host, dst_name, src_name, munge_res, group_replace=None, noop=False, verbose=False):
    """
    Clone a node in NodeMeister, munging all parameter values through
    munge_res, a list of 2-element lists (a regex and its replacement).

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param dst_name: hostname of the new (destination) node
    :type dst_name: string
    :param src_name: hostname of the existing (source) node to clone
    :type src_name: string
    :param munge_res: list of [regex, replacement] pairs applied to every
      parameter value
    :type munge_res: list of lists
    :param group_replace: map of old_group_id => new_group_id substitutions
      applied to the source node's group memberships
    :type group_replace: dict
    :param noop: if True, only log what would be done
    :type noop: boolean
    :param verbose: enable verbose/debug logging
    :type verbose: boolean
    :returns: int ID of the new node, or False on failure
    :rtype: int or False
    """
    nodes = get_node_names(nm_host)
    # refuse to clobber an existing destination node
    dst_node_id = get_nm_node_id(nm_host, dst_name, nodenames=nodes)
    if dst_node_id is not False:
        logger.error("ERROR: node %s already exists in NodeMeister with id %d." % (dst_name, dst_node_id))
        return False
    src_node = get_nm_node(nm_host, hostname=src_name, nodenames=nodes)
    if len(src_node) == 0:
        logger.error("ERROR: could not find source node %s" % src_name)
        return False
    if verbose:
        logger.debug("Got source node id: {n}\n{src}".format(n=src_node['id'], src=src_node))
    # add to the right groups, applying any group_replace substitutions
    # (removed unused get_nm_node_classes()/get_nm_node_params() fetches)
    groups = []
    for g in src_node['groups']:
        if group_replace is not None and g in group_replace:
            if verbose:
                logger.debug("  changing group %d to %d (group_replace)" % (g, group_replace[g]))
            g = group_replace[g]
        groups.append(g)
    # TODO - these are going to be difficult because we need to resolve IDs from source to names,
    # and then map to the correct IDs for our new node
    # add excluded groups
    # add excluded params
    node_id = add_node(nm_host, dst_name, "imported by %s" % __file__, groups=groups, dry_run=noop)
    if node_id is False:
        logger.error("ERROR adding node in Nodemeister.")
        return False
    logger.info("Node added to NodeMeister with id %d" % node_id)
    ok = True
    # add excluded classes
    for c in src_node['excluded_classes']:
        c_name = get_name_for_class_exclusion(nm_host, c, verbose=verbose)
        if verbose:
            logger.debug("excluded class %s (%d)" % (c_name, c))
        res = add_node_class_exclusion(nm_host, node_id, c_name, dry_run=noop, verbose=verbose)
        if not res:
            logger.error("ERROR adding class exclusion of '%s' to node %d" % (c_name, node_id))
            ok = False
        if verbose:
            logger.info("added class_exclusion of '%s' to node %d" % (c_name, node_id))
    # add the (munged) params
    for p in src_node['parameters']:
        # BUGFIX: the original iterated 'munge_re', which is undefined in
        # this function (the parameter is named munge_res) -> NameError
        # whenever the source node had any parameters
        for (ptn, repl) in munge_res:
            foo = re.sub(ptn, repl, src_node['parameters'][p])
            if foo != src_node['parameters'][p] and verbose:
                logger.debug("Munged value of '%s' from '%s' to '%s'" % (p, src_node['parameters'][p], foo))
            src_node['parameters'][p] = foo
        res = add_param_to_node(nm_host, node_id, p, src_node['parameters'][p], dry_run=noop)
        if not res:
            logger.error("ERROR adding param %s with value '%s' to node %d" % (p, src_node['parameters'][p], node_id))
            ok = False
        if verbose:
            logger.info("\tadded param %s with value '%s' to node %d" % (p, src_node['parameters'][p], node_id))
    if len(src_node['classes']) > 0:
        # class migration for nodes is not implemented
        logger.critical("ERROR: script does not yet migrate classes for nodes.")
        ok = False
    if ok is False:
        return False
    return node_id
def clone_nodemeister_group(nm_host, dst_gname, src_gname, munge_re=None, noop=False, verbose=False):
    """
    Clone a group in NodeMeister, munging all parameter values and class
    params through munge_re.

    :param nm_host: NodeMeister hostname or IP
    :type nm_host: string
    :param dst_gname: name of the new (destination) group
    :type dst_gname: string
    :param src_gname: name of the existing (source) group to clone
    :type src_gname: string
    :param munge_re: list of 2-element lists, each a regex and its
      replacement string, applied to every param value and class param
    :type munge_re: list of lists
    :param noop: if True, only log what would be done
    :type noop: boolean
    :param verbose: enable verbose/debug logging
    :type verbose: boolean
    :returns: int ID of the new group, or False on failure
    :rtype: int or False
    """
    if munge_re is None:
        # BUGFIX: the default of None previously crashed the munge loops
        # below ("'NoneType' object is not iterable")
        munge_re = []
    group_names = get_group_names(nm_host)
    # refuse to clobber an existing destination group
    dst_gid = get_nm_group_id(nm_host, dst_gname, groups=group_names)
    if dst_gid is not False:
        logger.error("ERROR: group %s already exists in NodeMeister with id %d." % (dst_gname, dst_gid))
        return False
    src_group = get_nm_group(nm_host, gname=src_gname, groupnames=group_names)
    if len(src_group) == 0:
        logger.error("ERROR: could not find source group %s" % src_gname)
        return False
    if verbose:
        logger.debug("Got source group id: {n}\n{src}".format(n=src_group['id'], src=src_group))
    classes = get_nm_group_classes(nm_host)
    params = get_nm_group_params(nm_host)
    # resolves all ID lists in src_group to names/values, in place
    interpolate_group(src_group, classes, params, group_names)
    # map child group names back to IDs for the new group
    groups = []
    for child_name in src_group['groups']:
        child_id = get_nm_group_id(nm_host, child_name, groups=group_names)
        if child_id:
            groups.append(child_id)
    # ok, try adding the group
    gid = add_group(nm_host, dst_gname, "imported by %s" % __file__, groups=groups, dry_run=noop)
    if gid is False:
        logger.error("ERROR adding group in Nodemeister.")
        return False
    logger.info("Group added to NodeMeister with id %d" % gid)
    ok = True
    # add the (munged) params
    for p in src_group['parameters']:
        for (ptn, repl) in munge_re:
            foo = re.sub(ptn, repl, src_group['parameters'][p])
            if foo != src_group['parameters'][p] and verbose:
                logger.debug("Munged value of '%s' from '%s' to '%s'" % (p, src_group['parameters'][p], foo))
            src_group['parameters'][p] = foo
        res = add_param_to_group(nm_host, gid, p, src_group['parameters'][p], dry_run=noop)
        if not res:
            logger.error("ERROR adding param %s with value '%s' to group %d" % (p, src_group['parameters'][p], gid))
            ok = False
        if verbose:
            logger.info("added param %s with value '%s' to group %d" % (p, src_group['parameters'][p], gid))
    # add the (munged) classes
    for c in src_group['classes']:
        for (ptn, repl) in munge_re:
            foo = re.sub(ptn, repl, src_group['classes'][c])
            if foo != src_group['classes'][c] and verbose:
                logger.debug("Munged value of '%s' from '%s' to '%s'" % (c, src_group['classes'][c], foo))
            src_group['classes'][c] = foo
        res = add_class_to_group(nm_host, gid, c, src_group['classes'][c], dry_run=noop)
        if not res:
            logger.error("ERROR adding class %s with value '%s' to group %d" % (c, src_group['classes'][c], gid))
            ok = False
        if verbose:
            logger.info("added class %s with value '%s' to group %d" % (c, src_group['classes'][c], gid))
    if ok is False:
        logger.critical("cloning group failed.")
        return False
    return gid
| 35.338411 | 168 | 0.634253 |
import requests
import anyjson
import re
import logging
MISSING_ITEM = '-'
DIFF_MARKER = ">"
try:
logger.debug("importing nodemeisterlib")
except NameError:
FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(level=logging.ERROR, format=FORMAT)
logger = logging.getLogger(__name__)
def red(text):
    """Wrap ``text`` in ANSI escapes (SGR 31 = red, SGR 0 = reset)."""
    return '\033[31m{0}\033[0m'.format(text)
def print_columns(lines, spacer=' ' * 3, onlydifferent=False):
    """
    Format a list of 3-column lines into an aligned, columnar string.
    Lines with a 4th element of True are prefixed with DIFF_MARKER and
    colored red.

    BUGFIX: 'spacer' was previously accepted but ignored (the column
    separator was hard-coded to three spaces); the default now matches
    the old hard-coded value, so default output is unchanged.
    """
    s = ""
    # find the widest value in each of the three columns
    clen = [0, 0, 0]
    for l in lines:
        if onlydifferent and len(l) < 3:
            continue
        for c in range(3):
            if len(str(l[c])) > clen[c]:
                clen[c] = len(str(l[c]))
    line_spec = "{{0:<{1}s}}{0}{{1:<{2}s}}{0}{{2:<{3}s}}\n".format(spacer, clen[0], clen[1], clen[2])
    for l in lines:
        if len(l) > 3 and l[3] == True:
            s += red(line_spec.format(DIFF_MARKER + l[0], str(l[1]), str(l[2])))
        else:
            if onlydifferent:
                continue
            s += line_spec.format(l[0], str(l[1]), str(l[2]))
    return s
def pretty_diff_list(title, oA, oB):
    """
    Diff two lists into 3-column line lists; an item missing from one
    side produces a line with a 4th element of True.
    """
    out = []
    for item in sorted(set(oA) | set(oB)):
        if item in oA and item in oB:
            out.append(['', item, item])
        elif item in oA:
            out.append(['', item, MISSING_ITEM, True])
        else:
            out.append(['', MISSING_ITEM, item, True])
    return out
def pretty_diff_str(title, oA, oB):
    """
    Diff two scalar values into a single 3-column line, with a 4th
    element of True when they differ.
    """
    line = [title, oA, oB] if oA == oB else [title, oA, oB, True]
    return [line]
def pretty_diff_dict(title, oA, oB):
    """
    Diff two dicts into 3-column line lists (title line first);
    differing or one-sided keys carry a 4th element of True.
    """
    lines = [[title, '', '']]
    for key in sorted(set(oA.keys()) | set(oB.keys())):
        if key not in oB:
            lines.append([key, oA[key], MISSING_ITEM, True])
        elif key not in oA:
            lines.append([key, MISSING_ITEM, oB[key], True])
        elif oA[key] != oB[key]:
            lines.append([key, oA[key], oB[key], True])
        else:
            lines.append([key, oA[key], oB[key]])
    return lines
def pretty_diff_obj(title, oA, oB):
    """
    Dispatch to pretty_diff_(dict|str|list) based on the runtime type of
    either argument.  The old trailing 'return []' after the exhaustive
    if/else was unreachable and has been removed.
    """
    if type(oA) == type({}) or type(oB) == type({}):
        return pretty_diff_dict(title, oA, oB)
    # exact type checks preserved; covers str and unicode under Python 2
    if type(oA) == type("") or type(oB) == type("") or type(oA) == type(u"") or type(oB) == type(u""):
        return pretty_diff_str(title, oA, oB)
    return pretty_diff_list(title, oA, oB)
def pretty_diff(title, titleA, dictA, titleB, dictB, onlydifferent=False):
    """Render a human-readable, column-aligned diff of two config dicts.

    The 'name', 'id' and 'description' keys are printed first as a header,
    then every remaining key is diffed via pretty_diff_obj().

    WARNING: this MUTATES both dictA and dictB (pops 'name', 'id' and
    'description' from the caller's dicts).

    :param title: heading printed on the first line
    :param titleA: column label for dictA
    :param dictA: first dict to compare (modified in place)
    :param titleB: column label for dictB
    :param dictB: second dict to compare (modified in place)
    :param onlydifferent: passed through to print_columns(); only show
        rows flagged as different
    :returns: the formatted diff as a multi-line string
    """
    s = "Diff of %s\n" % title
    lines = []
    lines.append(['', titleA, titleB])
    lines.append(['', '-' * len(titleA), '-' * len(titleB)])
    # header rows for the three well-known keys
    lines.append(['name', dictA.get('name', '<none>'), dictB.get('name', '<none>')])
    lines.append(['id', dictA.get('id', '<none>'), dictB.get('id', '<none>')])
    lines.append(['description', dictA.get('description', '<none>'), dictB.get('description', '<none>')])
    # remove them so the generic loop below doesn't repeat them
    dictA.pop('name', None)
    dictA.pop('id', None)
    dictA.pop('description', None)
    dictB.pop('name', None)
    dictB.pop('id', None)
    dictB.pop('description', None)
    lines.append(['', '', ''])
    # diff the union of all remaining keys
    k = set.union(set(dictA.keys()), set(dictB.keys()))
    for p in sorted(k):
        lines.append([p.capitalize() + ':', '', ''])
        lines.extend(pretty_diff_obj('', dictA.get(p), dictB.get(p)))
    s += print_columns(lines, onlydifferent=onlydifferent)
    return s
def get_nm_node_yaml(nm_host, node_name, ssl_verify=False, verbose=False):
    """Fetch a node's ENC YAML from NodeMeister; None (and a log) on non-200."""
    nm_url = "http://%s/enc/puppet/%s" % (nm_host, node_name)
    resp = requests.get(nm_url, headers={'Accept': 'text/yaml'}, verify=ssl_verify)
    if resp.status_code != 200:
        logger.error("got status code {s} for {u}".format(s=resp.status_code, u=nm_url))
        return None
    return resp.content
def get_dashboard_node_yaml(url, ssl_verify=False, verbose=False):
    """Fetch node YAML from a Puppet Dashboard URL; None (and a log) on non-200."""
    resp = requests.get(url, headers={'Accept': 'text/yaml'}, verify=ssl_verify)
    if resp.status_code != 200:
        logger.error("got status code {s} for {u}".format(s=resp.status_code, u=url))
        return None
    return resp.content
def get_json(url):
    """GET *url* and deserialize its JSON body.

    Uses requests' built-in ``.json()`` when the installed version has
    it, falling back to anyjson otherwise.  Returns None (and logs) when
    the body cannot be deserialized.
    """
    r = requests.get(url)
    if 'json' in dir(r):
        return r.json()
    try:
        j = anyjson.deserialize(r.content)
        return j
    # narrowed from a bare except: so SystemExit/KeyboardInterrupt propagate
    except Exception:
        logger.error("could not deserialize JSON for {u} (got status code {s})".format(s=r.status_code, u=url))
        return None
def get_group_names(nm_host):
    """Return {group_id: group_name} for all NodeMeister groups."""
    groups = get_json("http://%s/enc/groups/" % nm_host)
    return dict((grp['id'], grp['name']) for grp in groups)
def get_nm_group_classes(nm_host):
    """Return {id: class-record} for every group class in NodeMeister."""
    records = get_json("http://%s/enc/classes/groups/" % nm_host)
    return dict((rec['id'], rec) for rec in records)
def get_nm_group_params(nm_host):
    """Return {id: parameter-record} for every group parameter.

    Non-None parameter values are passed through clean_value() to strip
    surrounding quote/backslash characters.
    """
    result = {}
    for rec in get_json("http://%s/enc/parameters/groups/" % nm_host):
        if rec['paramvalue'] is not None:
            rec['paramvalue'] = clean_value(rec['paramvalue'])
        result[rec['id']] = rec
    return result
def get_nm_group(nm_host, gname=None, gid=None, groupnames=None):
    """Fetch one group's record by name or id.

    :param groupnames: optional pre-fetched {id: name} cache
    :raises ValueError: when neither gname nor gid is given
    :returns: the group dict, or {} when the name cannot be resolved
    """
    if gid is None and gname is None:
        raise ValueError("get_nm_group called without gname or gid")
    if gid is None:
        if groupnames is None:
            groupnames = get_group_names(nm_host)
        for candidate_id, candidate_name in groupnames.items():
            if candidate_name == gname:
                gid = candidate_id
        if gid is None:
            return {}
    return get_json("http://%s/enc/groups/%d/" % (nm_host, gid))
def interpolate_group(group, classes, params, group_names):
    """Replace the id references inside *group* with human-readable data.

    Mutates and returns *group*: parameter ids become a {key: value}
    dict, class ids a {classname: classparams} dict, and parent/group
    ids become lists of group names.
    """
    group['parameters'] = dict(
        (params[pid]['paramkey'], params[pid]['paramvalue'])
        for pid in group.get('parameters', {})
    )
    group['classes'] = dict(
        (classes[cid]['classname'], classes[cid]['classparams'])
        for cid in group.get('classes', {})
    )
    group['parents'] = [group_names[pid] for pid in group.get('parents', {})]
    group['groups'] = [group_names[gid] for gid in group.get('groups', {})]
    return group
def add_group(nm_host, name, description, parents=None, groups=None, dry_run=False):
    """POST a new group to NodeMeister; return its id, or False on failure."""
    payload = {'name': name, 'description': description}
    if parents is not None:
        payload['parents'] = parents
    if groups is not None:
        payload['groups'] = groups
    code = do_post("http://%s/enc/groups/" % nm_host, payload, dry_run=dry_run)
    if code != 201:
        logger.error("ERROR: add_group got status code %d" % code)
        return False
    return get_nm_group_id(nm_host, name, dry_run=dry_run)
def get_nm_group_id(nm_host, name, groups=None, dry_run=False):
    """Resolve a group name to its id.

    Returns 0 on dry runs, False when the name is unknown.
    """
    if dry_run:
        return 0
    if groups is None:
        groups = get_group_names(nm_host)
    for gid, gname in groups.items():
        if gname == name:
            return gid
    return False
def add_param_to_group(nm_host, gid, pname, pval, dry_run=False):
    """POST a parameter onto a group; True on HTTP 201, else False.

    Blank or quoted-empty string values are normalized to None before
    being sent, so they are stored as NULL.
    """
    # NOTE(review): basestring is Python 2 only; this line will NameError
    # under Python 3 — confirm the deployment interpreter
    if isinstance(pval, basestring) and (pval.strip() == "" or pval == "" or pval == "''"):
        pval = None
    payload = {'group': gid, 'paramkey': pname, 'paramvalue': pval}
    url = "http://%s/enc/parameters/groups/" % nm_host
    status_code = do_post(url, payload, dry_run=dry_run)
    if status_code == 201:
        return True
    logger.error("ERROR: add_param_to_group got status code %d" % status_code)
    return False
def add_class_to_group(nm_host, gid, classname, classparams=None, dry_run=False):
    """Attach a puppet class to a group; True on HTTP 201, else False."""
    payload = {'group': gid, 'classname': classname, 'classparams': classparams}
    code = do_post("http://%s/enc/classes/groups/" % nm_host, payload, dry_run=dry_run)
    if code == 201:
        return True
    logger.error("ERROR: add_class_to_group got status code %d" % code)
    return False
def get_node_names(nm_host):
    """Return {node_id: hostname} for all NodeMeister nodes."""
    nodes = get_json("http://%s/enc/nodes/" % nm_host)
    return dict((node['id'], node['hostname']) for node in nodes)
def get_nm_node_id(nm_host, hostname, nodenames=None, dry_run=False):
    """Resolve a hostname to its node id.

    Returns 0 on dry runs, False (with a log) when the host is unknown.
    """
    if dry_run:
        return 0
    if nodenames is None:
        nodenames = get_node_names(nm_host)
    for node_id, node_hostname in nodenames.items():
        if node_hostname == hostname:
            return node_id
    logger.error("could not find node ID for {h}".format(h=hostname))
    return False
def get_nm_node(nm_host, hostname=None, node_id=None, nodenames=None):
    """Fetch one node's record by hostname or id.

    :param nodenames: optional pre-fetched {id: hostname} cache
    :raises ValueError: when neither hostname nor node_id is given
    :returns: the node dict, or {} when the hostname cannot be resolved
    """
    if node_id is None and hostname is None:
        raise ValueError("get_nm_node called without hostname or node_id")
    if node_id is None:
        if nodenames is None:
            nodenames = get_node_names(nm_host)
        for nid, nhost in nodenames.items():
            if nhost == hostname:
                node_id = nid
        if node_id is None:
            logger.error("could not find hode with hostname {h}".format(h=hostname))
            return {}
    return get_json("http://%s/enc/nodes/%d/" % (nm_host, node_id))
def get_nm_node_classes(nm_host):
    """Return {id: class-record} for every node class in NodeMeister."""
    records = get_json("http://%s/enc/classes/nodes/" % nm_host)
    return dict((rec['id'], rec) for rec in records)
def get_nm_node_params(nm_host):
    """Return {id: parameter-record} for every node parameter."""
    records = get_json("http://%s/enc/parameters/nodes/" % nm_host)
    return dict((rec['id'], rec) for rec in records)
def add_node(nm_host, hostname, description, groups=None, dry_run=False):
    """POST a new node to NodeMeister; return its id, or False on failure."""
    payload = {'hostname': hostname, 'description': description}
    if groups is not None:
        payload['groups'] = groups
    logger.debug("adding node {h}".format(h=hostname))
    code = do_post("http://%s/enc/nodes/" % nm_host, payload, dry_run=dry_run)
    if code != 201:
        logger.error("ERROR: add_node got status code %d" % code)
        return False
    return get_nm_node_id(nm_host, hostname, dry_run=dry_run)
def add_param_to_node(nm_host, node_id, pname, pval, dry_run=False):
    """POST a parameter onto a node; True on HTTP 201, else False.

    Mirrors add_param_to_group(): blank or quoted-empty string values are
    normalized to None.  The previous code called ``pval.strip()``
    unconditionally and raised AttributeError when pval was None or any
    non-string; non-strings are now passed through unchanged.
    """
    # (type(""), type(u"")) covers str on py3 and str/unicode on py2
    if isinstance(pval, (type(""), type(u""))) and (pval.strip() == "" or pval == "''"):
        pval = None
    payload = {'node': node_id, 'paramkey': pname, 'paramvalue': pval}
    url = "http://%s/enc/parameters/nodes/" % nm_host
    logger.debug("adding param '{pname}' to node {n} with val: {pval}".format(n=node_id, pname=pname, pval=pval))
    status_code = do_post(url, payload, dry_run=dry_run)
    if status_code == 201:
        return True
    logger.error("ERROR: add_param_to_node got status code %d" % status_code)
    return False
def add_class_to_node(nm_host, node_id, classname, classparams=None, dry_run=False):
    """Attach a puppet class to a node; True on HTTP 201, else False."""
    payload = {'node': node_id, 'classname': classname, 'classparams': classparams}
    url = "http://%s/enc/classes/nodes/" % nm_host
    logger.debug("adding class '{cn}' to node {n} with params: {cp}".format(n=node_id, cn=classname, cp=classparams))
    code = do_post(url, payload, dry_run=dry_run)
    if code == 201:
        return True
    logger.error("ERROR: add_class_to_node got status code %d" % code)
    return False
def get_name_for_class_exclusion(nm_host, class_exclusion_id, verbose):
    """Map a class-exclusion id to its exclusion (class) name.

    Returns False when the listing cannot be fetched or the id is
    unknown.  (*verbose* is unused but kept for interface compatibility;
    the unused local ``r = {}`` of the original was removed.)
    """
    exclusions = get_json("http://%s/enc/exclusions/classes/" % nm_host)
    if exclusions is None:
        return False
    for record in exclusions:
        if record['id'] == class_exclusion_id:
            return record['exclusion']
    return False
def add_node_class_exclusion(nm_host, node_id, classname, dry_run=False, verbose=False):
    """Exclude a puppet class from a node; True on HTTP 201, else False."""
    payload = {'node': node_id, 'exclusion': classname}
    url = "http://%s/enc/exclusions/classes/" % nm_host
    logger.debug("adding class exclusion for '{cn}' to node {n}".format(n=node_id, cn=classname))
    code = do_post(url, payload, dry_run=dry_run)
    if code == 201:
        return True
    logger.error("ERROR: add_node_class_exclusion got status code %d" % code)
    return False
def clean_value(v, debug=False):
    """Strip surrounding double-quote/backslash characters from string values.

    Non-string values are returned untouched.
    """
    if debug:
        print("clean_value '%s'" % v)
    string_types = (type(""), type(u""))
    if type(v) in string_types:
        return v.strip('"\\')
    return v
def do_post(url, payload, dry_run=False):
    """POST *payload* as JSON to *url*; return the HTTP status code.

    Dry runs only log the would-be request and fake a 201.
    """
    if dry_run:
        logger.warning("DRY RUN: do_post to url %s - payload:\n\t%s\n" % (url, payload))
        return 201
    resp = requests.post(url, data=anyjson.serialize(payload),
                         headers={'content-type': 'application/json'})
    return resp.status_code
def clone_nodemeister_node(nm_host, dst_name, src_name, munge_res, group_replace=None, noop=False, verbose=False):
    """Clone an existing NodeMeister node to a new hostname.

    :param nm_host: NodeMeister host to operate on
    :param dst_name: hostname of the node to create
    :param src_name: hostname of the node to copy from
    :param munge_res: list of (pattern, replacement) regex tuples applied
        to every parameter value before it is written to the new node
    :param group_replace: optional {old_group_id: new_group_id} mapping
    :param noop: if True, log what would be POSTed without doing it
    :returns: the new node's id, or False on any failure

    BUG FIX: the parameter-munging loop previously iterated the
    non-existent name ``munge_re`` and raised NameError whenever the
    source node had parameters; it now uses the ``munge_res`` argument.
    """
    nodes = get_node_names(nm_host)
    dst_node_id = get_nm_node_id(nm_host, dst_name, nodenames=nodes)
    if dst_node_id is not False:
        logger.error("ERROR: node %s already exists in NodeMeister with id %d." % (dst_name, dst_node_id))
        return False
    src_node = get_nm_node(nm_host, hostname=src_name, nodenames=nodes)
    if len(src_node) == 0:
        logger.error("ERROR: could not find source node %s" % src_name)
        return False
    if verbose:
        logger.debug("Got source node id: {n}\n{src}".format(n=src_node['id'], src=src_node))
    # fetched for parity with clone_nodemeister_group; currently unused here
    classes = get_nm_node_classes(nm_host)
    params = get_nm_node_params(nm_host)
    # add to the right groups, applying any requested id replacements
    groups = []
    for g in src_node['groups']:
        if group_replace is not None:
            if g in group_replace:
                if verbose:
                    logger.debug(" changing group %d to %d (group_replace)" % (g, group_replace[g]))
                g = group_replace[g]
        groups.append(g)
    # TODO - excluded groups and excluded params are not migrated yet:
    # their ids must be resolved to names on the source and re-mapped to
    # the correct ids for the new node
    node_id = add_node(nm_host, dst_name, "imported by %s" % __file__, groups=groups, dry_run=noop)
    if node_id is False:
        logger.error("ERROR adding node in Nodemeister.")
        return False
    else:
        logger.info("Node added to NodeMeister with id %d" % node_id)
    ok = True
    # copy class exclusions
    for c in src_node['excluded_classes']:
        c_name = get_name_for_class_exclusion(nm_host, c, verbose=verbose)
        if verbose:
            logger.debug("excluded class %s (%d)" % (c_name, c))
        res = add_node_class_exclusion(nm_host, node_id, c_name, dry_run=noop, verbose=verbose)
        if not res:
            logger.error("ERROR adding class exclusion of '%s' to node %d" % (c_name, node_id))
            ok = False
        if verbose:
            logger.info("added class_exclusion of '%s' to group %d" % (c_name, node_id))
    # copy parameters, applying the munge regexes to each value first
    for p in src_node['parameters']:
        for (ptn, repl) in munge_res:  # was `munge_re` -> NameError
            foo = re.sub(ptn, repl, src_node['parameters'][p])
            if foo != src_node['parameters'][p] and verbose:
                logger.debug("Munged value of '%s' from '%s' to '%s'" % (p, src_node['parameters'][p], foo))
            src_node['parameters'][p] = foo
        res = add_param_to_node(nm_host, node_id, p, src_node['parameters'][p], dry_run=noop)
        if not res:
            logger.error("ERROR adding param %s with value '%s' to node %d" % (p, src_node['parameters'][p], node_id))
            ok = False
        if verbose:
            logger.info("\tadded param %s with value '%s' to group %d" % (p, src_node['parameters'][p], node_id))
    if len(src_node['classes']) > 0:
        logger.critical("ERROR: script does not yet migrate classes for nodes.")
        ok = False
    if ok is False:
        return False
    return node_id
def clone_nodemeister_group(nm_host, dst_gname, src_gname, munge_re=None, noop=False, verbose=False):
    """Clone an existing NodeMeister group to a new name.

    :param nm_host: NodeMeister host to operate on
    :param dst_gname: name of the group to create
    :param src_gname: name of the group to copy from
    :param munge_re: optional list of (pattern, replacement) regex tuples
        applied to every parameter and class value before writing
    :param noop: if True, log what would be POSTed without doing it
    :returns: the new group's id, or False on any failure

    BUG FIX: munge_re defaults to None but was iterated unconditionally,
    raising TypeError whenever the argument was omitted; it is now
    treated as an empty list.
    """
    if munge_re is None:
        munge_re = []
    group_names = get_group_names(nm_host)
    dst_gid = get_nm_group_id(nm_host, dst_gname, groups=group_names)
    if dst_gid is not False:
        logger.error("ERROR: group %s already exists in NodeMeister with id %d." % (dst_gname, dst_gid))
        return False
    src_group = get_nm_group(nm_host, gname=src_gname, groupnames=group_names)
    if len(src_group) == 0:
        logger.error("ERROR: could not find source group %s" % src_gname)
        return False
    if verbose:
        logger.debug("Got source group id: {n}\n{src}".format(n=src_group['id'], src=src_group))
    classes = get_nm_group_classes(nm_host)
    params = get_nm_group_params(nm_host)
    # resolve id references to names so values can be copied by name
    interp_src_group = interpolate_group(src_group, classes, params, group_names)
    # map the (now name-based) member groups back to ids
    groups = []
    for member_name in src_group['groups']:
        member_id = get_nm_group_id(nm_host, member_name, groups=group_names)
        if member_id:
            groups.append(member_id)
    # ok, try adding the group
    gid = add_group(nm_host, dst_gname, "imported by %s" % __file__, groups=groups, dry_run=noop)
    if gid is False:
        logger.error("ERROR adding group in Nodemeister.")
        return False
    else:
        logger.info("Group added to NodeMeister with id %d" % gid)
    ok = True
    # copy parameters, applying the munge regexes to each value first
    for p in src_group['parameters']:
        for (ptn, repl) in munge_re:
            foo = re.sub(ptn, repl, src_group['parameters'][p])
            if foo != src_group['parameters'][p] and verbose:
                logger.debug("Munged value of '%s' from '%s' to '%s'" % (p, src_group['parameters'][p], foo))
            src_group['parameters'][p] = foo
        res = add_param_to_group(nm_host, gid, p, src_group['parameters'][p], dry_run=noop)
        if not res:
            logger.error("ERROR adding param %s with value '%s' to group %d" % (p, src_group['parameters'][p], gid))
            ok = False
        if verbose:
            logger.info("added param %s with value '%s' to group %d" % (p, src_group['parameters'][p], gid))
    # copy classes, munging their params the same way
    for c in src_group['classes']:
        for (ptn, repl) in munge_re:
            foo = re.sub(ptn, repl, src_group['classes'][c])
            if foo != src_group['classes'][c] and verbose:
                logger.debug("Munged value of '%s' from '%s' to '%s'" % (c, src_group['classes'][c], foo))
            src_group['classes'][c] = foo
        res = add_class_to_group(nm_host, gid, c, src_group['classes'][c], dry_run=noop)
        if not res:
            logger.error("ERROR adding class %s with value '%s' to group %d" % (c, src_group['classes'][c], gid))
            ok = False
        if verbose:
            logger.info("added class %s with value '%s' to group %d" % (c, src_group['classes'][c], gid))
    if ok is False:
        logger.critical("cloning group failed.")
        return False
    return gid
| true | true |
7902068c2c80cf2e8ba0a0dd8b6fb2489ee8b432 | 1,165 | py | Python | 0844_backspace_string_compare.py | subwaymatch/leetcode | 2592ba2e55682fd54d0060c5b1ff1b8469ba7916 | [
"MIT"
] | null | null | null | 0844_backspace_string_compare.py | subwaymatch/leetcode | 2592ba2e55682fd54d0060c5b1ff1b8469ba7916 | [
"MIT"
] | null | null | null | 0844_backspace_string_compare.py | subwaymatch/leetcode | 2592ba2e55682fd54d0060c5b1ff1b8469ba7916 | [
"MIT"
] | null | null | null | class Solution:
def backspaceCompare(self, S: str, T: str) -> bool:
s_bcount, t_bcount = 0, 0
s_idx, t_idx = len(S) - 1, len(T) - 1
while s_idx >= 0 or t_idx >= 0:
while s_idx >= 0:
if S[s_idx] == '#':
s_bcount += 1
s_idx -= 1
continue
if s_bcount > 0:
s_idx -= 1
s_bcount -= 1
else:
break
while t_idx >= 0:
if T[t_idx] == '#':
t_bcount += 1
t_idx -= 1
continue
if t_bcount > 0:
t_idx -= 1
t_bcount -= 1
else:
break
if s_idx >= 0 and t_idx >= 0 and S[s_idx] != T[t_idx]:
return False
elif (s_idx >= 0 and t_idx < 0) or (s_idx < 0 and t_idx >= 0):
return False
s_idx -= 1
t_idx -= 1
return True
| 29.125 | 74 | 0.318455 | class Solution:
def backspaceCompare(self, S: str, T: str) -> bool:
s_bcount, t_bcount = 0, 0
s_idx, t_idx = len(S) - 1, len(T) - 1
while s_idx >= 0 or t_idx >= 0:
while s_idx >= 0:
if S[s_idx] == '#':
s_bcount += 1
s_idx -= 1
continue
if s_bcount > 0:
s_idx -= 1
s_bcount -= 1
else:
break
while t_idx >= 0:
if T[t_idx] == '#':
t_bcount += 1
t_idx -= 1
continue
if t_bcount > 0:
t_idx -= 1
t_bcount -= 1
else:
break
if s_idx >= 0 and t_idx >= 0 and S[s_idx] != T[t_idx]:
return False
elif (s_idx >= 0 and t_idx < 0) or (s_idx < 0 and t_idx >= 0):
return False
s_idx -= 1
t_idx -= 1
return True
| true | true |
790207282ca331068c3a54864207948571b5a342 | 10,406 | py | Python | puffmarker/input/import_RICE_pilot_data.py | nsaleheen/puffmarker_plus_plus | 36201b3f95c8a5cb39f744ae65a01ec5f5abf36d | [
"MIT"
] | null | null | null | puffmarker/input/import_RICE_pilot_data.py | nsaleheen/puffmarker_plus_plus | 36201b3f95c8a5cb39f744ae65a01ec5f5abf36d | [
"MIT"
] | 1 | 2021-04-10T19:39:00.000Z | 2021-04-10T19:39:00.000Z | puffmarker/input/import_RICE_pilot_data.py | nsaleheen/puffmarker_plus_plus | 36201b3f95c8a5cb39f744ae65a01ec5f5abf36d | [
"MIT"
] | null | null | null | # import datetime, time
from typing import List
from datetime import datetime, timedelta
import pytz
import os
from puffmarker.domain.datapoint import DataPoint
from puffmarker.input.import_stream_processor_inputs import load_data, load_data_offset
data_dir = '/home/nsaleheen/data/rice_ema_puffmarker_activity_loc/'
# data_dir = '/home/nsaleheen/data/RICE_data/without_raw_data/'
smoking_self_report_file = 'SMOKING+SELF_REPORT+PHONE.csv'
activity_type_file = 'ACTIVITY_TYPE+PHONE.csv'
puffmarker_smoking_epi_cloud_file = 'PUFFMARKER_SMOKING_EPISODE+PHONE.csv'
# streamprocessor_puffmarker_smoking_epi_file = 'streamprocessor.puffMarker.smoking.episode.rip.wrist.combine.csv'
streamprocessor_puffmarker_smoking_epi_file = 'org.md2k.streamprocessor+PUFFMARKER_SMOKING_EPISODE+PHONE.csv'
# streamprocessor_puffmarker_smoking_epi_file = 'puffmarker_streamprocessor.csv'
ema_random_file = 'EMA+RANDOM_EMA+PHONE.csv'
ema_smoking_file = 'EMA+SMOKING_EMA+PHONE.csv'
ema_end_of_day_file = 'EMA+END_OF_DAY_EMA+PHONE.csv'
ema_stressed_file = 'EMA+STRESS_EMA+PHONE.csv'
tz = pytz.timezone('US/Central')
print(tz)
# unix time to '2017-11-01 15:52:00'
def unixtime_to_datetime_pre(timestamp):
    """Millisecond epoch -> 'MM-DD-YYYY HH:MM:SS' string in US/Central."""
    seconds = timestamp / 1000
    return datetime.fromtimestamp(seconds, tz).strftime('%m-%d-%Y %H:%M:%S')
def unixtime_to_datetime(timestamp):
    """Millisecond epoch -> 'MM/DD HH:MM:SS' string in US/Central."""
    seconds = timestamp / 1000
    return datetime.fromtimestamp(seconds, tz).strftime('%m/%d %H:%M:%S')
# unix time to '2017-11-01 15:52:00' -> '2017-11-01'
def unixtime_to_date(timestamp):
    """Millisecond epoch -> the date portion ('MM/DD') of the local datetime string."""
    return unixtime_to_datetime(timestamp).split(' ', 1)[0]
# unix time to '2017-11-01 15:52:00' -> '15:52:00'
def unixtime_to_time(timestamp):
    """Millisecond epoch -> the time portion ('HH:MM:SS') of the local datetime string."""
    # the formatted string contains exactly one space, so partition is safe
    return unixtime_to_datetime(timestamp).partition(' ')[2]
# unix time to '15*52' in minutes
def unixtime_to_timeOfDay(timestamp):
    """Millisecond epoch -> minutes since local midnight (h*60 + m)."""
    hours, minutes = unixtime_to_time(timestamp).split(':')[:2]
    return int(hours) * 60 + int(minutes)
ut = 1512506705814 # 1386181800
print(unixtime_to_datetime(ut))
print(unixtime_to_date(ut))
print(unixtime_to_time(ut))
print(unixtime_to_timeOfDay(ut))
# timezone = datetime.timezone(datetime.timedelta(milliseconds=offset))
# ts = datetime.datetime.fromtimestamp(ts, timezone)
import json
def get_fileName(cur_dir, file_sufix):
    """Return the first file in *cur_dir* whose name ends with *file_sufix*, or None."""
    for name in os.listdir(cur_dir):
        if name.endswith(file_sufix):
            return name
    return None
def get_EMA_data(cur_dir, filename):
    """Read one EMA CSV dump and parse its rows.

    Each non-trivial line is ``<epoch_ms>,<offset>,<sample>``; the
    sample keeps embedded commas (split with maxsplit=2) and has its
    surrounding quote characters stripped.

    :param cur_dir: directory path (with trailing separator) of the file
    :param filename: file name, or None when the file was not found
    :returns: list of [datetime, offset, sample] rows ([] when filename is None)
    """
    if filename is None:
        return []
    # context manager ensures the handle is closed even on parse errors
    with open(cur_dir + filename) as fp:
        lines = fp.read().splitlines()
    data = []
    for line in lines:
        if len(line) > 1:
            ts, offset, sample = line.split(',', 2)
            # timestamps are epoch milliseconds; convert to local datetime
            start_time = int(float(ts)) / 1000.0
            start_time = datetime.fromtimestamp(start_time)
            offset = int(offset)
            # drop the surrounding quote characters
            sample = sample[1:-1]
            data.append([start_time, offset, sample])
    return data
# random ema + stressed EMA
# sample = (#smoked, from_time, to_time); eg: "2 hrs - 4 hrs" one cig smoked (1, 2*60*60*1000, 4*60*60*1000)
def get_random_EMA(cur_dir, filename) -> List[DataPoint]:
    """Parse a random/stressed EMA file into smoking-report DataPoints.

    Each completed EMA that reports smoking yields a DataPoint whose
    sample is ``[n_smoked, from_ms, to_ms, ...]`` — the count followed by
    one (start, end) millisecond-offset pair per selected "how long ago"
    option (e.g. "2 hrs - 4 hrs" -> 2*60*60*1000, 4*60*60*1000).

    NOTE(review): the question_answers indices 32-35 assume the fixed
    RICE random-EMA question ordering — confirm against the survey
    definition before reuse.
    """
    emas = get_EMA_data(cur_dir, filename)
    data = []
    for ema in emas:
        d = ema[2]
        jsn_file = json.loads(d)
        status = jsn_file['status']
        # print(jsn_file['status'])
        if status == 'COMPLETED':
            is_smoked = jsn_file['question_answers'][32]['response'][0]
            if is_smoked == 'Yes':
                nSmoked = jsn_file['question_answers'][33]['response'][0]
                # the "how long ago" question index depends on the count
                if int(nSmoked) == 1:
                    nQI = 34
                else:
                    nQI = 35
                # options: ["0 - 2 hrs", "2 hrs - 4 hrs", "4 hrs - 6 hrs", "6 hrs - 8 hrs", "8 hrs - 10 hrs", "10 hrs - 12 hrs", "More than 12 hrs"]
                howlong_ago = jsn_file['question_answers'][nQI]['response']
                sample = [int(nSmoked)]
                for hla in howlong_ago:
                    hla = str(hla)
                    if hla in ["More than 12 hrs"]:
                        # open-ended option: treat as the 12h-24h window
                        sample.extend([12 * 60 * 60 * 1000, 24 * 60 * 60 * 1000])
                        continue
                    # parse "<a> hrs - <b> hrs" into millisecond offsets
                    st = hla.split('-')[0]
                    et = hla.split('-')[1]
                    st = st.split(' ')[0]
                    st = int(st.strip()) * 60 * 60 * 1000
                    et = et.strip().split(' ')[0]
                    et = int(et.strip()) * 60 * 60 * 1000
                    sample.extend([st, et])
                # print([ema[0], ema[1], nSmoked, howlong_ago, sample])
                # data.append([ema[0], ema[1], int(nSmoked)])
                data.append(DataPoint(start_time=ema[0], offset=ema[1], sample=sample))
    return data
# Confirm refute
def get_smoking_EMA(cur_dir, filename) -> List[DataPoint]:
    """Parse confirm/refute smoking EMAs: sample 1 = confirmed, 0 = refuted."""
    data = []
    for start_time, offset, raw in get_EMA_data(cur_dir, filename):
        parsed = json.loads(raw)
        if parsed['status'] != 'COMPLETED':
            continue
        # first three characters of the answer text distinguish Yes/No
        answer = parsed['question_answers'][0]['question_answer'][0:3]
        smoked = 1 if answer.lower() == 'yes' else 0
        data.append(DataPoint(start_time=start_time, offset=offset, sample=smoked))
    return data
def get_smoking_self_report(cur_dir, filename) -> List[DataPoint]:
    """Parse self-report records; emit DataPoint(sample=1) for each 'YES' report."""
    reports = []
    for start_time, offset, raw in get_EMA_data(cur_dir, filename):
        if 'YES' in json.loads(raw)['message']:
            reports.append(DataPoint(start_time=start_time, offset=offset, sample=1))
    return reports
cur_dir = data_dir + '2007/'
# emas = get_smoking_self_report(cur_dir, get_fileName(cur_dir, smoking_self_report_file))
# print(emas)
# emas = get_smoking_EMA(cur_dir, get_fileName(cur_dir, ema_smoking_file))
# print(emas)
# emas = get_random_EMA(cur_dir, get_fileName(cur_dir, ema_stressed_file))
# print(emas)
# emas = get_random_EMA(cur_dir, get_fileName(cur_dir, ema_random_file))
# print(emas)
def get_RICE_PILOT_EMAs(pid):
    """Cross-check puffMarker smoking episodes against EMA evidence for one participant.

    For participant *pid*, loads the detected smoking episodes plus the
    self-reports, confirm/refute EMAs and random/stressed EMAs, then
    counts for each episode whether it is supported by (a) a self-report
    within +/-30 min, (b) a confirm EMA between -10 min and +30 min, or
    (c) any random/stressed EMA whose reported smoking window covers the
    episode.  Prints the support summary; returns None.
    """
    cur_dir = data_dir + pid + '/'
    # smoking_epis = load_data(cur_dir + get_fileName(cur_dir, streamprocessor_puffmarker_smoking_epi_file))
    smoking_epis = load_data_offset(cur_dir + get_fileName(cur_dir, streamprocessor_puffmarker_smoking_epi_file))
    smoking_selfreport = get_smoking_self_report(cur_dir, get_fileName(cur_dir, smoking_self_report_file))
    smoking_emas = get_smoking_EMA(cur_dir, get_fileName(cur_dir, ema_smoking_file))
    random_emas = get_random_EMA(cur_dir, get_fileName(cur_dir, ema_random_file))
    stressed_emas = get_random_EMA(cur_dir, get_fileName(cur_dir, ema_stressed_file))
    # per-episode support flags for each evidence source
    sup_sr = [0] * len(smoking_epis)
    sup_cr = [0] * len(smoking_epis)
    sup_ema = [0] * len(smoking_epis)
    for i, epi in enumerate(smoking_epis):
        # self-report within 30 minutes either side of the episode
        for sr in smoking_selfreport:
            time_diff = (sr.start_time - epi.start_time).total_seconds()
            if (time_diff > -1800 and time_diff < 1800):
                sup_sr[i] = 1
                break
        # confirm EMA between 10 min before and 30 min after
        for sr in smoking_emas:
            time_diff = (sr.start_time - epi.start_time).total_seconds()
            if (time_diff > -600 and time_diff < 1800):
                sup_cr[i] = 1
                break
        # NOTE: loop variable `re` shadows the imported re module locally;
        # sample[1]/sample[2] are the "how long ago" window bounds in ms
        for re in random_emas:
            st = re.start_time - timedelta(milliseconds=re.sample[2])
            et = re.start_time - timedelta(milliseconds=re.sample[1])
            if (epi.start_time >= st and epi.start_time <= et):
                sup_ema[i] = 1
                break
        for re in stressed_emas:
            st = re.start_time - timedelta(milliseconds=re.sample[2])
            et = re.start_time - timedelta(milliseconds=re.sample[1])
            if (epi.start_time >= st and epi.start_time <= et):
                sup_ema[i] = 1
                break
    # encode the three flags as one decimal number per episode (e.g. 110)
    sup = [sup_sr[i] * 100 + sup_cr[i] * 10 + sup_ema[i] for i in range(len(sup_ema))]
    print('se=' + str(len(smoking_epis)) + ' : sup sr = ' + str(sum(sup_sr)) + ' : sup cr = ' + str(
        sum(sup_cr)) + ' : sup ema = ' + str(sum(sup_ema)))
    non_sup = len([v for v in sup if v == 0])
    print('Supported : Not supported = ' + str(len(sup) - non_sup) + ' : ' + str(non_sup))
    # print(sup)
    # print(len(smoking_selfreport))
    # print(len(smoking_emas))
    # print(len(random_emas))
    # print(len(stressed_emas))
    # print(smoking_epis)
    # print(smoking_emas)
    # print(smoking_selfreport)
    # print(random_emas)
    # print(stressed_emas)
#
# , "2008", "2010", "2011", "2012"
pids = ["2006", "2007", "2009", "2013", "2014", "2015", "2016", "2017"]
# for pid in pids:
# print('-----------' + pid + '---------------------------')
# get_RICE_PILOT_EMAs(pid)
get_RICE_PILOT_EMAs('2006')
# -----------2006---------------------------
# se=25 : sup sr = 19 : sup cr = 18 : sup ema = 4
# Supported : Not supported = 21 : 4
# -----------2007---------------------------
# se=6 : sup sr = 5 : sup cr = 6 : sup ema = 0
# Supported : Not supported = 6 : 0
# -----------2009---------------------------
# se=32 : sup sr = 14 : sup cr = 30 : sup ema = 10
# Supported : Not supported = 30 : 2
# -----------2013---------------------------
# se=113 : sup sr = 72 : sup cr = 108 : sup ema = 49
# Supported : Not supported = 113 : 0
# -----------2014---------------------------
# se=44 : sup sr = 6 : sup cr = 43 : sup ema = 23
# Supported : Not supported = 44 : 0
# -----------2015---------------------------
# se=0 : sup sr = 0 : sup cr = 0 : sup ema = 0
# Supported : Not supported = 0 : 0
# -----------2016---------------------------
# se=0 : sup sr = 0 : sup cr = 0 : sup ema = 0
# Supported : Not supported = 0 : 0
# -----------2017---------------------------
# se=8 : sup sr = 0 : sup cr = 5 : sup ema = 2
# Supported : Not supported = 5 : 3 | 36.25784 | 148 | 0.591486 |
from typing import List
from datetime import datetime, timedelta
import pytz
import os
from puffmarker.domain.datapoint import DataPoint
from puffmarker.input.import_stream_processor_inputs import load_data, load_data_offset
data_dir = '/home/nsaleheen/data/rice_ema_puffmarker_activity_loc/'
smoking_self_report_file = 'SMOKING+SELF_REPORT+PHONE.csv'
activity_type_file = 'ACTIVITY_TYPE+PHONE.csv'
puffmarker_smoking_epi_cloud_file = 'PUFFMARKER_SMOKING_EPISODE+PHONE.csv'
streamprocessor_puffmarker_smoking_epi_file = 'org.md2k.streamprocessor+PUFFMARKER_SMOKING_EPISODE+PHONE.csv'
ema_random_file = 'EMA+RANDOM_EMA+PHONE.csv'
ema_smoking_file = 'EMA+SMOKING_EMA+PHONE.csv'
ema_end_of_day_file = 'EMA+END_OF_DAY_EMA+PHONE.csv'
ema_stressed_file = 'EMA+STRESS_EMA+PHONE.csv'
tz = pytz.timezone('US/Central')
print(tz)
def unixtime_to_datetime_pre(timestamp):
timestamp = timestamp / 1000
dt = datetime.fromtimestamp(timestamp, tz).strftime('%m-%d-%Y %H:%M:%S')
return dt
def unixtime_to_datetime(timestamp):
timestamp = timestamp / 1000
dt = datetime.fromtimestamp(timestamp, tz).strftime('%m/%d %H:%M:%S')
return dt
def unixtime_to_date(timestamp):
dt = unixtime_to_datetime(timestamp)
return dt.split(' ')[0]
def unixtime_to_time(timestamp):
dt = unixtime_to_datetime(timestamp)
return dt.split(' ')[1]
def unixtime_to_timeOfDay(timestamp):
tm = unixtime_to_time(timestamp)
toks = tm.split(':')
h = int(toks[0])
m = int(toks[1])
timeOfday = h * 60 + m
return timeOfday
ut = 1512506705814
print(unixtime_to_datetime(ut))
print(unixtime_to_date(ut))
print(unixtime_to_time(ut))
print(unixtime_to_timeOfDay(ut))
import json
def get_fileName(cur_dir, file_sufix):
filenames = [name for name in os.listdir(cur_dir) if name.endswith(file_sufix)]
if len(filenames) > 0:
return filenames[0]
else:
return None
def get_EMA_data(cur_dir, filename):
if filename is None:
return []
fp = open(cur_dir + filename)
file_content = fp.read()
fp.close()
lines = file_content.splitlines()
data = []
for line in lines:
if len(line) > 1:
ts, offset, sample = line.split(',', 2)
start_time = int(float(ts)) / 1000.0
start_time = datetime.fromtimestamp(start_time)
offset = int(offset)
sample = sample[1:-1]
data.append([start_time, offset, sample])
return data
name)
data = []
for ema in emas:
d = ema[2]
jsn_file = json.loads(d)
status = jsn_file['status']
if status == 'COMPLETED':
is_smoked = jsn_file['question_answers'][32]['response'][0]
if is_smoked == 'Yes':
nSmoked = jsn_file['question_answers'][33]['response'][0]
if int(nSmoked) == 1:
nQI = 34
else:
nQI = 35
howlong_ago = jsn_file['question_answers'][nQI]['response']
sample = [int(nSmoked)]
for hla in howlong_ago:
hla = str(hla)
if hla in ["More than 12 hrs"]:
sample.extend([12 * 60 * 60 * 1000, 24 * 60 * 60 * 1000])
continue
st = hla.split('-')[0]
et = hla.split('-')[1]
st = st.split(' ')[0]
st = int(st.strip()) * 60 * 60 * 1000
et = et.strip().split(' ')[0]
et = int(et.strip()) * 60 * 60 * 1000
sample.extend([st, et])
data.append(DataPoint(start_time=ema[0], offset=ema[1], sample=sample))
return data
def get_smoking_EMA(cur_dir, filename) -> List[DataPoint]:
emas = get_EMA_data(cur_dir, filename)
data = []
for ema in emas:
d = ema[2]
jsn_file = json.loads(d)
status = jsn_file['status']
if status == 'COMPLETED':
is_smoked = jsn_file['question_answers'][0]['question_answer'][0:3]
if is_smoked.lower() == 'yes':
data.append(DataPoint(start_time=ema[0], offset=ema[1], sample=1))
else:
data.append(DataPoint(start_time=ema[0], offset=ema[1], sample=0))
return data
def get_smoking_self_report(cur_dir, filename) -> List[DataPoint]:
emas = get_EMA_data(cur_dir, filename)
data = []
for ema in emas:
d = ema[2]
jsn_file = json.loads(d)
status = jsn_file['message']
if 'YES' in status:
data.append(DataPoint(start_time=ema[0], offset=ema[1], sample=1))
return data
cur_dir = data_dir + '2007/'
def get_RICE_PILOT_EMAs(pid):
cur_dir = data_dir + pid + '/'
smoking_epis = load_data_offset(cur_dir + get_fileName(cur_dir, streamprocessor_puffmarker_smoking_epi_file))
smoking_selfreport = get_smoking_self_report(cur_dir, get_fileName(cur_dir, smoking_self_report_file))
smoking_emas = get_smoking_EMA(cur_dir, get_fileName(cur_dir, ema_smoking_file))
random_emas = get_random_EMA(cur_dir, get_fileName(cur_dir, ema_random_file))
stressed_emas = get_random_EMA(cur_dir, get_fileName(cur_dir, ema_stressed_file))
sup_sr = [0] * len(smoking_epis)
sup_cr = [0] * len(smoking_epis)
sup_ema = [0] * len(smoking_epis)
for i, epi in enumerate(smoking_epis):
for sr in smoking_selfreport:
time_diff = (sr.start_time - epi.start_time).total_seconds()
if (time_diff > -1800 and time_diff < 1800):
sup_sr[i] = 1
break
for sr in smoking_emas:
time_diff = (sr.start_time - epi.start_time).total_seconds()
if (time_diff > -600 and time_diff < 1800):
sup_cr[i] = 1
break
for re in random_emas:
st = re.start_time - timedelta(milliseconds=re.sample[2])
et = re.start_time - timedelta(milliseconds=re.sample[1])
if (epi.start_time >= st and epi.start_time <= et):
sup_ema[i] = 1
break
for re in stressed_emas:
st = re.start_time - timedelta(milliseconds=re.sample[2])
et = re.start_time - timedelta(milliseconds=re.sample[1])
if (epi.start_time >= st and epi.start_time <= et):
sup_ema[i] = 1
break
sup = [sup_sr[i] * 100 + sup_cr[i] * 10 + sup_ema[i] for i in range(len(sup_ema))]
print('se=' + str(len(smoking_epis)) + ' : sup sr = ' + str(sum(sup_sr)) + ' : sup cr = ' + str(
sum(sup_cr)) + ' : sup ema = ' + str(sum(sup_ema)))
non_sup = len([v for v in sup if v == 0])
print('Supported : Not supported = ' + str(len(sup) - non_sup) + ' : ' + str(non_sup))
pids = ["2006", "2007", "2009", "2013", "2014", "2015", "2016", "2017"]
get_RICE_PILOT_EMAs('2006')
| true | true |
790208024d887b2aff748f478010a86fbfd452eb | 1,210 | py | Python | pandapower/pypower/pqcost.py | Zamwell/pandapower | ce51946342109e969b87b60c8883d7eec02d3060 | [
"BSD-3-Clause"
] | 221 | 2015-01-03T23:18:11.000Z | 2022-03-27T10:21:40.000Z | pandapower/pypower/pqcost.py | lvzhibai/pandapower | 24ed3056558887cc89f67d15b5527523990ae9a1 | [
"BSD-3-Clause"
] | 126 | 2017-02-15T17:09:08.000Z | 2018-07-16T13:25:15.000Z | pandapower/pypower/pqcost.py | gdgarcia/pandapower | 630e3278ca012535f78282ae73f1b86f3fe932fc | [
"BSD-3-Clause"
] | 114 | 2015-02-02T15:07:38.000Z | 2022-03-22T17:01:55.000Z | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Splits the gencost variable into two pieces if costs are given for Qg.
"""
from sys import stderr
from numpy import array, arange
def pqcost(gencost, ng, on=None):
    """Splits the gencost variable into two pieces if costs are given for Qg.

    Checks whether C{gencost} has cost information for reactive power
    generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng}
    rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves
    C{qcost} empty. Also does some error checking.

    If C{on} is specified (list of indices of generators which are on line)
    it only returns the rows corresponding to these generators.

    BUG FIX: with a wrong row count the original wrote to stderr and then
    fell through to the return with C{pcost}/C{qcost} unbound, crashing
    with NameError; a ValueError is now raised instead.

    @author: Ray Zimmerman (PSERC Cornell)
    """
    if on is None:
        on = arange(ng)

    if gencost.shape[0] == ng:
        pcost = gencost[on, :]
        qcost = array([])
    elif gencost.shape[0] == 2 * ng:
        pcost = gencost[on, :]
        qcost = gencost[on + ng, :]
    else:
        stderr.write('pqcost: gencost has wrong number of rows\n')
        raise ValueError('pqcost: gencost has wrong number of rows')

    return pcost, qcost
| 31.842105 | 77 | 0.66281 |
from sys import stderr
from numpy import array, arange
def pqcost(gencost, ng, on=None):
if on is None:
on = arange(ng)
if gencost.shape[0] == ng:
pcost = gencost[on, :]
qcost = array([])
elif gencost.shape[0] == 2 * ng:
pcost = gencost[on, :]
qcost = gencost[on + ng, :]
else:
stderr.write('pqcost: gencost has wrong number of rows\n')
return pcost, qcost
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.