id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3249445 | # -*- coding: utf-8 -*-
"""
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import grpc
from contextlib import contextmanager
from typing import Optional
from dapr.conf import settings
from dapr.proto.common.v1 import common_pb2 as common_v1
from dapr.proto.runtime.v1 import dapr_pb2 as api_v1
from dapr.proto.runtime.v1 import dapr_pb2_grpc as api_service_v1
from dapr.proto.runtime.v1 import appcallback_pb2 as appcallback_v1
from dapr.proto.runtime.v1 import appcallback_pb2_grpc as appcallback_service_v1
@contextmanager
def connect_dapr(port: Optional[int] = -1):
    """Context manager yielding a Dapr gRPC stub, closing the channel on exit.

    Args:
        port: gRPC port of the local Dapr sidecar; the default sentinel -1
            falls back to settings.DAPR_GRPC_PORT.

    Yields:
        api_service_v1.DaprStub bound to an insecure channel on 127.0.0.1.
    """
    if port == -1:
        port = settings.DAPR_GRPC_PORT
    channel = grpc.insecure_channel(f"127.0.0.1:{port}")
    try:
        yield api_service_v1.DaprStub(channel)
    finally:
        # BUG FIX: close() previously ran only on a clean exit; an exception
        # raised inside the caller's `with` block leaked the channel.
        channel.close()
# Public API of this module: the connection helper plus the protobuf modules
# re-exported for convenience.
__all__ = [
    'connect_dapr',
    'common_v1',
    'api_v1',
    'appcallback_v1',
    'appcallback_service_v1',
]
| StarcoderdataPython |
2180 | # Test definitions for Lit, the LLVM test runner.
#
# This is reusing the LLVM Lit test runner in the interim until the new build
# rules are upstreamed.
# TODO(b/136126535): remove this custom rule.
"""Lit runner globbing test
"""
load("//tensorflow:tensorflow.bzl", "filegroup")
load("@bazel_skylib//lib:paths.bzl", "paths")
load("//tensorflow:tensorflow.bzl", "tf_cc_test", "tf_native_cc_binary", "tf_copts")
# Default values used by the test runner.
# FIX: extensions must not carry a leading dot — callers build globs as
# "*." + ext (see glob_op_tests), so ".pbtxt" produced the never-matching
# pattern "*..pbtxt" and those tests were silently skipped.
_default_test_file_exts = ["mlir", "pbtxt", "td"]
_default_driver = "@llvm-project//mlir:run_lit.sh"
_default_size = "small"
_default_tags = []

# These are patterns which we should never match, for tests, subdirectories, or
# test input data files.
_ALWAYS_EXCLUDE = [
    "**/LICENSE.txt",
    "**/README.txt",
    "**/lit.local.cfg",
    # Exclude input files that have spaces in their names, since bazel
    # cannot cope with such "targets" in the srcs list.
    "**/* *",
    "**/* */**",
]
def _run_lit_test(name, test_file, data, size, tags, driver, features, exec_properties):
    """Runs lit on all tests it can find in `data` under tensorflow/compiler/mlir.

    Note that, due to Bazel's hermetic builds, lit only sees the tests that
    are included in the `data` parameter, regardless of what other tests might
    exist in the directory searched.

    Args:
      name: str, the name of the test, including extension.
      test_file: [str], single-element list holding the test source file label.
      data: [str], the data input to the test.
      size: str, the size of the test.
      tags: [str], tags to attach to the test.
          NOTE(review): currently not forwarded to tf_cc_test — confirm intended.
      driver: str, label of the driver shell script.
          Note: use of a custom driver is not currently supported
          and specifying a default driver will abort the tests.
      features: [str], list of extra features to enable.
          NOTE(review): accepted but unused in this implementation.
      exec_properties: a dictionary of properties to pass on.
          NOTE(review): accepted but unused in this implementation.
    """
    # Companion data files share the test's basename: foo.mlir -> data/foo*.mlir.
    name_without_suffix = test_file[0].split('.')[0]
    local_test_files = name + ".test_files"
    filegroup(
        name = local_test_files,
        srcs = native.glob([
            "data/" + name_without_suffix + "*.mlir",
        ]),
    )
    tf_cc_test(
        name = name,
        srcs = test_file,
        size = size,
        deps = [
            "//tensorflow/compiler/mlir/disc/tests:mlir_feature_test",
            "//tensorflow/core:test",
            "//tensorflow/core:test_main",
            "//tensorflow/core:testlib",
        ],
        data = [":" + local_test_files] + data + [
            "//tensorflow/compiler/mlir/disc:disc_compiler_main",
            "//tensorflow/compiler/mlir:tf-mlir-translate",
            "//tensorflow/compiler/mlir:tf-opt",
        ],
    )
def glob_op_tests(
        exclude = [],
        test_file_exts = _default_test_file_exts,
        default_size = _default_size,
        size_override = {},
        data = [],
        per_test_extra_data = {},
        default_tags = _default_tags,
        tags_override = {},
        driver = _default_driver,
        features = [],
        exec_properties = {}):
    """Creates all plausible Lit tests (and their inputs) under this directory.

    Args:
      exclude: [str], paths to exclude (for tests and inputs).
      test_file_exts: [str], extensions for files that are tests.
      default_size: str, the test size for targets not in "size_override".
      size_override: {str: str}, sizes to use for specific tests.
      data: [str], additional input data to the test.
      per_test_extra_data: {str: [str]}, extra data to attach to a given file.
      default_tags: [str], additional tags to attach to the test.
      tags_override: {str: str}, tags to add to specific tests.
      driver: str, label of the driver shell script.
        Note: use of a custom driver is not currently supported
        and specifying a default driver will abort the tests.
      features: [str], list of extra features to enable.
      exec_properties: a dictionary of properties to pass on.
    """
    # Ignore some patterns by default for tests and input data.
    exclude = _ALWAYS_EXCLUDE + exclude
    tests = native.glob(
        ["*." + ext for ext in test_file_exts],
        exclude = exclude,
    )

    # Run tests individually such that errors can be attributed to a specific
    # failure. (Direct iteration replaces the old range(len(tests)) index loop.)
    for curr_test in tests:
        # Instantiate this test with updated parameters.
        lit_test(
            name = curr_test,
            data = data + per_test_extra_data.get(curr_test, []),
            size = size_override.get(curr_test, default_size),
            tags = default_tags + tags_override.get(curr_test, []),
            driver = driver,
            features = features,
            exec_properties = exec_properties,
        )
def lit_test(
        name,
        data = [],
        size = _default_size,
        tags = _default_tags,
        driver = _default_driver,
        features = [],
        exec_properties = {}):
    """Runs test files under lit.

    Args:
      name: str, the name of the test.
      data: [str], labels that should be provided as data inputs.
      size: str, the size of the test.
      tags: [str], tags to attach to the test.
      driver: str, label of the driver shell script.
        Note: use of a custom driver is not currently supported
        and specifying a default driver will abort the tests.
      features: [str], list of extra features to enable.
      exec_properties: a dictionary of properties to pass on to _run_lit_test.
    """
    _run_lit_test(name + ".test", [name], data, size, tags, driver, features, exec_properties)
| StarcoderdataPython |
3237839 | import time
from load_tests.utils import client_request, stats
def test_peak(stats):
    """Fire 50 client requests, one every 0.3 s, then wait for all to finish."""
    total_requests = 50
    pause = 0.3
    workers = []
    for idx in range(total_requests):
        workers.append(client_request("{}:{}".format(total_requests, idx), stats))
        time.sleep(pause)
    # Block until every request thread has completed.
    for worker in workers:
        worker.join()
| StarcoderdataPython |
1701891 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models
class SurveyComposeMessage(models.TransientModel):
    # Extends the survey mail composer with CRM-lead-aware defaults.
    _inherit = 'survey.mail.compose.message'

    @api.model
    def default_get(self, fields):
        """Prefill recipients from the active CRM leads.

        When opened from crm.lead records: leads linked to a partner
        contribute their partner id; the remaining leads contribute a
        de-duplicated, newline-separated e-mail list ("Name <email>" when a
        contact name exists, otherwise the bare address).
        """
        result = super(SurveyComposeMessage, self).default_get(fields)
        if self._context.get('active_model') == 'crm.lead' and self._context.get('active_ids'):
            partner_ids = []
            emails_list = []
            for lead in self.env['crm.lead'].browse(self._context.get('active_ids')):
                if lead.partner_id:
                    partner_ids.append(lead.partner_id.id)
                else:
                    # and/or chaining: "Name <email>" if contact_name, else email_from, else None.
                    email = lead.contact_name and "%s <%s>" % (lead.contact_name, lead.email_from or "") or lead.email_from or None
                    if email and email not in emails_list:
                        emails_list.append(email)
            multi_email = "\n".join(emails_list)
            result.update({'partner_ids': list(set(partner_ids)), 'multi_email': multi_email})
        return result
| StarcoderdataPython |
3203557 | <filename>python/flask/pattern_replace.py<gh_stars>0
# goal: to replace a string in a pattern
# example: "hello" : 3312
# we want to replace 3312 which follows from "hello" :
import re

# goal: to replace a string in a pattern
# example: "hello" : 3312
# we want to replace 3312 which follows from "hello" :
# FIX: raw strings — '\s' and '\w' in a plain string literal are invalid
# escape sequences (SyntaxWarning on modern Python).
_KEY_VALUE_RE = re.compile(r'(\s*)("\w+")\s*:\s*(.*)')


def replace_value(line, new_value):
    """Replace whatever follows the quoted key's colon in `line` with new_value.

    The result is normalised to the form '<leading-ws> "key" : <new_value>'.
    """
    return _KEY_VALUE_RE.sub(r'\1 \2 : ' + str(new_value), line)


s1 = '"hello": 3312'
s2 = '"hello" : 3315'
# replace the number after "hello" : to another number
s_r = replace_value(s1, 472)
print(s_r)
s_r = replace_value(s2, 2)
print(s_r)
| StarcoderdataPython |
1796293 | <filename>model/modify_model.py<gh_stars>0
# coding: utf-8
# Wraps the interfaces through which other modules modify the model.
# External modules may only modify the model via the functions in this file.
from global_data import SUCCESS, Orders, Trucks, Bases
# A truck takes orders; update the related model state: truck/order/base.
# Arguments are passed in as objects.
# Returns whether the modification succeeded.
from model.base_model.base import Base
def model_truck_take_orders(truck_id, order_ids):
    """Assign orders to a truck and update truck/order/base state.

    Removes the truck from its current base's roster (it may appear either in
    the base's local or visiting list), attaches the orders to the truck, and
    records the truck id on each order.

    Returns:
        SUCCESS  # NOTE(review): no code path ever sets a failure value — confirm intended.
    """
    result = SUCCESS
    truck = Trucks[truck_id]
    # The truck is tracked either as a local truck or a visiting ("other") truck.
    if truck_id in Bases[truck.current_base].local_truck:
        Bases[truck.current_base].local_truck.remove(truck_id)
    elif truck_id in Bases[truck.current_base].other_truck:
        Bases[truck.current_base].other_truck.remove(truck_id)
    truck.add_orders(order_list=order_ids)
    for order_id in order_ids:
        Orders[order_id].trunk_id = truck_id
        # del Orders[order_id]
    return result
| StarcoderdataPython |
143414 | import os
import logging
from argparse import ArgumentParser
from concurrent.futures import ThreadPoolExecutor, as_completed
from .lib.saj import StreamAnalyticsJobs
from .lib.utils import chkpath, mklog
def get_args():
    """Define and parse the sajctl command-line interface."""
    arg_parser = ArgumentParser(description="Start or stop Stream Analytics Jobs")
    arg_parser.add_argument(
        "-C", "--config",
        type=chkpath,
        metavar=("PATH"),
        default=os.path.expanduser('~') + "/.azure.ini",
        help="path to azure configuration file",
    )
    arg_parser.add_argument(
        "-r", "--resource_group",
        metavar=("NAME"),
        help="azure resource group",
    )
    arg_parser.add_argument(
        "-j", "--stream_analytics_jobs",
        metavar=("JOBS"),
        nargs="+",
        help="list of azure stream analytics jobs",
    )
    arg_parser.add_argument(
        "-a", "--action",
        metavar=("START/STOP"),
        help="action to carry out - start or stop.",
    )
    # Repeatable flag: -v, -vv, ... raises verbosity.
    arg_parser.add_argument("-v", action="count", default=0, help="increase verbosity")
    return arg_parser.parse_args()
def sajctl(config, rg, jobs, action):
    """Fan the start/stop action out to every job in parallel and print status.

    Args:
        config: path to the azure configuration file.
        rg: azure resource group name.
        jobs: list of stream analytics job names.
        action: the action to send (start or stop).
    """
    saj = StreamAnalyticsJobs(config, rg)
    print(f"Sending {action} to " + ", ".join(jobs) + "...")
    # One worker per job so all requests are in flight at once.
    # http://masnun.com/2016/03/29/python-a-quick-introduction-to-the-concurrent-futures-module.html
    with ThreadPoolExecutor(max_workers=len(jobs)) as executor:
        future_job = {
            executor.submit(saj.toggle_stream_analytics_job, job, action): job
            for job in jobs
        }
        for future in as_completed(future_job):
            job = future_job[future]
            # BUG FIX: flush was the string "True" (only worked by truthiness);
            # pass the actual bool.
            print(f"Status of {job}: ", end="", flush=True)
            try:
                result = future.result()
            except ValueError as error:
                print("FAILED")
                logging.error(error)
            else:
                print(result["properties"]["jobState"].upper())
def main():
    """CLI entry point: parse arguments, configure logging, run the controller."""
    args = get_args()
    mklog(args.v)  # verbosity count (-v, -vv, ...) selects the log level
    sajctl(args.config, args.resource_group, args.stream_analytics_jobs, args.action)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
79216 | <filename>tystrings/tylogger.py
from .tyformatter import *
from colorama import Fore, Style
from tabulate import tabulate
# Emoji
BEER_EMOJI = u'\U0001F37A '   # used by TyLogger.finished debug message
BEERS_EMOJI = u'\U0001F37B '  # used by TyLogger.success and TyLogger.addition
GHOST_EMOJI = u'\U0001F47B '  # not referenced in this module
# Log Level
# Custom levels between DEBUG (10) and INFO (20) so they can be filtered independently.
PROCESS = 11
DONE = 12
SUCCESS = 13
ADDITION = 14
class TyLogger(logging.Logger):
    """Logger with custom levels (PROCESS/DONE/SUCCESS/ADDITION) and emoji output."""

    def __init__(self, name, level=logging.NOTSET):
        logging.Logger.__init__(self, name, level)
        # BUG FIX: PROCESS and ADDITION were emitted via _log but never
        # registered, so records rendered as "Level 11"/"Level 14".
        # Register all four custom level names.
        logging.addLevelName(PROCESS, 'PROCESS')
        logging.addLevelName(DONE, 'DONE')
        logging.addLevelName(SUCCESS, 'SUCCESS')
        logging.addLevelName(ADDITION, 'ADDITION')
        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        handler.setFormatter(TyFormatter())
        self.addHandler(handler)
        self.setLevel(logging.INFO)

    def process(self, msg, *args, **kwargs):
        """Log msg at the custom PROCESS level."""
        self._log(PROCESS, msg, args, **kwargs)

    def done(self, msg, *args, **kwargs):
        """Log msg at the custom DONE level."""
        self._log(DONE, msg, args, **kwargs)

    def success(self, msg, *args, **kwargs):
        """Log a bright-white message prefixed with a beers emoji at SUCCESS level."""
        self._log(SUCCESS, BEERS_EMOJI + ' ' + Fore.LIGHTWHITE_EX + msg + Fore.RESET, args, **kwargs)

    def addition(self, msg, *args, **kwargs):
        """Log at ADDITION level, styled identically to success().

        NOTE(review): uses BEERS_EMOJI like success() — confirm that is intended.
        """
        self._log(ADDITION, BEERS_EMOJI + ' ' + Fore.LIGHTWHITE_EX + msg + Fore.RESET, args, **kwargs)

    def finished(self, return_code, *args, **kwargs):
        """Debug-log process completion; a return code of 0 or None counts as success."""
        self.debug(BEER_EMOJI + ' process finished with %s' % ('success' if
                   return_code == 0 or return_code is None else (
                       'exit code %r' % return_code)), *args, **kwargs)

    def diffs(self, diffs, *args, **kwargs):
        """Log a table of diff rows: '+' rows green, the rest red."""
        def __coloring(elem, color):
            return tuple(['%s%s%s' % (color, item, Style.RESET_ALL)
                          for item in list(elem)])
        rows = [__coloring(item, Fore.LIGHTGREEN_EX if item[0] == '+' else Fore.LIGHTRED_EX) for item in diffs]
        self.info(tabulate(rows,
                           tablefmt="psql", headers=['', 'File1', 'File2', 'Key', 'Value']), *args, **kwargs)
# Install TyLogger as the class for subsequently created loggers, then create
# the package-level logger instance used throughout tystrings.
logging.setLoggerClass(TyLogger)
logger = logging.getLogger('tystrings')
| StarcoderdataPython |
90535 | <reponame>haimadrian/AlgorithmsInMultimediaUsingPython
__author__ = "<NAME>"
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Assignment 1
def normalizeImage(img):
    """Linearly stretch `img` onto the full [0, 255] range and return uint8."""
    shifted = img - np.min(img)
    stretched = np.round(shifted * 255 / np.max(shifted))
    return np.uint8(stretched)
def myMasking(img, mask):
    """Correlate `img` (2D grayscale or 3-channel BGR) with a square odd-sized mask.

    Borders are handled with replicate padding. Returns a float64 array of the
    same spatial size, or None (after printing a message) on invalid input.
    """
    if not isinstance(img, np.ndarray) or not isinstance(mask, np.ndarray):
        print("myMasking: Not a tensor. Was: Image=", img.__class__, ", Mask=", mask.__class__)
        return None
    maskShape = mask.shape
    imgShape = img.shape
    # Only square masks with an odd side length (3x3, 5x5, ...) are supported.
    if (len(maskShape) != 2) or (maskShape[0] != maskShape[1]) or (maskShape[0] % 2 == 0):
        print("myMasking: Mask is not supported. Only two square odd dimensional masks. e.g. 3x3 or 5x5")
        return None

    def myMaskingInner(img2D, msk):
        # Pad size: 1 for a 3x3 mask, 2 for 5x5, and so on.
        # FIX: np.int was removed in NumPy 1.24; integer floor division is
        # equivalent here (mask side is guaranteed odd).
        mid = (msk.shape[0] - 1) // 2
        # Extended (replicate) padding
        padded = cv2.copyMakeBorder(img2D, mid, mid, mid, mid, cv2.BORDER_REPLICATE)
        result = np.zeros(img2D.shape, dtype=np.float64)
        # Now apply the mask: element-wise product of each window with the mask.
        for i in range(mid, padded.shape[0] - mid):
            for j in range(mid, padded.shape[1] - mid):
                result[i - mid, j - mid] = np.sum(padded[i - mid:i + mid + 1, j - mid:j + mid + 1] * msk)
        return result

    if len(imgShape) > 3 or len(imgShape) < 2:
        print("myMasking: Illegal image dimension. Length of shape can be 2 or 3 only")
        return None
    if len(imgShape) == 2:
        return myMaskingInner(img, mask)
    # Colour image: process each channel independently and re-merge.
    b, g, r = cv2.split(img)
    return cv2.merge((myMaskingInner(b, mask), myMaskingInner(g, mask), myMaskingInner(r, mask)))
# Assignment 2
def someMask():
    """Sharpen BlurryImage1.jpg with a 3x3 kernel; return (original, sharpened)."""
    original = cv2.imread("BlurryImage1.jpg")
    sharpen_kernel = np.array([[-1, -1, -1],
                               [-1, 10, -1],
                               [-1, -1, -1]])
    sharpened = normalizeImage(np.abs(myMasking(original, sharpen_kernel)))
    return original, sharpened
# Assignment 3
def myHistPlot(img1, img2):
    """Plot grey-level histograms (top row) and the two images (bottom row).

    Bin counts are capped at 30000 so that a few huge bins do not flatten the
    rest of the plot. Returns None (after printing) on invalid input.
    """
    if not isinstance(img1, np.ndarray) or not isinstance(img2, np.ndarray):
        print("Not ndarray")
        return None
    if (len(img1.shape) != 2) or (len(img2.shape) != 2):
        print("Grayscale (2D) expected")
        return None

    def histogram(img):
        # Build a {grey-level: count} dict by scanning every pixel.
        hist = {}
        for i in range(img.shape[0]):
            for j in range(img.shape[1]):
                if img[i, j] in hist:
                    hist[img[i, j]] += 1
                else:
                    hist[img[i, j]] = 1
        return hist
    hist1 = histogram(img1)
    hist2 = histogram(img2)
    # when there is a big difference between max and min, we can barely understand anything from the plot, so set max.
    hist1Vals = [x if x <= 30000 else 30000 for x in hist1.values()]
    hist2Vals = [x if x <= 30000 else 30000 for x in hist2.values()]
    plt.figure('Plot images')
    plt.subplot(221)
    plt.bar(hist1.keys(), hist1Vals, color='b', fc='k', ec='k')
    plt.title('Image1 Hist')
    plt.subplot(222)
    plt.bar(hist2.keys(), hist2Vals, color='b', fc='k', ec='k')
    plt.title('Image2 Hist')
    plt.subplot(223)
    plt.imshow(img1[:, :], cmap="gray")
    plt.axis('off')
    plt.title('Image1')
    plt.subplot(224)
    plt.imshow(img2[:, :], cmap="gray")
    plt.axis('off')
    plt.title('Image2')
    plt.show()
# Assignment 4
def medianFilter():
    """Denoise NoisyS.pgm with a 21x21 median filter, then Gaussian-smooth it.

    Returns:
        (original image, median-filtered and Gaussian-smoothed image).
    """
    img = cv2.imread("NoisyS.pgm")

    # Median is either 0 or 1, so we will need to use absolute, round and cast (normalizing image)
    def medianFilterInner(img2D):
        windowSize = 10 * 2 + 1
        # Pad size for the 21x21 window.
        # FIX: np.int was removed in NumPy 1.24; integer floor division is equivalent.
        mid = (windowSize - 1) // 2
        # Zero padding
        padded = cv2.copyMakeBorder(img2D, mid, mid, mid, mid, cv2.BORDER_CONSTANT, value=0)
        result = np.zeros(img2D.shape, dtype=float)
        # Now apply the filter
        fullWindowLength = windowSize**2
        for i in range(mid, padded.shape[0] - mid):
            for j in range(mid, padded.shape[1] - mid):
                medianArray = np.reshape(padded[i-mid:i+mid+1, j-mid:j+mid+1], (1, fullWindowLength))
                result[i-mid, j-mid] = np.median(medianArray)
        return result

    b, g, r = cv2.split(img)
    medianB = medianFilterInner(b)
    medianG = medianFilterInner(g)
    medianR = medianFilterInner(r)
    # Mask sum is 1, we will need to normalize, round and cast
    gaussianMask = (1 / 273) * np.array([[1, 4, 7, 4, 1],
                                         [4, 16, 26, 16, 4],
                                         [7, 26, 41, 26, 7],
                                         [4, 16, 26, 16, 4],
                                         [1, 4, 7, 4, 1]])
    return img, cv2.merge((myMasking(medianB, gaussianMask), myMasking(medianG, gaussianMask), myMasking(medianR, gaussianMask)))
| StarcoderdataPython |
1678750 | from utils.utils import get_pair, store_json_dic
import random
from scipy.stats import pearsonr
import scipy
from utils.utils import load_json_dic
import json
import os
import numpy as np
import seaborn as sns
from models.mlm_wrapper import MLMWrapper
def compare_distribution(in_distribution, out_distribution, norm_in=False, norm_out=False):
    """Pearson correlation of two token->frequency dicts over their joint vocabulary.

    Tokens missing from one distribution count as frequency 0. Returns 1.0
    when fewer than two tokens exist (correlation is undefined). Either
    frequency vector may optionally be normalised to sum to 1 first.
    """
    # Joint vocabulary, preserving first-seen order: in-tokens, then new out-tokens.
    vocabulary = dict.fromkeys(in_distribution)
    vocabulary.update(dict.fromkeys(out_distribution))
    in_tf = [in_distribution.get(token, 0) for token in vocabulary]
    out_tf = [out_distribution.get(token, 0) for token in vocabulary]
    assert len(in_tf) == len(out_tf)
    if len(in_tf) < 2:
        return 1.0
    if norm_in:
        in_tf = norm_list(in_tf)
    if norm_out:
        out_tf = norm_list(out_tf)
    pearsonr_corr = pearsonr(in_tf, out_tf)
    return round(pearsonr_corr[0], 4)
def topk_dis(dis, topk):
    """Return a dict holding only the `topk` highest-valued entries of `dis`."""
    return dict(sort_dis(dis)[:topk])
def norm_list(lst):
    """Rescale `lst` so that its entries sum to one."""
    total = sum(lst)
    return [value / total for value in lst]
def calculate_kl(in_distribution, out_distribution):
    """KL divergence KL(in || out) between two token->frequency dicts.

    Both distributions are extended onto the joint vocabulary, with a small
    epsilon standing in for missing tokens, normalised to sum to 1, and
    compared with scipy's entropy. The result is rounded to 4 decimals.
    """
    # BUG FIX: this epsilon was corrupted to "1<PASSWORD>" by dataset
    # anonymization; 1e-8 matches the value used for all other fallbacks.
    EPS = 1e-8
    all_tokens = {}
    in_tf = []
    out_tf = []
    for token in in_distribution:
        if token not in all_tokens:
            all_tokens[token] = EPS
    for token in out_distribution:
        if token not in all_tokens:
            all_tokens[token] = EPS
    for token in all_tokens:
        if token in in_distribution:
            in_tf.append(in_distribution[token])
        else:
            in_tf.append(EPS)
        if token in out_distribution:
            out_tf.append(out_distribution[token])
        else:
            out_tf.append(EPS)
    in_tf = norm_list(in_tf)
    out_tf = norm_list(out_tf)
    KL = scipy.stats.entropy(in_tf, out_tf)
    KL = round(KL, 4)
    return KL
def get_predict_distribution(args, relation, samples, model_wrapper: MLMWrapper):
    """Count how often each token appears as the top-1 prediction over `samples`."""
    predict_results, p = model_wrapper.evaluate_samples(
        relation, samples, mask_pos=0, batch_size=args.batch_size,
        topk=args.topk, max_len=args.max_len
    )
    distribution = {}
    for result in predict_results:
        # Only the single highest-ranked prediction contributes to the counts.
        for token in result['predict_tokens'][:1]:
            distribution[token] = distribution.get(token, 0) + 1
    return distribution
def get_mask_distribution(args, relation, model_wrapper: MLMWrapper, return_topk=False):
    """Query the model's distribution for the [Y] slot with both slots masked.

    The queried mask position is the last mask when [X] precedes [Y] in the
    template, otherwise the first. Returns a token->probability dict for the
    top `args.mask_topk` tokens, or just the ranked token list when
    `return_topk` is True.
    """
    relation_template = relation['template']
    # -1 addresses the final mask token when [X] comes before [Y].
    if relation_template.find("[X]") < relation_template.find("[Y]"):
        query_pos = [-1]
    else:
        query_pos = [0]
    masked_sentence = relation_template.replace('[X]', model_wrapper.tokenizer.mask_token)
    masked_sentence = masked_sentence.replace('[Y]', model_wrapper.tokenizer.mask_token)
    predict_results = model_wrapper.predict(
        [masked_sentence], mask_pos=query_pos, batch_size=args.batch_size,
        topk=args.mask_topk, max_len=args.max_len
    )
    predict_tokens = predict_results[0]['predict_tokens']
    predict_prob = predict_results[0]['predict_prob']
    top_tokens = predict_tokens[: args.mask_topk]
    if return_topk is True:
        return top_tokens
    return {token: float(prob) for token, prob in zip(top_tokens, predict_prob)}
def calculate_prompt_only_dis_kl_div(args, prompt, lama_dis, model_wrapper):
    """KL divergence between the model's prompt-only distribution and `lama_dis`.

    Queries the model on `prompt` (which must contain at least one mask),
    keeps the top-1000 predicted tokens as a distribution, and compares it
    against the reference distribution via calculate_kl.
    """
    mask_distribution = {}
    # NOTE(review): the literal "[MASK]" is BERT-specific; other functions here
    # use model_wrapper.tokenizer.mask_token — confirm this check generalises.
    if "[MASK]" not in prompt:
        raise RuntimeError("at least one [MASK] token")
    predict_results = model_wrapper.predict(
        [prompt], mask_pos=-1, batch_size=args.batch_size,
        topk=args.topk, max_len=args.max_len
    )
    predict_tokens = predict_results[0]['predict_tokens']
    predict_prob = predict_results[0]['predict_prob']
    topk_tokens = predict_tokens[: args.mask_topk]
    for token, prob in zip(topk_tokens, predict_prob):
        mask_distribution[token] = float(prob)
    mask_dis = topk_dis(mask_distribution, 1000)
    return calculate_kl(mask_dis, lama_dis)
def get_obj_distribution(samples):
    """Count how many times each object label occurs in `samples`."""
    counts = {}
    for sample in samples:
        _, obj = get_pair(sample)
        counts[obj] = counts.get(obj, 0) + 1
    return counts
def delete_overlap(samples):
    """Drop samples whose object string occurs inside the subject string."""
    filtered = []
    for sample in samples:
        subject, obj = get_pair(sample)
        if obj not in subject:
            filtered.append(sample)
    return filtered
def devide_by_vocab(samples, vocab):
    """Split samples by whether their 'obj_label' is contained in `vocab`.

    Entries wrapped as {'sample': {...}} are unwrapped first; the returned
    lists contain the unwrapped sample dicts.

    Returns:
        (in_samples, not_in_samples)
    """
    in_samples = []
    not_in_samples = []
    for sample in samples:
        if 'sample' in sample:
            sample = sample['sample']
        # Cleanup: the unused read of 'sub_label' was removed.
        obj = sample['obj_label']
        if obj in vocab:
            in_samples.append(sample)
        else:
            not_in_samples.append(sample)
    return in_samples, not_in_samples
def store_samples(relation_id, out_dir, samples):
    """Serialize `samples` as JSON to out_dir/relation_id, creating out_dir if needed."""
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    target_path = "{}/{}".format(out_dir, relation_id)
    with open(target_path, "w") as handle:
        json.dump(samples, handle)
def average_sampling(samples, method, threshold, target):
    """Down-sample so every kept object label appears an equal number of times.

    Two candidate per-label caps are computed from the object-frequency
    distribution: the `threshold` quantile of frequencies, and the largest
    frequency f such that enough labels reach f to yield at least `target`
    samples in total. method == "threshold_sample" uses the quantile cap;
    any other method uses the target-derived cap (falling back to the
    quantile cap when no frequency satisfies the target).

    Returns:
        A list of samples in which each label with at least `lower_bound`
        occurrences contributes exactly `lower_bound` randomly chosen samples;
        rarer labels are dropped entirely.
    """
    sampled_cases = []
    distribution = get_obj_distribution(samples)
    fre = []
    for obj in distribution:
        fre.append(distribution[obj])
    diff_obj = len(distribution)
    fre = sorted(fre)
    down_quantile_fre = int(np.quantile(fre, threshold))
    down_target_fre = -1
    # Walk frequencies from lowest to highest; i is the number of labels whose
    # frequency is >= fre[idx], so fre[idx] * i is the achievable sample count.
    for i in range(diff_obj, 0, -1):
        idx = diff_obj - i
        if fre[idx] * i >= target:
            down_target_fre = fre[idx]
            break
    if down_target_fre == -1:
        down_target_fre = down_quantile_fre
    if method == "threshold_sample":
        lower_bound = down_quantile_fre
    else:
        lower_bound = down_target_fre
    # Group (unwrapped) samples by their object label.
    obj2samples = {}
    for sample in samples:
        if 'sample' in sample:
            sample = sample['sample']
        obj = sample['obj_label']
        if obj not in obj2samples:
            obj2samples[obj] = []
        obj2samples[obj].append(sample)
    for obj in obj2samples:
        if len(obj2samples[obj]) >= lower_bound:
            sampled_cases.extend(random.sample(obj2samples[obj], lower_bound))
    return sampled_cases
def load_wiki_uni(relation_id, model="bert"):
    """Load the WikiUni samples for a relation from the given model's data dir."""
    path = "data/{}_data/wiki_uni/{}".format(model, relation_id)
    return load_json_dic(path)
def store_dis(dis_dir, prompt_type, dis_type, relation_id, data):
    """Persist a distribution under dis_dir/prompt_type/dis_type/relation_id."""
    target_dir = "{}/{}/{}".format(dis_dir, prompt_type, dis_type)
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    store_json_dic("{}/{}".format(target_dir, relation_id), data)
def load_dis(dis_dir, prompt_type, dis_type, relation_id):
    """Load a distribution previously stored by store_dis."""
    path = "{}/{}/{}/{}".format(dis_dir, prompt_type, dis_type, relation_id)
    return load_json_dic(path)
def add_corr(corr_dic, relation_type, corr):
    """Append `corr` to the bucket matching the prompt family in `relation_type`.

    Checked in order: "original" -> manual prompts, "mine" -> mined prompts,
    "auto" -> automatically generated prompts. Unmatched types are ignored.
    """
    dispatch = (
        ("original", r"$T_{man}$"),
        ("mine", r"$T_{mine}$"),
        ("auto", r"$T_{auto}$"),
    )
    for keyword, bucket in dispatch:
        if keyword in relation_type:
            corr_dic[bucket].append(corr)
            return
def sort_dis(distribution):
    """Return (token, value) pairs ordered from highest to lowest value."""
    pairs = list(distribution.items())
    pairs.sort(key=lambda pair: pair[1], reverse=True)
    return pairs
def sum_dis(dis):
    """Return the total of all values in the distribution dict."""
    # Idiomatic replacement for the original manual accumulation loop.
    return sum(dis.values())
def draw_token_heat_map(data, pic_dir=None, pic_name=None, v=None):
    """Render a seaborn heatmap of `data`; optionally save it as an EPS file.

    Args:
        data: 2D array-like passed to seaborn.heatmap.
        pic_dir: directory to save the figure into (created if missing);
            nothing is written when None.
        pic_name: file name (without extension) for the saved EPS figure.
        v: optional (vmin, vmax) pair; when given, switches to the "mako"
            colormap with a fixed colour range.
    """
    sns.set_theme()
    if v is None:
        # ax = sns.heatmap(data=data, cmap="rocket_r", square=True)
        ax = sns.heatmap(data=data, cmap="Blues", square=True, linewidths=.1, linecolor="#B0C4DE")
    else:
        ax = sns.heatmap(data=data, cmap="mako", vmin=v[0], vmax=v[1])
    # ax.tick_params(axis='x', labelsize=14)
    ax.tick_params(axis='y', labelsize=14)
    ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
    ax.set_ylabel("")
    ax.set_xlabel("")
    # ax.set_xticklabels(rotation=30)
    if pic_dir is not None:
        if not os.path.isdir(pic_dir):
            os.makedirs(pic_dir)
        fig = ax.get_figure()
        fig.savefig('{}/{}.eps'.format(pic_dir, pic_name), format='eps', bbox_inches='tight')
| StarcoderdataPython |
3382368 | <reponame>mdmeadows/DSM-to-DTM
# Visualise: Performance of each modelling approach, with reference to test datasets & zones
# Import required packages
import sys
import subprocess
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm, colors, gridspec
from mpl_toolkits.basemap import Basemap
import pandas as pd
from osgeo import gdal, gdalconst
import pickle
gdal.UseExceptions() # Useful for trouble-shooting
# Import helper functions relevant to this script
sys.path.append('E:/mdm123/D/scripts/geo/')
from geo_helpers import extract_projection_info, array_to_geotiff
# List paths to GDAL scripts (local Anaconda "geo" environment binaries)
ogr2ogr = 'C:/Anaconda3/envs/geo/Library/bin/ogr2ogr.exe'
gdal_warp = 'C:/Anaconda3/envs/geo/Library/bin/gdalwarp.exe'
gdal_rasterise = 'C:/Anaconda3/envs/geo/Library/bin/gdal_rasterize.exe'
# Define paths to relevant folders
folder_srtm = 'E:/mdm123/D/data/DSM/SRTM/proc'
folder_dtm = 'E:/mdm123/D/data/DTM/proc'
folder_logs = 'E:/mdm123/D/ML/logs'
folder_results_rf = 'E:/mdm123/D/ML/results/rf'
folder_results_densenet = 'E:/mdm123/D/ML/results/densenet'
folder_results_convnet = 'E:/mdm123/D/ML/results/convnet'
folder_fig = 'E:/mdm123/D/figures/All'
folder_maps = 'E:/mdm123/D/maps/PNG'
folder_input_1D = 'E:/mdm123/D/ML/inputs/1D'
folder_lcdb = 'E:/mdm123/D/data/LRIS/lris-lcdb-v50/proc'
folder_flha = 'E:/mdm123/D/data/NIWA/NZ_FLHA/proc'
folder_hand = 'E:/mdm123/D/maps/HAND/TIF'
# Define list of colours to be used for each ML model: in order of RF, DCN, FCN
models = ['rf','dcn','fcn']
model_colours = {'rf':'#fc8d62', 'dcn':'#66c2a5', 'fcn':'#8da0cb'}
label_colours = {'rf':'#d95f02', 'dcn':'#1b9e77', 'fcn':'#7570b3'}
dataset_colours = ['blue', 'green', 'firebrick']
# Define a list of the zones within which test areas were defined
test_zones = ['MRL18_WPE','MRL18_WVL','TSM16_ATG']
# Define various properties for the three test zones
# (display label plus elevation/residual colourbar ranges in metres)
test_zones_props = {'MRL18_WPE':{'label':'Wairau Plains East (Marlborough)',
                                 'elv_cbar_range':(0,12),
                                 'res_cbar_range':(-5,10)},
                    'MRL18_WVL':{'label':'Wairau Valley (Marlborough)',
                                 'elv_cbar_range':(230,300),
                                 'res_cbar_range':(-5,10)},
                    'TSM16_ATG':{'label':'Takaka (Tasman)',
                                 'elv_cbar_range':(0,75),
                                 'res_cbar_range':(-10,15)}}
# Define no_data value to be used
no_data = -9999
###############################################################################
# 1. Define additional helper functions specific to result visualisation #
###############################################################################
# Define a function that retrieves the DTM, SRTM & DIFF arrays for a given zone, as well as a dictionary defining CRS properties for GeoTIFF generation
def get_base_raster_data(zone, no_data=-9999):
    """Load all base rasters for a zone as float32 arrays.

    Reads the SRTM DSM, resampled LiDAR DTM, SRTM-DTM difference, test-patch
    mask, MERIT DEM, NIWA flood-hazard (FLHA) and HAND rasters for `zone`.

    Returns:
        (dtm_array, srtm_array, diff_array, test_array, merit_array,
         flha_array, hand_array, srtm_props) where srtm_props is a dict of the
        SRTM GeoTIFF's projection/extent properties for writing outputs.
    """
    # Import the SRTM raster for that zone - get array & geographic properties
    srtm_tif = '{}/{}/SRTM_{}_Z.tif'.format(folder_srtm, zone, zone)
    srtm_ds = gdal.Open(srtm_tif, gdalconst.GA_ReadOnly)
    srtm_proj, srtm_res_x, srtm_res_y, srtm_x_min, srtm_x_max, srtm_y_min, srtm_y_max, srtm_width, srtm_height = extract_projection_info(srtm_tif)
    srtm_props = {'proj':srtm_proj, 'res_x':srtm_res_x, 'res_y':srtm_res_y, 'x_min':srtm_x_min, 'y_max':srtm_y_max, 'x_max':srtm_x_max, 'y_min':srtm_y_min, 'width':srtm_width, 'height':srtm_height}
    srtm_array = np.array(srtm_ds.GetRasterBand(1).ReadAsArray()).astype('float32')
    srtm_ds = None
    # Import the Resampled DTM raster for that zone - get array & geographic properties
    dtm_tif = '{}/{}/DTM_{}_30m_Median.tif'.format(folder_dtm, zone, zone)
    dtm_ds = gdal.Open(dtm_tif, gdalconst.GA_ReadOnly)
    dtm_array = np.array(dtm_ds.GetRasterBand(1).ReadAsArray()).astype('float32')
    dtm_array[dtm_array==no_data] = np.nan
    dtm_ds = None
    # Import the SRTM-DTM DIFF raster for that zone - get array & geographic properties
    diff_tif = '{}/{}/SRTM_{}_Median_Diff.tif'.format(folder_srtm, zone, zone)
    diff_ds = gdal.Open(diff_tif, gdalconst.GA_ReadOnly)
    diff_array = np.array(diff_ds.GetRasterBand(1).ReadAsArray()).astype('float32')
    diff_ds = None
    # Import raster describing which pixels in that zone were assigned to the test dataset
    test_tif = '{}/patches/TIF/{}_test_patches.tif'.format(folder_logs, zone)
    test_ds = gdal.Open(test_tif, gdalconst.GA_ReadOnly)
    test_array = np.array(test_ds.GetRasterBand(1).ReadAsArray()).astype('float32')
    test_ds = None
    # Import the MERIT DEM raster for that zone - get array & geographic properties
    merit_tif = '{}/{}/MERIT_{}.tif'.format(folder_srtm.replace('SRTM','MERIT'), zone, zone)
    merit_ds = gdal.Open(merit_tif, gdalconst.GA_ReadOnly)
    merit_array = np.array(merit_ds.GetRasterBand(1).ReadAsArray()).astype('float32')
    merit_ds = None
    # Import the FLHA (NIWA Flood Hazard) raster for that zone - get array & geographic properties
    flha_tif = '{}/FLHA_{}.tif'.format(folder_flha, zone)
    flha_ds = gdal.Open(flha_tif, gdalconst.GA_ReadOnly)
    flha_array = np.array(flha_ds.GetRasterBand(1).ReadAsArray()).astype('float32')
    flha_ds = None
    # Replace no_data values (-9999) with 0 (to distinguish no_flood from no_data)
    flha_array = np.where(flha_array==no_data, 0, flha_array)
    # Import the HAND (Height Above Nearest Drainage) raster for that zone - get array & geographic properties
    hand_tif = '{}/DTM-SRTM_HAND_{}.tif'.format(folder_hand, zone)
    hand_ds = gdal.Open(hand_tif, gdalconst.GA_ReadOnly)
    hand_array = np.array(hand_ds.GetRasterBand(1).ReadAsArray()).astype('float32')
    hand_ds = None
    # Return arrays for the DTM, SRTM & their DIFF, as well as the SRTM projection property dict
    return dtm_array, srtm_array, diff_array, test_array, merit_array, flha_array, hand_array, srtm_props
# Define a function that takes a prediction vector (pixel-based models) & returns three 2D arrays: corrections, corrected SRTM, and residuals
def process_1D_predictions(zone, prediction_vector, output_format, no_data=-9999):
    """Rasterise a flat per-pixel prediction vector back onto the zone grid.

    `prediction_vector` must contain one predicted correction per pixel of the
    zone's DIFF raster, in row-major (top-left to bottom-right) order — the
    counter `i` advances for every pixel, valid or not, to keep the two in sync.

    Args:
        zone: zone identifier used to load the base rasters.
        prediction_vector: flat sequence of predicted SRTM corrections.
        output_format: 'full_zone' (whole grid), 'test_pad' (non-test pixels
            set to NaN) or 'test_clip' (cropped to the test-patch bounding box).

    Returns:
        Tuple of 9 arrays: predicted corrections, corrected elevations,
        residuals, then DTM, SRTM, DIFF, MERIT, FLHA and HAND — each in the
        chosen output format; None for an unknown output_format.
    """
    # Get 2D arrays for that zone's DTM, SRTM, DIFF & MERIT data, as well as the SRTM GeoTIFF's geographical projection properties
    dtm_array, srtm_array, diff_array, test_array, merit_array, flha_array, hand_array, srtm_props = get_base_raster_data(zone)
    # Check that length of predictions vector matches expectations
    if diff_array.size != len(prediction_vector): print('Prediction vector does not match expectation')
    if diff_array.shape != srtm_array.shape: print('SRTM & DIFF arrays are of different shapes')
    # Initialise new numpy arrays of the same dimensions as the SRTM array, for the full zone & also limited to the test patches
    pred_corrections = np.zeros(srtm_array.shape)
    pred_corrections_test = np.zeros(srtm_array.shape)
    pred_elevations = np.zeros(srtm_array.shape)
    pred_elevations_test = np.zeros(srtm_array.shape)
    # Make copies of other rasters, in which pixels out of test extent will be set to np.nan
    dtm_array_test = dtm_array.copy()
    srtm_array_test = srtm_array.copy()
    diff_array_test = diff_array.copy()
    merit_array_test = merit_array.copy()
    flha_array_test = flha_array.copy()
    hand_array_test = hand_array.copy()
    # Iterate through all cells, filling the new arrays as appropriate
    i = 0
    array_height = srtm_array.shape[0]
    array_width = srtm_array.shape[1]
    # Starting at the top & moving down:
    for j in range(array_height):
        # Starting at the left & moving right:
        for k in range(array_width):
            # If corresponding DIFF pixel is no_data, assign no_data pixels to all new arrays
            if diff_array[j,k] == no_data:
                pred_corrections[j,k] = np.nan
                pred_corrections_test[j,k] = np.nan
                pred_elevations[j,k] = np.nan
                pred_elevations_test[j,k] = np.nan
                dtm_array_test[j,k] = np.nan
                srtm_array_test[j,k] = np.nan
                diff_array_test[j,k] = np.nan
                merit_array_test[j,k] = np.nan
                flha_array_test[j,k] = np.nan
                hand_array_test[j,k] = np.nan
            # If corresponding DIFF pixel is valid, use the predictions available in the input vector
            else:
                # Full arrays processed the same regardless of whether or not this pixel belongs to a test patch
                correction = prediction_vector[i]
                elevation = srtm_array[j,k]
                pred_corrections[j,k] = correction
                pred_elevations[j,k] = elevation - correction
                # For test arrays, assign predicted values if patch is a test patch, otherwise no_data value
                if test_array[j,k]:
                    pred_corrections_test[j,k] = correction
                    pred_elevations_test[j,k] = elevation - correction
                else:
                    pred_corrections_test[j,k] = np.nan
                    pred_elevations_test[j,k] = np.nan
                    dtm_array_test[j,k] = np.nan
                    srtm_array_test[j,k] = np.nan
                    diff_array_test[j,k] = np.nan
                    merit_array_test[j,k] = np.nan
                    flha_array_test[j,k] = np.nan
                    hand_array_test[j,k] = np.nan
            # Increment the vector counter (every pixel, to stay aligned with the flat vector)
            i += 1
    # Calculate residuals (diff - predictions)
    pred_residuals = diff_array - pred_corrections
    pred_residuals_test = pred_residuals.copy()
    pred_residuals_test[test_array==0] = np.nan
    # Get array extents of test data, for export of clipped arrays
    x_min = np.where(test_array==1)[1].min()
    x_max = np.where(test_array==1)[1].max()
    y_min = np.where(test_array==1)[0].min()
    y_max = np.where(test_array==1)[0].max()
    # Arrays returned will depend on the output_format argument provided
    if output_format == 'full_zone':
        return pred_corrections, pred_elevations, pred_residuals, dtm_array, srtm_array, diff_array, merit_array, flha_array, hand_array
    elif output_format == 'test_pad':
        return pred_corrections_test, pred_elevations_test, pred_residuals_test, dtm_array_test, srtm_array_test, diff_array_test, merit_array_test, flha_array_test, hand_array_test
    elif output_format == 'test_clip':
        return pred_corrections_test[y_min:y_max+1, x_min:x_max+1], pred_elevations_test[y_min:y_max+1, x_min:x_max+1], pred_residuals_test[y_min:y_max+1, x_min:x_max+1], dtm_array_test[y_min:y_max+1, x_min:x_max+1], srtm_array_test[y_min:y_max+1, x_min:x_max+1], diff_array_test[y_min:y_max+1, x_min:x_max+1], merit_array_test[y_min:y_max+1, x_min:x_max+1], flha_array_test[y_min:y_max+1, x_min:x_max+1], hand_array_test[y_min:y_max+1, x_min:x_max+1]
    else:
        print('Unknown input value for output_format!')
        return None
# Define a function that takes a prediction array (patch-based models) & returns three 2D arrays: corrections, corrected SRTM, and residuals
def process_2D_predictions(zone, prediction_array, output_format, no_data=-9999):
    """Turn a patch-based (2D) model prediction into correction/elevation/residual rasters.

    Parameters
    ----------
    zone : str
        Test-zone identifier, forwarded to get_base_raster_data().
    prediction_array : ndarray
        Model output of shape (1, H, W); trimmed to the zone's SRTM grid,
        since all rasters share the same upper-left origin.
    output_format : str
        'full_zone' -> arrays covering the whole zone;
        'test_pad'  -> same extent, with np.nan outside the test patches;
        'test_clip' -> clipped to the bounding box of the test patches.
    no_data : int, optional
        Accepted for interface symmetry with process_1D_predictions; the
        padded/clipped outputs here use np.nan rather than this value.

    Returns
    -------
    tuple of nine 2D arrays (corrections, elevations, residuals, dtm, srtm,
    diff, merit, flha, hand) in the requested format, or None when
    output_format is unrecognised.
    """
    # Base rasters for the zone (the projection properties are not needed here)
    dtm_grid, srtm_grid, diff_grid, test_grid, merit_grid, flha_grid, hand_grid, _props = get_base_raster_data(zone)
    # Trim the prediction to the SRTM grid dimensions
    corrections = prediction_array[0, :srtm_grid.shape[0], :srtm_grid.shape[1]]
    # Derive corrected elevations & residuals
    elevations = srtm_grid - corrections
    residuals = diff_grid - corrections
    # Full-zone outputs, in the order callers expect
    full_outputs = [corrections, elevations, residuals, dtm_grid, srtm_grid,
                    diff_grid, merit_grid, flha_grid, hand_grid]
    # Masked copies: np.nan everywhere outside the test patches
    outside_test = test_grid == 0
    masked_outputs = []
    for grid in full_outputs:
        padded = grid.copy()
        padded[outside_test] = np.nan
        masked_outputs.append(padded)
    # Bounding box of the test patches, used by the 'test_clip' option
    rows, cols = np.where(test_grid == 1)
    x_lo, x_hi = cols.min(), cols.max()
    y_lo, y_hi = rows.min(), rows.max()
    # Return whichever format was requested
    if output_format == 'full_zone':
        return tuple(full_outputs)
    elif output_format == 'test_pad':
        return tuple(masked_outputs)
    elif output_format == 'test_clip':
        return tuple(m[y_lo:y_hi+1, x_lo:x_hi+1] for m in masked_outputs)
    else:
        print('Unknown input value for output_format!')
        return None
###############################################################################
# 2. Generate rasters indicating test patch coverage within each zone #
###############################################################################
# Define path to SHP of all patches, split up according to usage (train, dev, test)
# NOTE(review): folder_logs, folder_srtm, test_zones, ogr2ogr, gdal_rasterise
# and extract_projection_info are defined earlier in the file (outside this view)
patches_all = '{}/patches/SHP/patches_target_split.shp'.format(folder_logs)
# Loop through each of the test zones, generating a new SHP & GeoTIFF showing coverage of test patches for each
for zone in test_zones:
    print('Processing {} zone...'.format(zone))
    # Save a filtered version of the patch SHP, containing only those in the appropriate zone
    # (ogr2ogr -sql keeps just this zone's patches whose usage is 'test')
    patches_zone_shp = '{}/patches/SHP/patches_target_split_{}.shp'.format(folder_logs, zone)
    filter_query = "zone = '{}' and usage = 'test'".format(zone)
    filter_command = [ogr2ogr, patches_zone_shp, patches_all, '-sql', 'SELECT * FROM patches_target_split WHERE {}'.format(filter_query), '-overwrite']
    filter_result = subprocess.run(filter_command, stdout=subprocess.PIPE)
    # Failures are only printed, not raised; stderr is not captured, so any
    # GDAL/OGR error text will go straight to the console
    if filter_result.returncode != 0: print(filter_result.stdout)
    # Read that zone's SRTM GeoTIFF's geographical projection properties, to use as a template
    diff_tif = '{}/{}/SRTM_{}_Median_Diff.tif'.format(folder_srtm, zone, zone)
    diff_proj, diff_res_x, diff_res_y, diff_x_min, diff_x_max, diff_y_min, diff_y_max, diff_width, diff_height = extract_projection_info(diff_tif)
    # Convert new SHP to a GeoTIFF, using that zone's DIFF raster as a template:
    # cells inside a test patch are burned to 1, all others initialised to 0.
    # diff_res_y is negated for -tr, presumably because the geotransform stores
    # a negative north-south resolution - TODO confirm against extract_projection_info
    patches_zone_tif = '{}/patches/TIF/{}_test_patches.tif'.format(folder_logs, zone)
    rasterise_command = [gdal_rasterise, '-te', str(diff_x_min), str(diff_y_min), str(diff_x_max), str(diff_y_max), '-tr', str(diff_res_x), str(-diff_res_y), '-burn', '1', '-ot', 'Int16', '-a_nodata', '-9999', '-init', '0', patches_zone_shp, patches_zone_tif]
    rasterise_result = subprocess.run(rasterise_command, stdout=subprocess.PIPE)
    if rasterise_result.returncode != 0: print(rasterise_result.stdout)
###############################################################################
# 3. Generate prediction GeoTIFFs for each test zone, for each ML model #
###############################################################################
# Random Forest predictions
# For each test zone: load the 1D RF prediction vector, rebuild 2D rasters via
# process_1D_predictions (defined earlier in the file), and write the
# corrections, corrected elevations & residuals to GeoTIFFs - once covering
# the full zone, and once padded with np.nan outside the test patches.
for zone in test_zones:
    # Import zone predictions as vector
    rf_predictions = np.load('{}/RF_Predictions_ByZone_{}.npy'.format(folder_results_rf, zone))
    # Get general 2D arrays for this zone (srtm_props carries the GeoTIFF
    # projection info reused for every raster written below)
    dtm_array, srtm_array, diff_array, test_array, merit_array, flha_array, hand_array, srtm_props = get_base_raster_data(zone)
    # Get 2D result arrays covering FULL zone & save to GeoTIFFs
    rf_corrections_array, rf_elevations_array, rf_residuals_array, _,_,_,_,_,_ = process_1D_predictions(zone, rf_predictions, 'full_zone', no_data=-9999)
    rf_corrections_tif = '{}/TIF/rf_corrections_{}.tif'.format(folder_results_rf, zone)
    rf_elevations_tif = '{}/TIF/rf_elevations_{}.tif'.format(folder_results_rf, zone)
    rf_residuals_tif = '{}/TIF/rf_residuals_{}.tif'.format(folder_results_rf, zone)
    array_to_geotiff(rf_corrections_array, rf_corrections_tif, -9999, srtm_props)
    array_to_geotiff(rf_elevations_array, rf_elevations_tif, -9999, srtm_props)
    array_to_geotiff(rf_residuals_array, rf_residuals_tif, -9999, srtm_props)
    # Get 2D result arrays covering FULL zone (with non-test pixels set to np.nan) & save to GeoTIFFs
    # NOTE(review): the 'test_pad' arrays hold np.nan while the declared GeoTIFF
    # nodata is -9999 - presumably array_to_geotiff reconciles the two; confirm
    rf_corrections_test_array, rf_elevations_test_array, rf_residuals_test_array, _,_,_,_,_,_ = process_1D_predictions(zone, rf_predictions, 'test_pad', no_data=-9999)
    rf_corrections_test_tif = '{}/TIF/rf_corrections_{}_test.tif'.format(folder_results_rf, zone)
    rf_elevations_test_tif = '{}/TIF/rf_elevations_{}_test.tif'.format(folder_results_rf, zone)
    rf_residuals_test_tif = '{}/TIF/rf_residuals_{}_test.tif'.format(folder_results_rf, zone)
    array_to_geotiff(rf_corrections_test_array, rf_corrections_test_tif, -9999, srtm_props)
    array_to_geotiff(rf_elevations_test_array, rf_elevations_test_tif, -9999, srtm_props)
    array_to_geotiff(rf_residuals_test_array, rf_residuals_test_tif, -9999, srtm_props)
# Densely-connected neural network predictions
# Same pattern as the RF loop above: reconstruct rasters from the 1D ensemble
# prediction vector and export full-zone plus test-padded GeoTIFFs.
for zone in test_zones:
    # Import zone predictions as vector
    densenet_predictions = np.load('{}/predictions/densenet_ensemble_{}_prediction.npy'.format(folder_results_densenet, zone))
    # Get general 2D arrays for this zone (srtm_props is reused for all exports)
    dtm_array, srtm_array, diff_array, test_array, merit_array, flha_array, hand_array, srtm_props = get_base_raster_data(zone)
    # Get 2D result arrays covering FULL zone & save to GeoTIFFs
    densenet_corrections_array, densenet_elevations_array, densenet_residuals_array,_,_,_,_,_,_ = process_1D_predictions(zone, densenet_predictions, 'full_zone', no_data=-9999)
    densenet_corrections_tif = '{}/TIF/densenet_corrections_{}.tif'.format(folder_results_densenet, zone)
    densenet_elevations_tif = '{}/TIF/densenet_elevations_{}.tif'.format(folder_results_densenet, zone)
    densenet_residuals_tif = '{}/TIF/densenet_residuals_{}.tif'.format(folder_results_densenet, zone)
    array_to_geotiff(densenet_corrections_array, densenet_corrections_tif, -9999, srtm_props)
    array_to_geotiff(densenet_elevations_array, densenet_elevations_tif, -9999, srtm_props)
    array_to_geotiff(densenet_residuals_array, densenet_residuals_tif, -9999, srtm_props)
    # Get 2D result arrays covering FULL zone (with non-test pixels set to np.nan) & save to GeoTIFFs
    densenet_corrections_test_array, densenet_elevations_test_array, densenet_residuals_test_array,_,_,_,_,_,_ = process_1D_predictions(zone, densenet_predictions, 'test_pad', no_data=-9999)
    densenet_corrections_test_tif = '{}/TIF/densenet_corrections_{}_test.tif'.format(folder_results_densenet, zone)
    densenet_elevations_test_tif = '{}/TIF/densenet_elevations_{}_test.tif'.format(folder_results_densenet, zone)
    densenet_residuals_test_tif = '{}/TIF/densenet_residuals_{}_test.tif'.format(folder_results_densenet, zone)
    array_to_geotiff(densenet_corrections_test_array, densenet_corrections_test_tif, -9999, srtm_props)
    array_to_geotiff(densenet_elevations_test_array, densenet_elevations_test_tif, -9999, srtm_props)
    array_to_geotiff(densenet_residuals_test_array, densenet_residuals_test_tif, -9999, srtm_props)
# Fully-convolutional neural network predictions
# As above, but the convnet prediction is already a 2D patch array, so it goes
# through process_2D_predictions rather than the 1D variant.
for zone in test_zones:
    # Import zone predictions as array
    convnet_predictions = np.load('{}/predictions/convnet_ensemble_{}_prediction_intact.npy'.format(folder_results_convnet, zone))
    # Get general 2D arrays for this zone (srtm_props is reused for all exports)
    dtm_array, srtm_array, diff_array, test_array, merit_array, flha_array, hand_array, srtm_props = get_base_raster_data(zone)
    # Get 2D result arrays covering FULL zone & save to GeoTIFFs
    convnet_corrections_array, convnet_elevations_array, convnet_residuals_array,_,_,_,_,_,_ = process_2D_predictions(zone, convnet_predictions, 'full_zone', no_data=-9999)
    convnet_corrections_tif = '{}/TIF/convnet_corrections_{}.tif'.format(folder_results_convnet, zone)
    convnet_elevations_tif = '{}/TIF/convnet_elevations_{}.tif'.format(folder_results_convnet, zone)
    convnet_residuals_tif = '{}/TIF/convnet_residuals_{}.tif'.format(folder_results_convnet, zone)
    array_to_geotiff(convnet_corrections_array, convnet_corrections_tif, -9999, srtm_props)
    array_to_geotiff(convnet_elevations_array, convnet_elevations_tif, -9999, srtm_props)
    array_to_geotiff(convnet_residuals_array, convnet_residuals_tif, -9999, srtm_props)
    # Get 2D result arrays covering FULL zone (with non-test pixels set to np.nan) & save to GeoTIFFs
    convnet_corrections_test_array, convnet_elevations_test_array, convnet_residuals_test_array,_,_,_,_,_,_ = process_2D_predictions(zone, convnet_predictions, 'test_pad', no_data=-9999)
    convnet_corrections_test_tif = '{}/TIF/convnet_corrections_{}_test.tif'.format(folder_results_convnet, zone)
    convnet_elevations_test_tif = '{}/TIF/convnet_elevations_{}_test.tif'.format(folder_results_convnet, zone)
    convnet_residuals_test_tif = '{}/TIF/convnet_residuals_{}_test.tif'.format(folder_results_convnet, zone)
    array_to_geotiff(convnet_corrections_test_array, convnet_corrections_test_tif, -9999, srtm_props)
    array_to_geotiff(convnet_elevations_test_array, convnet_elevations_test_tif, -9999, srtm_props)
    array_to_geotiff(convnet_residuals_test_array, convnet_residuals_test_tif, -9999, srtm_props)
###############################################################################
# 4. Map elevations & residuals in each zone, showing results of all models #
###############################################################################
# Loop through each test zone, producing two figures per zone:
#   (1) a 5x2 panel comparing LiDAR/SRTM/RF/DCN/FCN elevations & residuals
#   (2) a 3x1 elevation-only panel for the convnet (FCN) model
for zone in test_zones:
    # Import Random Forest arrays - ONLY test dataset pixels
    rf_predictions = np.load('{}/RF_Predictions_ByZone_{}.npy'.format(folder_results_rf, zone))
    rf_cor, rf_elv, rf_res, _,_,_,_,_,_ = process_1D_predictions(zone, rf_predictions, 'test_clip', no_data=-9999)
    # Import Densenet arrays - ONLY test dataset pixels
    dn_predictions = np.load('{}/predictions/densenet_ensemble_{}_prediction.npy'.format(folder_results_densenet, zone))
    dn_cor, dn_elv, dn_res, _,_,_,_,_,_ = process_1D_predictions(zone, dn_predictions, 'test_clip', no_data=-9999)
    # Import Convnet arrays - ONLY test dataset pixels (the base rasters dtm,
    # srtm, diff, merit, flha, hand are also taken from this call)
    cn_predictions = np.load('{}/predictions/convnet_ensemble_{}_prediction_intact.npy'.format(folder_results_convnet, zone))
    cn_cor, cn_elv, cn_res, dtm, srtm, diff, merit, flha, hand = process_2D_predictions(zone, cn_predictions, 'test_clip', no_data=-9999)
    # Read satellite imagery for that test zone into memory
    sat_img = plt.imread('{}/SatImg_{}.png'.format(folder_maps, zone))
    # Evaluate RMSE of all DSMs, with reference to the LiDAR DTM available
    # (nan-aware, since non-test pixels are np.nan in the 'test_clip' arrays)
    srtm_RMSE = np.sqrt(np.nanmean(np.square(dtm - srtm)))
    merit_RMSE = np.sqrt(np.nanmean(np.square(dtm - merit)))
    rf_RMSE = np.sqrt(np.nanmean(np.square(dtm - rf_elv)))
    dn_RMSE = np.sqrt(np.nanmean(np.square(dtm - dn_elv)))
    cn_RMSE = np.sqrt(np.nanmean(np.square(dtm - cn_elv)))
    # Get corresponding improvement, with respect to SRTM's RMSE
    merit_RMSE_reduction = (srtm_RMSE - merit_RMSE)/srtm_RMSE * 100
    rf_RMSE_reduction = (srtm_RMSE - rf_RMSE)/srtm_RMSE * 100
    dn_RMSE_reduction = (srtm_RMSE - dn_RMSE)/srtm_RMSE * 100
    cn_RMSE_reduction = (srtm_RMSE - cn_RMSE)/srtm_RMSE * 100
    # Get elevation range & build appropriate colourmap
    # NOTE(review): set_bad mutates the shared matplotlib colormap instance
    # (cm.terrain / cm.coolwarm); copy the colormap first if other plots rely
    # on its defaults
    elv_min, elv_max = test_zones_props[zone]['elv_cbar_range']
    elv_cmap = cm.terrain
    elv_cmap.set_bad(color='whitesmoke')
    elv_norm = colors.Normalize(vmin=elv_min, vmax=elv_max)
    # Get residual range & build a diverging colourmap centred on zero
    # (a redundant plain Normalize that was immediately overwritten by the
    # TwoSlopeNorm below has been removed)
    res_min, res_max = test_zones_props[zone]['res_cbar_range']
    res_cmap = cm.coolwarm
    res_cmap.set_bad(color='whitesmoke')
    res_norm = colors.TwoSlopeNorm(vmin=res_min, vcenter=0.0, vmax=res_max)
    # For the 'TSM16_ATG' zone, rotate arrays for easier plotting
    if zone == 'TSM16_ATG':
        dtm, srtm, merit, diff, sat_img, rf_elv, rf_res, dn_elv, dn_res, cn_elv, cn_res = [np.rot90(raster, axes=(1,0)) for raster in [dtm, srtm, merit, diff, sat_img, rf_elv, rf_res, dn_elv, dn_res, cn_elv, cn_res]]
    # Determine figure size based on desired width & array dimensions
    width = 8
    scale = width/(2*srtm.shape[1])
    height = 1.05 * 5 * srtm.shape[0] * scale
    # Generate plot showing both elevations & residuals
    fig, axes = plt.subplots(nrows=5, ncols=2, figsize=(width,height))
    # Row 1A: LiDAR DTM
    axes[0,0].imshow(dtm, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[0,0].set_title('a) LiDAR (resampled to SRTM resolution)', x=0, ha='left', size=9, pad=4)
    axes[0,0].axis('off')
    # Row 1B: Satellite imagery
    axes[0,1].imshow(sat_img, aspect='equal')
    axes[0,1].set_title("b) 'NZ Imagery' basemap (LINZ Data Service)", x=0, ha='left', size=9, pad=4)
    axes[0,1].axis('off')
    # Row 2A: SRTM DSM
    axes[1,0].imshow(srtm, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[1,0].set_title('c) SRTM: RMSE={:.2f}m (compared to LiDAR)'.format(srtm_RMSE), x=0, ha='left', size=9, pad=4)
    axes[1,0].axis('off')
    # Row 2B: SRTM Residuals
    axes[1,1].imshow(diff, aspect='equal', cmap=res_cmap, norm=res_norm)
    axes[1,1].set_title('d) SRTM residuals: \u03BC={:.2f}m, \u03C3={:.2f}m'.format(np.nanmean(diff), np.nanstd(diff)), x=0, ha='left', size=9, pad=4)
    axes[1,1].axis('off')
    # Row 3A: Random Forest - elevations
    axes[2,0].imshow(rf_elv, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[2,0].set_title('e) RF-corrected SRTM: RMSE={:.2f}m (-{:.1f}%)'.format(rf_RMSE, rf_RMSE_reduction), x=0, ha='left', size=9, pad=4)
    axes[2,0].axis('off')
    # Row 3B: Random Forest - residuals
    axes[2,1].imshow(rf_res, aspect='equal', cmap=res_cmap, norm=res_norm)
    axes[2,1].set_title('f) RF-corrected SRTM residuals: \u03BC={:.2f}m, \u03C3={:.2f}m'.format(np.nanmean(rf_res), np.nanstd(rf_res)), x=0, ha='left', size=9, pad=4)
    axes[2,1].axis('off')
    # Row 4A: Densenet - elevations
    axes[3,0].imshow(dn_elv, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[3,0].set_title('g) DCN-corrected SRTM: RMSE={:.2f}m (-{:.1f}%)'.format(dn_RMSE, dn_RMSE_reduction), x=0, ha='left', size=9, pad=4)
    axes[3,0].axis('off')
    # Row 4B: Densenet - residuals (title now says 'residuals', consistent
    # with panels f & j - previously mislabelled)
    axes[3,1].imshow(dn_res, aspect='equal', cmap=res_cmap, norm=res_norm)
    axes[3,1].set_title('h) DCN-corrected SRTM residuals: \u03BC={:.2f}m, \u03C3={:.2f}m'.format(np.nanmean(dn_res), np.nanstd(dn_res)), x=0, ha='left', size=9, pad=4)
    axes[3,1].axis('off')
    # Row 5A: Convnet - elevations
    axes[4,0].imshow(cn_elv, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[4,0].set_title('i) FCN-corrected SRTM: RMSE={:.2f}m (-{:.1f}%)'.format(cn_RMSE, cn_RMSE_reduction), x=0, ha='left', size=9, pad=4)
    axes[4,0].axis('off')
    # Row 5B: Convnet - residuals
    axes[4,1].imshow(cn_res, aspect='equal', cmap=res_cmap, norm=res_norm)
    axes[4,1].set_title('j) FCN-corrected SRTM residuals: \u03BC={:.2f}m, \u03C3={:.2f}m'.format(np.nanmean(cn_res), np.nanstd(cn_res)), x=0, ha='left', size=9, pad=4)
    axes[4,1].axis('off')
    # Add a small north arrow indicator to each map (the rotated TSM16_ATG
    # zone needs the arrow pointing left instead of up)
    arrowprops = dict(facecolor='black', width=1.5, headwidth=4, headlength=4)
    if zone == 'TSM16_ATG':
        x, y, arrow_length = 0.1, 0.95, 0.06
        xytext = (x-arrow_length, y)
    else:
        x, y, arrow_length = 0.97, 0.96, 0.11
        xytext = (x, y-arrow_length)
    for ax in axes.ravel():
        ax.annotate('N', xy=(x,y), xycoords='axes fraction', xytext=xytext, textcoords='axes fraction', arrowprops=arrowprops, ha='center', va='center', fontsize=8)
    # Add a simple scale bar to the DTM map, assuming that each grid cell is approx. 23m (SRTM at this latitude)
    ncells_1km = 1000/23
    offset = 8
    adjust_y = 10 if zone == 'TSM16_ATG' else 0
    axes[0,0].plot([offset, offset + ncells_1km], [offset + adjust_y, offset + adjust_y], color='black', linewidth=0.8)
    axes[0,0].plot([offset, offset], [offset-1+adjust_y, offset+1+adjust_y], color='black', linewidth=0.8)
    axes[0,0].plot([offset + ncells_1km, offset + ncells_1km], [offset-1+adjust_y, offset+1+adjust_y], color='black', linewidth=0.8)
    axes[0,0].annotate('1km', xy=(offset + 0.5*ncells_1km, 1.3*offset + adjust_y), ha='center', va='top', size=8)
    # Tighten layout
    fig.tight_layout(pad=1)
    # Adjust layout to fit two colourbars at the bottom
    fig.subplots_adjust(top=0.98, bottom=0.06, wspace=0.05, hspace=0.12)
    # Add colourbar for elevations
    elv_cbar = fig.add_axes([0.03, 0.04, 0.4, 0.01]) # [left, bottom, width, height]
    fig.colorbar(cm.ScalarMappable(cmap=elv_cmap, norm=elv_norm), cax=elv_cbar, orientation='horizontal').set_label(label='Elevation [m]', size=9)
    elv_cbar.tick_params(labelsize=8)
    # Add colourbar for residuals
    res_cbar = fig.add_axes([0.5, 0.04, 0.4, 0.01]) # [left, bottom, width, height]
    fig.colorbar(cm.ScalarMappable(cmap=res_cmap, norm=res_norm), cax=res_cbar, orientation='horizontal').set_label(label='Residuals [m]', size=9)
    res_cbar.tick_params(labelsize=8)
    # Save figure
    fig.savefig('{}/maps_elv_res_{}.png'.format(folder_fig, zone), dpi=300, bbox_inches='tight')
    plt.close()
    # Generate plot showing only elevations, and only the convnet results
    width = 8
    scale = width/(srtm.shape[1])
    height = 1.05 * 3 * srtm.shape[0] * scale
    fig, axes = plt.subplots(nrows=3, figsize=(width,height))
    # Row 1: LiDAR DTM
    axes[0].imshow(dtm, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[0].set_title('a) LiDAR (resampled to SRTM resolution)', x=0, ha='left', size=9, pad=4)
    axes[0].axis('off')
    # Row 2: SRTM DSM
    axes[1].imshow(srtm, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[1].set_title('b) SRTM: RMSE={:.2f}m (compared to LiDAR)'.format(srtm_RMSE), x=0, ha='left', size=9, pad=4)
    axes[1].axis('off')
    # Row 3: Convnet - elevations
    axes[2].imshow(cn_elv, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[2].set_title('c) FCN-corrected SRTM: RMSE={:.2f}m (an improvement of {:.1f}% over raw SRTM)'.format(cn_RMSE, cn_RMSE_reduction), x=0, ha='left', size=9, pad=4)
    axes[2].axis('off')
    # Add a small north arrow indicator to each map
    arrowprops = dict(facecolor='black', width=1.5, headwidth=4, headlength=4)
    if zone == 'TSM16_ATG':
        x, y, arrow_length = 0.07, 0.95, 0.04
        xytext = (x-arrow_length, y)
    else:
        x, y, arrow_length = 0.97, 0.96, 0.07
        xytext = (x, y-arrow_length)
    for ax in axes.ravel():
        ax.annotate('N', xy=(x,y), xycoords='axes fraction', xytext=xytext, textcoords='axes fraction', arrowprops=arrowprops, ha='center', va='center', fontsize=10)
    # Add a simple scale bar to the DTM map, assuming that each grid cell is approx. 23m (SRTM at this latitude)
    ncells_1km = 1000/23
    offset = 8
    adjust_y = 10 if zone == 'TSM16_ATG' else 0
    axes[0].plot([offset, offset + ncells_1km], [offset + adjust_y, offset + adjust_y], color='black', linewidth=0.8)
    axes[0].plot([offset, offset], [offset-1+adjust_y, offset+1+adjust_y], color='black', linewidth=0.8)
    axes[0].plot([offset + ncells_1km, offset + ncells_1km], [offset-1+adjust_y, offset+1+adjust_y], color='black', linewidth=0.8)
    axes[0].annotate('1km', xy=(offset + 0.5*ncells_1km, 1.3*offset + adjust_y), ha='center', va='top', size=8)
    # Tighten layout
    fig.tight_layout(pad=1)
    # Adjust layout to fit the colourbar at the bottom
    fig.subplots_adjust(bottom=0.07, wspace=0.05, hspace=0.07)
    # Add colourbar for elevations
    elv_cbar = fig.add_axes([0.06, 0.04, 0.88, 0.015]) # [left, bottom, width, height]
    fig.colorbar(cm.ScalarMappable(cmap=elv_cmap, norm=elv_norm), cax=elv_cbar, orientation='horizontal').set_label(label='Elevation [m]', size=9)
    elv_cbar.tick_params(labelsize=8)
    # Save figure
    fig.savefig('{}/maps_elv_{}_convnet.png'.format(folder_fig, zone), dpi=300, bbox_inches='tight')
    plt.close()
###############################################################################
# 5. Compare overall residuals using boxplots #
###############################################################################
# Read in error residuals calculated earlier for the test dataset
# (context managers ensure the pickle file handles are closed promptly)
with open('{}/rf_residuals.p'.format(folder_results_rf), 'rb') as f:
    residuals_dict_rf = pickle.load(f)
with open('{}/densenet_residuals_models.p'.format(folder_results_densenet), 'rb') as f:
    residuals_dict_densenet = pickle.load(f)
with open('{}/convnet_residuals_models.p'.format(folder_results_convnet), 'rb') as f:
    residuals_dict_convnet = pickle.load(f)
# Visual sanity check that the 'initial' residuals agree across the three
# dictionaries (three identical boxes expected). The figure is closed
# immediately afterwards so it no longer lingers for the rest of the script.
fig, axes = plt.subplots(figsize=(9,5))
axes.boxplot([d['test']['initial'] for d in [residuals_dict_rf, residuals_dict_densenet, residuals_dict_convnet]], showfliers=False)
plt.close(fig)
# Get residuals to plot: uncorrected, naive baseline, then one per model
res_initial = residuals_dict_convnet['test']['initial']
res_baseline = residuals_dict_convnet['test']['naive']
res_rf = residuals_dict_rf['test']['rf']
res_densenet = residuals_dict_densenet['test']['densenet_ensemble']
res_convnet = residuals_dict_convnet['test']['convnet_ensemble']
# Boxplots of error residuals (model_colours/label_colours/models defined
# earlier in the file)
bp_data = [res_initial, res_baseline, res_rf, res_densenet, res_convnet]
bp_colours = ['dimgrey', 'darkgrey'] + [model_colours[m] for m in models]
bp_label_colours = ['dimgrey', 'darkgrey'] + [label_colours[m] for m in models]
# Add boxplots to the figure
fig, axes = plt.subplots(figsize=(9,5))
bps = axes.boxplot(bp_data, showfliers=False, medianprops={'color':'black'}, patch_artist=True)
for patch, colour in zip(bps['boxes'], bp_colours):
    patch.set_facecolor(colour)
# Add axis ticks & labels
axes.set_xticks(range(1,6))
axes.set_xticklabels(['Initial','Baseline\ncorrection','RF\ncorrection','DCN\ncorrection','FCN\ncorrection'])
axes.set_ylabel('Residual error before/after correction [m]')
# Turn spines off (plain loop rather than a side-effect list comprehension)
for edge in ['top','right']:
    axes.spines[edge].set_visible(False)
# Add a horizontal line for zero error
axes.axhline(y=0, linestyle='dashed', color='black', linewidth=0.8, alpha=0.3)
# Add labels for medians & IQR; the IQR labels share a common height just
# above the tallest upper whisker (q75 + 1.5*IQR)
iqr_label_y = 0
for i, data in enumerate(bp_data):
    median = np.median(data)
    q75, q25 = np.percentile(data, [75, 25])
    iqr = q75 - q25
    iqr_label_y = max(1.02*(q75 + 1.5*iqr), iqr_label_y)
    axes.annotate('{:.3f}m'.format(median), xy=(i+1.28, median), ha='left', va='center')
    axes.annotate('IQR = {:.3f}m'.format(iqr), xy=(i+1, iqr_label_y), color=bp_label_colours[i], fontweight='bold', ha='center', va='bottom')
fig.tight_layout()
fig.savefig('{}/residuals_bymodel_boxplots.png'.format(folder_fig), dpi=300)
plt.close()
###############################################################################
# 6. Assess correction efficacy by zone, land cover, FLHA class & HAND range #
###############################################################################
# Set up a dictionary to contain SRTM-LiDAR difference values corresponding to each Manaaki Whenua landclass type present in that LiDAR zone coverage
# NOTE(review): diff_by_landcover is initialised here but not referenced in the
# processing loop below - it may be used further down the file; confirm
diff_by_landcover = {1:{'label':'Artificial\nsurfaces', 'data':[], 'colour':(78/255, 78/255, 78/255)},
                     2:{'label':'Bare/lightly-\nvegetated\nsurfaces', 'data':[], 'colour':(255/255, 235/255, 190/255)},
                     3:{'label':'Water\nbodies', 'data':[], 'colour':(0/255, 197/255, 255/255)},
                     4:{'label':'Cropland', 'data':[], 'colour':(255/255, 170/255, 0/255)},
                     5:{'label':'Grassland,\nSedgeland\n& Marshland', 'data':[], 'colour':(255/255, 255/255, 115/255)},
                     6:{'label':'Scrub &\nShrubland', 'data':[], 'colour':(137/255, 205/255, 102/255)},
                     7:{'label':'Forest', 'data':[], 'colour':(38/255, 115/255, 0/255)},
                     8:{'label':'Other', 'data':[], 'colour':'#FF0000'}}
# Initalise dictionary to hold test residuals, classed in different ways:
# res[model]['by_zone'|'by_lcdb'|'by_flha'|'by_hand'][key] -> list of residuals,
# for models 'initial' (uncorrected), 'rf', 'dn' (densenet) and 'cn' (convnet)
res = {'initial':{'by_zone':{zone:[] for zone in ['All'] + test_zones}, 'by_lcdb':{i:[] for i in range(1,8)}, 'by_flha':{'flood':[], 'noflood':[]}, 'by_hand':{'hand_{}'.format(h):[] for h in range(1,6)}},
       'rf':{'by_zone':{zone:[] for zone in ['All'] + test_zones}, 'by_lcdb':{i:[] for i in range(1,8)}, 'by_flha':{'flood':[], 'noflood':[]}, 'by_hand':{'hand_{}'.format(h):[] for h in range(1,6)}},
       'dn':{'by_zone':{zone:[] for zone in ['All'] + test_zones}, 'by_lcdb':{i:[] for i in range(1,8)}, 'by_flha':{'flood':[], 'noflood':[]}, 'by_hand':{'hand_{}'.format(h):[] for h in range(1,6)}},
       'cn':{'by_zone':{zone:[] for zone in ['All'] + test_zones}, 'by_lcdb':{i:[] for i in range(1,8)}, 'by_flha':{'flood':[], 'noflood':[]}, 'by_hand':{'hand_{}'.format(h):[] for h in range(1,6)}}}
# Helper used below: masked-array .flatten().tolist() converts masked cells to
# None, so the None test must run BEFORE np.isnan - np.isnan(None) raises
# TypeError. (The previous filters tested np.isnan first, which would crash on
# any masked pixel.)
def _valid_residuals(values):
    """Return only the finite, unmasked (non-None, non-NaN) entries of a list."""
    return [r for r in values if r is not None and not np.isnan(r)]
# Loop through the three test zones (the old enumerate index was unused and
# was shadowed by the inner land-cover loop, so it has been dropped)
for zone in test_zones:
    print('Processing {} zone...'.format(zone))
    # Get the test array & SRTM props dictionary
    _, _, _, test, _, _, _, srtm_props = get_base_raster_data(zone)
    # Land cover classes (grouped class-ID raster produced by proc_LCDB.py)
    lcdb_tif = '{}/LCDB_GroupID_{}.tif'.format(folder_lcdb, zone)
    lcdb_ds = gdal.Open(lcdb_tif, gdalconst.GA_ReadOnly)
    lcdb = np.array(lcdb_ds.ReadAsArray())
    lcdb_ds = None
    # Import Random Forest arrays - ONLY test dataset pixels
    rf_predictions = np.load('{}/RF_Predictions_ByZone_{}.npy'.format(folder_results_rf, zone))
    rf_cor, rf_elv, rf_res, _,_,_,_,_,_ = process_1D_predictions(zone, rf_predictions, 'test_clip', no_data=-9999)
    # Import Densenet arrays - ONLY test dataset pixels
    dn_predictions = np.load('{}/predictions/densenet_ensemble_{}_prediction.npy'.format(folder_results_densenet, zone))
    dn_cor, dn_elv, dn_res, _,_,_,_,_,_ = process_1D_predictions(zone, dn_predictions, 'test_clip', no_data=-9999)
    # Import Convnet arrays - ONLY test dataset pixels (base rasters too)
    cn_predictions = np.load('{}/predictions/convnet_ensemble_{}_prediction_intact.npy'.format(folder_results_convnet, zone))
    cn_cor, cn_elv, cn_res, dtm, srtm, diff, merit, flha, hand = process_2D_predictions(zone, cn_predictions, 'test_clip', no_data=-9999)
    # Check extent of the test patch coverage, with reference to the zone coverage as a whole
    x_min = np.where(test==1)[1].min()
    x_max = np.where(test==1)[1].max()
    y_min = np.where(test==1)[0].min()
    y_max = np.where(test==1)[0].max()
    # For the LCDB array, set to np.nan any pixels which aren't in the test patches & clip it to test data extent
    lcdb[test==0] = np.nan
    lcdb = lcdb[y_min:y_max+1, x_min:x_max+1]
    # Mask all arrays wherever no_data values are present
    # NOTE(review): no_data here is a module-level variable defined earlier in
    # the file (not the -9999 literal passed above) - confirm they agree
    dtm = np.ma.masked_equal(dtm, no_data)
    srtm = np.ma.masked_equal(srtm, no_data)
    diff = np.ma.masked_equal(diff, no_data)
    test = np.ma.masked_equal(test[y_min:y_max+1, x_min:x_max+1], no_data)
    lcdb = np.ma.masked_equal(lcdb, no_data)
    flha = np.ma.masked_equal(flha, no_data)
    hand = np.ma.masked_equal(hand, no_data)
    rf_cor = np.ma.masked_equal(rf_cor, no_data)
    rf_elv = np.ma.masked_equal(rf_elv, no_data)
    rf_res = np.ma.masked_equal(rf_res, no_data)
    dn_cor = np.ma.masked_equal(dn_cor, no_data)
    dn_elv = np.ma.masked_equal(dn_elv, no_data)
    dn_res = np.ma.masked_equal(dn_res, no_data)
    cn_cor = np.ma.masked_equal(cn_cor, no_data)
    cn_elv = np.ma.masked_equal(cn_elv, no_data)
    cn_res = np.ma.masked_equal(cn_res, no_data)
    # Check that all arrays have the same shape before classifying residuals
    if not (dtm.shape == srtm.shape == diff.shape == test.shape == lcdb.shape == flha.shape == hand.shape == rf_cor.shape == dn_cor.shape == cn_cor.shape):
        print('Different test array dimensions!')
        break
    # Class residuals by test zone
    # Get list of residuals for that zone (for each model), dropping masked
    # (None) & NaN entries
    res_initial_byzone = _valid_residuals(diff.flatten().tolist())
    res_rf_byzone = _valid_residuals(rf_res.flatten().tolist())
    res_dn_byzone = _valid_residuals(dn_res.flatten().tolist())
    res_cn_byzone = _valid_residuals(cn_res.flatten().tolist())
    # Update dictionary of all test residuals ('All' accumulates every zone)
    res['initial']['by_zone'][zone] = res_initial_byzone
    res['initial']['by_zone']['All'] = np.append(res['initial']['by_zone']['All'], res_initial_byzone)
    res['rf']['by_zone'][zone] = res_rf_byzone
    res['rf']['by_zone']['All'] = np.append(res['rf']['by_zone']['All'], res_rf_byzone)
    res['dn']['by_zone'][zone] = res_dn_byzone
    res['dn']['by_zone']['All'] = np.append(res['dn']['by_zone']['All'], res_dn_byzone)
    res['cn']['by_zone'][zone] = res_cn_byzone
    res['cn']['by_zone']['All'] = np.append(res['cn']['by_zone']['All'], res_cn_byzone)
    # Class residuals by land cover class
    # Loop through each potential land cover class (as defined in proc_LCDB.py) and calculate elevation residuals for that particular class
    for i in range(1,8):
        # Get lists of residuals for that land cover class - for each of the input residual arrays
        res_initial_byclass = _valid_residuals(diff[lcdb==i].flatten().tolist())
        res_rf_byclass = _valid_residuals(rf_res[lcdb==i].flatten().tolist())
        res_dn_byclass = _valid_residuals(dn_res[lcdb==i].flatten().tolist())
        res_cn_byclass = _valid_residuals(cn_res[lcdb==i].flatten().tolist())
        # Update dictionary of all test residuals
        res['initial']['by_lcdb'][i] = np.append(res['initial']['by_lcdb'][i], res_initial_byclass)
        res['rf']['by_lcdb'][i] = np.append(res['rf']['by_lcdb'][i], res_rf_byclass)
        res['dn']['by_lcdb'][i] = np.append(res['dn']['by_lcdb'][i], res_dn_byclass)
        res['cn']['by_lcdb'][i] = np.append(res['cn']['by_lcdb'][i], res_cn_byclass)
    # Class residuals by NIWA's Flood Hazard susceptibility map
    # Loop through the two flood-susceptibility classes (1 = flood-prone,
    # 0 = not flood-prone) and collect the residuals for each
    for flha_code, flha_label in zip([1,0], ['flood','noflood']):
        # Get lists of residuals for that flood susceptibility - for each of the input residual arrays
        res_initial_byflha = _valid_residuals(diff[flha==flha_code].flatten().tolist())
        res_rf_byflha = _valid_residuals(rf_res[flha==flha_code].flatten().tolist())
        res_dn_byflha = _valid_residuals(dn_res[flha==flha_code].flatten().tolist())
        res_cn_byflha = _valid_residuals(cn_res[flha==flha_code].flatten().tolist())
        # Update dictionary of all test residuals
        res['initial']['by_flha'][flha_label] = np.append(res['initial']['by_flha'][flha_label], res_initial_byflha)
        res['rf']['by_flha'][flha_label] = np.append(res['rf']['by_flha'][flha_label], res_rf_byflha)
        res['dn']['by_flha'][flha_label] = np.append(res['dn']['by_flha'][flha_label], res_dn_byflha)
        res['cn']['by_flha'][flha_label] = np.append(res['cn']['by_flha'][flha_label], res_cn_byflha)
    # Class residuals by HAND (height above nearest drainage) range
    # Define breaks for each of the five HAND ranges (the last bin is open-ended
    # up to the zone's maximum HAND value)
    hand_breaks = [(0,2), (2,5), (5,10), (10,20), (20, max(50, np.nanmax(hand))+1)]
    # Loop through each HAND range
    for j, breaks in enumerate(hand_breaks):
        hand_class = 'hand_{}'.format(j+1)
        # Get lists of residuals corresponding to that range of HAND values - for each of the input residual arrays
        res_initial_byhand = _valid_residuals(diff[(hand >= breaks[0]) & (hand < breaks[1])].flatten().tolist())
        res_rf_byhand = _valid_residuals(rf_res[(hand >= breaks[0]) & (hand < breaks[1])].flatten().tolist())
        res_dn_byhand = _valid_residuals(dn_res[(hand >= breaks[0]) & (hand < breaks[1])].flatten().tolist())
        res_cn_byhand = _valid_residuals(cn_res[(hand >= breaks[0]) & (hand < breaks[1])].flatten().tolist())
        # Update dictionary of all test residuals
        res['initial']['by_hand'][hand_class] = np.append(res['initial']['by_hand'][hand_class], res_initial_byhand)
        res['rf']['by_hand'][hand_class] = np.append(res['rf']['by_hand'][hand_class], res_rf_byhand)
        res['dn']['by_hand'][hand_class] = np.append(res['dn']['by_hand'][hand_class], res_dn_byhand)
        res['cn']['by_hand'][hand_class] = np.append(res['cn']['by_hand'][hand_class], res_cn_byhand)
# Define some common properties for the boxplots of residuals
bp_width = 0.1    # width of each individual box
bp_offset = 0.2   # horizontal spacing between the four boxes (initial + 3 models) per group
bp_colours = ['dimgrey'] + [model_colours[m] for m in models]
# Define labels for the four test zones/subsets
test_zones_classes = ['All'] + test_zones
test_zones_labels = ['Overall (all test\nzones combined)', 'Wairau Plains East\n(Marlborough)', 'Wairau Valley\n(Marlborough)', 'Takaka\n(Tasman)']
# Define colours for the five HAND ranges (graded blues, light to dark)
hand_cmap = cm.Blues_r
hand_colours = [hand_cmap(i/5) for i in range(5)]
# 6a. Set up a figure summarising all residuals by test zone & LCDB class
fig, axes = plt.subplots(nrows=2, figsize=(9,9))
# Plot 1: Residuals by test zone
# Each test zone occupies one integer x position; the four boxplots (initial + RF/DCN/FCN)
# are offset symmetrically around it so they sit side by side
for i, test_zone in enumerate(test_zones_classes, start=1):
    # Add boxplots to the figure
    bps = axes[0].boxplot([res['initial']['by_zone'][test_zone], res['rf']['by_zone'][test_zone], res['dn']['by_zone'][test_zone], res['cn']['by_zone'][test_zone]], positions=[i-1.5*bp_offset, i-0.5*bp_offset, i+0.5*bp_offset, i+1.5*bp_offset], showfliers=False, medianprops={'color':'black'}, widths=bp_width, patch_artist=True)
    for patch, colour in zip(bps['boxes'], bp_colours):
        patch.set_facecolor(colour)
    # Get RMSE associated with each set of residuals
    RMSE_initial = np.sqrt(np.nanmean(np.square(res['initial']['by_zone'][test_zone])))
    RMSE_rf = np.sqrt(np.nanmean(np.square(res['rf']['by_zone'][test_zone])))
    RMSE_dn = np.sqrt(np.nanmean(np.square(res['dn']['by_zone'][test_zone])))
    RMSE_cn = np.sqrt(np.nanmean(np.square(res['cn']['by_zone'][test_zone])))
    # Calculate percentage improvement made by method
    RMSE_rf_improve = (RMSE_initial - RMSE_rf)/RMSE_initial * 100.
    RMSE_dn_improve = (RMSE_initial - RMSE_dn)/RMSE_initial * 100.
    RMSE_cn_improve = (RMSE_initial - RMSE_cn)/RMSE_initial * 100.
    # Determine which method achieved the highest improvement (lowest RMSE) & label that group
    RMSEs_ordered = [RMSE_rf, RMSE_dn, RMSE_cn]
    best_index = np.argmin(RMSEs_ordered)
    best_improve = [RMSE_rf_improve, RMSE_dn_improve, RMSE_cn_improve][best_index]
    best_model = models[best_index]
    best_colour = label_colours[best_model]
    # x-position maps group i onto axes-fraction coordinates (centre of each of 4 columns)
    axes[0].annotate(u'RMSE \u2193\nby {:.0f}%'.format(best_improve), xy=(i/4 - 1/8, 0.98), xycoords='axes fraction', ha='center', va='top', size=10, color=best_colour)
# Add axis ticks & labels, and turn some spines off
axes[0].set_xticks(range(1,5))
# Single set_xticklabels call: the original set plain zone labels first, then immediately
# overwrote them with these pixel-count versions, so the first call was dead code
axes[0].set_xticklabels(['{}\n[{:,} pixels]'.format(label, len(res['initial']['by_zone'][z])) for label, z in zip(test_zones_labels, test_zones_classes)])
axes[0].set_ylabel('Residuals before/after correction [m]')
[axes[0].spines[edge].set_visible(False) for edge in ['top','right']]
# Shade a grey background band per zone & add a horizontal line for zero error
[axes[0].axvspan(j-0.45, j+0.45, alpha=0.25, facecolor='lightgrey', edgecolor='none') for j in range(1,5)]
axes[0].axhline(y=0, linestyle='dashed', color='black', linewidth=0.8, alpha=0.3)
axes[0].set_xlim(0.5,4.5)
# Add title
axes[0].set_title('a) By test zone/subset', x=0, ha='left', color='dimgrey', weight='bold', alpha=0.8)
# Plot 2: Residuals by land cover class (LCDB), as defined in geo_process_LCDB.py
# Loop over the seven LCDB class codes; each gets four side-by-side boxplots (initial + RF/DCN/FCN)
for j in range(1,8):
    # Add boxplots to the figure
    bps = axes[1].boxplot([res['initial']['by_lcdb'][j], res['rf']['by_lcdb'][j], res['dn']['by_lcdb'][j], res['cn']['by_lcdb'][j]], positions=[j-1.5*bp_offset, j-0.5*bp_offset, j+0.5*bp_offset, j+1.5*bp_offset], showfliers=False, medianprops={'color':'black'}, widths=bp_width, patch_artist=True)
    for patch, colour in zip(bps['boxes'], bp_colours):
        patch.set_facecolor(colour)
    # Get RMSE associated with each set of residuals
    RMSE_initial = np.sqrt(np.nanmean(np.square(res['initial']['by_lcdb'][j])))
    RMSE_rf = np.sqrt(np.nanmean(np.square(res['rf']['by_lcdb'][j])))
    RMSE_dn = np.sqrt(np.nanmean(np.square(res['dn']['by_lcdb'][j])))
    RMSE_cn = np.sqrt(np.nanmean(np.square(res['cn']['by_lcdb'][j])))
    # Calculate percentage improvement made by method
    RMSE_rf_improve = (RMSE_initial - RMSE_rf)/RMSE_initial * 100.
    RMSE_dn_improve = (RMSE_initial - RMSE_dn)/RMSE_initial * 100.
    RMSE_cn_improve = (RMSE_initial - RMSE_cn)/RMSE_initial * 100.
    # Determine which method achieved the highest improvement & label that point
    RMSEs_ordered = [RMSE_rf, RMSE_dn, RMSE_cn]
    best_index = np.argmin(RMSEs_ordered)
    best_RMSE = RMSEs_ordered[best_index]  # NOTE(review): unused local, kept for symmetry with sibling loops
    best_improve = [RMSE_rf_improve, RMSE_dn_improve, RMSE_cn_improve][best_index]
    best_model = models[best_index]
    best_colour = label_colours[best_model]
    # x-position maps class j onto axes-fraction coordinates (centre of each of 7 columns)
    axes[1].annotate(u'RMSE \u2193\nby {:.0f}%'.format(best_improve), xy=(j/7 - 1/14, 0.98), xycoords='axes fraction', ha='center', va='top', size=10, color=best_colour)
# Add axis ticks & labels, and turn some spines off
axes[1].set_xticks(range(1,8))
axes[1].set_xticklabels(['{}\n[{:,} pixels]'.format(diff_by_landcover[j]['label'], len(res['initial']['by_lcdb'][j])) for j in range(1,8)])
axes[1].set_ylabel('Residuals before/after correction [m]')
[axes[1].spines[edge].set_visible(False) for edge in ['top','right']]
# Colour background based on land cover class & add a horizontal line for zero error
[axes[1].axvspan(j-0.45, j+0.45, alpha=0.1, facecolor=diff_by_landcover[j]['colour'], edgecolor='none') for j in range(1,8)]
axes[1].axhline(y=0, linestyle='dashed', color='black', linewidth=0.8, alpha=0.3)
axes[1].set_xlim(0.5,7.5)
# Add title
axes[1].set_title('b) By land cover class', x=0, ha='left', color='dimgrey', weight='bold', alpha=0.8)
# Tighten up layout, add a common legend at the top & save figure
fig.tight_layout(pad=0, h_pad=1.8)
fig.subplots_adjust(top=0.93)
fig.legend(handles=bps['boxes'], labels=['Initial','RF','DCN','FCN'], frameon=False, ncol=4, loc='upper center', prop={'size':11})
# Use 'bbox_inches' (the Matplotlib savefig keyword for tight cropping);
# the original passed bbox='tight', which savefig does not recognise
fig.savefig('{}/residuals_boxplots_by_zone-lcdb.png'.format(folder_fig), dpi=300, bbox_inches='tight')
plt.close()
# 6b. Set up a figure summarising all residuals by FLHA & HAND zones
fig, axes = plt.subplots(nrows=2, figsize=(9,9))
# Plot 1: Residuals by FLHA zone (flood-prone vs not flood-prone)
for i, flha_label in enumerate(['flood','noflood']):
    i += 1  # shift to 1-based x positions for the boxplot groups
    # Add boxplots to the figure
    bps = axes[0].boxplot([res['initial']['by_flha'][flha_label], res['rf']['by_flha'][flha_label], res['dn']['by_flha'][flha_label], res['cn']['by_flha'][flha_label]], positions=[i-1.5*bp_offset, i-0.5*bp_offset, i+0.5*bp_offset, i+1.5*bp_offset], showfliers=False, medianprops={'color':'black'}, widths=bp_width, patch_artist=True)
    for patch, colour in zip(bps['boxes'], bp_colours):
        patch.set_facecolor(colour)
    # Get RMSE associated with each set of residuals
    RMSE_initial = np.sqrt(np.nanmean(np.square(res['initial']['by_flha'][flha_label])))
    RMSE_rf = np.sqrt(np.nanmean(np.square(res['rf']['by_flha'][flha_label])))
    RMSE_dn = np.sqrt(np.nanmean(np.square(res['dn']['by_flha'][flha_label])))
    RMSE_cn = np.sqrt(np.nanmean(np.square(res['cn']['by_flha'][flha_label])))
    # Calculate percentage improvement made by method
    RMSE_rf_improve = (RMSE_initial - RMSE_rf)/RMSE_initial * 100.
    RMSE_dn_improve = (RMSE_initial - RMSE_dn)/RMSE_initial * 100.
    RMSE_cn_improve = (RMSE_initial - RMSE_cn)/RMSE_initial * 100.
    # Determine which method achieved the highest improvement & label that point
    RMSEs_ordered = [RMSE_rf, RMSE_dn, RMSE_cn]
    best_index = np.argmin(RMSEs_ordered)
    best_RMSE = RMSEs_ordered[best_index]  # NOTE(review): unused local, kept for symmetry with sibling loops
    best_improve = [RMSE_rf_improve, RMSE_dn_improve, RMSE_cn_improve][best_index]
    best_model = models[best_index]
    best_colour = label_colours[best_model]
    # x-position maps group i onto axes-fraction coordinates (centre of each of 2 columns)
    axes[0].annotate(u'RMSE \u2193\nby {:.0f}%'.format(best_improve), xy=(i/2 - 1/4, 0.98), xycoords='axes fraction', ha='center', va='top', size=10, color=best_colour)
# Add axis ticks & labels, and turn some spines off
# NOTE(review): the two tick labels use mismatched bracket styles ('[...]' vs '(...)') around the pixel counts
axes[0].set_xticks(range(1,3))
axes[0].set_xticklabels(['Flood-prone\n[{:,} pixels]'.format(len(res['initial']['by_flha']['flood'])), 'Not flood-prone\n({:,} pixels)'.format(len(res['initial']['by_flha']['noflood']))])
axes[0].set_ylabel('Residuals before/after correction [m]')
[axes[0].spines[edge].set_visible(False) for edge in ['top','right']]
# Colour background based on flood proneness & add a horizontal line for zero error
[axes[0].axvspan(j-0.48+1, j+0.48+1, alpha=0.08, facecolor=flha_colour, edgecolor='none') for j, flha_colour in enumerate(['red','green'])]
axes[0].axhline(y=0, linestyle='dashed', color='black', linewidth=0.8, alpha=0.3)
axes[0].set_xlim(0.5,2.5)
# Add legend & title
axes[0].set_title('a) By flood susceptibility, based on resampled NIWA raster [39]', x=0, ha='left', color='dimgrey', weight='bold', alpha=0.8)
# Plot 2: Residuals by HAND range (five ranges, keys 'hand_1'..'hand_5')
for k in range(1,6):
    # Add boxplots to the figure
    h = 'hand_{}'.format(k)
    bps = axes[1].boxplot([res['initial']['by_hand'][h], res['rf']['by_hand'][h], res['dn']['by_hand'][h], res['cn']['by_hand'][h]], positions=[k-1.5*bp_offset, k-0.5*bp_offset, k+0.5*bp_offset, k+1.5*bp_offset], showfliers=False, medianprops={'color':'black'}, widths=bp_width, patch_artist=True)
    for patch, colour in zip(bps['boxes'], bp_colours):
        patch.set_facecolor(colour)
    # Get RMSE associated with each set of residuals
    RMSE_initial = np.sqrt(np.nanmean(np.square(res['initial']['by_hand'][h])))
    RMSE_rf = np.sqrt(np.nanmean(np.square(res['rf']['by_hand'][h])))
    RMSE_dn = np.sqrt(np.nanmean(np.square(res['dn']['by_hand'][h])))
    RMSE_cn = np.sqrt(np.nanmean(np.square(res['cn']['by_hand'][h])))
    # Calculate percentage improvement made by method
    RMSE_rf_improve = (RMSE_initial - RMSE_rf)/RMSE_initial * 100.
    RMSE_dn_improve = (RMSE_initial - RMSE_dn)/RMSE_initial * 100.
    RMSE_cn_improve = (RMSE_initial - RMSE_cn)/RMSE_initial * 100.
    # Determine which method achieved the highest improvement & label that point
    RMSEs_ordered = [RMSE_rf, RMSE_dn, RMSE_cn]
    best_index = np.argmin(RMSEs_ordered)
    best_RMSE = RMSEs_ordered[best_index]  # NOTE(review): unused local, kept for symmetry with sibling loops
    best_improve = [RMSE_rf_improve, RMSE_dn_improve, RMSE_cn_improve][best_index]
    best_model = models[best_index]
    best_colour = label_colours[best_model]
    # x-position maps range k onto axes-fraction coordinates (centre of each of 5 columns)
    axes[1].annotate(u'RMSE \u2193\nby {:.0f}%'.format(best_improve), xy=(k/5 - 1/10, 0.98), xycoords='axes fraction', ha='center', va='top', size=10, color=best_colour)
# Add axis ticks & labels, and turn some spines off
axes[1].set_xticks(range(1,6))
axes[1].set_xticklabels(['{}\n[{:,} pixels]'.format(label, len(res['initial']['by_hand'][h])) for label, h in zip(['0 - 2 m','2 - 5 m','5 - 10 m','10 - 20 m','> 20 m'], ['hand_1','hand_2','hand_3','hand_4','hand_5'])])
axes[1].set_ylabel('Residuals before/after correction [m]')
[axes[1].spines[edge].set_visible(False) for edge in ['top','right']]
# Colour background based on graded blues & add a horizontal line for zero error
[axes[1].axvspan(k-0.45, k+0.45, alpha=0.1, facecolor=hand_colours[k-1], edgecolor='none') for k in range(1,6)]
axes[1].axhline(y=0, linestyle='dashed', color='black', linewidth=0.8, alpha=0.3)
axes[1].set_xlim(0.5,5.5)
# Add legend & title
axes[1].set_title('b) By height above nearest drainage (HAND) [68]', x=0, ha='left', color='dimgrey', weight='bold', alpha=0.8)
# Tighten up layout, add a common legend at the top & save figure
fig.tight_layout(pad=0, h_pad=1.8)
fig.subplots_adjust(top=0.93)
fig.legend(handles=bps['boxes'], labels=['Initial','RF','DCN','FCN'], frameon=False, ncol=4, loc='upper center', prop={'size':11})
# Use 'bbox_inches' (the Matplotlib savefig keyword for tight cropping);
# the original passed bbox='tight', which savefig does not recognise
fig.savefig('{}/residuals_boxplots_by_flha-hand.png'.format(folder_fig), dpi=300, bbox_inches='tight')
plt.close()
###############################################################################
# 7. Check distribution of land cover, elevations & slopes in each dataset   #
###############################################################################
# Read in 1D inputs for each dataset (training, validation, testing)
vectors_train = pd.read_csv('{}/Input1D_Train.csv'.format(folder_input_1D))
vectors_dev = pd.read_csv('{}/Input1D_Dev.csv'.format(folder_input_1D))
vectors_test = pd.read_csv('{}/Input1D_Test.csv'.format(folder_input_1D))
# 7a. Generate plot showing distribution of elevations & slope (by SRTM) for each dataset
fig, axes = plt.subplots(nrows=2, figsize=(8, 7))
# Extract elevation values from each dataset
z_train = vectors_train['srtm_z'].values
z_dev = vectors_dev['srtm_z'].values
z_test = vectors_test['srtm_z'].values
# Extract slope values from each dataset
slope_train = vectors_train['srtm_slope'].values
slope_dev = vectors_dev['srtm_slope'].values
slope_test = vectors_test['srtm_slope'].values
# Add elevation data as histograms (normalised step histograms, log-scaled counts)
axes[0].hist([z_train, z_dev, z_test], color=dataset_colours, label=['Train','Validation','Test'], linewidth=2, alpha=0.5, bins=50, density=True, histtype='step', log=True)
axes[0].set_xlabel('SRTM Elevations [m]')
axes[0].set_ylabel('Relative frequency (log-scale)')
axes[0].set_title('a) Distribution of elevation values, by input dataset', x=0, size=10, ha='left', color='dimgrey', weight='bold', alpha=0.8)
[axes[0].spines[edge].set_visible(False) for edge in ['top','right']]
# Reverse handle/label order in the legend (presumably to match the visual stacking of the curves)
h0, l0 = axes[0].get_legend_handles_labels()
axes[0].legend(h0[::-1], l0[::-1], frameon=False, loc='upper right')
# Add slope data as histograms
axes[1].hist([slope_train, slope_dev, slope_test], color=dataset_colours, label=['Train','Validation','Test'], linewidth=2, alpha=0.5, bins=50, density=True, histtype='step', log=True)
axes[1].set_xlabel('Slopes - derived from SRTM elevations [%]')
axes[1].set_ylabel('Relative frequency (log-scale)')
axes[1].set_title('b) Distribution of slope values, by input dataset', x=0, size=10, ha='left', color='dimgrey', weight='bold', alpha=0.8)
[axes[1].spines[edge].set_visible(False) for edge in ['top','right']]
h1, l1 = axes[1].get_legend_handles_labels()
axes[1].legend(h1[::-1], l1[::-1], frameon=False, loc='upper right')
# Save figure
fig.tight_layout(h_pad=1.8)
fig.savefig('{}/inputdata_distribution_srtm_z-slope.png'.format(folder_fig), dpi=300)
plt.close()
# 7b. Generate plot showing distribution of land cover groups for each dataset
fig, axes = plt.subplots(nrows=3, sharex=True, figsize=(8,5))
# Colours & labels for LCDB classes 1-7, taken from the diff_by_landcover lookup
lc_colours = [diff_by_landcover[i]['colour'] for i in range(1,8)]
lc_labels = [diff_by_landcover[i]['label'] for i in range(1,8)]
# Plot 1: Training data - relative frequency of each LCDB class, annotated as percentages
lc_train_counts = [len(vectors_train[vectors_train['lcdb']==i].index) for i in range(1,8)]
lc_train_freq = [count/sum(lc_train_counts) for count in lc_train_counts]
axes[0].bar(x=range(1,8), height=lc_train_freq, color=lc_colours, alpha=0.7)
[axes[0].annotate('{:.1f}%'.format(freq*100), xy=(i, freq), ha='center', va='bottom') for i, freq in zip(range(1,8), lc_train_freq)]
axes[0].annotate('a) Training dataset', xy=(0.02, 0.98), xycoords='axes fraction', ha='left', va='top', fontweight='bold', color='dimgrey', alpha=0.8)
# Plot 2: Validation data
lc_dev_counts = [len(vectors_dev[vectors_dev['lcdb']==i].index) for i in range(1,8)]
lc_dev_freq = [count/sum(lc_dev_counts) for count in lc_dev_counts]
axes[1].bar(x=range(1,8), height=lc_dev_freq, color=lc_colours, alpha=0.7)
[axes[1].annotate('{:.1f}%'.format(freq*100), xy=(i, freq), ha='center', va='bottom') for i, freq in zip(range(1,8), lc_dev_freq)]
axes[1].annotate('b) Validation dataset', xy=(0.02, 0.98), xycoords='axes fraction', ha='left', va='top', fontweight='bold', color='dimgrey', alpha=0.8)
# Plot 3: Testing data
lc_test_counts = [len(vectors_test[vectors_test['lcdb']==i].index) for i in range(1,8)]
lc_test_freq = [count/sum(lc_test_counts) for count in lc_test_counts]
axes[2].bar(x=range(1,8), height=lc_test_freq, color=lc_colours, alpha=0.7)
[axes[2].annotate('{:.1f}%'.format(freq*100), xy=(i, freq), ha='center', va='bottom') for i, freq in zip(range(1,8), lc_test_freq)]
axes[2].annotate('c) Testing dataset', xy=(0.02, 0.98), xycoords='axes fraction', ha='left', va='top', fontweight='bold', color='dimgrey', alpha=0.8)
# Add figure labels, etc (comprehensions used purely for their side effects on each axis)
[[[axes[i].spines[edge].set_visible(False)] for edge in ['top','right']] for i in [0,1,2]]
[axes[i].set_xticks(range(1,8)) for i in [0,1,2]]
[axes[i].set_ylabel('Frequency') for i in [0,1,2]]
axes[2].set_xticklabels(lc_labels)
fig.tight_layout()
fig.savefig('{}/inputdata_distribution_lcdb.png'.format(folder_fig), dpi=300)
plt.close()
###############################################################################
# 8. 'Graphical abstract' image for Remote Sensing journal requirements      #
###############################################################################
# Instructions from Remote Sensing journal
# - The minimum required size for the GA is 560 × 1100 pixels (height × width)
# - When submitting larger images, please make sure to keep to the same ratio
# - High-quality illustration or diagram (PNG, JPEG, EPS, SVG, PSD or AI)
# - Written text in a GA should be clear and easy to read, using Times/Arial/Courier/Helvetica/Ubuntu/Calibri
# Set figure height & width based on required dimensions (preserving the 560:1100 aspect ratio)
width = 16
height = 560/1100 * width
# Read in MRL18_WPE data for the convolutional network results (ONLY test dataset pixels)
zone = 'MRL18_WPE'
cn_predictions = np.load('{}/predictions/convnet_ensemble_{}_prediction_intact.npy'.format(folder_results_convnet, zone))
cn_cor, cn_elv, cn_res, dtm, srtm, diff, merit, flha, hand = process_2D_predictions(zone, cn_predictions, 'test_clip', no_data=-9999)
# Set up grid for 3D plot, matching the corrected-elevation array's shape
ny, nx = cn_elv.shape
x = range(nx)
y = range(ny)
X, Y = np.meshgrid(x, y)
# Develop consistent colourmap (same as that used previously for this zone)
elv_min, elv_max = test_zones_props[zone]['elv_cbar_range']
elv_cmap = cm.terrain
# NOTE(review): set_bad mutates the shared Matplotlib 'terrain' colormap object in place,
# so any later use of cm.terrain elsewhere inherits this - consider copying the cmap first
elv_cmap.set_bad(color='whitesmoke')
elv_norm = colors.Normalize(vmin=elv_min, vmax=elv_max)
# Calculate RMSE for the SRTM & FCN-corrected SRTM (compared to DTM)
RMSE_srtm = np.sqrt(np.mean(np.square(diff)))
RMSE_fcn = np.sqrt(np.mean(np.square(cn_res)))
improve = (RMSE_srtm-RMSE_fcn)/RMSE_srtm * 100
# Define plotting parameters for the 3D visualisation
stride = 1   # row/column stride for plot_surface sampling
offset = 50
# Define colours to be used for map inset (RGB tuples normalised to 0-1)
colour_land = (215/255, 194/255, 158/255)
colour_ocean = (190/255, 232/255, 255/255)
# 8a. 3D terrain visualisations, with horizontal bar charts summarising change in RMSE
fig = plt.figure(figsize=(width, height))
# 3x2 grid: wide left column holds the three 3D terrain panels; the narrow right
# column is reserved (placeholders) for the bar chart & inset map added later
gs = gridspec.GridSpec(3, 2, width_ratios=[6, 1], height_ratios=[1,1,1])
ax1 = fig.add_subplot(gs[0], projection='3d')
ax2 = fig.add_subplot(gs[2], projection='3d')
ax3 = fig.add_subplot(gs[4], projection='3d')
ax4_hold = fig.add_subplot(gs[1])
ax5_hold = fig.add_subplot(gs[3])
ax6_hold = fig.add_subplot(gs[5])
# Top: DTM
ax1.plot_surface(X, Y, dtm, cmap=elv_cmap, norm=elv_norm, linewidth=0, antialiased=False, rstride=stride, cstride=stride)
# Middle: SRTM
ax2.plot_surface(X, Y, srtm, cmap=elv_cmap, norm=elv_norm, linewidth=0, antialiased=False, rstride=stride, cstride=stride)
# Bottom: FCN-corrected SRTM
ax3.plot_surface(X, Y, cn_elv, cmap=elv_cmap, norm=elv_norm, linewidth=0, antialiased=False, rstride=stride, cstride=stride)
# General properties to be applied to each ax: same limits & viewing angle so panels are comparable
for ax in [ax1, ax2, ax3]:
    ax.set_xlim((0,nx))
    ax.set_ylim((0,ny))
    ax.set_zlim((0,20))
    ax.view_init(65, 70)
    ax.set_axis_off()
    ax.patch.set_alpha(0)   # transparent panel background
# Leave the others empty - essentially just placeholders to keep space open for the map & violin plots
for ax in [ax4_hold, ax5_hold, ax6_hold]:
    ax.set_axis_off()
# Negative padding deliberately squeezes the panels together
fig.tight_layout(pad=0, h_pad=-7, w_pad=-7)
# Add axes for the horizontal bar chart comparing RMSE before/after correction
ax_bars = fig.add_axes([0.81, 0.1, 0.19, 0.58])
ax_bars.barh(y=[0,1], width=[RMSE_fcn, RMSE_srtm], height=0.7, color='firebrick', alpha=0.5)
ax_bars.grid(axis='x', which='major', color='dimgrey', alpha=0.25)
ax_bars.set_xlabel('Root Mean Square Error [m]', size=13)
ax_bars.tick_params(axis='x', which='both', labelsize=11)
ax_bars.set_yticks([0,1])
ax_bars.set_yticklabels(['Corrected\nSRTM','Original\nSRTM'], size=13)
[ax_bars.spines[side].set_visible(False) for side in ['top','right','bottom']]
ax_bars.set_title('Impact of correcting SRTM\nusing model predictions', size=14)
# Fixed format spec: second value previously used '{:.2}' (2 significant figures)
# instead of '{:.2f}' (2 decimal places), inconsistent with the first value
ax_bars.annotate('Applying corrections\npredicted by model\nreduces RMSE from\n{:.2f}m to {:.2f}m\n(-{:.1f}%)'.format(RMSE_srtm, RMSE_fcn, improve), xy=(1.05, 0.35), xycoords='data', ha='left', va='top', color='firebrick', size=12)
# Dashed connector arrows pointing from the annotation area towards the two bars
ax_bars.annotate('', xy=(-0.6, 0.23), xycoords='axes fraction', xytext=(-0.35, 0.23), arrowprops=dict(width=3, headwidth=3, headlength=1, linestyle='--', fc='dimgrey', ec='none', alpha=0.5), ha='left', va='center')
ax_bars.annotate('', xy=(-0.6, 0.77), xycoords='axes fraction', xytext=(-0.3, 0.77), arrowprops=dict(width=3, headwidth=3, headlength=1, linestyle='--', fc='dimgrey', ec='none', alpha=0.5), ha='left', va='center')
# Add axes for the inset locator map (Mercator projection of Aotearoa New Zealand)
ax_map = fig.add_axes([0.91, 0.79, 0.09, 0.2])
m = Basemap(projection='merc', resolution='h', llcrnrlat=-48.1, urcrnrlat=-33.6, llcrnrlon=164.9, urcrnrlon=179.9, ax=ax_map)
m.drawcoastlines(linewidth=0.1, color='none')
m.drawmapboundary(fill_color=colour_ocean, color='none')
m.fillcontinents(color=colour_land, lake_color=colour_land)
# Map coordinates for test zone to (x, y) for plotting
zone_x, zone_y = m(174.0, -41.4743)
ax_map.plot(zone_x, zone_y, marker='o', markeredgecolor='black', markeredgewidth=1, markerfacecolor='white', markersize=5)
plt.annotate('Test zone (data unseen during model training)', xy=(0.91, 0.985), xycoords='figure fraction', ha='right', va='top', size=14)
plt.annotate('Wairau Plains East, Marlborough Region\nAotearoa New Zealand', xy=(0.91, 0.95), xycoords='figure fraction', ha='right', va='top', size=13, color='dimgrey')
# Add general explanatory labels (positions in figure-fraction coordinates, tuned by eye)
plt.annotate('We trained a fully-convolutional neural network to convert a Digital Surface Model (DSM)\nto a Digital Terrain Model (DTM), by predicting vertical biases due to vegetation and\nbuilt-up areas, using two DSMs, multi-spectral imagery (Landsat-7 & -8),\nnight-time light, and maps of forest cover & canopy height\n(all free & global datasets).', xy=(0.01, 0.985), xycoords='figure fraction', ha='left', va='top', size=14)
plt.annotate('a) DTM (LiDAR)\nReference data', xy=(0.01, 0.72), xycoords='figure fraction', ha='left', va='bottom', size=14)
plt.annotate('LiDAR-derived DTM, resampled from\n1m resolution to match SRTM grid (~23m),\nwith river channel clearly visible.', xy=(0.01, 0.63), xycoords='figure fraction', ha='left', va='bottom', size=14, color='dimgrey')
plt.annotate('', xy=(0.235, 0.732), xycoords='figure fraction', xytext=(0.107, 0.732), arrowprops=dict(fc='black', ec='none', width=1.5, headwidth=1.5, headlength=1, alpha=0.5), ha='left', va='center')
plt.annotate('b) DSM (SRTM)\nBefore correction', xy=(0.01, 0.461), xycoords='figure fraction', ha='left', va='bottom', size=14)
plt.annotate("Original SRTM DSM, showing large\nvertical biases along river channel and\nrandom 'speckle' noise spread across the\nfloodplain.", xy=(0.01, 0.345), xycoords='figure fraction', ha='left', va='bottom', size=14, color='dimgrey')
plt.annotate('', xy=(0.205, 0.475), xycoords='figure fraction', xytext=(0.12, 0.475), arrowprops=dict(fc='black', ec='none', width=1.5, headwidth=1.5, headlength=1, alpha=0.5), ha='left', va='center')
plt.annotate('c) DSM (SRTM)\nAfter correction', xy=(0.01, 0.17), xycoords='figure fraction', ha='left', va='bottom', size=14)
plt.annotate('SRTM after applying corrections\npredicted by the fully-convolutional\nneural network, with a {:.0f}% reduction\nin RMSE and river channels better resolved.'.format(improve), xy=(0.01, 0.05), xycoords='figure fraction', ha='left', va='bottom', size=14, color='dimgrey')
plt.annotate('', xy=(0.2, 0.182), xycoords='figure fraction', xytext=(0.11, 0.182), arrowprops=dict(fc='black', ec='none', width=1.5, headwidth=1.5, headlength=1, alpha=0.5), ha='left', va='center')
# Save figure
fig.savefig('{}/graphical_abstract_barh.png'.format(folder_fig), dpi=300)
plt.close()
# 8b. 3D terrain visualisations, with horizontal boxplots summarising change in residuals
fig = plt.figure(figsize=(width, height))
# Same 3x2 layout as Figure 8a: 3D terrain panels on the left, placeholders on the right
gs = gridspec.GridSpec(3, 2, width_ratios=[6, 1], height_ratios=[1,1,1])
ax1 = fig.add_subplot(gs[0], projection='3d')
ax2 = fig.add_subplot(gs[2], projection='3d')
ax3 = fig.add_subplot(gs[4], projection='3d')
ax4_hold = fig.add_subplot(gs[1])
ax5_hold = fig.add_subplot(gs[3])
ax6_hold = fig.add_subplot(gs[5])
# Top: DTM
ax1.plot_surface(X, Y, dtm, cmap=elv_cmap, norm=elv_norm, linewidth=0, antialiased=False, rstride=stride, cstride=stride)
# Middle: SRTM
ax2.plot_surface(X, Y, srtm, cmap=elv_cmap, norm=elv_norm, linewidth=0, antialiased=False, rstride=stride, cstride=stride)
# Bottom: FCN-corrected SRTM
ax3.plot_surface(X, Y, cn_elv, cmap=elv_cmap, norm=elv_norm, linewidth=0, antialiased=False, rstride=stride, cstride=stride)
# General properties to be applied to each ax: same limits & viewing angle so panels are comparable
for ax in [ax1, ax2, ax3]:
    ax.set_xlim((0,nx))
    ax.set_ylim((0,ny))
    ax.set_zlim((0,20))
    ax.view_init(65, 70)
    ax.set_axis_off()
    ax.patch.set_alpha(0)   # transparent panel background
# Leave the others empty - essentially just placeholders to keep space open for the map & violin plots
for ax in [ax4_hold, ax5_hold, ax6_hold]:
    ax.set_axis_off()
# Negative padding deliberately squeezes the panels together
fig.tight_layout(pad=0, h_pad=-7, w_pad=-7)
# Add axes for the horizontal boxplots of residuals before/after correction
ax_box = fig.add_axes([0.77, 0.1, 0.22, 0.58])
ax_box.boxplot(x=[cn_res.flatten(), diff.flatten()], positions=[0,1.1], boxprops={'linewidth':1.5}, whiskerprops={'linewidth':1.5}, medianprops={'color':'white', 'linewidth':2}, vert=False, showfliers=False)
# Add jittered points to plot (random vertical scatter around each boxplot's position)
diff_jitter = np.random.normal(loc=1.1, scale=0.08, size=diff.size)
ax_box.scatter(diff.flatten(), diff_jitter, color='steelblue', s=8, alpha=0.15)
cn_res_jitter = np.random.normal(loc=0.0, scale=0.08, size=cn_res.size)
ax_box.scatter(cn_res.flatten(), cn_res_jitter, color='steelblue', s=8, alpha=0.15)
ax_box.set_yticks([])
[ax_box.spines[side].set_visible(False) for side in ['left','top','right']]
ax_box.set_title('Change in residuals\nafter applying corrections', size=14)
ax_box.grid(axis='x', which='major', color='dimgrey', alpha=0.25)
ax_box.set_xlabel('Residuals [m]\n(compared to reference DTM)', size=13)
ax_box.tick_params(axis='x', which='both', labelsize=11)
ax_box.patch.set_alpha(0)
# Connector arrows pointing from the 3D panels towards the two boxplots
ax_box.annotate('', xy=(0, 0.23), xycoords='axes fraction', xytext=(-0.33, 0.23), arrowprops=dict(fc='black', ec='none', width=1.5, headwidth=1.5, headlength=1, alpha=0.5), ha='left', va='center')
ax_box.annotate('', xy=(0, 0.76), xycoords='axes fraction', xytext=(-0.33, 0.76), arrowprops=dict(fc='black', ec='none', width=1.5, headwidth=1.5, headlength=1, alpha=0.5), ha='left', va='center')
# Fixed format spec: second value previously used '{:.2}' (2 significant figures)
# instead of '{:.2f}' (2 decimal places), inconsistent with the first value
ax_box.annotate('Applying corrections predicted\nby model reduces RMSE from\n{:.2f}m to {:.2f}m (-{:.1f}%)'.format(RMSE_srtm, RMSE_fcn, improve), xy=(-2, 0.66), xycoords='data', ha='left', va='top', color='dimgrey', size=13)
# Add axes for the inset locator map (Mercator projection of Aotearoa New Zealand)
ax_map = fig.add_axes([0.91, 0.79, 0.09, 0.2])
m = Basemap(projection='merc', resolution='h', llcrnrlat=-48.1, urcrnrlat=-33.6, llcrnrlon=164.9, urcrnrlon=179.9, ax=ax_map)
m.drawcoastlines(linewidth=0.1, color='none')
m.drawmapboundary(fill_color=colour_ocean, color='none')
m.fillcontinents(color=colour_land, lake_color=colour_land)
# Map coordinates for test zone to (x, y) for plotting
zone_x, zone_y = m(174.0, -41.4743)
ax_map.plot(zone_x, zone_y, marker='o', markeredgecolor='black', markeredgewidth=1, markerfacecolor='white', markersize=5)
plt.annotate('Test zone (data unseen during model training)', xy=(0.91, 0.985), xycoords='figure fraction', ha='right', va='top', size=14)
plt.annotate('Wairau Plains East, Marlborough Region\nAotearoa New Zealand', xy=(0.91, 0.95), xycoords='figure fraction', ha='right', va='top', size=13, color='dimgrey')
# Add general explanatory labels (positions in figure-fraction coordinates, tuned by eye)
plt.annotate('We trained a fully-convolutional neural network to convert a Digital Surface Model (DSM)\nto a Digital Terrain Model (DTM), by predicting vertical biases due to vegetation and\nbuilt-up areas, using two DSMs, multi-spectral imagery (Landsat-7 & -8),\nnight-time light, and maps of forest cover & canopy height\n(all free & global datasets).', xy=(0.01, 0.985), xycoords='figure fraction', ha='left', va='top', size=14)
plt.annotate('a) DTM (LiDAR)\nReference data', xy=(0.01, 0.72), xycoords='figure fraction', ha='left', va='bottom', size=14)
plt.annotate('LiDAR-derived DTM, resampled from\n1m resolution to match SRTM grid (~23m),\nwith river channel clearly visible.', xy=(0.01, 0.63), xycoords='figure fraction', ha='left', va='bottom', size=14, color='dimgrey')
plt.annotate('', xy=(0.235, 0.732), xycoords='figure fraction', xytext=(0.107, 0.732), arrowprops=dict(fc='black', ec='none', width=1.5, headwidth=1.5, headlength=1, alpha=0.5), ha='left', va='center')
plt.annotate('b) DSM (SRTM)\nBefore correction', xy=(0.01, 0.461), xycoords='figure fraction', ha='left', va='bottom', size=14)
plt.annotate("Original SRTM DSM, showing large\nvertical biases along river channel and\nrandom 'speckle' noise spread across the\nfloodplain.", xy=(0.01, 0.345), xycoords='figure fraction', ha='left', va='bottom', size=14, color='dimgrey')
plt.annotate('', xy=(0.205, 0.475), xycoords='figure fraction', xytext=(0.12, 0.475), arrowprops=dict(fc='black', ec='none', width=1.5, headwidth=1.5, headlength=1, alpha=0.5), ha='left', va='center')
plt.annotate('c) DSM (SRTM)\nAfter correction', xy=(0.01, 0.17), xycoords='figure fraction', ha='left', va='bottom', size=14)
plt.annotate('SRTM after applying corrections\npredicted by the fully-convolutional\nneural network, with a {:.0f}% reduction\nin RMSE and river channels better resolved.'.format(improve), xy=(0.01, 0.05), xycoords='figure fraction', ha='left', va='bottom', size=14, color='dimgrey')
plt.annotate('', xy=(0.2, 0.182), xycoords='figure fraction', xytext=(0.11, 0.182), arrowprops=dict(fc='black', ec='none', width=1.5, headwidth=1.5, headlength=1, alpha=0.5), ha='left', va='center')
# Save figure
fig.savefig('{}/graphical_abstract_boxplots.png'.format(folder_fig), dpi=300)
plt.close()
###############################################################################
# 9. Extract cross-sections as part of results illustration #
###############################################################################
# Define a function that takes a filepath (to a corrected-top GeoTIFF) & two lon-lat coordinate pairs, and returns the elevation profile between those two points
def get_cross_section(tif_path, point_a, point_b, n_points):
    """Sample an elevation profile between two lon-lat points.

    Parameters
    ----------
    tif_path : str
        Path to a single-band GeoTIFF of elevations.
    point_a, point_b : tuple of float
        (lon, lat) coordinates of the section start and end points.
    n_points : int
        Number of samples taken along the line. Samples are taken at the
        start of each step, so point_b itself is not sampled.

    Returns
    -------
    list
        Raster values at each sample location, in order from point_a.

    Raises
    ------
    IOError
        If the raster at tif_path cannot be opened.
    """
    # Read the raster into memory, extracting its geotransform & the inverse
    ds = gdal.Open(tif_path, gdalconst.GA_ReadOnly)
    if ds is None:
        # Bug fix: gdal.Open returns None (rather than raising) on failure,
        # which previously surfaced as an obscure AttributeError below.
        raise IOError('Could not open raster: {}'.format(tif_path))
    array = ds.GetRasterBand(1).ReadAsArray()
    gt = ds.GetGeoTransform()          # maps raster grid to lon-lat coordinates
    gt_inv = gdal.InvGeoTransform(gt)  # maps lon-lat coordinates to raster grid
    ds = None                          # release the GDAL dataset handle
    # Calculate x & y steps between the points (in degrees)
    dx = (point_b[0] - point_a[0]) / n_points
    dy = (point_b[1] - point_a[1]) / n_points
    # Initialise a list to hold elevations extracted from the topography raster
    elevations = []
    # Loop through all steps along the line between the two points, getting the underlying raster value
    for i in range(n_points):
        # Get the step coordinates in terms of lon-lat
        step_coords = (point_a[0] + i*dx, point_a[1] + i*dy)
        # Convert these coordinates to array indices via the inverse
        # geotransform (int() truncation matches the GDAL pixel convention)
        array_x = int(gt_inv[0] + gt_inv[1]*step_coords[0] + gt_inv[2]*step_coords[1])
        array_y = int(gt_inv[3] + gt_inv[4]*step_coords[0] + gt_inv[5]*step_coords[1])
        elevations.append(array[array_y, array_x])
    return elevations
# Define a dictionary describing the cross-sections of interest, for each test zone
# (keys are zone codes; each maps a section letter to its start/end lon-lat pair)
cs_dict = {'MRL18_WPE':{'A':{'start':(173.98786, -41.46714), 'end':(173.97545, -41.47835)},
                        'B':{'start':(174.00127, -41.47945), 'end':(173.99717, -41.48526)}},
           'MRL18_WVL':{'C':{'start':(173.31079, -41.61877), 'end':(173.31722, -41.63086)},
                        'D':{'start':(173.36445, -41.60312), 'end':(173.36751, -41.61092)}},
           'TSM16_ATG':{'E':{'start':(172.79475, -40.85864), 'end':(172.79853, -40.85309)},
                        'F':{'start':(172.79357, -40.82249), 'end':(172.79936, -40.82627)}}}
# Set up a figure to show two cross-sections for each test zone
fig, axes = plt.subplots(nrows=6, sharex=True, figsize=(4.4, 9.25))
# Define the number of points to extract for each section
n_points = 1000
# Loop through each cross-section in turn
for zone in test_zones:
    # Define a dictionary describing the topography datasets available
    dem_dict = {'srtm':{'label':'SRTM', 'colour':'black', 'lw':0.8, 'path':'{}/{}/SRTM_{}_Z.tif'.format(folder_srtm, zone, zone)},
                'dtm':{'label':'LiDAR DTM', 'colour':'red', 'lw':0.9, 'path':'{}/{}/DTM_{}_30m_Median.tif'.format(folder_dtm, zone, zone)},
                'rf':{'label':'RF', 'colour':label_colours['rf'], 'lw':1.2, 'path':'{}/TIF/rf_elevations_{}_test.tif'.format(folder_results_rf, zone)},
                'dcn':{'label':'DCN', 'colour':label_colours['dcn'], 'lw':1.2, 'path':'{}/TIF/densenet_elevations_{}_test.tif'.format(folder_results_densenet, zone)},
                'fcn':{'label':'FCN', 'colour':label_colours['fcn'], 'lw':1.2, 'path':'{}/TIF/convnet_elevations_{}_test.tif'.format(folder_results_convnet, zone)}}
    # Loop through the two cross-sections for each zone
    for i, cs in enumerate(['A','B','C','D','E','F']):
        if cs in cs_dict[zone].keys():
            # Get the start & end points of that cross-section
            cs_start = cs_dict[zone][cs]['start']
            cs_end = cs_dict[zone][cs]['end']
            # Loop through the DEMs available, adding its cross-section to the figure
            for dem in ['srtm','dtm','rf','dcn','fcn']:
                dem_path = dem_dict[dem]['path']
                dem_label = dem_dict[dem]['label']
                dem_colour = dem_dict[dem]['colour']
                dem_lw = dem_dict[dem]['lw']
                # Extract cross-section
                cs_elevs = get_cross_section(dem_path, cs_start, cs_end, n_points)
                # Add profile to appropriate axes
                axes[i].plot(cs_elevs, label=dem_label, color=dem_colour, linewidth=dem_lw)
            # Force a 5m interval in the y-axis ticks used
            yticks = axes[i].get_yticks()
            axes[i].yaxis.set_ticks(np.arange(yticks[1], yticks[-1], 5))
            # Turn off all spines, the x-axis & the y-axis tick marks
            axes[i].xaxis.set_visible(False)
            [axes[i].spines[edge].set_visible(False) for edge in ['top','right','bottom','left']]
            axes[i].yaxis.set_tick_params(length=0)
            axes[i].tick_params(axis='y', labelsize=7)
            # Add faded y-axis grid lines & y-axis label
            axes[i].grid(axis='y', which='major', color='dimgrey', alpha=0.1)
            axes[i].set_ylabel('Elevation [m]', fontsize=8)
            # Add annotations indicating section name/code
            axes[i].annotate(cs, xy=(0.05, 0.0), xycoords='axes fraction', ha='left', va='bottom', fontweight='bold', alpha=0.8)
            axes[i].annotate("{} '".format(cs), xy=(0.95, 0.0), xycoords='axes fraction', ha='right', va='bottom', fontweight='bold', alpha=0.8)
# Tighten layout & make space for the legend
fig.tight_layout(h_pad=1, w_pad=0)
plt.subplots_adjust(top=0.97)
# Add overall legend & align labels
# NOTE(review): this relies on `i` leaking out of the inner loop above
# (axes[i-1] = axes[4] once the loops finish) -- confirm this is intended.
legend_handles, legend_labels = axes[i-1].get_legend_handles_labels()
fig.legend(legend_handles, legend_labels, frameon=False, loc='upper center', ncol=5, columnspacing=1.5, handletextpad=0.3, prop={'size':8})
fig.align_labels()
# Save figure
fig.savefig('{}/results_sections.png'.format(folder_fig), dpi=300, bbox_inches='tight')
plt.close()
###############################################################################
# 10. Assess RMSE for SRTM 1-20m zone, following Kulp & Strauss (CoastalDEM)  #
###############################################################################
# Loop through each test zone
for zone in test_zones:
    # Import Random Forest arrays - ONLY test dataset pixels
    rf_predictions = np.load('{}/RF_Predictions_ByZone_{}.npy'.format(folder_results_rf, zone))
    rf_cor, rf_elv, rf_res, _,_,_,_,_,_ = process_1D_predictions(zone, rf_predictions, 'test_clip', no_data=-9999)
    # Import Densenet arrays - ONLY test dataset pixels
    dn_predictions = np.load('{}/densenet_Predictions_ByZone_{}.npy'.format(folder_results_densenet, zone))
    dn_cor, dn_elv, dn_res, _,_,_,_,_,_ = process_1D_predictions(zone, dn_predictions, 'test_clip', no_data=-9999)
    # Import Convnet arrays - ONLY test dataset pixels
    cn_predictions = np.load('{}/predictions/convnet_ensemble_{}_prediction_intact.npy'.format(folder_results_convnet, zone))
    cn_cor, cn_elv, cn_res, dtm, srtm, diff, merit, flha, hand = process_2D_predictions(zone, cn_predictions, 'test_clip', no_data=-9999)
    # Mask all arrays wherever no_data values are present
    srtm_m = np.ma.masked_equal(srtm, no_data)
    dtm_m = np.ma.masked_equal(dtm, no_data)
    diff_m = np.ma.masked_equal(diff, no_data)
    rf_elv_m = np.ma.masked_equal(rf_elv, no_data)
    dn_elv_m = np.ma.masked_equal(dn_elv, no_data)
    cn_elv_m = np.ma.masked_equal(cn_elv, no_data)
    # Process only if that zone's SRTM array contains elevations between 1-20 m
    if np.any(srtm[(srtm_m >= 1) & (srtm_m <= 20)]):
        # Filter above arrays to only include values for which SRTM is between 1 and 20 m
        srtm_m_f = srtm_m[np.where((srtm_m >= 1) & (srtm_m <= 20))]
        dtm_m_f = dtm_m[np.where((srtm_m >= 1) & (srtm_m <= 20))]
        # NOTE(review): `diff__f` (double underscore) breaks the `_m_f` naming
        # pattern used by its neighbours, and the variable is never used.
        diff__f = diff_m[np.where((srtm_m >= 1) & (srtm_m <= 20))]
        rf_elv_m_f = rf_elv_m[np.where((srtm_m >= 1) & (srtm_m <= 20))]
        dn_elv_m_f = dn_elv_m[np.where((srtm_m >= 1) & (srtm_m <= 20))]
        cn_elv_m_f = cn_elv_m[np.where((srtm_m >= 1) & (srtm_m <= 20))]
        # Get all RMSE values, for areas where SRTM is between 1 and 20m
        RMSE_initial = np.sqrt(np.mean((srtm_m_f - dtm_m_f)**2))
        RMSE_rf = np.sqrt(np.mean((rf_elv_m_f - dtm_m_f)**2))
        RMSE_dn = np.sqrt(np.mean((dn_elv_m_f - dtm_m_f)**2))
        RMSE_cn = np.sqrt(np.mean((cn_elv_m_f - dtm_m_f)**2))
        # Calculate improvements (negative percentage = RMSE reduction)
        improv_rf = (RMSE_rf - RMSE_initial)/RMSE_initial * 100.
        improv_dn = (RMSE_dn - RMSE_initial)/RMSE_initial * 100.
        improv_cn = (RMSE_cn - RMSE_initial)/RMSE_initial * 100.
        # Print results
        print(zone)
        print('RMSE_initial:', RMSE_initial)
        print('RMSE_rf:', RMSE_rf)
        print('RMSE_dn:', RMSE_dn)
        print('RMSE_cn:', RMSE_cn)
        print('improv_rf:', improv_rf)
        print('improv_dn:', improv_dn)
        print('improv_cn:', improv_cn)
# Only two test zones have elevations within the 1-20m range (MRL18_WPE & TSM16_ATG)
# Results were found to be very similar to the overall results already presented
# MRL18_WPE
#  - RMSE_initial: 2.8066373
#  - RMSE_rf: 1.1405739737630056
#  - RMSE_dn: 0.9603291958237794
#  - RMSE_cn: 0.6547264
#  - improv_rf: -59.36154703747597
#  - improv_dn: -65.78363722936197
#  - improv_cn: -76.67220830917358
# TSM16_ATG
#  - RMSE_initial: 4.651809
#  - RMSE_rf: 2.0713059254273296
#  - RMSE_dn: 2.218630549176275
#  - RMSE_cn: 1.5238178
#  - improv_rf: -55.47311101011337
#  - improv_dn: -52.30607175887735
#  - improv_cn: -67.24247336387634
###############################################################################
# 11. Assess test zone RMSEs compared with MERIT DEM (Yamazaki et al. 2017)   #
###############################################################################
# Initalise dictionary to hold test residuals for each available DSM (compared to the DTM)
res = {'srtm':[], 'merit':[], 'rf':[], 'dn':[], 'cn':[]}
# Loop through each test zone, appending the residuals for each DSM source to the relevant list
for zone in test_zones:
    # Import Random Forest arrays - ONLY test dataset pixels
    rf_predictions = np.load('{}/RF_Predictions_ByZone_{}.npy'.format(folder_results_rf, zone))
    rf_cor, rf_elv, rf_res, _,_,_,_,_,_ = process_1D_predictions(zone, rf_predictions, 'test_clip', no_data=-9999)
    # Import Densenet arrays - ONLY test dataset pixels
    dn_predictions = np.load('{}/densenet_Predictions_ByZone_{}.npy'.format(folder_results_densenet, zone))
    dn_cor, dn_elv, dn_res, _,_,_,_,_,_ = process_1D_predictions(zone, dn_predictions, 'test_clip', no_data=-9999)
    # Import Convnet arrays - ONLY test dataset pixels
    cn_predictions = np.load('{}/predictions/convnet_ensemble_{}_prediction_intact.npy'.format(folder_results_convnet, zone))
    cn_cor, cn_elv, cn_res, dtm, srtm, diff, merit, flha, hand = process_2D_predictions(zone, cn_predictions, 'test_clip', no_data=-9999)
    # Get list of residuals for that zone & model
    res_srtm = (srtm - dtm).flatten().tolist()
    res_merit = (merit - dtm).flatten().tolist()
    res_rf = (rf_elv - dtm).flatten().tolist()
    res_dn = (dn_elv - dtm).flatten().tolist()
    res_cn = (cn_elv - dtm).flatten().tolist()
    # Filter out any None or nan values
    res_srtm = [r for r in res_srtm if (not np.isnan(r) and r != None)]
    res_merit = [r for r in res_merit if (not np.isnan(r) and r != None)]
    res_rf = [r for r in res_rf if (not np.isnan(r) and r != None)]
    res_dn = [r for r in res_dn if (not np.isnan(r) and r != None)]
    res_cn = [r for r in res_cn if (not np.isnan(r) and r != None)]
    # Update dictionary of all test residuals
    # (np.append promotes the initial empty lists to 1D float ndarrays, which
    # is what the `**2` arithmetic below relies on)
    res['srtm'] = np.append(res['srtm'], res_srtm)
    res['merit'] = np.append(res['merit'], res_merit)
    res['rf'] = np.append(res['rf'], res_rf)
    res['dn'] = np.append(res['dn'], res_dn)
    res['cn'] = np.append(res['cn'], res_cn)
# Check that arrays are showing up as expected
# NOTE(review): `merit` and `dtm` here are the arrays left over from the
# *last* loop iteration -- confirm this quick-look figure is intentional.
fig, axes = plt.subplots(ncols=2, figsize=(9,6))
axes[0].imshow(merit, vmin=0, vmax=15)
axes[1].imshow(dtm, vmin=0, vmax=15)
# Calculate overall RMSE for each available DSM
RMSE_srtm = np.sqrt(np.nanmean((res['srtm'])**2))
RMSE_merit = np.sqrt(np.nanmean((res['merit'])**2))
RMSE_rf = np.sqrt(np.nanmean((res['rf'])**2))
RMSE_dn = np.sqrt(np.nanmean((res['dn'])**2))
RMSE_cn = np.sqrt(np.nanmean((res['cn'])**2))
# RMSE: Generate summary plots for the test dataset results
fig, axes = plt.subplots(figsize=(9,4.5))
axes.bar(range(5), [RMSE_srtm, RMSE_merit, RMSE_rf, RMSE_dn, RMSE_cn], color=dataset_colours[2], alpha=0.5)
axes.set_xticks(range(5))
axes.yaxis.set_tick_params(length=0)
axes.set_xticklabels(['SRTM','MERIT DEM\n[27]','RF\ncorrection','DCN\ncorrection','FCN\ncorrection'])
axes.set_ylabel('Root Mean Square Error [m]')
axes.grid(axis='y', which='major', color='dimgrey', alpha=0.1)
[axes.spines[edge].set_visible(False) for edge in ['left','top','right']]
# Add a horizontal line showing the initial error & a label
axes.axhline(y=RMSE_srtm, color=dataset_colours[2], linestyle='dashed', alpha=0.5)
axes.annotate('{:.3f}m'.format(RMSE_srtm), xy=(0, RMSE_srtm), xytext=(0, -5), textcoords='offset points', ha='center', va='top')
# Add labels indicating improvement achieved by each method
for j, RMSE_new in enumerate([RMSE_merit, RMSE_rf, RMSE_dn, RMSE_cn]):
    # Add downward arrow from initial RMSE to improved RMSE
    axes.annotate('', xy=(j+1, RMSE_new), xytext=(j+1, RMSE_srtm), arrowprops=dict(arrowstyle='->'))
    # Add label indicating new RMSE and the percentage improvement it equates to
    improvement_percentage = (RMSE_new-RMSE_srtm)/RMSE_srtm * 100.
    axes.annotate('{:.3f}m\n({:.1f}%)'.format(RMSE_new, improvement_percentage), xy=(j+1, RMSE_new), xytext=(0, -5), textcoords='offset points', ha='center', va='top')
axes.set_title('Performance on test dataset')
fig.tight_layout()
fig.savefig('{}/results_RMSE_MERIT.png'.format(folder_fig), dpi=300)
plt.close()
# Figure showing topo maps for each test zone (DTM, MERIT & FCN-SRTM);
# rows = DSM source, columns = test zone
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(9,5.83))
# Loop through each zone, adding three topography maps for each
for i, zone in enumerate(test_zones):
    # Import Random Forest arrays - ONLY test dataset pixels
    rf_predictions = np.load('{}/RF_Predictions_ByZone_{}.npy'.format(folder_results_rf, zone))
    rf_cor, rf_elv, rf_res, _,_,_,_,_,_ = process_1D_predictions(zone, rf_predictions, 'test_clip', no_data=-9999)
    # Import Densenet arrays - ONLY test dataset pixels
    dn_predictions = np.load('{}/densenet_Predictions_ByZone_{}.npy'.format(folder_results_densenet, zone))
    dn_cor, dn_elv, dn_res, _,_,_,_,_,_ = process_1D_predictions(zone, dn_predictions, 'test_clip', no_data=-9999)
    # Import Convnet arrays - ONLY test dataset pixels
    cn_predictions = np.load('{}/predictions/convnet_ensemble_{}_prediction_intact.npy'.format(folder_results_convnet, zone))
    cn_cor, cn_elv, cn_res, dtm, srtm, diff, merit, flha, hand = process_2D_predictions(zone, cn_predictions, 'test_clip', no_data=-9999)
    # Calculate RMSE for SRTM, MERIT & FCN DSMs
    RMSE_srtm = np.sqrt(np.nanmean((srtm - dtm)**2))
    RMSE_merit = np.sqrt(np.nanmean((merit - dtm)**2))
    RMSE_cn = np.sqrt(np.nanmean((cn_elv - dtm)**2))
    # Calculate improvement (over SRTM) for MERIT & FCN DSMs
    improve_merit = (RMSE_merit - RMSE_srtm)/RMSE_srtm * 100.
    improve_cn = (RMSE_cn - RMSE_srtm)/RMSE_srtm * 100.
    # For the 'TSM16_ATG' zone, rotate arrays for easier plotting
    if zone == 'TSM16_ATG':
        dtm, merit, cn_elv = [np.rot90(raster, axes=(1,0)) for raster in [dtm, merit, cn_elv]]
    # Get elevation range & build appropriate colourmap
    # NOTE(review): cm.terrain is matplotlib's shared global colormap object,
    # so set_bad() mutates it for every later use in this process.
    elv_min, elv_max = test_zones_props[zone]['elv_cbar_range']
    elv_cmap = cm.terrain
    elv_cmap.set_bad(color='whitesmoke')
    elv_norm = colors.Normalize(vmin=elv_min, vmax=elv_max)
    # Row 1: DTM (one column per zone)
    axes[0,i].imshow(dtm, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[0,i].axis('off')
    axes[0,i].set_anchor('N')
    # Row 2: MERIT
    axes[1,i].imshow(merit, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[1,i].axis('off')
    axes[1,i].set_anchor('N')
    axes[1,i].annotate('RMSE={:.3f}m ({:.1f}%)'.format(RMSE_merit, improve_merit), xy=(0.02,0.98), xycoords='axes fraction', ha='left', va='top', size=9)
    # Row 3: FCN
    axes[2,i].imshow(cn_elv, aspect='equal', cmap=elv_cmap, norm=elv_norm)
    axes[2,i].axis('off')
    axes[2,i].set_anchor('N')
    axes[2,i].annotate('RMSE={:.3f}m ({:.1f}%)'.format(RMSE_cn, improve_cn), xy=(0.02,0.98), xycoords='axes fraction', ha='left', va='top', size=9)
    # Add a simple scale bar, assuming that each grid cell is approx. 23m (SRTM at this latitude)
    ncells_1km = 1000/23
    offset = 8
    axes[0,i].plot([offset, offset + ncells_1km], [offset, offset], color='black', linewidth=0.8)
    axes[0,i].plot([offset, offset], [offset-1, offset+1], color='black', linewidth=0.8)
    axes[0,i].plot([offset + ncells_1km, offset + ncells_1km], [offset-1, offset+1], color='black', linewidth=0.8)
    axes[0,i].annotate('1km', xy=(offset + 0.5*ncells_1km, 1.5*offset), ha='center', va='top', size=9)
# Tighten layout & make space for labels
fig.tight_layout(pad=0.5, w_pad=0.5, h_pad=0.5)
fig.subplots_adjust(left=0.03, top=0.96)
# Add annotations for the zone names (x positions tuned by eye per column)
for i, zone in enumerate(test_zones):
    zone_label = test_zones_props[zone]['label']
    axes[0,i].annotate(zone_label, xy=([0.19,0.51,0.83][i], 0.99), xycoords='figure fraction', ha='center', va='top', weight='bold', color='dimgrey')
# Add annotations for the DSM names
for j, DSM in enumerate(['LiDAR (resampled)','MERIT (resampled)','FCN-corrected']):
    axes[j,0].annotate(DSM, xy=(0.015, [0.955,0.64,0.3][j]), xycoords='figure fraction', ha='center', va='top', rotation=90, weight='bold', color='dimgrey')
# Save figure
fig.savefig('{}/results_elv_MERIT.png'.format(folder_fig), dpi=300)
plt.close()
144460 | <reponame>Karl-Krauth/recommender-RCT<gh_stars>1-10
from __future__ import annotations
import secrets
import time
import typing
import jwt
import werkzeug.security
from . import base
# NOTE(review): a fresh random key is generated on every process start, which
# invalidates all previously issued tokens on restart and breaks verification
# across multiple worker processes -- confirm this is intended.
SECRET_KEY = secrets.token_hex(128)
class User(base.db.Model):
    """Application user with password hashing and JWT-based auth tokens."""

    __tablename__ = 'users'

    id = base.db.Column(base.db.Integer, primary_key=True)
    username = base.db.Column(base.db.String(32), index=True, unique=True)
    password_hash = base.db.Column(base.db.String(128))

    def verify_password(self, password: str) -> bool:
        """Return True if *password* matches the stored hash."""
        return werkzeug.security.check_password_hash(self.password_hash, password)

    def generate_auth_token(self, expires_in: int = 600) -> str:
        """Return a signed JWT carrying this user's id.

        The token expires *expires_in* seconds from now.
        """
        return jwt.encode(payload={'id': self.id, 'exp': time.time() + expires_in},
                          key=SECRET_KEY,
                          algorithm='HS256')

    @staticmethod
    def verify_auth_token(token: str) -> typing.Optional[User]:
        """Decode *token* and return the matching User, or None if the token
        is malformed, has a bad signature, or has expired."""
        try:
            data = jwt.decode(token,
                              key=SECRET_KEY,
                              algorithms=['HS256'])
        except jwt.PyJWTError:
            # Bug fix: the previous bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit and hid unrelated programming
            # errors; only token problems should yield None.
            return None
        return User.query.get(data['id'])

    @staticmethod
    def get_user(username: str) -> typing.Optional[User]:
        """Return the user with *username*, or None if no such user exists."""
        return User.query.filter_by(username=username).one_or_none()

    @staticmethod
    def add_user(username: str, password: str) -> typing.Optional[User]:
        """Create, persist and return a new user.

        Returns None (and stores nothing) if *username* is already taken.
        """
        if User.get_user(username=username) is not None:
            return None
        password_hash = werkzeug.security.generate_password_hash(password)
        user = User(username=username, password_hash=password_hash)
        base.db.session.add(user)
        base.db.session.commit()
        return user
| StarcoderdataPython |
1693709 | <reponame>bergran/pokemon_project_example<filename>core/signals/login_signal.py
# -*- coding: utf-8 -*-
from django.core.cache import cache
from django.conf import settings
def get_key_user(username):
    """Build the cache key that tracks failed login attempts for *username*."""
    return 'user_attempts:%s' % (username,)
def login_success(sender, user, request, **kwargs):
    # On a successful login, reset the failed-attempt counter for this user.
    # NOTE(review): a timeout of 0 means "expire immediately" for Django's
    # cache backends, so this effectively deletes the key rather than storing
    # a persistent 0 -- confirm that is the intent.
    cache.set(get_key_user(user.username), 0, 0)
def login_fail(sender, credentials, request, **kwargs):
    """Signal handler for a failed login attempt.

    Increments a per-username counter in the cache (5-minute window) and
    deactivates the account once ``settings.LOGIN_ATTEMPTS`` is exceeded.
    """
    from django.contrib.auth.models import User
    username = credentials.get('username')
    cache_key = get_key_user(username)
    attempts = cache.get(cache_key, 0) + 1
    # Bug fix: the counter was previously written under
    # get_key_user(cache_key) -- a double-prefixed key -- while being read
    # from cache_key, so the count could never exceed 1 and accounts were
    # never locked out.
    cache.set(cache_key, attempts, 300)
    if attempts > settings.LOGIN_ATTEMPTS:
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            return
        user.is_active = False
        # Bug fix: persist the deactivation; without save() the flag change
        # was discarded when the object went out of scope.
        user.save()
| StarcoderdataPython |
1733647 | <filename>gym-trivial/gym_trivial/envs/trivial_env.py
from gym import spaces
from gym.utils import seeding
import gym
import numpy as np
class Trivial (gym.Env):
    """Minimal two-switch Gym environment: set both flags to win.

    The state is a pair of binary flags. Action ``0`` sets the first flag
    and action ``1`` sets the second (actions are taken modulo the state
    length). Setting an already-set flag is an invalid move and leaves the
    state unchanged. Once both flags are set the episode is done and the
    final reward is +100.

    Note: for backward compatibility ``step``/``reset`` keep the original
    return conventions (lists rather than tuples, ``done`` as 0/1).
    """

    metadata = {"render.modes": ["human"]}
    reward_range = (-100.0, 100.0)

    def __init__(self):
        self.reset()
        # One discrete action per flag position.
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Tuple((spaces.Discrete(2), spaces.Discrete(2)))

    def reset(self):
        """Reset the environment and return the initial observation.

        Re-seeds the RNG, clears both flags, and resets the episode
        bookkeeping (reward starts at -100.0 until a valid move is made).
        """
        self.seed()
        self.state = [0, 0]
        self.reward = -100.0
        self.done = 0
        self.info = {}
        return self.state

    def step(self, action):
        """Take one step; returns ``[observation, reward, done, info]``.

        Invalid moves (episode already over, or flag already set) return
        the current state unchanged.
        """
        # Guard clauses replace the original nested if/else pyramid.
        if self.done == 1:
            print("game over")
            return [self.state, self.reward, self.done, self.info]
        pos = action % len(self.state)
        if self.state[pos] != 0:
            print("invalid step")
            return [self.state, self.reward, self.done, self.info]
        self.state[pos] = 1
        self.reward = 0.0
        self.render()
        if self._check():
            self.reward = 100.0
            self.done = 1
            print("win")
        return [self.state, self.reward, self.done, self.info]

    def render(self, mode="human"):
        """Render the environment ("human" mode prints the state)."""
        print(self.state)

    def close(self):
        """No resources to clean up."""
        pass

    def seed(self, seed=None):
        """Seed this env's random number generator; returns ``[seed]``."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _check(self):
        # Win condition: both flags set.
        return self.state[0] == 1 and self.state[1] == 1
| StarcoderdataPython |
3265637 | <reponame>MarcinDadura/Shamboogeon<filename>classes/monster.py<gh_stars>1-10
from classes.player import Player
import pygame
from pygame.sprite import Sprite
from classes.game_object import GameObject
class Monster(GameObject):
    """Monster abstract obj: a sprite-animated enemy that chases the player."""
    # Class-level defaults; `sprite` and the direction fields are shadowed by
    # instance attributes below, while `sound` is deliberately shared on the
    # class (every Monster re-points Monster.sound in __init__).
    sprite = None
    sound = None
    horizontal_direction = 0
    vertical_direction = 0
    def __init__(self, x: int, y: int, speed: int ,sound_path: str, sprite_path: str, obj_name: str, max_s: int, size: int = 1, move: bool = True):
        # Load the initial sprite frame for this instance; the shared class
        # sound is (re)loaded on every construction.
        self.speed = speed
        self.count = 0      # animation/frame counter (20 ticks per frame)
        self.name = obj_name
        self.max = max_s    # number of animation frames available
        self.move = move    # if False, update() is a no-op
        self.sprite = pygame.image.load(sprite_path.format(self.count)).convert_alpha()
        Monster.sound = pygame.mixer.Sound(sound_path)
        self.sound = Monster.sound
        super().__init__(x, y, 16 * size, 16 * size, self.sprite, 'monster')
    def update(self, time_delta, objects):
        """Advance animation and move one step toward the player, undoing
        (and reversing) each axis of movement on wall/trellis collision.

        :param time_delta: elapsed time in milliseconds since last update
        :param objects: sprite group checked for collisions
        """
        if not self.move:
            return
        old_x = self.get_x()
        old_y = self.get_y()
        # Swap to the next animation frame every 20 update ticks.
        # NOTE(review): the frame path is hard-coded as 'img/{}_{}.png' here
        # but __init__ used the sprite_path argument -- confirm these are
        # meant to stay in sync.
        if self.count % 20 == 0:
            self.set_sprite(pygame.image.load('img/{}_{}.png'.format(self.name, self.count // 20)).convert_alpha())
        # Head toward the player's current position, one axis step at a time.
        player = Player.get_instance()
        p_x = player.get_x()
        p_y = player.get_y()
        self.horizontal_direction = 0
        self.vertical_direction = 0
        if p_x > self.get_x():
            self.horizontal_direction = 1
        elif p_x < self.get_x():
            self.horizontal_direction = -1
        if p_y > self.get_y():
            self.vertical_direction = 1
        elif p_y < self.get_y():
            self.vertical_direction = -1
        # Wrap the frame counter once the last frame has been shown.
        self.count = self.count + 1 if self.count < self.max*20 else 0
        # Move horizontally, then revert & bounce if we hit a solid object.
        self.set_x(self._x + self.speed * (time_delta / 1000) * self.horizontal_direction)
        for obj in pygame.sprite.spritecollide(self, objects, dokill=False):
            if obj.type == 'wall' or obj.type == 'trellis':
                self.horizontal_direction = -self.horizontal_direction
                self.set_x(old_x)
                break
        #self.set_x(self._x + self.speed * (time_delta / 1000) * self.horizontal_direction)
        # Move vertically, with the same collision handling.
        self.set_y(self._y + self.speed * (time_delta / 1000) * self.vertical_direction)
        for obj in pygame.sprite.spritecollide(self, objects, dokill=False):
            if obj.type == 'wall' or obj.type == 'trellis':
                self.vertical_direction = -self.vertical_direction
                self.set_y(old_y)
                break
        #self.set_y(self._y + self.speed * (time_delta / 1000) * self.vertical_direction)
| StarcoderdataPython |
3300835 | <gh_stars>0
# sort lists
# Example input file: romeo.txt
# Read a text file, collect its unique words, and print them sorted.
filename = input('Enter the file name:')
# Improvements over the original: the file handle is now closed via a context
# manager (it was previously left open), and duplicates are tracked with a
# set instead of an O(n) `in list` check per word.
unique_words = set()
with open(filename) as handle:
    for line in handle:
        unique_words.update(line.split())
# sorted() returns a list, matching the original printed output exactly.
print(sorted(unique_words))
| StarcoderdataPython |
3234439 | """
Django settings for ryu project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Absolute paths used to locate project resources on disk.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(
    os.path.join(os.path.dirname(__file__), ".."),
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# from the environment before any production deployment.
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'debug_toolbar',
    'south',
    'bootstrap3',
    'djangobower',
    'django_nvd3',
    'kurama',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'ryu.urls'
WSGI_APPLICATION = 'ryu.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Templates dir search path
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = False
USE_TZ = True
DATE_FORMAT = 'Y-m-d'
DATETIME_FORMAT = 'Y-m-d H i'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'djangobower.finders.BowerFinder',
)
# Django-bower
# ------------
BOWER_COMPONENTS_ROOT = os.path.join(PROJECT_ROOT, 'components')
BOWER_PATH = '/usr/local/bin/bower'
BOWER_INSTALLED_APPS = (
    'jquery',
    'bootstrap',
    'd3#3.3.6',
    'nvd3#1.1.12-beta',
    'moment',
    'eonasdan-bootstrap-datetimepicker#latest',
)
#IMPORT LOCAL SETTINGS
#=====================
# NOTE(review): the wildcard import silently overrides any of the settings
# above when a local_settings module is present.
try:
    from local_settings import *
except ImportError:
    pass
| StarcoderdataPython |
1650707 | <reponame>HuberTRoy/pyppeteer
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tracing module."""
import asyncio
from pathlib import Path
from typing import Any, Awaitable
from pyppeteer.connection import CDPSession
from pyppeteer.util import merge_dict
class Tracing(object):
"""Tracing class."""
def __init__(self, client: CDPSession) -> None:
self._client = client
self._recording = False
self._path = ''
async def start(self, options: dict = None, **kwargs: Any) -> None:
"""Start tracing.
Only one trace can be active at a time per browser.
This method accepts the following options:
* ``path`` (str): A path to write the trace file to. **required**
* ``screenshots`` (bool): Capture screenshots in the trace.
* ``categories`` (List[str]): Specify custom categories to use instead
of default.
"""
options = merge_dict(options, kwargs)
defaultCategories = [
'-*', 'devtools.timeline', 'v8.execute',
'disabled-by-default-devtools.timeline',
'disabled-by-default-devtools.timeline.frame', 'toplevel',
'blink.console', 'blink.user_timing', 'latencyInfo',
'disabled-by-default-devtools.timeline.stack',
'disabled-by-default-v8.cpu_profiler',
]
categoriesArray = options.get('categories', defaultCategories)
if 'screenshots' in options:
categoriesArray.append('disabled-by-default-devtools.screenshot')
self._path = options.get('path', '')
self._recording = True
await self._client.send('Tracing.start', {
'transferMode': 'ReturnAsStream',
'categories': ','.join(categoriesArray),
})
async def stop(self) -> Awaitable:
"""Stop tracing."""
contentPromise = asyncio.get_event_loop().create_future()
self._client.once(
'Tracing.tracingComplete',
lambda event: asyncio.ensure_future(
self._readStream(event.get('stream'), self._path)
).add_done_callback(
lambda fut: contentPromise.set_result(
fut.result()) # type: ignore
)
)
await self._client.send('Tracing.end')
self._recording = False
return await contentPromise
async def _readStream(self, handle: str, path: str) -> None:
eof = False
file = Path(path)
with file.open('w') as f:
while not eof:
response = await self._client.send('IO.read', {
'handle': handle
})
eof = response.get('eof', False)
if path:
f.write(response.get('data', ''))
await self._client.send('IO.close', {'handle': handle})
| StarcoderdataPython |
168950 | """
mem_check.py
functions for getting memoroy use of the current python process
Downloaded from: http://pythonchb.github.io/PythonTopics/weak_references.html
Windows and *nix versions
USAGE:
amount = get_mem_use(units='MB') # options are KB, MB, GB
"""
import sys
# Unit-name -> bytes-per-unit divisors, shared by both platform branches.
div = {'GB': 1024*1024*1024,
       'MB': 1024*1024,
       'KB': 1024,
       }
if sys.platform.startswith('win'):
    """
    Functions for getting memory usage of Windows processes.
    from:
    http://code.activestate.com/recipes/578513-get-memory-usage-of-windows-processes-using-getpro/
    get_mem_use(units='MB') is the one to get memory use for the current process.
    """
    import ctypes
    from ctypes import wintypes
    # ctypes binding for kernel32!GetCurrentProcess (returns a pseudo-handle
    # for the calling process).
    GetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
    GetCurrentProcess.argtypes = []
    GetCurrentProcess.restype = wintypes.HANDLE
    SIZE_T = ctypes.c_size_t
    class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure):
        # Mirrors the Win32 PROCESS_MEMORY_COUNTERS_EX struct (psapi.h);
        # field order and types must match the C definition exactly.
        _fields_ = [
            ('cb', wintypes.DWORD),
            ('PageFaultCount', wintypes.DWORD),
            ('PeakWorkingSetSize', SIZE_T),
            ('WorkingSetSize', SIZE_T),
            ('QuotaPeakPagedPoolUsage', SIZE_T),
            ('QuotaPagedPoolUsage', SIZE_T),
            ('QuotaPeakNonPagedPoolUsage', SIZE_T),
            ('QuotaNonPagedPoolUsage', SIZE_T),
            ('PagefileUsage', SIZE_T),
            ('PeakPagefileUsage', SIZE_T),
            ('PrivateUsage', SIZE_T),
        ]
    GetProcessMemoryInfo = ctypes.windll.psapi.GetProcessMemoryInfo
    GetProcessMemoryInfo.argtypes = [
        wintypes.HANDLE,
        ctypes.POINTER(PROCESS_MEMORY_COUNTERS_EX),
        wintypes.DWORD,
    ]
    GetProcessMemoryInfo.restype = wintypes.BOOL
    def get_current_process():
        """Return handle to current process."""
        return GetCurrentProcess()
    def get_memory_info(process=None):
        """Return Win32 process memory counters structure as a dict."""
        if process is None:
            process = get_current_process()
        counters = PROCESS_MEMORY_COUNTERS_EX()
        ret = GetProcessMemoryInfo(process, ctypes.byref(counters),
                                   ctypes.sizeof(counters))
        if not ret:
            # A zero return means the Win32 call failed; surface the OS error.
            raise ctypes.WinError()
        info = dict((name, getattr(counters, name))
                    for name, _ in counters._fields_)
        return info
    def get_mem_use(units='MB'):
        """
        returns the total memory use of the current python process
        :param units='MB': the units you want the result in. Options are:
                           'GB', 'MB', 'KB'
        """
        info = get_memory_info()
        return info['PrivateUsage'] / float(div[units])
else: # for posix systems only tested on OS-X for now
def get_mem_use(units='MB'):
"""
returns the total memory use of the current python process
:param units='MB': the units you want the reslut in. Options are:
'GB', 'MB', 'KB'
"""
import resource
#useage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
div = {'GB': 1024*1024*1024,
'MB': 1024*1024,
'KB': 1024,
}
return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / float(div[units])
| StarcoderdataPython |
33831 | <gh_stars>0
def params_create_rels_unwind_from_objects(relationships, property_identifier=None):
    """
    Flatten Relationship objects into one-level dictionaries suitable for the
    UNWIND query built by `query_create_rels_from_list` (nested dictionaries
    cannot be dereferenced inside UNWIND).
        UNWIND { rels } AS rel
        MATCH (a:Gene), (b:GeneSymbol)
        WHERE a.sid = rel.start_sid AND b.sid = rel.end_sid AND b.taxid = rel.end_taxid
        CREATE (a)-[r:MAPS]->(b)
        SET r = rel.properties
    Call with params:
        {'start_sid': 1, 'end_sid': 2, 'end_taxid': '9606', 'properties': {'foo': 'bar} }
    :param relationships: List of Relationships.
    :param property_identifier: key under which the flattened list is returned
        (defaults to 'rels').
    :return: {property_identifier: [flattened relationship dicts]}
    """
    key = property_identifier or 'rels'
    flattened = []
    for rel in relationships:
        # Prefix start/end node properties so they coexist in one flat dict.
        entry = {'start_{}'.format(name): value
                 for name, value in rel.start_node_properties.items()}
        entry.update({'end_{}'.format(name): value
                      for name, value in rel.end_node_properties.items()})
        entry['properties'] = rel.properties
        flattened.append(entry)
    return {key: flattened}
| StarcoderdataPython |
1696696 | from perfcomp import ansbile_playbook, pip_diff, rpm_diff
from perfcomp.graphs import graph_ansible_playbook
class JobDiff:
    """Collects the comparisons requested (ansible / rpm / pip) between a
    good and a bad job and merges their results into one dict."""

    def __init__(self, good, bad, ansible_playbooks_diff, rpm_diff, pip_diff):
        self.good = good
        self.bad = bad
        # Boolean flags selecting which comparisons generate() should run.
        self.ansible_diff = ansible_playbooks_diff
        self.rpm_diff = rpm_diff
        self.pip_diff = pip_diff

    def ansible_playbooks_diff(self):
        """Compare playbook timings and render one graph per playbook."""
        per_playbook = ansbile_playbook.compare(self.good, self.bad)
        rendered = {
            name: (graph_ansible_playbook(diff, name) if diff else None)
            for name, diff in per_playbook.items()
        }
        return {'ans_data': per_playbook, 'images': rendered}

    def rpm_files_diff(self):
        """Diff installed RPMs; returns colored/linked inline diff plus the
        packages unique to each side."""
        inline, uniq1, uniq2 = rpm_diff.rpms(self.good, self.bad)
        # sometimes we need to improve the diff
        inline, uniq1, uniq2 = rpm_diff.check_packages(inline, uniq1, uniq2)
        colored = [rpm_diff.colorize_diff(entry) for entry in inline]
        linked = rpm_diff.add_github_links(inline, colored)
        return {
            'inline': linked, "uniq1": uniq1, "uniq2": uniq2,
            'rpms_diff_max_length': max(len(uniq1), len(uniq2))
        }

    def pip_files_diff(self):
        """Diff installed pip modules between the two jobs."""
        inline, uniq1, uniq2 = pip_diff.pip_modules(self.good, self.bad)
        return {
            'pip_inline': inline, "pip_uniq1": uniq1, "pip_uniq2": uniq2,
            'pip_diff_max_length': max(len(uniq1), len(uniq2))
        }

    def generate(self):
        """Run every enabled comparison and merge the result dicts."""
        results = {}
        for enabled, producer in (
                (self.ansible_diff, self.ansible_playbooks_diff),
                (self.rpm_diff, self.rpm_files_diff),
                (self.pip_diff, self.pip_files_diff)):
            if enabled:
                results.update(producer())
        return results
| StarcoderdataPython |
4824809 | # Write a program to find the node at which the intersection of two singly linked lists begins.
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node: a value plus a pointer to the next node."""

    def __init__(self, x):
        # New nodes start detached (no successor).
        self.val = x
        self.next = None
# Store every node of list A in a dict first, then walk list B and check each node for membership
class Solution:
    def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
        """Return the first node shared by both lists (by identity), or
        None when the lists do not intersect."""
        seen = set()
        node = headA
        while node:
            seen.add(node)
            node = node.next
        node = headB
        while node:
            if node in seen:
                return node
            node = node.next
        # Implicit None: no common node found.
| StarcoderdataPython |
105298 |
from shuffle import SHUFFLE
import csv
# Reusable shuffler instance (project-local SHUFFLE class).
shuffle=SHUFFLE()
documents=[]  # NOTE(review): never used below -- confirm before removing
labell=[]  # NOTE(review): never used below -- confirm before removing
negative=[]  # filled from rows 1..799999 of the CSV (see slicing below)
positive=[]  # filled from rows 800001..1048574 of the CSV
file="dataset_twitter.csv"
dataset_names=['A','B','C','D','E','F']  # output file stems, one per size
Size=[500000,450000,350000,250000,150000,100000]  # total rows per output set
with open(file) as Data:
    reader=csv.reader(Data)
    records=list(reader)
    # Row 0 is the header row (re-written into every output below).
    # NOTE(review): row 800000 falls in neither slice -- confirm whether
    # skipping that single row is intentional.
    negative=shuffle.shuffle(records[1:800000])
    positive=shuffle.shuffle(records[800001:1048575])
    Data.close()  # redundant inside `with`, but harmless
for x in range(len(Size)):
    # Each output set mixes 60% negative / 40% positive, reshuffled together.
    Data=shuffle.shuffle(negative[0:int(Size[x]*0.6)]+positive[0:int(Size[x]*0.4)])
    with open(dataset_names[x]+'.csv', 'w', newline='') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(records[0])
        for row in Data:
            writer.writerow(row)
| StarcoderdataPython |
player1 = int(input())  # starting egg count for player one
player2 = int(input())  # starting egg count for player two
winner = input()  # round result: "one", "two", or "End of battle"
out_of_eggs = False  # set when either player runs out before the battle ends
while not winner == "End of battle":
    # The round winner cracks one of the opponent's eggs.
    if winner == "one":
        player2 -= 1
    else:
        player1 -= 1
    if 0 == player1:
        print(f"Player one is out of eggs. Player two has {player2} eggs left.")
        out_of_eggs = True
        break
    elif 0 == player2:
        print(f"Player two is out of eggs. Player one has {player1} eggs left.")
        out_of_eggs = True
        break
    winner = input()
# Report remaining counts only when the battle ended normally.
if not out_of_eggs:
    print(f"Player one has {player1} eggs left.")
    print(f"Player two has {player2} eggs left.")
| StarcoderdataPython |
4834272 |
from typing import List
from base import version
class Solution:
    @version("40ms, 18.3mb")
    def solve(self, board: List[List[str]]) -> None:
        """Capture regions surrounded by 'X' in place (LeetCode 130 style).

        Strategy: snapshot the board, overwrite everything with 'X', then
        flood-fill from every border cell that was originally 'O', restoring
        each reachable 'O'. Interior 'O' regions are never restored.
        Assumes a non-empty rectangular board.
        """
        height, width = len(board), len(board[0])
        # Snapshot of the original cells; `board` itself becomes the output.
        proxy = [[item for item in row] for row in board]
        for m in range(height):
            for n in range(width):
                board[m][n] = 'X'
        def search(m, n):
            # Iterative DFS: restore every 'O' reachable from (m, n) in the
            # snapshot; visited snapshot cells are flipped to 'X' so they
            # are not pushed twice.
            if proxy[m][n] == 'X':
                return 0
            stack = [(m, n)]
            while stack:
                m, n = stack.pop()
                board[m][n] = 'O'
                proxy[m][n] = 'X'
                # Four-neighborhood: up, left, down, right.
                for l, r in ((-1, 0), (0, -1), (1, 0), (0, 1)):
                    i, j = m + l, n + r
                    if (0 <= i <= height - 1) and (0 <= j <= width - 1) and proxy[i][j] == 'O':
                        stack.append((i, j))
        # Seed the flood fill from all four borders.
        for m in (0, height - 1):
            for n in range(width):
                search(m, n)
        for n in (0, width - 1):
            for m in range(height):
                search(m, n)
| StarcoderdataPython |
3267190 | from .RandomPerturb import perturb_randCorr
from .RandomCorrNear import nearcorr
from .RandomCorr import randCorr
from .RandomCorrMatEigen import randCorrGivenEgienvalues
from .Diagnostics import CorrDiagnostics, isPD, isvalid_corr
from .ConstantCorr import constantCorrMat | StarcoderdataPython |
1600842 | <filename>events/api/serializers.py
from events.models import Event
from rest_framework import serializers
class EventSerializer(serializers.HyperlinkedModelSerializer):
    """REST serializer for Event, exposing id, title, start and the
    hyperlinked url field only."""
    class Meta:
        model = Event
        fields = ("id", "title", "start", "url")
| StarcoderdataPython |
49251 | import os
import sys
import cv2
import numpy as np
class Equirectangular:
    """Renders rectilinear perspective views out of an equirectangular
    panorama image."""

    def __init__(self, img):
        # Takes an already-loaded image array; indexing below implies a
        # (height, width, channels) layout.
        self._img = img
        #self._img = cv2.imread(img_name, cv2.IMREAD_COLOR)
        [self._height, self._width, _] = self._img.shape
        # NOTE(review): debug print left in -- consider removing or routing
        # through a logger.
        print(self._img.shape)

    def GetPerspective(self, FOV, THETA, PHI, height, width):
        #
        # THETA is left/right angle, PHI is up/down angle, both in degree
        #
        """Project a (height x width) perspective view looking along
        (THETA, PHI) with horizontal field of view FOV (degrees)."""
        equ_h = self._height
        equ_w = self._width
        # Center of the equirectangular image in pixel coordinates.
        equ_cx = (equ_w - 1) / 2.0
        equ_cy = (equ_h - 1) / 2.0

        # Vertical FOV derived from the output aspect ratio.
        wFOV = FOV
        hFOV = float(height) / width * wFOV

        # Half-extents of the image plane at unit distance (tan of half-FOV).
        w_len = np.tan(np.radians(wFOV / 2.0))
        h_len = np.tan(np.radians(hFOV / 2.0))

        # Build one unit ray direction per output pixel (x forward, y right,
        # z up), normalized by the ray length D.
        x_map = np.ones([height, width], np.float32)
        y_map = np.tile(np.linspace(-w_len, w_len,width), [height,1])
        z_map = -np.tile(np.linspace(-h_len, h_len,height), [width,1]).T

        D = np.sqrt(x_map**2 + y_map**2 + z_map**2)
        xyz = np.stack((x_map,y_map,z_map),axis=2)/np.repeat(D[:, :, np.newaxis], 3, axis=2)

        # Rotate the rays: yaw about z by THETA, then pitch by -PHI about the
        # rotated y axis (axis-angle via Rodrigues).
        y_axis = np.array([0.0, 1.0, 0.0], np.float32)
        z_axis = np.array([0.0, 0.0, 1.0], np.float32)
        [R1, _] = cv2.Rodrigues(z_axis * np.radians(THETA))
        [R2, _] = cv2.Rodrigues(np.dot(R1, y_axis) * np.radians(-PHI))

        xyz = xyz.reshape([height * width, 3]).T
        xyz = np.dot(R1, xyz)
        xyz = np.dot(R2, xyz).T
        # Convert ray directions to latitude/longitude (degrees)...
        lat = np.arcsin(xyz[:, 2])
        lon = np.arctan2(xyz[:, 1] , xyz[:, 0])

        lon = lon.reshape([height, width]) / np.pi * 180
        lat = -lat.reshape([height, width]) / np.pi * 180

        # ...then to source pixel coordinates in the panorama.
        lon = lon / 180 * equ_cx + equ_cx
        lat = lat / 90 * equ_cy + equ_cy

        # BORDER_WRAP stitches the panorama's left/right seam correctly.
        persp = cv2.remap(self._img, lon.astype(np.float32), lat.astype(np.float32), cv2.INTER_CUBIC, borderMode=cv2.BORDER_WRAP)
        return persp
| StarcoderdataPython |
135240 |
# Problem code
def countAndSay(n):
    """Return the n-th term of the count-and-say sequence (1-indexed):
    1, 11, 21, 1211, 111221, ..."""
    term = "1"
    # Apply the say-step n-1 times; for n <= 1 the loop never runs and the
    # first term is returned unchanged, matching the base case.
    for _ in range(n - 1):
        term = helper(term)
    return term
def helper(current):
    """Run-length encode one count-and-say step, e.g. "1211" -> "111221".
    Raises IndexError on an empty string (callers always pass a term)."""
    encoded = []
    run_char = current[0]
    run_len = 0
    for ch in current:
        if ch == run_char:
            run_len += 1
        else:
            # Close the finished run as "<count><digit>" and start a new one.
            encoded.append("%d%s" % (run_len, run_char))
            run_char = ch
            run_len = 1
    # Flush the final run.
    encoded.append("%d%s" % (run_len, run_char))
    return "".join(encoded)
# Setup
# Demo call: prints the 4th count-and-say term ("1211").
print(countAndSay(4))
| StarcoderdataPython |
1659100 | import logging
from hikcamerabot.camera import HikvisionCam
from hikcamerabot.config import get_result_queue
from hikcamerabot.processes import CameraSupervisorProc
class CameraProcessManager:
    """Camera Process Manager Class."""

    def __init__(self, cam_registry, event_q):
        self._log = logging.getLogger(self.__class__.__name__)
        self._log.debug('Initializing %r', self)
        self.cam_registry = cam_registry
        # Per-camera inbound event queues, keyed by camera id.
        self._event_queues = event_q
        self._result_queue = get_result_queue()
        self._procs = []

    def start_processes(self):
        """Start Updater Processes."""
        for cam_id, meta in self.cam_registry.get_all().items():
            # One supervisor process per registered camera.
            supervisor = CameraSupervisorProc(
                camera=HikvisionCam(_id=cam_id, conf=meta['conf']),
                queue_in=self._event_queues[cam_id])
            self._log.debug('Starting %s', supervisor)
            supervisor.start()
            self._procs.append(supervisor)

    def stop_processes(self):
        """Terminate every supervisor process and wait for it to exit."""
        for worker in self._procs:
            worker.terminate()
            worker.join()
| StarcoderdataPython |
92424 | <reponame>maynard242/Machine-Learning-At-Scale
#!/usr/bin/env python
"""
This mapper reads from STDIN and waits 0.001 seconds per line.
Its only purpose is to demonstrate one of the scalability ideas.
"""
import sys
import time
# Simulate 1 ms of "work" per input line; the mapper emits no output.
for line in sys.stdin:
    time.sleep(0.001)
1746168 | from tesi_ao import main220316
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from tesi_ao.mems_command_to_position_linearization_measurer import CommandToPositionLinearizationMeasurer
from tesi_ao.mems_command_to_position_linearization_analyzer import CommandToPositionLinearizationAnalyzer
def _what_I_do_on_terminal():  # don't use!
    '''
    an example of how I used main220303
    '''
    # NOTE: illustrative transcript only -- it drives real hardware (via
    # do_more_scans/do_calibrated_measure) and matplotlib windows; keep it
    # out of automated runs.
    iea = InterpolationErrorAnalyzer(ACTUATOR=63, NUM_SCAN_LIST=[
        10, 20, 30, 40, 50, 60, 100], test_points=20)
    # do and save WF maps for each scan sampling
    iea.do_more_scans('_f0')
    # load mcl objects into a list
    mcls_int = iea.load_mcls('_f0')
    # plot all the interpolated functions
    iea.plot_all_interpolation_functions(mcls_int)
    # plot interpolated function difference wrt the one with the biggest
    # samples
    iea.plot_interpolation_difference(mcls_int)
    # from the 'old' mcls elements, we need the interpolated functions
    # to compute p2c and save the a 'new' measured mcl object
    iea.do_calibrated_measure(mcls_int, '_f0')
    # load new mcl
    mcls_meas = iea.load_calibrated_measure('_f0')
    # Plot the difference between the measured and expected deflection, as a
    # function of the expected one
    rms_list = iea.plot_Measured_vs_Expected_common(mcls_meas, mcls_int)
    iea.fitting_Meas_vs_Exp_common(mcls_meas, rms_list, mcls_int)
class InterpolationErrorAnalyzer(object):
    """Analyzes how the voltage-scan sampling density affects the accuracy
    of the MEMS command-to-position interpolation for one actuator.

    Measurement methods drive the interferometer/mirror pair created by
    main220316.create_devices(); every plot_* / fitting_* method requires
    matplotlib.
    """

    ffmt = '.fits'  # extension used for all saved measurement files

    def __init__(self, ACTUATOR=63, NUM_SCAN_LIST=None, test_points=10):
        """
        :param ACTUATOR: index of the actuator under test.
        :param NUM_SCAN_LIST: voltage-scan sampling densities to compare;
            defaults to [10, 20, 30, 40, 50, 60, 100].
        :param test_points: number of deflection test points used by the
            calibrated measurements.
        """
        # Fixed: avoid a shared mutable default argument for the list.
        if NUM_SCAN_LIST is None:
            NUM_SCAN_LIST = [10, 20, 30, 40, 50, 60, 100]
        self.ACTUATOR = ACTUATOR
        self.NUM_SCAN_LIST = NUM_SCAN_LIST
        self.test_points = test_points
        self.fpath = 'prova/act%d' % ACTUATOR + '/main220303/cplm'

    def _execute_measure(self, fname, Nscan):
        '''
        Executes WF maps measure, one for each scan, and saves the
        related CPLM object into fname.fits.
        '''
        act_list = [self.ACTUATOR]
        wyko, bmc = main220316.create_devices()
        cplm = CommandToPositionLinearizationMeasurer(wyko, bmc)
        cplm.NUMBER_STEPS_VOLTAGE_SCAN = Nscan
        cplm.execute_command_scan(act_list)
        cplm.save_results(fname)

    def _get_mcl_from_file(self, fname):
        '''
        From a fits file, loads CPLA object and evaluating
        interpolation function.
        Returns the related MemsCommandLinearization object.
        '''
        cpla = CommandToPositionLinearizationAnalyzer(fname)
        mcl = cpla.compute_linearization()
        return mcl

    def _plot_interpolation_function(self, mcl):
        """Plot one mcl's sampled points and its interpolated curve."""
        plt.plot(mcl._cmd_vector[0], mcl._deflection[0], 'o',
                 label='%d scans' % mcl._cmd_vector.shape[1])
        Npt = 1024
        f_int = mcl._finter[0]
        span = np.linspace(
            min(mcl._cmd_vector[0]), max(mcl._cmd_vector[0]), Npt)
        plt.plot(span, f_int(span), '-', color=plt.gca().lines[-1].get_color())

    def _get_common_cmds_range(self, mcl_list):
        '''
        Returns the extremes[a,b] of the common cmd domain
        between all interpolated functions
        Input: list, mcl_list
        Returns: a, b
        '''
        min_container = []
        max_container = []
        for mcl in mcl_list:
            min_container.append(min(mcl._cmd_vector[0]))
            max_container.append(max(mcl._cmd_vector[0]))
        a = max(min_container)
        b = min(max_container)
        return a, b

    def _get_common_deflections_range(self, mcl_list):
        """Return the extremes [a, b] of the deflection range shared by all
        calibrated measurements in mcl_list."""
        min_container = []
        max_container = []
        for mcl in mcl_list:
            min_container.append(min(mcl._calibrated_position[0]))
            max_container.append(max(mcl._calibrated_position[0]))
        a = max(min_container)
        b = min(max_container)
        return a, b

    def do_more_scans(self, version_file):
        '''
        For each scan sampling defined in NUM_SCAN_LIST,
        executes WF mapping through the class objects CPLM and CPLA defined in sandbox.py,
        and saves into file.fits
        '''
        for scans in self.NUM_SCAN_LIST:
            print('\n%d voltage scans:' % scans)
            fname = self.fpath + '%d' % scans + version_file + self.ffmt
            self._execute_measure(fname, scans)

    def load_mcls(self, version_file):
        '''
        Loads MemsCommandLinearization objects defined in sandbox.py,
        computed by do_more_scans
        and returns them into a list.
        Input: string,'version_file'
        Return: list, mcl_list
        len(mcl_list) == number of interpolated function (one for each scan sampling)
        '''
        mcl_list = []
        for scans in self.NUM_SCAN_LIST:
            fname = self.fpath + '%d' % scans + version_file + self.ffmt
            mcl_list.append(self._get_mcl_from_file(fname))
        return mcl_list

    def plot_all_interpolation_functions(self, mcl_list):
        '''
        Plots all interpolated functions obtained by varying scan sampling,
        as a function of actuator's deflections.
        '''
        plt.figure()
        plt.clf()
        plt.ion()
        plt.title('act#%d: interpolation functions for several scans' %
                  self.ACTUATOR, size=25)
        for mcl in mcl_list:
            self._plot_interpolation_function(mcl)
        plt.xlabel('Commands [au]', size=25)
        plt.ylabel('Deflection [m]', size=25)
        plt.grid()
        plt.legend(loc='best')

    def plot_interpolation_difference(self, mcl_list):
        '''
        Plots the difference between all the interpolated function with
        respect to the one computed with the biggest scan sampling, as a function of
        actuators deflections.
        Input: list, mcl_list
        '''
        Npt = 1024
        # looking for the common deflections domain for the interpolated
        # functions
        min_span, max_span = self._get_common_cmds_range(mcl_list)
        common_span_cmds = np.linspace(
            min_span, max_span, Npt)
        # interpolated function with the biggest scans sampling
        f_ref = mcl_list[-1]._finter[0]
        plt.figure()
        plt.clf()
        plt.ion()
        plt.title('act#%d:' % self.ACTUATOR +
                  'cubic interpolation error w-r-t %dscans' % max(self.NUM_SCAN_LIST), size=25)
        for idx, scans in enumerate(self.NUM_SCAN_LIST):
            f_i = mcl_list[idx]._finter[0]
            plt.plot(common_span_cmds, (f_i(common_span_cmds) -
                                        f_ref(common_span_cmds)) / 1e-9, '.-', label='%d scans' % scans)
            print((f_i(common_span_cmds) - f_ref(common_span_cmds)).std())
        plt.legend(loc='best')
        plt.grid()
        plt.ylabel('Deflection Difference [m]', size=25)
        plt.xlabel('cmd [au]', size=25)

    def do_calibrated_measure(self, mcl_list, version):
        '''
        Though the interpolated functions contained in the 'old' MCL objects
        and listed in mcl_list, saves new WF maps using converted
        actuator's deflections (calling p2c and MyCalibrationMeasurer class as defined below).
        Input:
        list, mcl_list
        string, 'file version'
        '''
        Npt = self.test_points
        act_list = [self.ACTUATOR]
        wyko, bmc = main220316.create_devices()
        min_span, max_span = self._get_common_deflections_range(mcl_list)
        expected_deflection = np.linspace(max_span, min_span, Npt)
        # expected_deflection = np.linspace(-800e-9, 1600e-9, Npt) #@act63
        for idx, mcl in enumerate(mcl_list):
            mcm = MyCalibrationMeasurer(wyko, bmc, mcl, expected_deflection)
            mcm.execute_command_scan(act_list)
            fname = self.fpath + '%d' % Npt + 'meas' + version + \
                '_cal%d' % self.NUM_SCAN_LIST[idx] + self.ffmt
            mcm.save_results(fname)

    def load_calibrated_measure(self, version):
        '''
        Loads the 'new' mcl objects from file created by do_calibrated_measure,
        and returns them into a list.
        Input: string, 'file_version'
        Return: list, mcl_list
        '''
        mcl_list = []
        Npt = self.test_points
        for scans in self.NUM_SCAN_LIST:
            fname = self.fpath + '%d' % Npt + 'meas' + version + \
                '_cal%d' % scans + self.ffmt
            mcl_list.append(self._get_mcl_from_file(fname))
        return mcl_list

    def plot_Measured_vs_Expected_common(self, mcl_meas, mcl_int):
        '''
        Plots the difference between the measured and expected deflection,
        as a function of the expected one.
        mcl_meas[i]== element of the list loaded from load_calibrated_measure
        Input: list, mcls_meas
               list, mcl_int (used for common deflection domain evaluation)
        Returns: list of per-sampling RMS errors (in meters).
        '''
        Npt = self.test_points
        plt.figure()
        plt.clf()
        # Fixed: the method is _get_common_deflections_range (plural); the
        # original called a nonexistent _get_common_deflection_range.
        min_span, max_span = self._get_common_deflections_range(mcl_int)
        #min_span = -800e-9
        #max_span = 1600e-9
        x_exp = np.linspace(min_span, max_span, Npt)  # expected deflections
        rms_list = []
        for idx in np.arange(len(mcl_meas)):
            x_obs = mcl_meas[idx]._deflection[0]
            y = x_obs - x_exp
            rms = y.std()
            rms = rms / 1.e-9
            rms_list.append(y.std())
            plt.plot(x_exp / 1.e-9, y / 1.e-9, 'o-', label='%d scans' %
                     self.NUM_SCAN_LIST[idx])
            print('rms = %g' % rms + 'nm\t' +
                  '(Sampling: %d scans)' % self.NUM_SCAN_LIST[idx])
        plt.legend(loc='best')
        plt.grid()
        plt.xlabel('$x_{exp} [nm]$', size=25)
        plt.ylabel('$x_{obs} - x_{exp} [nm]$', size=25)
        plt.title('act#%d:' % self.ACTUATOR +
                  ' Error in deflection cmds for each interpolation functions Common', size=25)
        return rms_list

    def fitting_Meas_vs_Exp_common(self, mcl_meas, rms_list, mcl_int):
        '''
        Plots the best fits for measured vs expected deflection, for each scan sampling.
        '''
        Npt = self.test_points
        plt.figure()
        plt.clf()
        # Fixed: same method-name correction as in
        # plot_Measured_vs_Expected_common above.
        min_span, max_span = self._get_common_deflections_range(mcl_int)
        #min_span = -800e-9
        #max_span = 1600e-9
        x_exp = np.linspace(min_span, max_span, Npt)
        ones = np.ones(Npt)
        xx = np.linspace(min_span, max_span, 1024)
        for idx in np.arange(len(mcl_meas)):
            x_obs = mcl_meas[idx]._deflection[0]
            plt.plot(x_exp, x_obs, 'o', label='%d scans' %
                     self.NUM_SCAN_LIST[idx])
            sigma = ones * rms_list[idx]
            coeff, coeff_cov = np.polyfit(
                x_exp, x_obs, 1, w=sigma, cov=True, full=False)
            err_coeff = np.sqrt(np.diag(coeff_cov))
            print('\nFit relative to Sampling: %d scans)' %
                  self.NUM_SCAN_LIST[idx])
            print('A = %g' % coeff[0] + '\t+/- %g ' % err_coeff[0])
            print('offset = %g' % coeff[1] + '\t+/- %g' % err_coeff[1])
            print('Cov Matrix:')
            print(coeff_cov)
            fit_func = np.poly1d(coeff)
            residuals = x_obs - fit_func(x_obs)
            chi_2 = np.sum((residuals / sigma)**2)
            print('Chi2 = %g' % chi_2)
            dof = len(x_obs) - len(coeff)
            chi2red = chi_2 / float(dof)
            print('RedChi2 = %g' % chi2red)
            plt.plot(xx, fit_func(xx), '-', label='relative fit',
                     color=plt.gca().lines[-1].get_color())
            # plt.errorbar(x_exp, x_obs, sigma,
            # color=plt.gca().lines[-1].get_color())
        plt.legend(loc='best')
        plt.grid()
        plt.xlabel('$x_{exp} [m]$', size=25)
        plt.ylabel('$x_{obs} [m]$', size=25)
        plt.title('act#%d:' % self.ACTUATOR +
                  ' Common deflection span', size=25)
# similar to CommandtoPositionLinearizationMeasurer
class MyCalibrationMeasurer(object):  # changes when bmc set shape
    '''
    As CommandToPositionLinearizationMeasurer defined in sandbox.py,
    acquires WF maps, one for each expected deflection command.
    These deflections are converted in voltage commands through
    p2c function stored in MCL object.
    '''
    NUMBER_WAVEFRONTS_TO_AVERAGE = 1

    def __init__(self, interferometer, mems_deformable_mirror, mlc, expected_deflections):
        """
        :param interferometer: device providing wavefront() measurements.
        :param mems_deformable_mirror: device providing set_shape() etc.
        :param mlc: MemsCommandLinearization object exposing linear_p2c().
        :param expected_deflections: iterable of target deflections; its
            length sets the number of scan steps.
        """
        self._interf = interferometer
        self._bmc = mems_deformable_mirror
        self._n_acts = self._bmc.get_number_of_actuators()
        # Fixed: store under a single consistent name. The original assigned
        # self._mlc but read self._mcl in execute_command_scan, which raised
        # AttributeError at runtime.
        self._mcl = mlc
        self._exp_deflections = expected_deflections
        self.NUMBER_STEPS_VOLTAGE_SCAN = len(expected_deflections)
        self._wfflat = None

    def _get_zero_command_wavefront(self):
        """Lazily measure and cache the flat (all-zero command) wavefront."""
        if self._wfflat is None:
            cmd = np.zeros(self._n_acts)
            self._bmc.set_shape(cmd)
            self._wfflat = self._interf.wavefront(
                self.NUMBER_WAVEFRONTS_TO_AVERAGE)
        return self._wfflat

    def execute_command_scan(self, act_list=None):
        """Acquire one flat-subtracted WF map per expected deflection for
        every actuator in act_list (all actuators when None)."""
        if act_list is None:
            act_list = np.arange(self._n_acts)
        self._actuators_list = np.array(act_list)
        n_acts_to_meas = len(self._actuators_list)
        wfflat = self._get_zero_command_wavefront()
        self._reference_cmds = self._bmc.get_reference_shape()
        self._reference_tag = self._bmc.get_reference_shape_tag()
        self._cmd_vector = np.zeros((n_acts_to_meas,
                                     self.NUMBER_STEPS_VOLTAGE_SCAN))
        self._wfs = np.ma.zeros(
            (n_acts_to_meas, self.NUMBER_STEPS_VOLTAGE_SCAN,
             wfflat.shape[0], wfflat.shape[1]))
        N_pixels = self._wfs.shape[2] * self._wfs.shape[3]
        for act_idx, act in enumerate(self._actuators_list):
            # Convert the expected deflections to voltage commands
            # (fixed: unified self._mlc/self._mcl naming, see __init__).
            self._cmd_vector[act_idx] = self._mcl.linear_p2c(
                act, self._exp_deflections)
            for cmd_idx in range(len(self._cmd_vector[act_idx])):
                print("Act:%d - command" % (act))
                cmd = np.zeros(self._n_acts)
                cmd[act] = self._mcl.linear_p2c(
                    act, self._exp_deflections[cmd_idx])
                self._bmc.set_shape(cmd)
                self._wfs[act_idx, cmd_idx, :,
                          :] = self._get_wavefront_flat_subtracted()
                masked_pixels = self._wfs[act_idx, cmd_idx].mask.sum()
                masked_ratio = masked_pixels / N_pixels
                # NOTE(review): 0.8227 is an empirical saturation threshold
                # on the masked-pixel fraction -- confirm its provenance.
                if masked_ratio > 0.8227:
                    print('Warning: Bad measure acquired for: act%d' %
                          act_idx + ' cmd_idx %d' % cmd_idx)
                    self._avoid_saturated_measures(
                        masked_ratio, act_idx, cmd_idx, N_pixels)

    def _avoid_saturated_measures(self, masked_ratio, act_idx, cmd_idx, N_pixels):
        """Re-acquire the (act_idx, cmd_idx) map until the masked-pixel
        fraction falls below the saturation threshold."""
        while masked_ratio > 0.8227:
            self._wfs[act_idx, cmd_idx, :,
                      :] = self._get_wavefront_flat_subtracted()
            masked_pixels = self._wfs[act_idx, cmd_idx].mask.sum()
            masked_ratio = masked_pixels / N_pixels
        print('Repeated measure completed!')

    def _get_wavefront_flat_subtracted(self):
        """Return the current wavefront minus the flat reference, with its
        median removed (piston term)."""
        dd = self._interf.wavefront(
            self.NUMBER_WAVEFRONTS_TO_AVERAGE) - self._get_zero_command_wavefront()
        return dd - np.ma.median(dd)

    def _reset_flat_wavefront(self):
        # Invalidate the cached flat so the next call re-measures it.
        self._wfflat = None

    def save_results(self, fname):
        """Write maps, commands and metadata to a multi-HDU FITS file."""
        hdr = fits.Header()
        hdr['REF_TAG'] = self._reference_tag
        hdr['N_AV_FR'] = self.NUMBER_WAVEFRONTS_TO_AVERAGE
        fits.writeto(fname, self._wfs.data, hdr)
        fits.append(fname, self._wfs.mask.astype(int))
        fits.append(fname, self._cmd_vector)
        fits.append(fname, self._actuators_list)
        fits.append(fname, self._reference_cmds)

    @staticmethod
    def load(fname):
        """Load a file written by save_results; returns a dict of arrays."""
        header = fits.getheader(fname)
        hduList = fits.open(fname)
        wfs_data = hduList[0].data
        wfs_mask = hduList[1].data.astype(bool)
        wfs = np.ma.masked_array(data=wfs_data, mask=wfs_mask)
        cmd_vector = hduList[2].data
        actuators_list = hduList[3].data
        reference_commands = hduList[4].data
        return {'wfs': wfs,
                'cmd_vector': cmd_vector,
                'actuators_list': actuators_list,
                'reference_shape': reference_commands,
                'reference_shape_tag': header['REF_TAG']
                }
| StarcoderdataPython |
1723933 | <reponame>XavierBecerra/block3_py_questionnaire
#Defining imports
import pandas as pd
# QUESTION 4
def compute_drivers_performance(
        orders_path="D:/paack/Data/orders_table.csv",
        drivers_path="D:/paack/Data/drivers_table.csv",
        output_path="D:/paack/Data/Out/block3_out.csv"):
    """Compute per-driver, per-day delivery performance (QUESTION 4).

    Performance = distinct orders / worked hours, where worked hours is the
    span between the hour of the first delivery start and the hour of the
    last delivery end of that day. Drivers with no orders appear with NaN
    aggregates. The merged table is also written to output_path.

    :param orders_path: CSV with columns id, driver_id, 'Deliver date',
        'Delivery Start', 'Delivery End', 'Attempted time'.
    :param drivers_path: CSV with at least an 'id' column.
    :param output_path: destination for the result CSV.
    :return: pandas DataFrame -- drivers left-joined with daily aggregates.
    """
    # Load both tables (paths are parameterized; defaults keep the original
    # hard-coded locations for backward compatibility).
    orders_df = pd.read_csv(orders_path, sep=',')
    drivers_df = pd.read_csv(drivers_path, sep=',')
    # Drop exact duplicate rows before aggregating.
    orders_df.drop_duplicates(keep='first', inplace=True)
    drivers_df.drop_duplicates(keep='first', inplace=True)
    # Parse the time columns into datetimes.
    orders_df['Delivery Start'] = pd.to_datetime(orders_df['Delivery Start'])
    orders_df['Delivery End'] = pd.to_datetime(orders_df['Delivery End'])
    orders_df['Attempted time'] = pd.to_datetime(orders_df['Attempted time'])
    # Keep only deliveries with a valid attempt time. (Fixed: the original
    # `orders_df[~orders_df.isnull()]` masked individual cells and dropped
    # no rows, contradicting its stated intent.)
    orders_df = orders_df[orders_df['Attempted time'].notnull()]
    # One row per driver per day: shift boundaries plus distinct order count.
    aggr_df = orders_df.groupby(['driver_id', 'Deliver date']) \
        .agg({'Delivery Start': 'min', 'Delivery End': 'max',
              'id': lambda x: x.nunique()}) \
        .rename(columns={'id': 'orders'}) \
        .reset_index(drop=False)
    # NOTE(review): hour-of-day difference ignores minutes and overnight
    # shifts, and a same-hour shift yields WorkHours == 0 (division below
    # then produces inf) -- confirm this approximation is intended.
    aggr_df['WorkHours'] = aggr_df['Delivery End'].apply(lambda x: x.hour) \
        - aggr_df['Delivery Start'].apply(lambda x: x.hour)
    aggr_df['Performance'] = aggr_df.orders / aggr_df.WorkHours
    # Full driver list; drivers that did not work get NULL (NaN) aggregates.
    merged_df = pd.merge(drivers_df, aggr_df, left_on='id',
                         right_on='driver_id', how='left')
    merged_df.drop(columns=['driver_id'], inplace=True)
    merged_df.to_csv(output_path, sep=',')
    return merged_df
1791211 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_concurrency import processutils as putils
from os_brick import privileged
from os_brick.privileged import rootwrap as priv_rootwrap
from os_brick.tests import base
class PrivRootwrapTestCase(base.TestCase):
    """Unit tests for os_brick.privileged.rootwrap's execute helpers."""
    def setUp(self):
        super(PrivRootwrapTestCase, self).setUp()
        # Bypass privsep and run these simple functions in-process
        # (allows reading back the modified state of mocks)
        privileged.default.set_client_mode(False)
        self.addCleanup(privileged.default.set_client_mode, True)
    @mock.patch('os_brick.privileged.rootwrap.execute_root')
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_execute(self, mock_putils_exec, mock_exec_root):
        """execute() only delegates to execute_root when run_as_root=True,
        and drops the root_helper argument when it does."""
        priv_rootwrap.execute('echo', 'foo', run_as_root=False)
        self.assertFalse(mock_exec_root.called)
        priv_rootwrap.execute('echo', 'foo', run_as_root=True,
                              root_helper='baz', check_exit_code=0)
        mock_exec_root.assert_called_once_with(
            'echo', 'foo', check_exit_code=0)
    @mock.patch('oslo_concurrency.processutils.execute')
    def test_execute_root(self, mock_putils_exec):
        """execute_root() forces shell=False/run_as_root=False and rejects
        callers that try to pass those arguments themselves."""
        priv_rootwrap.execute_root('echo', 'foo', check_exit_code=0)
        mock_putils_exec.assert_called_once_with(
            'echo', 'foo', check_exit_code=0, shell=False, run_as_root=False)
        # Exact exception isn't particularly important, but these
        # should be errors:
        self.assertRaises(TypeError,
                          priv_rootwrap.execute_root, 'foo', shell=True)
        self.assertRaises(TypeError,
                          priv_rootwrap.execute_root, 'foo', run_as_root=True)
    @mock.patch('oslo_concurrency.processutils.execute',
                side_effect=OSError(42, 'mock error'))
    def test_oserror_raise(self, mock_putils_exec):
        """OSError from the underlying exec surfaces as ProcessExecutionError."""
        self.assertRaises(putils.ProcessExecutionError,
                          priv_rootwrap.execute, 'foo')
| StarcoderdataPython |
103058 | <filename>tests/test_parsing.py
from units_calculator.all import (
Kilograms,
Meters,
Milliseconds,
Seconds,
parse,
parse_pure_units,
)
def test_parse_pure_units() -> None:
    """Unit-string parsing merges repeated units and preserves order."""
    test_str = "s^(-1)*kg*m/s"
    x = parse_pure_units(test_str)
    # s^-1 and /s combine into s^-2; kg and m keep exponent 1.
    assert tuple(x) == ((Seconds, -2), (Kilograms, 1), (Meters, 1))
    assert tuple(parse_pure_units("")) == ()
def test_prase_int_units() -> None:  # NOTE(review): name typo ("prase" -> "parse"); kept for stability
    """Integer magnitudes parse to floats and support arithmetic."""
    _5m = parse("5m")
    _25m2 = parse("25m^2")
    assert repr(_5m) == "5.0m"
    assert _5m * _5m == _25m2
def test_parse_vloat_units() -> None:  # NOTE(review): name typo ("vloat" -> "float"); kept for stability
    """Float magnitudes parse and compare across unit scales."""
    _2500ms = parse("2.5s")
    assert _2500ms == Milliseconds(2500)
def test_parse_complex_units() -> None:
    """Complex magnitudes (pure-imaginary and full complex) parse correctly."""
    _jA = parse("jA")
    assert repr(_jA) == "1jA"
    _3p2jA = parse("(3+2.5j)A")
    assert repr(_3p2jA) == "(3+2.5j)A"
| StarcoderdataPython |
6763 | # Author: <NAME> <<EMAIL>>
import numpy as np
from bolero.representation import BlackBoxBehavior
from bolero.representation import DMPBehavior as DMPBehaviorImpl
class DMPBehavior(BlackBoxBehavior):
    """Dynamical Movement Primitive.
    Parameters
    ----------
    execution_time : float, optional (default: 1)
        Execution time of the DMP in seconds.
    dt : float, optional (default: 0.01)
        Time between successive steps in seconds.
    n_features : int, optional (default: 50)
        Number of RBF features for each dimension of the DMP.
    configuration_file : string, optional (default: None)
        Name of a configuration file that should be used to initialize the DMP.
        If it is set all other arguments will be ignored.
    """
    def __init__(self, execution_time=1.0, dt=0.01, n_features=50,
                 configuration_file=None):
        # All DMP computation is delegated to the wrapped implementation.
        self.dmp = DMPBehaviorImpl(execution_time, dt, n_features,
                                   configuration_file)
    def init(self, n_inputs, n_outputs):
        """Initialize the behavior.
        Parameters
        ----------
        n_inputs : int
            number of inputs
        n_outputs : int
            number of outputs
        """
        # The wrapped DMP works on 3 values per joint (presumably position,
        # velocity and acceleration -- confirm with DMPBehaviorImpl docs).
        self.dmp.init(3 * n_inputs, 3 * n_outputs)
        self.n_joints = n_inputs
        # Internal state vector shared with the wrapped DMP; NaN until reset.
        self.x = np.empty(3 * self.n_joints)
        self.x[:] = np.nan
    def reset(self):
        """Reset the wrapped DMP and zero the internal state vector."""
        self.dmp.reset()
        self.x[:] = 0.0
    def set_inputs(self, inputs):
        # Callers supply only the first n_joints entries of the state.
        self.x[:self.n_joints] = inputs[:]
    def can_step(self):
        """Return whether another step can be executed (delegated)."""
        return self.dmp.can_step()
    def step(self):
        """Advance the wrapped DMP by one time step; self.x is updated
        in place with the new state."""
        self.dmp.set_inputs(self.x)
        self.dmp.step()
        self.dmp.get_outputs(self.x)
    def get_outputs(self, outputs):
        # Expose only the first n_joints entries of the state.
        outputs[:] = self.x[:self.n_joints]
    def get_n_params(self):
        """Return the number of optimizable parameters (delegated)."""
        return self.dmp.get_n_params()
    def get_params(self):
        """Return the current parameter vector (delegated)."""
        return self.dmp.get_params()
    def set_params(self, params):
        """Set the parameter vector (delegated)."""
        self.dmp.set_params(params)
    def set_meta_parameters(self, keys, values):
        """Set meta parameters such as the goal (delegated)."""
        self.dmp.set_meta_parameters(keys, values)
    def trajectory(self):
        """Return the trajectory generated by the DMP (delegated)."""
        return self.dmp.trajectory()
class DMPBehaviorWithGoalParams(DMPBehavior):
    """DMP variant whose optimizable parameters are the goal vector rather
    than the wrapped DMP's own parameters."""
    def __init__(self, goal, execution_time=1.0, dt=0.01, n_features=50,
                 configuration_file=None):
        super(DMPBehaviorWithGoalParams, self).__init__(
            execution_time, dt, n_features, configuration_file)
        # Copy so the caller's array is never mutated via set_params below.
        self.params = np.copy(goal)
    def set_meta_parameters(self, keys, values):
        self.dmp.set_meta_parameters(keys, values)
        # Re-apply the stored goal so the call above cannot override it.
        self.set_params(self.params)
    def get_n_params(self):
        """Return the number of goal dimensions."""
        return len(self.params)
    def get_params(self):
        """Return the current goal vector."""
        return self.params
    def set_params(self, params):
        # Copy in place, then push the new goal ("g") to the wrapped DMP.
        self.params[:] = params
        self.dmp.set_meta_parameters(["g"], [self.params])
| StarcoderdataPython |
1727762 | from django.core.management.base import BaseCommand
from common.utils import progress_bar
from intrinsic.models import IntrinsicImagesDecomposition
from intrinsic.tasks import upload_intrinsic_file
class Command(BaseCommand):
    """Management command: queue an async upload task for every intrinsic
    decomposition image file."""
    args = ''
    help = 'Upload images to EC2'
    def handle(self, *args, **options):
        # Pairs of (reflectance_image, shading_image) file names.
        rows = IntrinsicImagesDecomposition.objects.all() \
            .values_list('reflectance_image', 'shading_image')
        for names in progress_bar(rows):
            for n in names:
                # One async (celery .delay) upload task per file name.
                upload_intrinsic_file.delay(n)
| StarcoderdataPython |
19804 | <reponame>viditvarshney/100DaysOfCode
from logo import logo
def add(n1, n2):
    """Return the sum of the two operands."""
    total = n1 + n2
    return total
def multiply(n1, n2):
    """Return the product of the two operands."""
    product = n1 * n2
    return product
def subtract(n1, n2):
    """Return n1 minus n2."""
    difference = n1 - n2
    return difference
def divide(n1, n2):
    """Return n1 / n2 (true division; ZeroDivisionError propagates)."""
    quotient = n1 / n2
    return quotient
# Operator glyphs accepted from the user.
symbols = ['+', '-', '/', '*']
# Maps each glyph to the function implementing it.
operations = {'+': add, '-': subtract,
              '*': multiply, '/': divide}
def Calci():
    """Run one interactive calculator session on stdin/stdout."""
    print(logo)
    num1 = float(input("Enter 1st number: "))
    # Show the available operator glyphs.
    for key in operations:
        print(key)
    while True:
        choice = input("Choose an operation: ")
        if not choice in symbols:
            # NOTE(review): an invalid symbol exits the loop (and the
            # session) rather than re-prompting -- confirm that is intended.
            print("WARNING! Invalid Operation symbol: ")
            break
        num2 = float(input("Enter next number: "))
        # Dispatch via the glyph -> function table.
        calculation_func = operations[choice]
        result = calculation_func(num1, num2)
        print(f"{num1} {choice} {num2} = {result}")
        clear = input(
            f"Type 'y to continue with {result} or 'new' to start a new calculation 'n' to exit: ")
        if clear.casefold() == 'y':
            # Chain: reuse the result as the next left operand.
            num1 = result
        elif clear.casefold() == 'new':
            # NOTE(review): recursion grows the stack on every restart.
            Calci()
        else:
            print(f"Your final result is: {result}")
            break
break
# Launch the interactive session when the module is executed.
Calci()
| StarcoderdataPython |
3240398 | <reponame>ATLJoeReed/branch_data_engineering<gh_stars>0
#!/usr/bin/python3.9
# -*- coding: utf-8 -*-
import io
import os
import sys
import pandas as pd
from utils import helpers
def extract_election_results(logger):
    """Fetch the election summary archive, transform it into a dataframe,
    and load it into the database.

    Exits the process if a database connection cannot be established.
    """
    logger.info('Starting to extract election summary results')
    try:
        connection = helpers.get_database_connection()
        connection.set_session(autocommit=True)
    except Exception as e:
        logger.error(f"Getting database connection: {e}")
        sys.exit("Unable to get database connection")
    logger.info('Fetching summary results file from clarityelections.com')
    archive = helpers.fetch_summary_results(logger)
    last_updated = helpers.extract_last_updated(archive, logger)
    logger.info(f'Summary results last updated: {last_updated}')
    logger.info('Building summary results dataframe')
    raw_csv = io.BytesIO(archive.read('summary.csv'))
    frame = pd.read_csv(raw_csv, encoding='latin1')
    frame = helpers.transform_dataframe(frame)
    # Tag every row with the results' last-updated timestamp.
    frame.insert(0, 'results_last_updated', last_updated)
    helpers.load_summary_results(connection, frame, logger)
    connection.close()
if __name__ == '__main__':
    # Run from the script's own directory so relative paths (logs, config)
    # resolve the same way regardless of the caller's cwd.
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)
    os.chdir(dname)
    logger = helpers.setup_logger_stdout('extract_election_results')
    extract_election_results(logger)
| StarcoderdataPython |
1783346 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# FEDERAL UNIVERSITY OF UBERLANDIA
# Faculty of Electrical Engineering
# Biomedical Engineering Lab
# ------------------------------------------------------------------------------
# Author: <NAME>
# Contact: <EMAIL>
# Git: www.github.com/italogfernandes
# ------------------------------------------------------------------------------
# Description:
# ------------------------------------------------------------------------------
from pyqtgraph.Qt import QtCore
import pyqtgraph as pg
from pyqtgraph.ptime import time
from PyQt4.QtGui import QBrush, QColor, QPen
from numpy import clip
import numpy as np
import sys
if sys.version_info.major == 2:
from Queue import Queue
else:
from queue import Queue
# ------------------------------------------------------------------------------
class ArmSkeleton:
    """2-D stick model of an arm.

    Joints are [x, y] pairs: the shoulder sits at (0, 1), the elbow at the
    origin, and the hand at the end of a forearm of length 0.7 whose angle
    can be driven via :meth:`set_angle`.
    """

    def __init__(self):
        self.hand = [0, 0]
        self.elbow = [0, 0]
        self.shoulder = [0, 0]
        self.init_positions()

    def init_positions(self):
        """Reset the joints to the default pose (forearm horizontal)."""
        self.hand = [0.7, 0]
        self.elbow = [0, 0]
        self.shoulder = [0, 1]

    def set_angle(self, angle):
        """Rotate the forearm so the hand sits at *angle* degrees.

        0 degrees points the hand straight up; 90 degrees points it along
        the positive x axis.
        """
        forearm_length = 0.7
        theta = np.deg2rad(90 - angle)
        self.hand[0] = forearm_length * np.cos(theta)
        self.hand[1] = forearm_length * np.sin(theta)

    def get_x_values(self):
        """X coordinates ordered shoulder -> elbow -> hand."""
        return [joint[0] for joint in (self.shoulder, self.elbow, self.hand)]

    def get_y_values(self):
        """Y coordinates ordered shoulder -> elbow -> hand."""
        return [joint[1] for joint in (self.shoulder, self.elbow, self.hand)]
class ArmGraph:
    """Live pyqtgraph view of an :class:`ArmSkeleton`.

    Owns a PlotWidget, a curve for the three joints, and a QTimer that can
    drive :meth:`update` (the caller starts the timer).
    """

    def __init__(self, parent=None, app=None):
        self.app = app
        self.plotWidget = pg.PlotWidget(parent)
        self.arm = ArmSkeleton()
        self.configure_plot()
        self.curve = self.create_arm()
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.update)
        self.show_fps = True
        self.lastTime = 0
        # Bug fix: was 0, which made the `fps is None` seeding branch in
        # calculate_fps() unreachable and skewed the first smoothed values.
        self.fps = None

    def configure_pen(self):
        """Red pen used to draw the arm segments."""
        pen = QPen()
        pen.setColor(QColor.fromRgb(255, 10, 0))
        pen.setWidthF(0.025)
        return pen

    def configure_plot(self):
        """Apply area styling and the initial title."""
        self.configure_area()
        self.configure_title("Graph")

    def configure_area(self, x_title='Index', x_unit='', y_title='Values', y_unit=''):
        """Set grid, colors, axis ranges, and axis labels."""
        self.plotWidget.showGrid(True, True)
        # Colors (white background, black axes).
        # Bug fix: each axis pen was set twice with identical values; the
        # duplicate calls were redundant and have been removed.
        self.plotWidget.setBackgroundBrush(QBrush(QColor.fromRgb(255, 255, 255)))
        self.plotWidget.getAxis('left').setPen(QPen(QColor.fromRgb(0, 0, 0)))
        self.plotWidget.getAxis('bottom').setPen(QPen(QColor.fromRgb(0, 0, 0)))
        # Axis ranges sized for the arm's coordinate space.
        self.plotWidget.setXRange(-0.5, 0.9)
        self.plotWidget.setYRange(-0.5, 0.9)
        self.plotWidget.setLabel('bottom', x_title, units=x_unit)
        self.plotWidget.setLabel('left', y_title, units=y_unit)

    def configure_title(self, title="Graph"):
        """Set the plot title in black."""
        self.plotWidget.setTitle('<font color="black"> %s </font>' % title)

    def create_arm(self):
        """Create and return the joint curve (line + circular markers)."""
        return self.plotWidget.plot(self.arm.get_x_values(), self.arm.get_y_values(),
                                    pen=self.configure_pen(), symbol='o', symbolSize=25)

    def update(self):
        """Refresh the curve from the skeleton; optionally show FPS."""
        self.curve.setData(self.arm.get_x_values(), self.arm.get_y_values())
        if self.show_fps:
            self.calculate_fps()
            self.plotWidget.setTitle('<font color="red">%0.2f fps</font>' % self.fps)
        if self.app is not None:
            # Keep the GUI responsive when no event loop is spinning.
            self.app.processEvents()

    def calculate_fps(self):
        """Update the exponentially smoothed frames-per-second estimate."""
        now = time()
        dt = now - self.lastTime
        self.lastTime = now
        if self.fps is None:
            # First frame: seed the estimate directly.
            self.fps = 1.0 / dt
        else:
            # Smoothing factor grows with dt, capped at 1.
            s = clip(dt * 3., 0, 1)
            self.fps = self.fps * (1 - s) + (1.0 / dt) * s
def test():
    """Manual demo: open a window showing the arm plot."""
    import sys
    from PyQt4 import QtGui
    app = QtGui.QApplication(sys.argv)
    window = QtGui.QMainWindow()
    window.resize(800, 600)
    container = QtGui.QWidget(window)
    layout = QtGui.QVBoxLayout(container)
    graph = ArmGraph(parent=container)
    layout.addWidget(graph.plotWidget)
    window.setCentralWidget(container)
    window.show()
    app.exec_()


if __name__ == '__main__':
    test()
| StarcoderdataPython |
3229794 | # -*- coding: utf-8 -*-
import errno
import re
from datetime import datetime
from itertools import tee
from shutil import rmtree, copytree
import networkx
import jinja2
from pathlib2 import Path
STATIC_DIR = Path(__file__).parent/'static'
class RouteSchedule(object):
    """All data needed to render one route's page: agency, stops, shapes,
    and per-day-of-week service periods (consolidated when identical).

    NOTE: this module is Python 2 code (see ``iteritems``/list-``range``
    usage elsewhere in the file).
    """

    _DAYS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

    def __init__(self, gtfs, route, services):
        """Build the schedule for *route* from *gtfs*, considering only the
        given *services* (already filtered to the effective date)."""
        # Prefer the route's own agency; fall back to the feed default.
        self.agency = next((agency for agency in gtfs.GetAgencyList()
                            if agency.agency_id == route.agency_id),
                           gtfs.GetDefaultAgency())
        self.route = route
        all_trips = [trip for trip in gtfs.GetTripList()
                     if trip.route_id == route.route_id]
        # Flatten all trips' stop patterns into one stop set.
        self.stops = set(sum((list(trip.GetPattern()) for trip in all_trips), []))
        self.timepoints = set(sum((timepoint_stops(trip) for trip in all_trips), []))
        self.shapes = [gtfs.GetShape(shape_id)
                       for shape_id in set(trip.shape_id for trip in all_trips
                                           if trip.shape_id)]
        # Create a schedule for every day of the week.
        periods = []
        for i in range(7):
            # day_of_week == [0]*7 presumably marks date-exception-only
            # services -- TODO confirm against transitfeed semantics.
            service_ids = [service.service_id for service in services
                           if service.day_of_week[i] == 1
                           or service.day_of_week == [0]*7]
            trips = [trip for trip in all_trips if trip.service_id in service_ids]
            if len(trips) > 0:
                periods.append(ServicePeriod('', trips))
        # Consolidate similar schedules: days with equal timetables share
        # one ServicePeriod named after the day range.
        self.service_periods = []
        to_examine = range(len(periods))  # Python 2: range() is a list here.
        while len(to_examine) > 0:
            this = to_examine.pop(0)
            similar = [other for other in to_examine
                       if periods[this] == periods[other]]
            for other in similar:
                to_examine.remove(other)
            period = periods[this]
            day_of_week = [int(i == this or i in similar)
                           for i in range(len(periods))]
            period.rename(self._week_range(day_of_week))
            self.service_periods.append(period)

    def _week_range(self, day_of_week):
        """Turn a 7-element 0/1 mask into a label like 'Mon - Fri, Sun'."""
        cont_ranges = []
        range_begin = None
        for i, b in enumerate(day_of_week):
            if b and range_begin is None:
                range_begin = i
            elif not b and range_begin is not None:
                cont_ranges.append((range_begin, i - 1))
                range_begin = None
        # Close a range that runs through the last day.
        if range_begin is not None:
            cont_ranges.append((range_begin, i))

        def name(begin, end):
            # Single day -> 'Mon'; span -> 'Mon - Fri'.
            if begin == end:
                return RouteSchedule._DAYS[begin]
            else:
                return ('%s - %s'
                        % (RouteSchedule._DAYS[begin], RouteSchedule._DAYS[end]))

        if len(cont_ranges) == 0:
            return name(0, -1)
        else:
            return ', '.join(name(*cont_range) for cont_range in cont_ranges)
class ServicePeriod(object):
    """A named group of trips, split into up to two travel directions,
    each with its own Timetable.

    NOTE: uses Python 2-only tuple-parameter lambdas (``lambda (d, t): d``).
    """

    def __init__(self, name, trips):
        """Group *trips* by direction and build one Timetable per direction."""
        self.rename(name)
        directions = []
        for trip_list in self._separate(trips):
            # Label a direction by its headsign(s); fall back to the last
            # stop's name when no headsign is set.
            headsigns = set(trip.trip_headsign or trip.GetPattern()[-1].stop_name
                            for trip in trip_list)
            direction = '/'.join(sorted(headsigns))
            timetable = Timetable(trip_list)
            directions.append((direction, timetable))
        self.directions = sorted(directions, key=lambda (d, t): d)

    def rename(self, name):
        """Set the display name and a letters-only slug derived from it."""
        self.name = name
        self.slug = re.sub(r'[^a-zA-Z]', '', name)

    def _separate(self, trips):
        """Separate trips into up to two distinct directions. Rationale: Some
        feeds misuse the direction_id flag.

        Greedy merge: each trip joins the direction whose common timepoint
        sequence grows least when united with the trip's own sequence; if
        even the best fit would add nearly the whole sequence, the trip
        starts a new direction.
        """
        if len(trips) == 0:
            return []
        iter_trips = iter(trips)
        first_trip = next(iter_trips)
        directions = [
            (timepoint_stops(first_trip), [first_trip])
        ]
        for trip in iter_trips:
            trip_sequence = timepoint_stops(trip)
            direction_sequences = [unite(common_sequence, trip_sequence)
                                   for common_sequence, trips in directions]
            # How much each direction's sequence would grow by absorbing
            # this trip.
            direction_add_lengths = [len(direction_sequences[idx])
                                     - len(common_sequence)
                                     for idx, (common_sequence, trips)
                                     in enumerate(directions)]
            min_idx, min_add_length = min(enumerate(direction_add_lengths),
                                          key=lambda (i, v): v)
            if min_add_length >= len(trip_sequence) - 1:
                # Barely overlaps any existing direction: start a new one.
                directions.append((trip_sequence, [trip]))
            else:
                min_sequence, min_trips = directions[min_idx]
                directions[min_idx] = (direction_sequences[min_idx],
                                       min_trips + [trip])
        return [trip_list for sequence, trip_list in directions]

    def __eq__(self, other):
        # Two periods are equal when their direction labels and timetables
        # match; used to consolidate identical days of the week.
        return self.directions == other.directions
class Timetable(object):
    """Tabular schedule: a header of common timepoints plus one row of
    departure seconds per trip.

    Cells are either a time (seconds since midnight), NO_SERVICE (trip
    does not run past that part of the route), or SKIP (trip runs past
    but has no timepoint there).
    """

    # Sentinels compared by identity, never rendered as times.
    NO_SERVICE = object()
    SKIP = object()

    def __init__(self, trips):
        stop_times = {trip: [st for st in timepoint_stop_times(trip)]
                      for trip in trips}
        timepoints = {trip: [st.stop for st in trip_stop_times]
                      for trip, trip_stop_times in stop_times.iteritems()}
        # Find common timepoints for the header.
        self.timepoints = unite(*timepoints.values())
        # Populate rows.
        self.rows = []
        for trip in self._sort(trips):
            row = []
            timepoint = -1
            for stop_time in stop_times[trip]:
                # Position of this stop within the remaining header slots.
                slice_timepoint = (self.timepoints[timepoint + 1:]
                                   .index(stop_time.stop))
                if timepoint == -1:
                    # Leading header columns before the trip's first stop.
                    row = [Timetable.NO_SERVICE]*slice_timepoint
                else:
                    row += [Timetable.SKIP]*slice_timepoint
                row += [self._time(stop_time)]
                timepoint += slice_timepoint + 1
            # Pad out columns after the trip's last stop.
            row += [Timetable.NO_SERVICE]*(len(self.timepoints) - timepoint - 1)
            self.rows.append(row)

    def _sort(self, trips):
        """Order trips by departure time using a topological sort, so rows
        read top-to-bottom even when trips share no single timepoint."""
        # Create a directed graph where each node is a trip.
        graph = networkx.DiGraph()
        node_trip = {n: trip for n, trip in enumerate(trips)}
        trip_node = {trip: n for n, trip in enumerate(trips)}
        for node in node_trip.keys():
            graph.add_node(node)
        # Add edges to represent relative orderings at common timepoints.
        stop_times = {trip: [st for st in timepoint_stop_times(trip)]
                      for trip in trips}
        timepoints = {trip: [st.stop for st in trip_stop_times]
                      for trip, trip_stop_times in stop_times.iteritems()}
        for this_trip in trips:
            this_node = trip_node[this_trip]
            other_trips = list(trips)
            for stop_time in stop_times[this_trip]:
                for other_trip in (trip for trip in list(other_trips)
                                   if stop_time.stop in timepoints[trip]):
                    other_stop_time = next(st for st in stop_times[other_trip]
                                           if st.stop == stop_time.stop)
                    other_node = trip_node[other_trip]
                    # Only add an edge when it cannot create a cycle and
                    # is not already present.
                    if (self._time(stop_time) < self._time(other_stop_time)
                            and not networkx.has_path(graph, other_node, this_node)
                            and not graph.has_edge(this_node, other_node)):
                        graph.add_edge(this_node, other_node)
                    # Each pair is compared at most once, at their first
                    # shared timepoint.
                    # NOTE(review): indentation reconstructed -- confirm the
                    # removal sits outside the `if` above.
                    other_trips.remove(other_trip)
                if len(other_trips) == 0:
                    break
        # Sort the graph, recreate the list.
        topo_sort = networkx.topological_sort(graph)
        return [node_trip[node] for node in topo_sort]

    def _time(self, stop_time):
        # Prefer departure; fall back to arrival.
        return stop_time.departure_secs or stop_time.arrival_secs

    def __eq__(self, other):
        return self.timepoints == other.timepoints and self.rows == other.rows
def render(gtfs, date=None, outdir=Path('.')):
    """Render the whole timetable book for *gtfs* into *outdir*.

    gtfs: loaded transitfeed schedule object.
    date: datetime used to select effective service periods; defaults to
        "now" at call time.  (Bug fix: the previous default
        ``datetime.today()`` was evaluated once at function definition, so
        a long-running process kept using a stale date.)
    outdir: destination directory; it is cleared first.
    """
    if date is None:
        date = datetime.today()
    clear_out(outdir)
    env = jinja2.Environment(
        loader=jinja2.PackageLoader('busbook', 'templates'),
        autoescape=jinja2.select_autoescape(['html']),
        trim_blocks=True,
        lstrip_blocks=True)

    @jinja2.evalcontextfilter
    def make_breaks(eval_ctx, s):
        # Insert a zero-width space after break-friendly punctuation so
        # long route names can wrap.  The Markup literal below contains a
        # zero-width space character.
        res = re.sub(
            r'(&|@|-|/)', r'\1' + jinja2.Markup('​'), jinja2.escape(s))
        if eval_ctx.autoescape:
            res = jinja2.Markup(res)
        return res
    env.filters['break'] = make_breaks

    @jinja2.evalcontextfilter
    def format_time(eval_ctx, secs):
        # Format seconds-since-midnight as a 12-hour clock, wrapping past
        # 24h (GTFS allows times like 25:30 for after-midnight service).
        hours = secs // 3600 % 24
        minutes = secs // 60 % 60
        if hours == 0:
            s = '12:%02d' % minutes
        elif hours > 12:
            s = '%d:%02d' % (hours - 12, minutes)
        else:
            s = '%d:%02d' % (hours, minutes)
        if hours < 12:
            res = jinja2.Markup('<span class="time-am">%s</span>' % s)
        else:
            res = jinja2.Markup('<span class="time-pm">%s</span>' % s)
        if eval_ctx.autoescape:
            res = jinja2.Markup(res)
        return res
    env.filters['time'] = format_time

    def route_css(route):
        # Emit CSS custom properties for routes that declare colors.
        if route.route_color and route.route_text_color:
            return ('--route-color: #%s; --route-text-color: #%s;'
                    % (route.route_color, route.route_text_color))
        elif route.route_color:
            return '--route-color: #%s;' % route.route_color
        else:
            return ''
    env.filters['route_css'] = route_css

    render_index(env, gtfs, outdir=outdir)
    for route in gtfs.GetRouteList():
        render_route(env, gtfs, effective_services(gtfs, date), route,
                     outdir=outdir)
def clear_out(path):
    """Delete *path* (if present) and repopulate it with the static assets."""
    target = str(path.resolve())
    try:
        rmtree(target)
    except OSError as err:
        # A missing directory is fine; anything else is a real error.
        if err.errno != errno.ENOENT:
            raise
    copytree(str(STATIC_DIR), str(path/'static'))
def effective_services(gtfs, date):
    """Return the service periods of *gtfs* active on *date*.

    Periods with no start date are always considered active; otherwise the
    period's end date is extended to 23:59 so the whole last day counts.
    """
    def _parse(datestr):
        return datetime.strptime(datestr, '%Y%m%d')

    active = []
    for sp in gtfs.GetServicePeriodList():
        if sp.start_date is None:
            active.append(sp)
            continue
        starts = _parse(sp.start_date)
        ends = _parse(sp.end_date).replace(hour=23, minute=59)
        if starts <= date <= ends:
            active.append(sp)
    return active
def render_index(env, gtfs, outdir=Path('.')):
    """Render the index page listing every agency's routes."""
    def get_routes(agency_id):
        """Routes for one agency, sorted by route_id; routes with a blank
        agency_id are attributed to the feed's default agency."""
        all_routes = gtfs.GetRouteList()
        routes = [r for r in all_routes if r.agency_id == agency_id]
        default_agency = gtfs.GetDefaultAgency()
        if agency_id == default_agency.agency_id:
            routes += [r for r in all_routes if r.agency_id == '']
        return sorted(routes, key=lambda r: r.route_id)

    agency_names = ', '.join(agency.agency_name
                             for agency in gtfs.GetAgencyList())
    page = env.get_template('index.html').render(
        gtfs=gtfs,
        agencies=agency_names,
        get_routes=get_routes)
    write_out(outdir/'index.html', page)
def render_route(env, gtfs, service_periods, route, outdir=Path('.')):
    """Render the timetable page for one route, or warn when it has no
    scheduled service."""
    label = (route.route_short_name, route.route_long_name)
    if not service_periods:
        print('WARNING: No service scheduled for %s %s.' % label)
        return
    print('Processing %s %s.' % label)
    schedule = RouteSchedule(gtfs, route, services=service_periods)
    filename = '%s-%s.html' % (schedule.agency.agency_id, route.route_id)
    page = env.get_template('route.html').render(
        gtfs=gtfs,
        schedule=schedule,
        Timetable=Timetable)
    write_out(outdir/'routes'/filename, page)
def timepoint_stops(trip):
    """Return just the stops (not the stop-time records) of *trip*'s
    timepoints, in travel order."""
    return [st.stop for st in timepoint_stop_times(trip)]
def timepoint_stop_times(trip):
    """Return the stop times of *trip* that qualify as timepoints.

    Two strategies are tried in order; the first whose precondition holds
    and that yields at least two timepoints wins.  Otherwise every stop
    time is returned.

    1. Use the GTFS ``timepoint`` flag when every stop time sets it.
    2. Treat "round" times (seconds == 00) as human-scheduled timepoints,
       but only if at least one time is not round (else all stops match).
    """
    def _flag_precondition(stop_times):
        return all(st.timepoint is not None for st in stop_times)

    def _flag_classifier(stop_time):
        return stop_time.timepoint == 1

    def _human_precondition(stop_times):
        return any(st.departure_time[-2:] != '00' for st in stop_times)

    def _human_classifier(stop_time):
        return ((stop_time.arrival_time is not None
                 and stop_time.arrival_time[-2:] == '00')
                or (stop_time.departure_time is not None
                    and stop_time.departure_time[-2:] == '00'))

    strategies = [
        (_flag_precondition, _flag_classifier),
        (_human_precondition, _human_classifier),
    ]
    # Assume GetStopTimes() returns stops in order (see trip.GetPattern).
    stop_times = trip.GetStopTimes()
    for precondition, classifier in strategies:
        if not precondition(stop_times):
            continue
        candidates = [st for st in stop_times if classifier(st)]
        if len(candidates) >= 2:
            return candidates
    return stop_times
def unite(*sequences):
    """Merge several ordered sequences into one sequence that preserves
    every input's relative ordering (a topological merge).

    Duplicate items may appear as separate nodes when reusing an existing
    node would create a cycle.  Python 2 only (``iteritems``, builtin
    ``reduce``).
    """
    # Create a directed graph where each node is an item in a sequence.
    graph = networkx.DiGraph()
    # Add edges to represent relative orderings at common items.
    all_items = reduce(lambda s1, s2: s1.union(s2),
                       (set(sequence) for sequence in sequences))
    node_item = {n: item for n, item in enumerate(all_items)}
    for node in node_item.keys():
        graph.add_node(node)
    next_node = len(all_items)
    for sequence in sequences:
        items = iter(sequence)
        try:
            first_item = next(items)
        except StopIteration:
            # NOTE(review): an empty sequence stops processing ALL
            # remaining sequences; `continue` may have been intended.
            break
        last_node = min(node for node, item in node_item.iteritems()
                        if item == first_item)
        for this_item in items:
            try:
                # Reuse the smallest matching node that would not create
                # a cycle back to the previous node.
                new_node = min(node for node, item in node_item.iteritems()
                               if item == this_item
                               and not networkx.has_path(graph, node, last_node))
            except ValueError:  # No node found.
                # Allocate a fresh duplicate node for this item.
                new_node = next_node
                node_item[next_node] = this_item
                next_node += 1
            if not graph.has_edge(last_node, new_node):
                graph.add_edge(last_node, new_node)
            last_node = new_node
    # Sort the graph, return the sorted list.
    topo_sort = networkx.topological_sort(graph)
    return [node_item[node] for node in topo_sort]
def write_out(path, contents):
    """Write *contents* (text) to *path*, creating the immediate parent
    directory if it does not exist yet."""
    parent = path.parent
    parent.mkdir(exist_ok=True)
    target = path.resolve()
    with target.open('wt') as handle:
        handle.write(contents)
| StarcoderdataPython |
4807994 | import inspect
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Tuple
from openff.toolkit.typing.engines.smirnoff import (
ForceField,
ParameterAttribute,
ParameterHandler,
)
from openff.toolkit.utils import all_subclasses
from openff.units import unit
from openff.units.openmm import from_openmm
from interchange_regression_utilities.models import Perturbation
from interchange_regression_utilities.utilities import use_openff_units
# Handler class names excluded from attribute enumeration.
IGNORED_HANDLERS = [
    # Abstract classes.
    "ParameterHandler",
    "_NonbondedHandler",
]
# Attribute names excluded because they do not influence the produced
# OpenMM system.
IGNORED_ATTRIBUTES = [
    # No effect on OpenMM system.
    "version",
    "smirks",
    "id",
    "name",
    "parent_id",
]
# Signature of a perturbation callback: (attribute_path, old_value) ->
# (new_value, success_flag).
PerturbationFunc = Callable[[str, Any], Tuple[Any, bool]]
def get_parameter_attributes(parent_class) -> Dict[str, List[str]]:
    """Collect the ParameterAttribute descriptors defined on *parent_class*
    (walking its MRO), grouped by attribute class name.

    Attributes listed in IGNORED_ATTRIBUTES are skipped.
    """
    # Walk the MRO base-first so subclasses override inherited descriptors.
    descriptors = {
        name: descriptor
        for klass in reversed(inspect.getmro(parent_class))
        for name, descriptor in klass.__dict__.items()
        if isinstance(descriptor, ParameterAttribute)
    }
    grouped = defaultdict(list)
    for name, descriptor in descriptors.items():
        if name not in IGNORED_ATTRIBUTES:
            grouped[descriptor.__class__.__name__].append(name)
    return grouped
def get_all_attributes():
    """Enumerate every perturbable attribute of every built-in SMIRNOFF
    parameter handler, grouped by attribute class name.

    Returns a plain dict mapping attribute type name to a list of path
    tuples: ``(handler_name, attr)`` for handler-level attributes and
    ``(handler_name, tag_name, attr)`` for per-parameter attributes.
    """
    built_in_handler_classes = all_subclasses(ParameterHandler)
    attributes_by_type = defaultdict(list)
    for handler_class in built_in_handler_classes:
        handler_name = handler_class.__name__
        if handler_name in IGNORED_HANDLERS:
            continue
        # Handler-level attributes: (handler, attr) pairs.
        for attribute_type, attributes in get_parameter_attributes(
            handler_class
        ).items():
            attributes_by_type[attribute_type].extend(
                (handler_name, name) for name in attributes
            )
        # _INFOTYPE is the per-parameter class (may be absent).
        parameter_class = handler_class._INFOTYPE
        if parameter_class is None:
            continue
        for attribute_type, attributes in get_parameter_attributes(
            parameter_class
        ).items():
            attributes_by_type[attribute_type].extend(
                (handler_name, handler_class._TAGNAME, name) for name in attributes
            )
    # Convert the defaultdict into a plain dict for the caller.
    return {**attributes_by_type}
def default_perturbation(path: str, old_value: Any) -> Tuple[Any, bool]:
    """Default perturbation: bump a numeric value by one unit.

    Returns ``(new_value, True)`` on success or ``(None, False)`` when the
    value cannot be perturbed numerically.
    """
    # Special case: unset constraint distances get an arbitrary length.
    if path == "ConstraintHandler/Constraints/distance" and old_value is None:
        return 0.1234 * unit.angstrom, True
    # Strings and other unset values cannot be perturbed numerically.
    if isinstance(old_value, str) or old_value is None:
        return None, False
    # Step by 1.0 in the value's own units (or a plain 1.0 for bare numbers).
    if isinstance(old_value, unit.Quantity):
        step = 1.0 * old_value.units
    else:
        step = 1.0
    return old_value + step, True
def enumerate_perturbations(
    force_field: ForceField, perturbation_func: Optional[PerturbationFunc] = None
) -> Tuple[List[Perturbation], List[str]]:
    """Build one Perturbation per perturbable attribute of *force_field*.

    perturbation_func: optional callback ``(path, old_value) ->
        (new_value, success)``; defaults to :func:`default_perturbation`.
    Returns ``(perturbations, warning_messages)`` where warnings describe
    every attribute that had to be skipped.
    """
    handlers_by_type = {
        handler.__class__.__name__: handler
        for handler in force_field._parameter_handlers.values()
    }
    attributes_by_type = get_all_attributes()
    perturbations = []
    warning_messages = []
    for attribute_type, attributes in attributes_by_type.items():
        for attribute_path_split in attributes:
            # Path tuples are (handler, attr) or (handler, tag, attr).
            handler_type, *parameter_types, attribute_name = attribute_path_split
            attribute_path = "/".join([handler_type, *parameter_types, attribute_name])
            if attribute_type not in {
                "ParameterAttribute",
                "IndexedParameterAttribute",
            }:
                warning_messages.append(
                    f"skipping {attribute_path} - unsupported attribute type of "
                    f"{attribute_type}"
                )
                continue
            handler = handlers_by_type.get(handler_type, None)
            if handler is None:
                warning_messages.append(
                    f"skipping {attribute_path} - {handler_type} not in force field"
                )
                continue
            # Handler-level attribute lives on the handler itself; a
            # per-parameter attribute is read from the first parameter.
            attribute_parent = (
                handler if len(attribute_path_split) == 2 else handler.parameters[0]
            )
            default_to_none = False
            if attribute_type == "IndexedParameterAttribute":
                # Indexed attributes are addressed by their first element
                # ("k" -> "k1"); remember whether the base value was unset.
                default_to_none = getattr(attribute_parent, attribute_name) is None
                attribute_name = f"{attribute_name}1"
                attribute_path = "/".join(
                    [handler_type, *parameter_types, attribute_name]
                )
            if default_to_none:
                # Indexed attributes whose values are currently not set
                old_value = None
            else:
                old_value = getattr(attribute_parent, attribute_name)
                if not use_openff_units():
                    # Normalize OpenMM quantities to openff.units ones.
                    from openmm import unit as openmm_unit
                    if isinstance(old_value, openmm_unit.Quantity):
                        old_value = from_openmm(old_value)
            if perturbation_func is None:
                new_value, successful = default_perturbation(attribute_path, old_value)
                if not successful:
                    warning_messages.append(
                        f"skipping {attribute_path} - can only perturb numeric values "
                        f"using the default perturbation function"
                    )
                    continue
            else:
                new_value, successful = perturbation_func(attribute_path, old_value)
                if not successful:
                    warning_messages.append(
                        f"skipping {attribute_path} - could not perturb with custom "
                        f"function"
                    )
                    continue
            # Split a Quantity into magnitude + unit string for the model.
            if isinstance(new_value, unit.Quantity):
                expected_unit = f"{new_value.units:D}"
                new_value = new_value.m_as(new_value.units)
            else:
                expected_unit = None
            perturbations.append(
                Perturbation(
                    path=attribute_path,
                    new_value=new_value,
                    new_units=expected_unit,
                )
            )
    return perturbations, warning_messages
| StarcoderdataPython |
1799379 | from enum import Enum
import copy
from abc import ABC, abstractmethod
import numbers
from itertools import count
import numpy as np
import scipy
class Type(Enum):
    """Kind of a hyperparameter's domain.

    The single-character values are concatenated into
    ``Configuration.kde_vartypes`` (presumably a KDE vartype string --
    'c' continuous, 'o' ordered/discrete; confirm against the KDE library
    used downstream).
    """
    Continuous = 'c'
    Discrete = 'o'
class DuplicateHyperparameterError(Exception):
    """Raised when two active hyperparameters share the same name."""
    pass
class MissingHyperparameterError(Exception):
    """Raised when a named hyperparameter has no active variant because no
    condition matched (the default case was not implemented)."""
    pass
class Configuration:
    """A concrete assignment of hyperparameters, resolving conditionals.

    Hyperparameters are processed in creation order (``_init_idx``); a
    hyperparameter whose condition evaluates false against the partially
    built configuration is dropped.  Every name must end up with exactly
    one active variant.
    """

    def __init__(self, hyperparameters):
        """Build the active set from *hyperparameters*.

        Raises DuplicateHyperparameterError when two active variants share
        a name, MissingHyperparameterError when a name has no active
        variant.
        """
        # Evaluate in creation order so conditions can reference
        # hyperparameters created earlier.
        idxs = np.argsort([x._init_idx for x in hyperparameters])
        hyperparameters = np.array(hyperparameters)[idxs]
        self.hyperparameters = []
        self.hyperparameter_map = {}
        self.max_length = 0  # longest name, used to align __str__ output
        self.kde_vartypes = ''  # one vartype char per active hyperparameter
        names = set()
        for hyperparameter in hyperparameters:
            names.add(hyperparameter.name)
            length = len(hyperparameter.name)
            if length > self.max_length:
                self.max_length = length
            if hyperparameter.cond is not None:
                # Conditions see only the hyperparameters added so far.
                if not hyperparameter.cond.compare(self):
                    continue
            if hyperparameter.name in self.hyperparameter_map:
                raise DuplicateHyperparameterError(
                    f'Conflicting Hyperparameter: {hyperparameter.name}')
            self.hyperparameter_map[hyperparameter.name] = hyperparameter
            self.hyperparameters.append(hyperparameter)
            self.kde_vartypes += hyperparameter.vartype
        # Every declared name must have exactly one active variant.
        missing = names - set(self.hyperparameter_map)
        if len(missing):
            raise MissingHyperparameterError(
                f'Parameters: {missing} are missing. '
                'Implement the default case if using conditions.\n'
                'E.g.\nparameter = UniformHyperparameter("paramater", 0, 10, a == b)\n'
                'not_parameter = UniformHyperparameter("paramater", 0, 0, '
                '~parameter.cond)')

    def to_dict(self):
        """Map name -> value for hyperparameters not flagged ``dont_pass``."""
        config = {}
        for hyperparameter in self.hyperparameters:
            if not hyperparameter.dont_pass:
                config[hyperparameter.name] = hyperparameter.value
        return config

    def to_list(self):
        """Numeric encoding: raw value for continuous, choice index for
        discrete hyperparameters."""
        array = []
        for hyperparameter in self.hyperparameters:
            if hyperparameter.type == Type.Continuous:
                array.append(hyperparameter.value)
            elif hyperparameter.type == Type.Discrete:
                array.append(hyperparameter.index)
            else:
                raise NotImplementedError
        return array

    def __getitem__(self, idx):
        return self.hyperparameters[idx]

    def __str__(self):
        string = ["Configuration:\n"]
        for hyperparameter in self.hyperparameters:
            string.append(
                (f'{"Name:":>8} {hyperparameter.name: <{self.max_length}} | '
                 f"Value: {hyperparameter.value}\n").ljust(10))
        return ''.join(string)
class Hyperparameter(ABC):
    """Abstract base of all hyperparameters.

    Comparison operators deliberately return :class:`Condition` objects
    (not booleans) so expressions like ``a == b`` or ``x > 3`` can be
    passed as the ``cond`` of another hyperparameter.  This makes
    instances unorderable/unhashable in the usual sense by design.
    """

    # Class-wide counter recording creation order (used by Configuration
    # to evaluate conditions in a deterministic order).
    _init_count = count()

    def __init__(self, name, value, cond=None, dont_pass=False):
        # _value is set first so subclasses' `value` property setters have
        # a slot to write into.
        self._value = None
        self.name = name
        self.value = value
        self.cond = cond
        self._init_idx = next(Hyperparameter._init_count)
        # dont_pass: exclude from Configuration.to_dict() output.
        self.dont_pass = dont_pass

    def new(self, value=None):
        """Return a deep copy, optionally with a new value.

        NOTE(review): a copy with value ``None`` keeps the old value,
        since ``None`` doubles as "no value given".
        """
        new_hyperparameter = copy.deepcopy(self)
        if value is not None:
            new_hyperparameter.value = value
        return new_hyperparameter

    @abstractmethod
    def sample(self):
        """Return a new instance with a randomly drawn value."""
        ...

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, type):
        # Keep the single-character KDE vartype in sync with the enum.
        self.vartype = type.value
        self._type = type

    def __eq__(self, other):
        # Returns a Condition, NOT a bool (see class docstring).
        if isinstance(other, Hyperparameter):
            return Condition(
                lambda configs: (configs[self.name].value == other.value))
        else:
            return Condition(
                lambda configs: (configs[self.name].value == other))

    def __lt__(self, other):
        if isinstance(other, numbers.Number):
            return Condition(
                lambda configs: (configs[self.name].value < other))
        elif isinstance(other, Hyperparameter):
            return Condition(
                lambda configs: (configs[self.name].value < other.value))
        else:
            raise NotImplementedError

    def __le__(self, other):
        if isinstance(other, numbers.Number):
            return Condition(
                lambda configs: (configs[self.name].value <= other))
        elif isinstance(other, Hyperparameter):
            return Condition(
                lambda configs: (configs[self.name].value <= other.value))
        else:
            raise NotImplementedError

    def __ne__(self, other):
        if isinstance(other, Hyperparameter):
            return Condition(
                lambda configs: (configs[self.name].value != other.value))
        else:
            return Condition(
                lambda configs: (configs[self.name].value != other))

    def __gt__(self, other):
        if isinstance(other, numbers.Number):
            return Condition(
                lambda configs: (configs[self.name].value > other))
        elif isinstance(other, Hyperparameter):
            return Condition(
                lambda configs: (configs[self.name].value > other.value))
        else:
            raise NotImplementedError

    def __ge__(self, other):
        if isinstance(other, numbers.Number):
            return Condition(
                lambda configs: (configs[self.name].value >= other))
        elif isinstance(other, Hyperparameter):
            return Condition(
                lambda configs: (configs[self.name].value >= other.value))
        else:
            raise NotImplementedError
class ConfigurationSpace:
    """A set of hyperparameters plus an RNG for drawing configurations."""

    def __init__(self, hyperparameters, seed=None):
        """Store *hyperparameters* and merge the choice lists of discrete
        hyperparameters that share a name (conditional variants), so their
        indices refer to one common choice list.

        NOTE: this mutates the passed hyperparameters' ``_choices`` in
        place (their public ``choices`` keep the per-variant lists).
        """
        self.hyperparameters = hyperparameters
        self.rng = np.random.default_rng(seed)
        discrete_map = {}
        for hyperparameter in self.hyperparameters:
            if hyperparameter.type == Type.Discrete:
                if hyperparameter.name in discrete_map:
                    # Union of both variants' choices, sorted/deduped by
                    # np.unique, shared by every variant of this name.
                    m = list(np.unique(discrete_map[hyperparameter.name]._choices +
                                       hyperparameter.choices))
                    discrete_map[hyperparameter.name]._choices = m
                    hyperparameter._choices = m
                else:
                    discrete_map[hyperparameter.name] = hyperparameter

    def sample_configuration(self):
        """Draw one value per hyperparameter and build a Configuration
        (which resolves any conditionals)."""
        hyperparameters = []
        for hyperparameter in self.hyperparameters:
            hyperparameters.append(hyperparameter.sample(self.rng))
        return Configuration(hyperparameters)

    def __len__(self):
        return len(self.hyperparameters)
class Condition:
    """A predicate over a configuration's hyperparameter map.

    Wraps a callable taking ``{name: hyperparameter}`` and supports
    boolean composition via ``&``, ``|``, and ``~``.
    """

    def __init__(self, comp):
        self.comp = comp

    def compare(self, configuration):
        """Evaluate the predicate against *configuration*."""
        return self.comp(configuration.hyperparameter_map)

    def __and__(self, other):
        def both(configs):
            return self.comp(configs) and other.comp(configs)
        return Condition(both)

    def __or__(self, other):
        def either(configs):
            return self.comp(configs) or other.comp(configs)
        return Condition(either)

    def __invert__(self):
        def negated(configs):
            return not self.comp(configs)
        return Condition(negated)
class UniformHyperparameter(Hyperparameter):
    """Continuous hyperparameter uniform on [lower, upper], optionally
    sampled uniformly in log space.

    NOTE: statement order in __init__ matters -- ``_lower``/``_upper``
    must exist before ``super().__init__`` triggers the ``value`` setter,
    which clamps against them.
    """

    def __init__(self, name, lower, upper, cond=None, log=False, dont_pass=False):
        self.type = Type.Continuous
        # Original-scale bounds, used for clamping in the value setter.
        self._lower = lower
        self._upper = upper
        # Sampling-scale bounds (log space when log=True).
        self.lower = np.log(lower) if log else lower
        self.upper = np.log(upper) if log else upper
        self.log = log
        # Initial value: midpoint in sampling space, mapped back.
        value = (self.lower + self.upper) / 2
        super().__init__(name, np.exp(value) if log else value, cond, dont_pass)

    def sample(self, rng):
        """Return a copy with a value drawn uniformly (in sampling space)."""
        value = rng.uniform(self.lower, self.upper)
        return self.new(np.exp(value) if self.log else value)

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        # Clamp to the original-scale bounds.
        self._value = min(max(self._lower, value), self._upper)
class IntegerUniformHyperparameter(UniformHyperparameter):
    """UniformHyperparameter whose value is rounded to the nearest int
    after clamping."""

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        # Clamp first, then round to an integer.
        self._value = int(round(min(max(self._lower, value), self._upper)))
class NormalHyperparameter(Hyperparameter):
    """Continuous hyperparameter drawn from a normal distribution with the
    given mean and standard deviation; the mean is the initial value."""

    def __init__(self, name, mean, sigma, cond=None, dont_pass=False):
        self.type = Type.Continuous
        self.mean = mean
        self.sigma = sigma
        super().__init__(name, self.mean, cond, dont_pass)

    def sample(self, rng):
        """Return a copy with a value drawn from N(mean, sigma)."""
        drawn = rng.normal(self.mean, self.sigma)
        return self.new(drawn)
class IntegerNormalHyperparameter(NormalHyperparameter):
    """Normal hyperparameter truncated around the mean, rounded to int.

    NOTE(review): ``truncnorm(a=-sigma, b=sigma, ...)`` passes *a*/*b* in
    standardized units, so the truncation interval is mean +/- sigma**2 in
    real units -- possibly intended to be ``a=-1, b=1`` (mean +/- sigma);
    confirm before relying on the tails.
    """

    def __init__(self, name, mean, sigma, cond=None, dont_pass=False):
        # Frozen truncated-normal distribution used for sampling.
        self.rv = scipy.stats.truncnorm(a=-sigma, b=sigma, scale=sigma, loc=mean)
        super().__init__(name, mean, sigma, cond, dont_pass)

    def sample(self, rng):
        """Return a copy with a value drawn from the truncated normal."""
        return self.new(self.rv.rvs(random_state=rng))

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        # Round to the nearest integer (no clamping beyond the truncation).
        self._value = int(round(value))
class CategoricalHyperparameter(Hyperparameter):
    """Discrete hyperparameter over a list of choices.

    ``choices`` keeps this variant's own options; ``_choices`` may later be
    replaced by ConfigurationSpace with a merged list shared across
    same-named variants, so ``value``/``index`` always refer to
    ``_choices``.
    """

    def __init__(self, name, choices, cond=None, dont_pass=False):
        self.type = Type.Discrete
        self.index = 0
        self.choices = choices
        self._choices = choices
        # The stored "value" passed up is the index; the value setter maps
        # it through _choices.
        super().__init__(name, self.index, cond, dont_pass)

    def sample(self, rng):
        """Return a copy with a choice drawn uniformly from ``choices``,
        encoded as an index into the (possibly merged) ``_choices``."""
        index = rng.integers(0, len(self.choices))
        if len(self._choices) == len(self.choices):
            _index = index
        else:
            # _choices was merged: translate the local index.
            _index = self._choices.index(self.choices[index])
        return self.new(_index)

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, index):
        # Setting "value" actually sets the index; the value is looked up.
        self.index = index
        self._value = self._choices[index]
| StarcoderdataPython |
3337108 | <filename>ex022/ex022.py
# Text-analysis exercise: read a name and print it in upper case, lower
# case, and its letter count (spaces excluded).  User-facing strings are
# in Portuguese and are intentionally left untranslated.
print('-=-=- Analisando de Textos -=-=-')
nome = str(input('Digite seu nome aqui: ')).strip()
print(f'Seu nome em letra maiúscula é {nome.upper()}')
print(f'Seu nome em letras minúsculas é {nome.lower()}')
# Letter count: total length minus the number of spaces.
print('Seu nome tem ao todo {} letras'.format(len(nome) - nome.count(' ')))
# Commented-out draft (incomplete f-string placeholders -- would raise if
# enabled as written):
#print(f'Seu primeiro nome é {} e ele tem [{}] letras')
1615161 | import pytest
from django.urls import reverse
@pytest.mark.django_db(transaction=True, reset_sequences=True)
def test_healthcheck(app):
    """The healthcheck endpoint reports success when all checks pass."""
    url = reverse("healthcheck")
    response = app.get(url).json
    assert response["detail"] == "Everything works"
@pytest.mark.django_db(transaction=True, reset_sequences=True)
def test_healthcheck_error(app, mock_connection_error):
    """With the connection mocked to fail, hitting the endpoint raises.

    NOTE(review): `pytest.raises(Exception)` is broad -- any error passes;
    consider narrowing to the specific exception the view propagates.
    """
    url = reverse("healthcheck")
    with pytest.raises(Exception):
        app.get(url)
| StarcoderdataPython |
127433 | """Example demonstrating a basic usage of choke package."""
from time import sleep
from redis import StrictRedis
from choke import RedisChokeManager, CallLimitExceededError
# Redis connection backing the rate limiter.
REDIS = StrictRedis()  # Tweak this to reflect your setup
# Choke manager that stores call counters in Redis.
CHOKE_MANAGER = RedisChokeManager(redis=REDIS)
# Example configuration: enforce limit of no more than 10 calls in two seconds window
# Example configuration: enforce limit of no more than 10 calls in two seconds window
@CHOKE_MANAGER.choke(limit=10, window_length=2)
def foo(x, y):
    """Just print something to show that foo was called.

    Raises CallLimitExceededError (via the decorator) when the rate limit
    is exceeded.
    """
    print(f'foo called with ({x}, {y})')
if __name__ == '__main__':
    # We expect pattern of 10 successes followed by 10 failures followed again by 10 successes
    # Some deviations from this pattern may obviously occur as calling foo takes nonzero time
    # (30 calls at 0.1 s apart span three 2-second limit windows).
    for i in range(30):
        try:
            foo(i, y=i ** 2)
        except CallLimitExceededError:
            print('Foo not called. Limit exceeded!')
        sleep(0.1)
| StarcoderdataPython |
3272602 | import logging
import os
# Root logger at INFO so Lambda captures the invocation details.
logger = logging.getLogger()
logger.setLevel(logging.INFO)


def handler(event, ctx):
    """AWS Lambda entry point: log the invocation context and environment,
    then return a fixed 200 response."""
    logger.info("Handling as %s", ctx.function_name)
    logger.info("env %s", os.environ)
    response = {
        "statusCode": 200,
        "body": "hello python"
    }
    return response
1773203 | <gh_stars>0
def superDigit(n, k):
    """Return the super digit of the number formed by concatenating the
    digit string *n* *k* times.

    The super digit of a one-digit number is that digit; otherwise it is
    the super digit of the sum of its digits.
    """
    # Bug fix: the original base case fired for any single-digit n, even
    # with k > 1 (e.g. superDigit('5', 2) returned 5 instead of the super
    # digit of '55', which is 1).  Only short-circuit once k is folded in.
    if len(n) == 1 and k == 1:
        return int(n)
    # Summing n's digits once and multiplying by k equals summing the
    # digits of n repeated k times.
    digit_sum = sum(int(digit) for digit in n) * k
    return superDigit(str(digit_sum), 1)
# Test cases (expected super digits):
print(superDigit('123', 3))   # 6*3 = 18 -> 9
print(superDigit('148', 3))   # 13*3 = 39 -> 12 -> 3
print(superDigit('9875', 4))  # 29*4 = 116 -> 8
| StarcoderdataPython |
3344449 | <reponame>jadarve/lluvia<filename>lluvia/bazel/node/macros.bzl
"""
"""
load("@rules_vulkan//glsl:defs.bzl", "glsl_shader")
load("@rules_pkg//:pkg.bzl", "pkg_zip")
load("@rules_pkg//experimental:pkg_filegroup.bzl", "pkg_filegroup")
def ll_node(
        name,
        shader,
        builder,
        deps = None,
        visibility = None):
    """
    Declares a new node.

    Creates three targets: a compiled shader (`<name>_shader`), a
    pkg_filegroup `<name>` bundling the shader and the builder for
    packaging, and a plain filegroup `<name>_runfiles` for runtime use.

    Args:
        name: base name for the generated targets.
        shader: GLSL source file compiled via glsl_shader.
        builder: the node's builder script, packaged alongside the
            compiled shader.
        deps: dependencies forwarded to the shader compilation.
        visibility: Bazel visibility applied to all generated targets.
    """
    shader_name = name + "_shader"
    glsl_shader(
        name = shader_name,
        shader = shader,
        deps = deps,
        visibility = visibility,
    )
    pkg_filegroup(
        name = name,
        srcs = [
            shader_name,
            builder,
        ],
        visibility = visibility,
    )
    native.filegroup(
        name = name + "_runfiles",
        srcs = [
            shader_name,
            builder,
        ],
        visibility = visibility,
    )
def ll_node_library(
        name,
        nodes = [],
        strip_prefix = "",
        visibility = None):
    """
    Declares a node library: a zip archive bundling several ll_node targets.

    Args:
        name: name of the library (the pkg_zip target).
        nodes: list of ll_node targets to include in the archive.
        strip_prefix: path prefix stripped from entries inside the zip.
        visibility: Bazel visibility of the zip target.
    """
    pkg_zip(
        name = name,
        strip_prefix = strip_prefix,
        srcs = nodes,
        visibility = visibility,
    )
| StarcoderdataPython |
142731 | import os
from typing import List
from asm_utils import hex_to_bin
def read_obj(obj_file: str) -> List[str]:
    """Reads object file and returns list of instructions

    Parameters
    ----------
    obj_file : str
        path to object file

    Returns
    -------
    List[str]
        list of instructions in the form of binary arrays

    Raises
    ------
    FileNotFoundError
        raises FileNotFoundError if object file does not exist at supplied path
    """
    # ensure that file exists
    if not os.path.isfile(obj_file):
        raise FileNotFoundError(f"No object file found at {obj_file}, ensure that your path is correct")
    # Load hex instructions into memory. splitlines() strips line endings
    # reliably; the previous `line[:-1]` chopped the final hex digit off the
    # last instruction whenever the file did not end with a newline.
    with open(obj_file, "r") as obj_input:
        hex_instructions = obj_input.read().splitlines()
    # convert hex instructions into binary and return result
    return [
        hex_to_bin(hex_instruction)
        for hex_instruction in hex_instructions
    ]
| StarcoderdataPython |
61558 | <reponame>twerkmeister/table-segmenter
import argparse
from typing import Text
import os
import table_segmenter.model
import table_segmenter.io
import table_segmenter.preprocessing
import table_segmenter.metrics
import tensorflow
from tensorflow import keras
def load_data_for_training(data_path: Text):
    """Load images and targets from *data_path*, preprocess and augment them.

    Returns the pair (x_augmented, y) ready to feed to ``model.fit``.
    NOTE(review): augmentation is applied to the validation set too when this
    helper is reused for it -- confirm that is intended.
    """
    image_names, images = table_segmenter.io.load_images(data_path)
    targets = table_segmenter.io.load_targets(data_path, image_names)
    # Original shapes are captured before preprocessing so augmentation can
    # relate targets back to the unscaled images.
    original_image_shapes = [image.shape for image in images]
    x = table_segmenter.preprocessing.preprocess_images(images)
    x_augmented, augmented_targets = \
        table_segmenter.preprocessing.augment_multi(x, targets, original_image_shapes)
    y = table_segmenter.preprocessing.preprocess_targets(augmented_targets)
    return x_augmented, y
def train(train_data_path: Text, val_data_path: Text, experiment_dir: Text):
    """Train the table segmenter and store model, TensorBoard logs and the
    final evaluation under *experiment_dir*.

    Parameters: train_data_path / val_data_path -- data folders readable by
    load_data_for_training; experiment_dir -- output folder (created if absent).
    """
    tensorflow.compat.v1.disable_eager_execution()
    # tensorflow.config.run_functions_eagerly(True)
    os.makedirs(experiment_dir, exist_ok=True)
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=experiment_dir)
    # Restore the best weights seen on val_loss rather than the last epoch's.
    early_stopping_callback = keras.callbacks.EarlyStopping("val_loss", patience=7,
                                                            verbose=1,
                                                            restore_best_weights=True)
    print("Loading training data")
    x_train, y_train = load_data_for_training(train_data_path)
    print("Loading validation data")
    x_val, y_val = load_data_for_training(val_data_path)
    model = table_segmenter.model.build()
    model.compile(loss=table_segmenter.metrics.combined_loss,
                  optimizer='adam',
                  # run_eagerly=True,
                  metrics=[table_segmenter.metrics.regression_mean_absolute_error,
                           table_segmenter.metrics.decision_accuracy,
                           table_segmenter.metrics.regression_mean_error,
                           table_segmenter.metrics.regression_error_stddev])
    model.fit(x_train,
              y_train,
              validation_data=(x_val, y_val),
              epochs=80,
              batch_size=16,
              verbose=True,
              callbacks=[tensorboard_callback, early_stopping_callback])
    model.save(experiment_dir)
    # MAE evaluation
    score = model.evaluate(x_val,
                           y_val,
                           batch_size=16,
                           verbose=True)
    # Fix: the original format string was "nTest ..." -- the backslash of the
    # intended "\n" escape had been lost.
    # NOTE(review): score[1] is the first metric (regression MAE); the '%'
    # suffix implies a percentage -- confirm the metric's scale.
    print("\nTest MAE: %.1f%%" % (score[1]))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='train the table segmenter.')
parser.add_argument("train_data_path",
help='Path to the training data folder.')
parser.add_argument("val_data_path",
help='Path to the validation data folder.')
parser.add_argument("experiment_folder",
help='Path to the output folder for the model and logs.')
args = parser.parse_args()
train(args.train_data_path, args.val_data_path, args.experiment_folder)
| StarcoderdataPython |
7905 | from flask import Flask, render_template, request, redirect, url_for
from os.path import join
from stego import Steganography
app = Flask(__name__)
# All uploaded/produced images live under this folder, served statically.
UPLOAD_FOLDER = 'static/files/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# NOTE(review): ALLOWED_EXTENSIONS is defined but never checked against
# uploaded filenames anywhere in this file -- confirm intended.
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
@app.route("/")
def home():
return render_template('home.html')
@app.route("/encrypt", methods=['GET', 'POST'])
def get_image():
if request.method == 'GET':
return render_template('encrypt.html')
# Check if the user has entered the secret message
if 'file' in request.files and 'Secret' in request.values:
uploaded_image = request.files['file']
message = request.values.get('Secret')
password = <PASSWORD>("key")
filepath = join(app.config['UPLOAD_FOLDER'], "cover_image.png")
uploaded_image.save(filepath)
im = Steganography(filepath=app.config['UPLOAD_FOLDER'], key=password)
im.encode(message=message)
return render_template('encrypt.html', value=filepath, image_flag=True, secret_flag=True)
return redirect(url_for('encrypt'))
@app.route("/decrypt", methods=['GET', 'POST'])
def get_image_to_decrypt():
if request.method == 'GET':
return render_template('decrypt.html')
if 'key' in request.values:
password = request.values.get('key')
filepath = join(app.config['UPLOAD_FOLDER'], "stego_image.png")
im = Steganography(filepath=app.config['UPLOAD_FOLDER'], key=password)
message = im.decode()
return render_template('decrypt.html', value=filepath, message=message)
if 'file' in request.files:
uploaded_image = request.files['file']
filepath = join(app.config['UPLOAD_FOLDER'], "stego_image.png")
uploaded_image.save(filepath)
return render_template('decrypt.html', value=filepath)
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
| StarcoderdataPython |
1797763 | # -*- coding: utf-8 -*-
import socket
import time
from selectors import DefaultSelector, EVENT_WRITE, EVENT_READ
def get(path):
    """Fetch *path* from localhost:5000 over one blocking socket and print the
    first line of the response (the HTTP status line)."""
    s = socket.socket()
    s.connect(('localhost', 5000))
    # Fix: the HTTP version token in the request line is 'HTTP/1.0' (with a
    # slash); the original 'HTTP1.0' is not a parseable request line.
    request = 'GET {} HTTP/1.0\r\n\r\n'.format(path)
    s.send(request.encode())
    chunks = []
    while True:
        chunk = s.recv(1000)
        if chunk:
            chunks.append(chunk)
        else:
            # Peer closed the connection: the response is complete.
            body = b''.join(chunks).decode()
            print(body.split('\n')[0])
            return
# NOTE(review): 'foo' lacks the leading slash that '/bar' has -- confirm
# which form the demo server at localhost:5000 expects.
get('foo')
get('/bar')
# Module-level selector shared by the non-blocking get() defined below.
selector = DefaultSelector()
def get(path):
    """Fetch *path* from localhost:5000 with a non-blocking socket, waiting for
    readiness through the module-level selector, and print the status line.

    Shadows the blocking get() defined above (kept for the tutorial's
    before/after comparison)."""
    s = socket.socket()
    s.setblocking(False)
    try:
        s.connect(('localhost', 5000))
    except BlockingIOError:
        # Expected for a non-blocking connect: completion is signalled by the
        # socket becoming writable.
        pass
    # Fix: the HTTP version token in the request line is 'HTTP/1.0' (with a
    # slash); the original 'HTTP1.0' is not a parseable request line.
    request = 'GET {} HTTP/1.0\r\n\r\n'.format(path)
    selector.register(s.fileno(), EVENT_WRITE)
    selector.select()
    selector.unregister(s.fileno())
    s.send(request.encode())
    chunks = []
    while True:
        # Re-register per iteration: wait until data is readable each time.
        selector.register(s.fileno(), EVENT_READ)
        selector.select()
        selector.unregister(s.fileno())
        chunk = s.recv(1000)
        if chunk:
            chunks.append(chunk)
        else:
            body = b''.join(chunks).decode()
            print(body.split('\n')[0])
            return
| StarcoderdataPython |
1723482 | import numpy
from matchms import Fragments, Spectrum
from matchms.filtering import normalize_intensities
def _create_test_spectrum():
    """Build the default test spectrum with a fixed 11-peak intensity pattern."""
    peak_heights = numpy.array([1, 1, 5, 5, 5, 5, 7, 7, 7, 9, 9], dtype="float")
    return _create_test_spectrum_with_intensities(peak_heights)
def _create_test_spectrum_with_intensities(intensities):
    """Wrap *intensities* in a Spectrum over the fixed m/z grid 10..110 (step 10)."""
    mz_values = numpy.arange(10.0, 120.0, 10.0)
    return Spectrum(mz=mz_values, intensities=intensities)
def test_peak_comments_after_filter():
    """Peak comments keyed by m/z must survive normalize_intensities unchanged."""
    spectrum_in: Spectrum = _create_test_spectrum()
    spectrum_in.set("peak_comments", {10: "blub"})
    spectrum = normalize_intensities(spectrum_in)
    assert spectrum.get("peak_comments")[10] == "blub"
def test_reiterating_peak_comments():
    """Assigning a new Fragments to spectrum.peaks should re-key the peak
    comments to the surviving peaks: comments of the near-identical peaks at
    ~100.000x are merged (joined with '; '), the others follow their peak."""
    mz = numpy.array([100.0003, 100.0004, 100.0005, 110., 200., 300., 400.0176], dtype='float')
    intensities = numpy.array([1, 2, 3, 4, 5, 6, 7], dtype='float')
    peak_comments = ["m/z 100.0003", None, "m/z 100.0005", "m/z 110.", "m/z 200.", "m/z 300.", "m/z 400.0176"]
    # Comments are stored keyed by the original m/z values.
    peak_comments = {mz[i]: peak_comments[i] for i in range(len(mz))}
    spectrum = Spectrum(mz=mz, intensities=intensities,
                        metadata={"peak_comments": peak_comments})
    spectrum.peaks = Fragments(mz=numpy.array([100.0004, 110., 400.018], dtype='float'),
                               intensities=numpy.array([5, 4, 7], dtype='float'))
    assert spectrum.peak_comments == {100.0004: "m/z 100.0003; m/z 100.0005", 110.: "m/z 110.", 400.018: "m/z 400.0176"}
| StarcoderdataPython |
3281305 | <gh_stars>0
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, Email
class LoginForm(FlaskForm):
    """WTForms login form: a required well-formed email, a required password,
    and a submit button."""
    email = StringField('Email', validators=[InputRequired(), Email()])
    password = PasswordField('Password', validators=[InputRequired()])
    submit = SubmitField('Login')
4820098 | from planner import *
from othertools import *
import matplotlib.pyplot as plt
def main():
    """Plot the RQ1 comparison figure: one scatter/line series per planner,
    scores read from per-planner CSV files, saved as 'rq1' (png, dpi=200)."""
    score_2t = readfile('rq1_TimeLIME.csv')
    score_2f = readfile('rq1_LIME.csv')
    scores2_x = readfile('rq1_XTREE.csv')
    scores2_alve = readfile('rq1_Alves.csv')
    scores2_shat = readfile('rq1_Shat.csv')
    scores2_oliv = readfile('rq1_Oliv.csv')
    score2_rw = readfile('rq1_Random.csv')
    plt.subplots(figsize=(7, 7))
    plt.rcParams.update({'font.size': 16})
    # ind=np.arange(10)
    # One x position per dataset (9 projects on the x-axis below).
    N = len(scores2_x)
    # NOTE(review): `width` is unused -- leftover from an earlier bar-chart version?
    width = 0.25
    dummy1, dummy2, dummy3, dummy4, dummy5, dummy6, dummy7 = [], [], [], [], [], [], []
    for i in range(0, len(scores2_x)):
        # (1 - mean score) * 20 rescales each planner's per-project score for
        # plotting; assumes scores lie in [0, 1] -- TODO confirm readfile's range.
        dummy1.append(np.round(1 - np.mean(score_2t[i]), 3) * 20)
        dummy2.append(np.round(1 - np.mean(score_2f[i]), 3) * 20)
        dummy3.append(np.round(1 - np.mean(scores2_x[i]), 3) * 20)
        dummy4.append(np.round(1 - np.mean(scores2_alve[i]), 3) * 20)
        dummy5.append(np.round(1 - np.mean(scores2_shat[i]), 3) * 20)
        dummy6.append(np.round(1 - np.mean(scores2_oliv[i]), 3) * 20)
        dummy7.append(np.round(1 - np.mean(score2_rw[i]), 3) * 20)
    plt.scatter(np.arange(N), dummy2, label='Classical LIME', s=100, marker='o')
    plt.scatter(np.arange(N), dummy3, label='XTREE', s=100, marker='o')
    plt.scatter(np.arange(N), dummy4, label='Alves', s=100, marker='o')
    plt.scatter(np.arange(N), dummy5, label='Shatnawi', s=100, marker='o')
    plt.scatter(np.arange(N), dummy6, label='Oliveira', s=100, marker='o')
    plt.scatter(np.arange(N), dummy7, label='RandomWalk', s=100, marker='v')
    # TimeLIME is drawn as a connected line to stand out from the scatter series.
    plt.plot(np.arange(N), dummy1, label='TimeLIME', marker='^', markersize=10, color='#22406D')
    # plt.ylim(-11,130)
    plt.xticks(np.arange(N), ['jedit', 'camel1', 'camel2', 'log4j', 'xalan', 'ant', 'velocity', 'poi', 'synapse'])
    plt.yticks([0, 2, 4, 6, 8, 10, 12])
    plt.subplots_adjust(bottom=0.2, left=0, right=1.1)
    plt.grid(axis='y')
    plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.1), fancybox=True, shadow=True, ncol=3)
    plt.savefig("rq1", dpi=200, bbox_inches='tight')
    plt.show()
    return
if __name__ == "__main__":
main()
| StarcoderdataPython |
1637920 | """Support for Stateless Exterior Heating device."""
import logging
from pyoverkiz.enums import OverkizCommand
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import HVAC_MODE_HEAT, HVAC_MODE_OFF
from homeassistant.const import TEMP_CELSIUS
from ..entity import OverkizEntity
# Module-level logger (not referenced in the code visible below).
_LOGGER = logging.getLogger(__name__)
class StatelessExteriorHeating(OverkizEntity, ClimateEntity):
    """Representation of TaHoma Stateless Exterior Heating device."""
    # Stateless device: no current-mode feedback, so hvac_mode stays None.
    _attr_hvac_mode = None
    # Only on/off control is available, exposed as HEAT / OFF.
    _attr_hvac_modes = [HVAC_MODE_OFF, HVAC_MODE_HEAT]
    _attr_preset_mode = None
    _attr_temperature_unit = TEMP_CELSIUS  # Not used but climate devices need a recognized temperature unit...
    async def async_set_hvac_mode(self, hvac_mode: str) -> None:
        """Set new target hvac mode: HEAT switches the heater on, any other
        requested mode switches it off."""
        if hvac_mode == HVAC_MODE_HEAT:
            await self.executor.async_execute_command(OverkizCommand.ON)
        else:
            await self.executor.async_execute_command(OverkizCommand.OFF)
| StarcoderdataPython |
1637530 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import models, _
class StockPicking(models.Model):
    _inherit = "stock.picking"
    def _pre_action_done_hook(self):
        """Extend validation: before confirming transfers, open a confirmation
        wizard if any move line uses a lot past its expiry alert date."""
        res = super()._pre_action_done_hook()
        # The 'skip_expired' context key avoids re-running the check once the
        # user has already confirmed the expired-lots wizard.
        if res is True and not self.env.context.get('skip_expired'):
            pickings_to_warn_expired = self._check_expired_lots()
            if pickings_to_warn_expired:
                return pickings_to_warn_expired._action_generate_expired_wizard()
        return res
    def _check_expired_lots(self):
        # Pickings with at least one move line whose lot raised the expiry alert.
        expired_pickings = self.move_line_ids.filtered(lambda ml: ml.lot_id.product_expiry_alert).picking_id
        return expired_pickings
    def _action_generate_expired_wizard(self):
        """Return the act_window opening the 'expiry.picking.confirmation'
        wizard, preloaded with the affected pickings and their expired lots."""
        expired_lot_ids = self.move_line_ids.filtered(lambda ml: ml.lot_id.product_expiry_alert).lot_id.ids
        context = dict(self.env.context)
        # (6, 0, ids) is the Odoo many2many command "replace with these ids".
        context.update({
            'default_picking_ids': [(6, 0, self.ids)],
            'default_lot_ids': [(6, 0, expired_lot_ids)],
        })
        return {
            'name': _('Confirmation'),
            'type': 'ir.actions.act_window',
            'res_model': 'expiry.picking.confirmation',
            'view_mode': 'form',
            'target': 'new',
            'context': context,
        }
| StarcoderdataPython |
163222 | <filename>django-rgd-imagery/rgd_imagery/rest/download.py
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from drf_yasg.utils import swagger_auto_schema
from rest_framework.decorators import api_view
from rgd.permissions import check_read_perm
from rgd_imagery import models
@swagger_auto_schema(
    method='GET',
    operation_summary='Download the associated Image data for this Image directly from S3.',
)
@api_view(['GET'])
def download_image_file(request, pk):
    """Redirect (HTTP 302) to a direct download URL for the Image's file.

    404 if no Image with *pk* exists; raises if the user lacks read permission.
    """
    instance = get_object_or_404(models.Image, pk=pk)
    check_read_perm(request.user, instance)
    url = instance.file.get_url()
    return HttpResponseRedirect(url)
| StarcoderdataPython |
3320575 | from .css_format import CSSFormatter
| StarcoderdataPython |
3308708 | <gh_stars>0
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from requests import get
import helper
import data
class Details(unittest.TestCase):
    """Selenium smoke tests for the registration page: reachability and the
    presence of each form field located via XPaths stored in data.Register."""
    def setUp(self):
        # Fresh browser per test, pre-navigated to the register page.
        self.browser = webdriver.Chrome()
        self.browser.get(data.Register.registerURL)
    def tearDown(self):
        # NOTE(review): addCleanup() is normally registered in setUp; calling
        # it from tearDown still works but is unconventional -- confirm intent.
        self.addCleanup(self.browser.quit)
    def testPageStatusCode(self):
        # Plain HTTP request (no browser) to verify the page serves a 200.
        request = get(data.Register.registerURL)
        self.assertEqual(request.status_code, 200)
    def testPageLoaded(self):
        response = self.browser.current_url
        self.assertTrue(response == data.Register.registerURL)
    def testUsernameFieldExists(self):
        elem = self.browser.find_element_by_xpath(data.Register.usernameField)
        self.assertTrue(elem)
    def testEmailFieldExists(self):
        elem = self.browser.find_element_by_xpath(data.Register.emailField)
        self.assertTrue(elem)
    def testPasswordFieldExists(self):
        elem = self.browser.find_element_by_xpath(data.Register.passwordField)
        self.assertTrue(elem)
    def testSubmitButtonExists(self):
        elem = self.browser.find_element_by_xpath(data.Register.submitBtn)
        self.assertTrue(elem)
class DetailsNoSetUp(unittest.TestCase):
    """Register-page tests that manage their own navigation (setUp does not
    pre-load the register page)."""
    def setUp(self):
        self.browser = webdriver.Chrome()
    def tearDown(self):
        # NOTE(review): same unconventional addCleanup-in-tearDown as Details.
        self.addCleanup(self.browser.quit)
    def testLoadPageWhileLoggedIn(self):
        # A logged-in user should be redirected away from the register page.
        self.browser.get(data.Register.loginURL)
        helper.login(self)
        self.browser.get(data.Register.registerURL)
        response = self.browser.current_url
        self.assertFalse(response == data.Register.registerURL)
if __name__ == '__main__':
    # verbosity=2 prints each test name as it runs.
    unittest.main(verbosity=2)
| StarcoderdataPython |
89572 | # Copyright 2004-2008 <NAME>.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""contains classes that allow to configure code generation for free\\member functions, operators and etc."""
import os
from . import user_text
from . import algorithm
from . import decl_wrapper
from pyplusplus import messages
from pygccxml import declarations
from pyplusplus import function_transformers as ft
class calldef_t(decl_wrapper.decl_wrapper_t):
    """base class, for code generator configuration, for function declaration classes."""
    BOOST_PYTHON_MAX_ARITY = 10
    """Boost.Python configuration macro value.
    A function has more than BOOST_PYTHON_MAX_ARITY arguments, will not compile.
    You should adjust BOOST_PYTHON_MAX_ARITY macro.
    For more information see: http://mail.python.org/pipermail/c++-sig/2002-June/001554.html
    """
    def __init__(self, *arguments, **keywords):
        decl_wrapper.decl_wrapper_t.__init__( self, *arguments, **keywords )
        # Configuration knobs; None means "not decided yet" for the lazily
        # computed ones (call policies, signature cast, overridability, ...).
        self._call_policies = None
        self._use_keywords = True
        self._use_default_arguments = True
        self._create_with_signature = None
        self._overridable = None
        self._non_overridable_reason = None
        self._transformations = None
    def get_call_policies(self):
        return self._call_policies
    def set_call_policies(self, call_policies):
        self._call_policies = call_policies
    call_policies = property( get_call_policies, set_call_policies
                              , doc="reference to :class:`decl_wrappers.call_policy_t` class." \
                                   +"Default value is calculated at runtime, based on return value.")
    def _get_use_keywords(self):
        # Keyword arguments only make sense when the function has arguments.
        return self._use_keywords and bool( self.arguments )
    def _set_use_keywords(self, use_keywords):
        self._use_keywords = use_keywords
    use_keywords = property( _get_use_keywords, _set_use_keywords
                             , doc="boolean, if True, allows to call function from Python using keyword arguments." \
                                  +"Default value is True.")
    def _get_create_with_signature(self):
        # Lazily decide whether the generated .def(...) needs an explicit
        # function-type cast; the answer is cached in _create_with_signature.
        if None is self._create_with_signature:
            self._create_with_signature = bool( self.overloads )
            if not self._create_with_signature and declarations.templates.is_instantiation( self.name ):
                self._create_with_signature = True
            if not self._create_with_signature and isinstance( self.parent, declarations.class_t ):
                # A non-private base class declaring a same-named function with
                # a different signature also forces the explicit cast.
                for hi in self.parent.recursive_bases:
                    if hi.access_type == 'private':
                        continue
                    funcs = hi.related_class.calldefs( self.name, recursive=False, allow_empty=True )
                    for f in funcs:
                        if f.argument_types != self.argument_types:
                            self._create_with_signature = True
                            break
                    if self._create_with_signature:
                        break
            if not self._create_with_signature:
                self._create_with_signature \
                    = bool( self.parent.calldefs( self.name, recursive=False, allow_empty=True ) )
        return self._create_with_signature
    def _set_create_with_signature(self, create_with_signature):
        self._create_with_signature = create_with_signature
    create_with_signature = property( _get_create_with_signature, _set_create_with_signature
                                      , doc="boolean, if True `Py++` will generate next code: def( ..., function type( function ref )"\
                                           +"Thus, the generated code is safe, when a user creates function overloading." \
                                           +"Default value is computed, based on information from the declarations tree" )
    def _get_use_default_arguments(self):
        return self._use_default_arguments
    def _set_use_default_arguments(self, use_default_arguments):
        self._use_default_arguments = use_default_arguments
    use_default_arguments = property( _get_use_default_arguments, _set_use_default_arguments
                                      , doc="boolean, if True `Py++` will generate code that will set default arguments" \
                                           +"Default value is True.")
    def has_wrapper( self ):
        """returns True, if function - wrapper is needed
        The functionality by this function is incomplete. So please don't
        use it in your code.
        """
        if not isinstance( self, declarations.member_calldef_t ):
            return False
        elif self.virtuality == declarations.VIRTUALITY_TYPES.PURE_VIRTUAL:
            return True
        elif self.access_type == declarations.ACCESS_TYPES.PROTECTED:
            return True
        else:
            return False
    def get_overridable( self ):
        """Check if the method can be overridden."""
        # Lazy tri-state: computed on first access unless the user set it.
        if None is self._overridable:
            if isinstance( self, declarations.member_calldef_t ) \
               and self.virtuality != declarations.VIRTUALITY_TYPES.NOT_VIRTUAL \
               and declarations.is_reference( self.return_type ):
                # Virtual function returning a reference: reported as
                # non-overridable via message W1049.
                self._overridable = False
                self._non_overridable_reason = messages.W1049
            else:
                self._overridable = True
                self._non_overridable_reason = ""
        return self._overridable
    def set_overridable( self, overridable ):
        self._overridable = overridable
    overridable = property( get_overridable, set_overridable
                            , doc = get_overridable.__doc__ )
    @property
    def non_overridable_reason( self ):
        """returns the reason the function could not be overridden"""
        return self._non_overridable_reason
    def mark_as_non_overridable( self, reason ):
        """
        mark this function as final - user will not be able to override it from Python
        Not all functions could be overridden from Python, for example virtual function
        that returns non const reference to a member variable. `Py++` allows you to
        mark these functions and provide an explanation to the user.
        """
        self.overridable = False
        self._non_overridable_reason = messages.W0000 % reason
    @property
    def transformations(self):
        """return list of function transformations that should be applied on the function"""
        if None is self._transformations:
            #TODO: for trivial cases get_size( int&, int& ) `Py++` should guess
            #function transformers
            self._transformations = []
        return self._transformations
    def add_transformation(self, *transformer_creators, **keywd):
        """add new function transformation.
        transformer_creators - list of transformer creators, which should be applied on the function
        keywd - keyword arguments for :class:`function_transformers.function_transformation_t` class initialization
        """
        self.transformations.append( ft.function_transformation_t( self, transformer_creators, **keywd ) )
    def _exportable_impl_derived( self ):
        # Hook for subclasses: return a warning string when not exportable,
        # '' otherwise.
        return ''
    def _exportable_impl( self ):
        # Returns '' when the function can be exposed, otherwise the message
        # explaining why it cannot.
        if self.transformations:
            #It is possible that the function asked for the user attention.
            #The user paid attention and created a transformation.
            #Py++ should be silent in this case.
            return ''
        if not self.parent.name:
            return messages.W1057 % str( self )
        all_types = [ arg.type for arg in self.arguments ]
        all_types.append( self.return_type )
        for some_type in all_types:
            if isinstance( some_type, declarations.ellipsis_t ):
                return messages.W1053 % str( self )
            units = declarations.decompose_type( some_type )
            ptr2functions = [unit for unit in units if isinstance( unit, declarations.calldef_type_t )]
            if ptr2functions:
                return messages.W1004
            #Function that takes as argument some instance of non public class
            #will not be exported. Same for the return variable
            if isinstance( units[-1], declarations.declarated_t ):
                dtype = units[-1]
                if isinstance( dtype.declaration.parent, declarations.class_t ):
                    if dtype.declaration not in dtype.declaration.parent.public_members:
                        return messages.W1005
            no_ref = declarations.remove_reference( some_type )
            no_ptr = declarations.remove_pointer( no_ref )
            no_const = declarations.remove_const( no_ptr )
            if declarations.is_array( no_const ):
                return messages.W1006
        return self._exportable_impl_derived()
    def _readme_impl( self ):
        # Collects non-fatal warnings about how this function will be exposed.
        def is_double_ptr( type_ ):
            #check for X**
            if not declarations.is_pointer( type_ ):
                return False
            base = declarations.remove_pointer( type_ )
            return declarations.is_pointer( base )
        def suspicious_type( type_ ):
            # Matches non-const references to fundamental/enum types
            # (reported below via messages W1008/W1009).
            if not declarations.is_reference( type_ ):
                return False
            type_no_ref = declarations.remove_reference( type_ )
            return not declarations.is_const( type_no_ref ) \
                   and ( declarations.is_fundamental( type_no_ref )
                         or declarations.is_enum( type_no_ref ) )
        msgs = []
        #TODO: functions that takes as argument pointer to pointer to smth, could not be exported
        #see http://www.boost.org/libs/python/doc/v2/faq.html#funcptr
        if len( self.arguments ) > calldef_t.BOOST_PYTHON_MAX_ARITY:
            msgs.append( messages.W1007 % ( calldef_t.BOOST_PYTHON_MAX_ARITY, len( self.arguments ) ) )
        if self.transformations:
            #if user defined transformation, than I think it took care of the problems
            ft = self.transformations[0]
            if ft.alias == ft.unique_name:
                msgs.append( messages.W1044 % ft.alias )
            return msgs
        if suspicious_type( self.return_type ) and None is self.call_policies:
            msgs.append( messages.W1008 )
        if ( declarations.is_pointer( self.return_type ) or is_double_ptr( self.return_type ) ) \
           and None is self.call_policies:
            msgs.append( messages.W1050 % str(self.return_type) )
        for index, arg in enumerate( self.arguments ):
            if suspicious_type( arg.type ):
                msgs.append( messages.W1009 % ( arg.name, index ) )
            if is_double_ptr( arg.type ):
                msgs.append( messages.W1051 % ( arg.name, index, str(arg.type) ) )
        if False == self.overridable:
            msgs.append( self._non_overridable_reason)
        problematics = algorithm.registration_order.select_problematics( self )
        if problematics:
            tmp = []
            for f in problematics:
                tmp.append( os.linesep + '\t' + str(f) )
            msgs.append( messages.W1010 % os.linesep.join( tmp ) )
        return msgs
class member_function_t( declarations.member_function_t, calldef_t ):
    """defines a set of properties, that will instruct `Py++` how to expose the member function"""
    def __init__(self, *arguments, **keywords):
        declarations.member_function_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
        self._use_overload_macro = False
        self._override_precall_code = []
        # NOTE: the '_overide' misspelling is historical; kept as-is since the
        # attribute name is part of this module's internal contract.
        self._overide_native_precall_code = []
        self._default_precall_code = []
        self._adaptor = None
    def _get_adaptor(self):
        return self._adaptor
    def _set_adaptor(self, adaptor):
        self._adaptor = adaptor
    adaptor = property( _get_adaptor, _set_adaptor
                        , doc="string, if contains value `Py++` will generate code the following code: " \
                             +".def(<name>, <adaptor>(<function reference>), <other args> ) " \
                             +". The property is relevant for public, non virtual member functions." )
    def add_override_precall_code(self, code):
        """add code, which should be executed, before overridden member function call"""
        self._override_precall_code.append( code )
    @property
    def override_precall_code(self):
        """code, which should be executed, before overridden member function call"""
        return self._override_precall_code
    def add_override_native_precall_code(self, code):
        """add code, which should be executed, before native member function call"""
        self._overide_native_precall_code.append( code )
    @property
    def override_native_precall_code(self):
        """code, which should be executed, before overridden member function call"""
        return self._overide_native_precall_code
    def add_default_precall_code(self, code):
        """add code, which should be executed, before this member function call"""
        self._default_precall_code.append( code )
    @property
    def default_precall_code(self):
        """code, which should be executed, before this member function call"""
        return self._default_precall_code
    def get_use_overload_macro(self):
        return self._use_overload_macro
    def set_use_overload_macro(self, use_macro):
        self._use_overload_macro = use_macro
    use_overload_macro = property( get_use_overload_macro, set_use_overload_macro
                                   , doc="boolean, if True, will use BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS macro to expose declarations" \
                                        +"Default value is False.")
    def _exportable_impl_derived(self):
        # Private non-virtual member functions cannot be exposed (W1011).
        if self.access_type == declarations.ACCESS_TYPES.PRIVATE \
           and self.virtuality == declarations.VIRTUALITY_TYPES.NOT_VIRTUAL:
            return messages.W1011
        return ''
    def _readme_impl( self ):
        msgs = super( member_function_t, self )._readme_impl()
        # Virtual function declared as non-throwing: warn (W1046).
        if self.does_throw == False \
           and self.virtuality != declarations.VIRTUALITY_TYPES.NOT_VIRTUAL:
            msgs.append( messages.W1046 )
        return msgs
class constructor_t( declarations.constructor_t, calldef_t ):
    """defines a set of properties, that will instruct `Py++` how to expose the constructor"""
    def __init__(self, *arguments, **keywords):
        declarations.constructor_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
        self._body = ''
        self._allow_implicit_conversion = True
    def _get_body(self):
        return self._body
    def _set_body(self, body):
        self._body = body
    body = property( _get_body, _set_body
                     , doc="string, class-wrapper constructor body" )
    def _exportable_impl_derived( self ):
        # Compiler-generated (artificial) and private constructors cannot be
        # exposed (W1012 / W1013).
        if self.is_artificial:
            return messages.W1012
        if self.access_type == declarations.ACCESS_TYPES.PRIVATE:
            return messages.W1013
        return ''
    def does_define_implicit_conversion( self ):
        """ returns true if the constructor can take part in implicit conversions.
        For more information see:
        * http://boost.org/libs/python/doc/v2/implicit.html#implicitly_convertible-spec
        * http://msdn2.microsoft.com/en-us/library/h1y7x448.aspx
        * http://msdn.microsoft.com/en-us/library/s2ff0fz8%28VS.100%29.aspx
        """
        if self.parent.is_abstract: #user is not able to create an instance of the class
            return False
        if self.is_copy_constructor:
            return False
        # Only single-argument-callable constructors define a conversion.
        if not( len( self.arguments) and len( self.required_args ) < 2 ):
            return False
        if self.parent.find_out_member_access_type( self ) != declarations.ACCESS_TYPES.PUBLIC:
            return False
        return True
    def _get_allow_implicit_conversion(self):
        # User flag AND the structural check must both hold.
        return self._allow_implicit_conversion and self.does_define_implicit_conversion()
    def _set_allow_implicit_conversion(self, allow_implicit_conversion):
        self._allow_implicit_conversion = allow_implicit_conversion
    allow_implicit_conversion = property( _get_allow_implicit_conversion, _set_allow_implicit_conversion
                                          , doc="boolean, indicates whether `Py++` should generate implicitly_convertible code or not" \
                                               "Default value is calculated from the constructor type." )
class destructor_t( declarations.destructor_t, calldef_t ):
    """you may ignore this class for the time being.
    In future it will contain "body" property, that will allow to insert user
    code to class-wrapper destructor.
    """
    #TODO: add body property
    def __init__(self, *arguments, **keywords):
        declarations.destructor_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
class operators_helper:
    """helps `Py++` to deal with C++ operators"""
    # Operator symbol groups; `all` (shadows the builtin, kept for
    # compatibility) is the union used by is_supported.
    inplace = [ '+=', '-=', '*=', '/=', '%=', '>>=', '<<=', '&=', '^=', '|=' ]
    comparison = [ '==', '!=', '<', '>', '<=', '>=' ]
    non_member = [ '+', '-', '*', '/', '%', '&', '^', '|', ]
    unary = [ '!', '~', '+', '-' ]
    all = inplace + comparison + non_member + unary
    @staticmethod
    def is_supported( oper ):
        """returns True if Boost.Python supports the operator"""
        if oper.symbol == '*' and len( oper.arguments ) == 0:
            #dereference does not make sense
            return False
        if oper.symbol != '<<':
            return oper.symbol in operators_helper.all
        # Everything below is the special-case analysis for operator<<,
        # which is only supported as a free stream-insertion operator.
        args_len = len( oper.arguments )
        if isinstance( oper, declarations.member_operator_t ):# and args_len != 1:
            return False #Boost.Python does not support member operator<< :-(
        if isinstance( oper, declarations.free_operator_t ) and args_len != 2:
            return False
        if not declarations.is_same( oper.return_type, oper.arguments[0].type ):
            return False
        type_ = oper.return_type
        if not declarations.is_reference( type_ ):
            return False
        type_ = declarations.remove_reference( type_ )
        if declarations.is_const( type_ ):
            return False
        if args_len == 2:
            #second argument should have "T const &" type, otherwise the code will not compile
            tmp = oper.arguments[1].type
            if not declarations.is_reference( tmp ):
                return False
            tmp = declarations.remove_reference( tmp )
            if not declarations.is_const( tmp ):
                return False
        return declarations.is_std_ostream( type_ ) or declarations.is_std_wostream( type_ )
    @staticmethod
    def exportable( oper ):
        """returns '' if Boost.Python or `Py++` know how to export the operator,
        otherwise the warning message explaining why it cannot be exported"""
        if isinstance( oper, declarations.member_operator_t ) and oper.symbol in ( '()', '[]', '=' ):
            return ''
        if not operators_helper.is_supported( oper ):
            return messages.W1014 % oper.name
        if isinstance( oper, declarations.free_operator_t ):
            #`Py++` should find out whether the relevant class is exposed to Python
            #and if not, then this operator should not be exposed too
            included = [decl for decl in oper.class_types if decl.ignore == False]
            if not included:
                return messages.W1052 % str(oper)
        return ''
    @staticmethod
    def target_class( oper ):
        """this function returns reference to class/class declaration
        in scope of which, the operator should be exposed."""
        if isinstance( oper.parent, declarations.class_t ):
            return oper.parent
        #now we deal with free operators
        def find_class( type_ ):
            # Resolve an argument type to the class (declaration) it names,
            # or None for non-class types.
            type_ = declarations.remove_reference( type_ )
            if declarations.is_class( type_ ):
                return declarations.class_traits.get_declaration( type_ )
            elif declarations.is_class_declaration( type_ ):
                return declarations.class_declaration_traits.get_declaration( type_ )
            else:
                return None
        arg_1_class = find_class( oper.arguments[0].type )
        arg_2_class = None
        if 2 == len( oper.arguments ):
            arg_2_class = find_class( oper.arguments[1].type )
        if arg_1_class:
            if declarations.is_std_ostream( arg_1_class ) or declarations.is_std_wostream( arg_1_class ):
                #in most cases users don't expose std::ostream class
                return arg_2_class
            else:
                return arg_1_class
        else:
            return arg_2_class
class member_operator_t( declarations.member_operator_t, calldef_t ):
    """defines a set of properties, that will instruct `Py++` how to expose the member operator"""
    def __init__(self, *arguments, **keywords):
        declarations.member_operator_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
        self._override_precall_code = []
        self._default_precall_code = []
        # NOTE: '_overide' misspelling is historical (matches member_function_t).
        self._overide_native_precall_code = []
    def add_override_precall_code(self, code):
        """add code, which should be executed, before overridden operator call"""
        self._override_precall_code.append( code )
    @property
    def override_precall_code(self):
        """code, which should be executed, before overridden operator call"""
        return self._override_precall_code
    def add_default_precall_code(self, code):
        """add code, which should be executed, before this operator call"""
        self._default_precall_code.append( code )
    @property
    def default_precall_code(self):
        """code, which should be executed, before this operator call"""
        return self._default_precall_code
    def add_override_native_precall_code(self, code):
        """add code, which should be executed, before native member function call"""
        self._overide_native_precall_code.append( code )
    @property
    def override_native_precall_code(self):
        """code, which should be executed, before overridden member function call"""
        return self._overide_native_precall_code
    def _get_alias( self):
        alias = super( member_operator_t, self )._get_alias()
        # If the user did not set an alias, map the C++ operator symbol to
        # the corresponding Python protocol name.
        if alias == self.name:
            if self.symbol == '()':
                alias = '__call__'
            elif self.symbol == '[]':
                alias = '__getitem__'
            elif self.symbol == '=':
                alias = 'assign'
            else:
                pass
        return alias
    alias = property( _get_alias, decl_wrapper.decl_wrapper_t._set_alias
                      , doc="Gives right alias for operator()( __call__ ) and operator[]( __getitem__ )" )
    def _exportable_impl_derived( self ):
        # Private non-virtual operators cannot be exposed (W1015); otherwise
        # defer to the generic operator checks.
        if self.access_type == declarations.ACCESS_TYPES.PRIVATE \
           and self.virtuality == declarations.VIRTUALITY_TYPES.NOT_VIRTUAL:
            return messages.W1015
        return operators_helper.exportable( self )
    @property
    def target_class( self ):
        """class in whose scope the operator is registered (always the owner)"""
        return self.parent
class casting_operator_t( declarations.casting_operator_t, calldef_t ):
    """defines a set of properties, that will instruct `Py++` how to expose the casting operator"""
    def prepare_special_cases():
        """
        Creates a map of special cases ( aliases ) for casting operator.

        Maps fundamental C++ types (and their const / const char* forms)
        plus the common std::string / std::wstring spellings to Python
        special-method names ( __int__, __long__, __float__, __str__ ).
        Called once at class-creation time to build SPECIAL_CASES.
        """
        special_cases = {}
        const_t = declarations.const_t
        pointer_t = declarations.pointer_t
        for type_ in list(declarations.FUNDAMENTAL_TYPES.values()):
            alias = None
            if declarations.is_same( type_, declarations.bool_t() ):
                alias = '__int__'
            elif declarations.is_integral( type_ ):
                if 'long' in type_.decl_string:
                    alias = '__long__'
                else:
                    alias = '__int__'
            elif declarations.is_floating_point( type_ ):
                alias = '__float__'
            else:
                continue #void
            if alias:
                # Register both the plain and the const-qualified type.
                special_cases[ type_ ] = alias
                special_cases[ const_t( type_ ) ] = alias
        special_cases[ pointer_t( const_t( declarations.char_t() ) ) ] = '__str__'
        # Several textual spellings of std::string / std::wstring are
        # matched by decl-string comparison (whitespace differs between
        # compilers / gccxml versions).
        std_string = '::std::basic_string<char,std::char_traits<char>,std::allocator<char> >'
        std_wstring1 = '::std::basic_string<wchar_t,std::char_traits<wchar_t>,std::allocator<wchar_t> >'
        std_wstring2 = '::std::basic_string<wchar_t, std::char_traits<wchar_t>, std::allocator<wchar_t> >'
        special_cases[ std_string ] = '__str__'
        special_cases[ std_wstring1 ] = '__str__'
        special_cases[ std_wstring2 ] = '__str__'
        special_cases[ '::std::string' ] = '__str__'
        special_cases[ '::std::wstring' ] = '__str__'
        #TODO: add
        #    std::complex<SomeType> some type should be converted to double
        return special_cases
    # Evaluated once when the class object is created.
    SPECIAL_CASES = prepare_special_cases()
    #casting_member_operator_t.prepare_special_cases()
    def __init__(self, *arguments, **keywords):
        declarations.casting_operator_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
    def _get_alias( self):
        # Lazily compute the alias from the (alias-stripped) return type:
        # first try the SPECIAL_CASES table (by type equality for type_t
        # keys, by decl string for string keys), otherwise fall back to
        # a generated "as_<type>" name.
        if not self._alias or self.name == super( casting_operator_t, self )._get_alias():
            return_type = declarations.remove_alias( self.return_type )
            decl_string = return_type.decl_string
            for type_, alias in list(self.SPECIAL_CASES.items()):
                if isinstance( type_, declarations.type_t ):
                    if declarations.is_same( return_type, type_ ):
                        self._alias = alias
                        break
                else:
                    if decl_string == type_:
                        self._alias = alias
                        break
            else:
                self._alias = 'as_' + self._generate_valid_name(self.return_type.decl_string)
        return self._alias
    alias = property( _get_alias, decl_wrapper.decl_wrapper_t._set_alias
                      , doc="Gives right alias for casting operators: __int__, __long__, __str__." \
                            +"If there is no built-in type, creates as_xxx alias" )
    def _exportable_impl_derived( self ):
        # Non-const casting operators to non-fundamental types cannot be
        # exposed safely (W1016); non-public operators cannot be exposed
        # at all (W1017).
        if not declarations.is_fundamental( self.return_type ) and not self.has_const:
            return messages.W1016
        if self.access_type != declarations.ACCESS_TYPES.PUBLIC:
            return messages.W1017
        return ''
class free_function_t( declarations.free_function_t, calldef_t ):
    """defines a set of properties, that will instruct `Py++` how to expose the free function"""
    def __init__(self, *arguments, **keywords):
        declarations.free_function_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
        self._use_overload_macro = False
        self._declaration_code = []
        self._adaptor = None
    def _get_adaptor(self):
        return self._adaptor
    def _set_adaptor(self, adaptor):
        self._adaptor = adaptor
    adaptor = property( _get_adaptor, _set_adaptor
                        , doc="string, if contains value `Py++` will generate code the following code: " \
                              +"def(<name>, <adaptor>(<function reference>), <other args> ) " )
    def add_declaration_code( self, code ):
        """adds the code to the declaration section"""
        self.declaration_code.append( user_text.user_text_t( code ) )
    @property
    def declaration_code( self ):
        """
        List of strings, that contains valid C++ code, that will be added to
        the same file in which the registration code for the function will be
        generated
        """
        return self._declaration_code
    def get_use_overload_macro(self):
        return self._use_overload_macro
    def set_use_overload_macro(self, use_macro):
        self._use_overload_macro = use_macro
    use_overload_macro = property( get_use_overload_macro, set_use_overload_macro
                                   , doc="boolean, if True, will use BOOST_PYTHON_FUNCTION_OVERLOADS macro to expose declarations" \
                                         +"Default value is False.")
class free_operator_t( declarations.free_operator_t, calldef_t ):
    """defines a set of properties, that will instruct `Py++` how to expose the free operator"""
    def __init__(self, *arguments, **keywords):
        declarations.free_operator_t.__init__( self, *arguments, **keywords )
        calldef_t.__init__( self )
        # Lazily computed / user-overridable registration scope.
        self._target_class = None
    def _exportable_impl_derived( self ):
        return operators_helper.exportable( self )
    def get_target_class( self ):
        # Deduce the target class on first access; a user-assigned value
        # (via the property setter) takes precedence.
        if self._target_class is None:
            self._target_class = operators_helper.target_class( self )
        return self._target_class
    def set_target_class( self, class_ ):
        self._target_class = class_
    _target_class_doc_ = "reference to class_t or class_declaration_t object." \
                         + " There are use cases, where `Py++` doesn't guess right, in what scope" \
                         + " free operator should be registered( exposed ). If this is your use case " \
                         + " than setting the class will allow you to quickly fix the situation. "
    target_class = property( get_target_class, set_target_class, doc=_target_class_doc_ )
| StarcoderdataPython |
3215223 | <reponame>borninfreedom/vismod<filename>vismod.py<gh_stars>1-10
import tkinter as tk
from tkinter import ttk
from view_model import ViewModel
from add_slidebar import AddSlideBar
from config import WINDOW_WIDTH, WINDOW_HEIGHT
# Tk geometry string, e.g. "800x600", built from the configured dimensions.
WINDOW_SIZE = str(WINDOW_WIDTH) + 'x' + str(WINDOW_HEIGHT)
class Vismod(tk.Frame):
    """Main application frame.

    Builds a two-tab notebook (slide-bar editor and model viewer) on the
    given parent window, plus a usage-hint label at the bottom.
    """

    def __init__(self, parent, *args, **kwargs):
        """Create the notebook tabs and hint label inside *parent*.

        :param parent: the toplevel Tk window hosting this frame.
        """
        super().__init__(parent, *args, **kwargs)
        notebook = ttk.Notebook(parent)
        add_slidebar_tab = AddSlideBar(notebook)
        view_model_tab = ViewModel(notebook)
        notebook.add(add_slidebar_tab, text='添加滑条')
        notebook.add(view_model_tab, text='查看模型')
        notebook.pack()
        # Bug fix: Label(...).pack(...) returns None, so the old
        # `operation_msg = ...` bound None.  Keep the widget itself on the
        # instance so the hint text can be updated later if needed.
        self.operation_msg = tk.Label(
            parent, text="按住Ctrl键,配合鼠标进行旋转平移", anchor='center')
        self.operation_msg.pack(fill='both', side=tk.BOTTOM)
if __name__ == "__main__":
root = tk.Tk()
root.title('vismod')
root.geometry(WINDOW_SIZE)
Vismod(root).pack(fill="both", expand=True)
root.mainloop()
| StarcoderdataPython |
44247 | """Wikidump reader and processor module.
"""
import os
# Dump version string (e.g. a date stamp), read from the first line of
# scripts/DUMP_VERSION shipped next to this module.
with open(os.path.join(
        os.path.dirname(__file__),
        'scripts',
        'DUMP_VERSION')) as f:
    DUMP_VERSION = f.readline().strip()
# Torrent hash of the dump archive, read from scripts/TORRENT_HASH.
with open(os.path.join(
        os.path.dirname(__file__),
        'scripts',
        'TORRENT_HASH')) as f:
    HASH = f.readline().strip()
# File name and relative path of the multistream articles dump archive.
BZ_FILE = 'enwiki-%s-pages-articles-multistream.xml.bz2' % DUMP_VERSION
BZ_PATH = os.path.join('data', BZ_FILE)
# XML namespace of the MediaWiki export format used by the dump.
DEFAULT_NAMESPACE = 'http://www.mediawiki.org/xml/export-0.10/'
# Known namespaces used by Database Exporter
NSMAP = {
    None: DEFAULT_NAMESPACE,
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
}
__all__ = ['DUMP_VERSION', 'HASH', 'BZ_FILE',
           'BZ_PATH', 'DEFAULT_NAMESPACE', 'NSMAP']
| StarcoderdataPython |
3381120 | #
# Class for particle-size distributions, one distribution at every
# x location of the electrode, with uniform concentration in each
# particle
#
import pybamm
from .base_distribution import BaseSizeDistribution
class UniformProfile(BaseSizeDistribution):
    """
    Class for molar conservation in particle-size distributions, one
    distribution at every x location of the electrode,
    with a uniform concentration within each particle (in r). Concentration varies
    with R (particle size), and x (electrode coordinate).
    Parameters
    ----------
    param : parameter class
        The parameters to use for this submodel
    domain : str
        The domain of the model either 'Negative' or 'Positive'
    **Extends:** :class:`pybamm.particle.size_distribution.BaseSizeDistribution`
    """
    def __init__(self, param, domain):
        super().__init__(param, domain)
        pybamm.citations.register("Kirk2021")
    def get_fundamental_variables(self):
        """Define the state variable (surface concentration distribution)
        and derive all standard distribution / averaged variables from it."""
        # The concentration is uniform throughout each particle, so we
        # can just use the surface value.
        if self.domain == "Negative":
            # distribution variables
            c_s_surf_distribution = pybamm.Variable(
                "Negative particle surface concentration distribution",
                domain="negative particle size",
                auxiliary_domains={
                    "secondary": "negative electrode",
                    "tertiary": "current collector",
                },
                bounds=(0, 1),
            )
            R = pybamm.standard_spatial_vars.R_n
        elif self.domain == "Positive":
            # distribution variables
            c_s_surf_distribution = pybamm.Variable(
                "Positive particle surface concentration distribution",
                domain="positive particle size",
                auxiliary_domains={
                    "secondary": "positive electrode",
                    "tertiary": "current collector",
                },
                bounds=(0, 1),
            )
            R = pybamm.standard_spatial_vars.R_p
        variables = self._get_distribution_variables(R)
        # Standard concentration distribution variables (size-dependent)
        variables.update(
            self._get_standard_concentration_distribution_variables(
                c_s_surf_distribution
            )
        )
        # Flux variables (size-dependent); zero because the concentration
        # is uniform within each particle.
        variables.update(
            self._get_standard_flux_distribution_variables(pybamm.Scalar(0))
        )
        # Standard size-averaged variables. Average concentrations using
        # the volume-weighted distribution since they are volume-based
        # quantities. Necessary for output variables "Total lithium in
        # negative electrode [mol]", etc, to be calculated correctly
        f_v_dist = variables[
            self.domain + " volume-weighted particle-size distribution"
        ]
        c_s_surf = pybamm.Integral(f_v_dist * c_s_surf_distribution, R)
        c_s = pybamm.PrimaryBroadcast(c_s_surf, [self.domain.lower() + " particle"])
        c_s_xav = pybamm.x_average(c_s)
        variables.update(self._get_standard_concentration_variables(c_s, c_s_xav))
        # Size-averaged flux variables (also identically zero)
        N_s = pybamm.FullBroadcastToEdges(
            0,
            [self.domain.lower() + " particle"],
            auxiliary_domains={
                "secondary": self.domain.lower() + " electrode",
                "tertiary": "current collector",
            },
        )
        N_s_xav = pybamm.FullBroadcastToEdges(
            0, self.domain.lower() + " particle", "current collector"
        )
        variables.update(self._get_standard_flux_variables(N_s, N_s_xav))
        return variables
    def get_coupled_variables(self, variables):
        """Add totals (e.g. total lithium) computed from other submodels'
        variables."""
        variables.update(self._get_total_concentration_variables(variables))
        return variables
    def set_rhs(self, variables):
        """ODE for each particle: dc/dt = -3 * j / (a_R * gamma * R),
        i.e. surface concentration driven by the interfacial current,
        scaled by particle size R (dimensionless pybamm parameters)."""
        c_s_surf_distribution = variables[
            self.domain + " particle surface concentration distribution"
        ]
        j_distribution = variables[
            self.domain + " electrode interfacial current density distribution"
        ]
        R = variables[self.domain + " particle sizes"]
        if self.domain == "Negative":
            self.rhs = {
                c_s_surf_distribution: -3
                * j_distribution
                / self.param.a_R_n
                / self.param.gamma_n
                / R
            }
        elif self.domain == "Positive":
            self.rhs = {
                c_s_surf_distribution: -3
                * j_distribution
                / self.param.a_R_p
                / self.param.gamma_p
                / R
            }
    def set_initial_conditions(self, variables):
        """Initialise every particle size with the r-averaged initial
        concentration from the parameter set."""
        c_s_surf_distribution = variables[
            self.domain + " particle surface concentration distribution"
        ]
        if self.domain == "Negative":
            c_init = pybamm.PrimaryBroadcast(
                pybamm.r_average(self.param.c_n_init), "negative particle size"
            )
        elif self.domain == "Positive":
            c_init = pybamm.PrimaryBroadcast(
                pybamm.r_average(self.param.c_p_init), "positive particle size"
            )
        self.initial_conditions = {c_s_surf_distribution: c_init}
| StarcoderdataPython |
class butter_filters:
    """Zero-phase Butterworth filter applied at construction time.

    Designs an ``order``-th order Butterworth filter with the given
    ``cutoff`` (Hz) relative to the sampling rate ``fs`` (Hz) and applies
    it forward-backward (``filtfilt``, zero phase) to ``data``.
    """

    def __init__(self, data=None, cutoff=2.0, fs=30.0, order=2, type='low'):
        """Filter *data* and store the result.

        :param data: input samples (sequence of floats); defaults to [0.0].
        :param cutoff: cutoff frequency in Hz.
        :param fs: sampling frequency in Hz.
        :param order: filter order.
        :param type: 'low' or 'high' (scipy ``btype``).  The previous
            default was the invalid placeholder string
            "enumerate(('low', 'high'))", which always raised in
            ``butter``; 'low' is now the default.
        """
        from scipy.signal import butter, filtfilt
        # Avoid the mutable-default-argument pitfall of `data=[0.0]`.
        if data is None:
            data = [0.0]
        nyq = 0.5 * fs  # Nyquist Frequency
        normal_cutoff = cutoff / nyq
        b, a = butter(order, normal_cutoff, btype=type, analog=False)
        self.y = filtfilt(b, a, data)

    def out_filtered(self):
        """Return the filtered signal (numpy array from filtfilt)."""
        return self.y
159184 | <gh_stars>1-10
'''
Created by <NAME>
Feb 1, 2018
'''
from sys import stdout
import re, json, requests
from bs4 import BeautifulSoup
from pyvirtualdisplay import Display
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver import Chrome, ChromeOptions
from selenium import webdriver
def stat(judge, probs):
    """Print an aligned '<judge>: <count>' line and return *probs* unchanged.

    Uses the function-call form of print: identical behaviour on Python 2
    (single parenthesised argument) while also being valid Python 3, unlike
    the original print statement.
    """
    print('{: <4}: {}'.format(judge, probs))
    return probs
def codechef(handle):
    """Scrape the CodeChef profile page and return the solved-problem count.

    Parses the "Fully Solved" number from the problems-solved section.
    """
    usr='http://www.codechef.com/users/' + handle
    request=requests.get(usr)
    content=request.content
    soup=BeautifulSoup(content,'html.parser')
    element=soup.find('section',{'class':'rating-data-section problems-solved'})
    # First integer inside the first <h5> is the solved count.
    prb = int(re.findall(r'\d+',element.findAll('h5')[0].text)[0])
    return stat('CC',prb)
def codeforces(handle): # using codeforces api
    # http://codeforces.com/api/help/methods#user.info
    """Count distinct accepted problems via the Codeforces user.status API."""
    usr = 'http://codeforces.com/api/user.status?handle='
    usr += handle
    usr += '&from=1&count=100000'
    # Progress hint: the API call over the full submission list is slow.
    stdout.write('Wait 30s!')
    stdout.flush()
    stdout.write('\r')
    request=requests.get(usr)
    content=request.content
    data = json.loads(content)
    # Deduplicate by problem name: many AC submissions per problem.
    probs = set()
    for a in data['result']:
        if a['verdict']=='OK':
            probs.add(a['problem']['name'])
    return stat('CF',len(probs))
def spoj(handle):
    """Scrape the SPOJ profile page and return the solved-problem count."""
    usr='http://www.spoj.com/users/' + handle
    request=requests.get(usr)
    content=request.content
    soup=BeautifulSoup(content,'html.parser')
    element=soup.find('dl',{'class':'dl-horizontal profile-info-data profile-info-data-stats'})
    # First <dd> of the stats list holds the solved count.
    prb = int(element.findAll('dd')[0].text)
    return stat('SP',prb)
def uri(jid):
    """Scrape the URI Online Judge profile (by numeric id) for the solved count."""
    usr='https://www.urionlinejudge.com.br/judge/en/profile/' + jid
    request=requests.get(usr)
    content=request.content
    soup=BeautifulSoup(content,'html.parser')
    # The count sits in the parent element of the "Solved:" label.
    element=soup.find('span',string='Solved:')
    prb = int(re.findall(r'\d+',element.parent.text)[0])
    return stat('URI',prb)
def csacademy(handle,browser):
    """Read the CS Academy solved count via a Selenium-driven browser.

    The page is rendered client-side, so a plain HTTP fetch is not enough;
    waits up to 8s for the stats element to appear.
    """
    usr='https://csacademy.com/user/' + handle
    browser.get(usr)
    wait = WebDriverWait(browser,8)
    by = By.CSS_SELECTOR
    # Matched by its inline style; presumably fragile if the site changes.
    sel = 'span[style="font-size: 1.3em; margin-bottom: 10px;"]'
    tup = (by,sel)
    wait.until(EC.visibility_of_element_located(tup))
    element = browser.find_element_by_css_selector(sel)
    prb = int(re.findall(r'\d+',element.text)[0])
    return stat('CSA',prb)
def timus(jid):
    """Scrape the Timus author page (by numeric id) for the solved count."""
    usr='http://acm.timus.ru/author.aspx?id=' + jid
    request=requests.get(usr)
    content=request.content
    soup=BeautifulSoup(content,'html.parser')
    # Second stats cell holds the solved-problems value.
    element=soup.findAll('td',{'class':'author_stats_value'})[1]
    prb = int(re.findall(r'\d+',element.text)[0])
    return stat('TI',prb)
def poj(handle):
    """Scrape the POJ user-status page for the solved count."""
    usr='http://poj.org/userstatus?user_id=' + handle
    request=requests.get(usr)
    content=request.content
    soup=BeautifulSoup(content,'html.parser')
    # The "Solved" link (result=0) carries the count as its text.
    element=soup.find('a',href='status?result=0&user_id=' + handle)
    prb=int(element.string)
    return stat('PKU',prb)
def uhunt(judge,jid):
    """Count distinct accepted problems on UVa or LiveArchive via the uHunt API.

    :param judge: 'UVa' or 'LA', selecting which uHunt endpoint to query.
    :param jid: numeric user id as a string.
    """
    if judge=='UVa':
        usr='https://uhunt.onlinejudge.org/api/subs-user-last/'
    elif judge=='LA':
        usr='https://icpcarchive.ecs.baylor.edu/uhunt/api/subs-user-last/'
    usr+=jid
    usr+='/100000'
    request=requests.get(usr)
    content=request.content
    data = json.loads(content)
    # Each sub is a list; index 1 is the problem id, index -1 the verdict
    # (-1 meaning "in queue" per the uHunt API) — deduplicate by problem.
    s = set()
    for a in data['subs']:
        if a[-1]!=-1: s.add(a[1])
    return stat(judge,len(s))
def hackerrank(handle,browser):
    """Count solved challenges on a HackerRank profile via Selenium.

    Repeatedly clicks the "load more" button until it disappears, then
    counts the challenge links on the fully expanded page.
    """
    usr='https://www.hackerrank.com/' + handle
    browser.get(usr)
    wait = WebDriverWait(browser,8)
    by = By.CSS_SELECTOR
    sel = 'a[data-analytics="ProfileChallengesLoadMore"]'
    tup = (by,sel)
    while True:
        try:
            wait.until(EC.visibility_of_element_located(tup))
            browser.find_element_by_css_selector(sel).click()
        except Exception as e:
            # No more "load more" button (or wait timed out): done expanding.
            break
    sel = 'a[data-analytics="ProfileChallengesLink"]'
    prb = len(browser.find_elements_by_css_selector(sel))
    return stat('HR',prb)
def hackerearth(handle,browser):
    """Count distinct accepted problems on HackerEarth via Selenium.

    Filters submissions by "Accepted", then walks every result page,
    collecting problem titles into a set to deduplicate.
    """
    usr='https://www.hackerearth.com/' + 'submissions/' + handle
    browser.get(usr)
    wait = WebDriverWait(browser,8)
    by = By.CSS_SELECTOR
    page = 1
    probs = set()
    browser.find_element_by_xpath("//select[@name='result']/option[text()='Accepted']").click()
    while True:
        # prevent class="loader-overlay"
        wait.until(EC.visibility_of_element_located((by,'div[class=""]')))
        # Progress indicator, overwritten in place with '\r'.
        stdout.write('Page %d'%(page))
        stdout.flush()
        stdout.write('\r')
        page += 1
        content = browser.page_source
        soup=BeautifulSoup(content,'html.parser')
        # Green check icons mark accepted rows; the sibling holds the title.
        element=soup.findAll('i',{'class':'fa fa-check-circle fa-green result-icon tool-tip'})
        for x in element:
            y=x.parent.find_previous_sibling()
            probs.add(y['title'])
        sel = 'i[class="fa fa-angle-right dark"]'
        el = browser.find_element_by_css_selector(sel)
        pel = el.find_element_by_xpath('..')
        # A disabled right-arrow means this was the last page.
        if pel.get_attribute('class')=='disabled-arrow arrow':
            break
        browser.find_element_by_css_selector(sel).click()
    return stat('HE',len(probs))
def main():
    """Interactively collect judge handles, query each judge, and write
    the per-judge solved counts to data.json plus a grand total.

    Empty input skips a judge.  Selenium-backed judges (CSAcademy,
    HackerRank, HackerEarth) run in a virtual display via Xvfb.
    NOTE: Python 2 only (raw_input / print statements).
    """
    print 'Total Problems Solved Statistics'
    print 'Press Enter to Skip'
    data = {}
    handle = raw_input('CodeChef Handle: ')
    if handle != '':
        data['CodeChef'] = codechef(handle)
    handle = raw_input('SPOJ Handle: ')
    if handle != '':
        data['SPOJ'] = spoj(handle)
    handle = raw_input('Codeforces Handle: ')
    if handle != '':
        data['Codeforces'] = codeforces(handle)
    handle = raw_input('URI ID: ')
    if handle != '':
        data['URI'] = uri(handle)
    handle = raw_input('UVa ID: ')
    if handle != '':
        data['UVa'] = uhunt('UVa',handle)
    handle = raw_input('LiveArchive ID: ')
    if handle != '':
        data['LiveArchive'] = uhunt('LA',handle)
    handle = raw_input('Timus ID: ')
    if handle != '':
        data['Timus'] = timus(handle)
    handle = raw_input('POJ ID: ')
    if handle != '':
        data['POJ'] = poj(handle)
    # Headless display for the Selenium-driven scrapers.
    display = Display(visible=0,size=(800,600))
    display.start()
    browser = webdriver.Chrome('/usr/local/bin/chromedriver')
    handle = raw_input('CSAcademy Handle: ')
    if handle != '':
        data['CSAcademy'] = csacademy(handle,browser)
    handle = raw_input('HackerRank Handle: ')
    if handle != '':
        data['HackerRank'] = hackerrank(handle,browser)
    handle = raw_input('HackerEarth Handle: ')
    if handle != '':
        data['HackerEarth'] = hackerearth(handle,browser)
    browser.quit()
    display.stop()
    # Persist the per-judge counts, then print the grand total.
    with open("data.json", "w") as outfile:
        json.dump(data, outfile, indent=4)
    probs = sum(data.itervalues())
    print 'TOT : {}'.format(probs)
# Guard the entry point: importing this module should not immediately
# start the interactive prompts and web scraping in main().  Running the
# file as a script behaves exactly as before.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3395608 | """Implements get process fot master playlist."""
from logging import getLogger
from typing import Mapping, Union
import m3u8 # type: ignore
from radikoplaylist.authorization import Authorization
from radikoplaylist.master_playlist import MasterPlaylist
from radikoplaylist.master_playlist_request import MasterPlaylistRequest
from radikoplaylist.requester import Requester
__all__ = ["MasterPlaylistClient"]
class MasterPlaylistClient:
    """Implements get process for master playlist."""
    @classmethod
    def get(
        cls, master_playlist_request: MasterPlaylistRequest, *, area_id: str = Authorization.ARIA_ID_DEFAULT
    ) -> MasterPlaylist:
        """Authorize against radiko for *area_id*, resolve the playlist URL
        and return a MasterPlaylist carrying both URL and auth headers."""
        headers = Authorization(area_id=area_id).auth()
        url_master_playlist = cls._get_url(master_playlist_request, headers)
        return MasterPlaylist(url_master_playlist, headers)
    @classmethod
    def _get_url(cls, master_playlist_request: MasterPlaylistRequest, headers: Mapping[str, Union[str, bytes]]) -> str:
        """Gets URL of master playlist."""
        logger = getLogger(__name__)
        response = Requester.get(master_playlist_request.build_url(headers), headers)
        # The response is an m3u8 index; take the first variant's URI.
        master_playlist_url = m3u8.loads(response.content.decode("utf-8")).playlists[0].uri
        logger.debug("master_playlist_url: %s", master_playlist_url)
        return master_playlist_url
| StarcoderdataPython |
11318 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_versionedobjects import fixture as object_fixture
from mogan.notifications import base as notification_base
from mogan.notifications.objects import base as notification
from mogan.objects import base
from mogan.objects import fields
from mogan.objects import server as server_obj
from mogan.tests import base as test_base
from mogan.tests.unit.db import utils as db_utils
class TestNotificationBase(test_base.TestCase):
    """Tests for the versioned-notification machinery.

    Defines throwaway (unregistered) object / payload / notification
    classes and verifies that emitting produces the expected event type,
    publisher id and serialized payload.
    """
    @base.MoganObjectRegistry.register_if(False)
    class TestObject(base.MoganObject):
        # Source object whose fields feed the payload via SCHEMA.
        VERSION = '1.0'
        fields = {
            'field_1': fields.StringField(),
            'field_2': fields.IntegerField(),
            'not_important_field': fields.IntegerField(),
        }
    @base.MoganObjectRegistry.register_if(False)
    class TestNotificationPayload(notification.NotificationPayloadBase):
        VERSION = '1.0'
        # Maps payload field -> (ctor kwarg of populate_schema, source attr).
        SCHEMA = {
            'field_1': ('source_field', 'field_1'),
            'field_2': ('source_field', 'field_2'),
        }
        fields = {
            'extra_field': fields.StringField(), # filled by ctor
            'field_1': fields.StringField(), # filled by the schema
            'field_2': fields.IntegerField(), # filled by the schema
        }
        def populate_schema(self, source_field):
            super(TestNotificationBase.TestNotificationPayload,
                  self).populate_schema(source_field=source_field)
    @base.MoganObjectRegistry.register_if(False)
    class TestNotificationPayloadEmptySchema(
            notification.NotificationPayloadBase):
        # Payload without a SCHEMA: fields filled only by the ctor.
        VERSION = '1.0'
        fields = {
            'extra_field': fields.StringField(), # filled by ctor
        }
    @notification.notification_sample('test-update-1.json')
    @notification.notification_sample('test-update-2.json')
    @base.MoganObjectRegistry.register_if(False)
    class TestNotification(notification.NotificationBase):
        VERSION = '1.0'
        fields = {
            'payload': fields.ObjectField('TestNotificationPayload')
        }
    @base.MoganObjectRegistry.register_if(False)
    class TestNotificationEmptySchema(notification.NotificationBase):
        VERSION = '1.0'
        fields = {
            'payload': fields.ObjectField('TestNotificationPayloadEmptySchema')
        }
    # Serialized form expected on the wire for the populated payload.
    expected_payload = {
        'mogan_object.name': 'TestNotificationPayload',
        'mogan_object.data': {
            'extra_field': 'test string',
            'field_1': 'test1',
            'field_2': 42},
        'mogan_object.version': '1.0',
        'mogan_object.namespace': 'mogan'}
    def setUp(self):
        super(TestNotificationBase, self).setUp()
        self.my_obj = self.TestObject(field_1='test1',
                                      field_2=42,
                                      not_important_field=13)
        self.payload = self.TestNotificationPayload(
            extra_field='test string')
        self.payload.populate_schema(source_field=self.my_obj)
        self.notification = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE,
                phase=fields.NotificationPhase.START),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=self.payload)
    def _verify_notification(self, mock_notifier, mock_context,
                             expected_event_type,
                             expected_payload):
        # Common assertions: publisher id, context, event type and payload
        # of the single emitted notification.
        mock_notifier.prepare.assert_called_once_with(
            publisher_id='mogan-fake:fake-host')
        mock_notify = mock_notifier.prepare.return_value.info
        self.assertTrue(mock_notify.called)
        self.assertEqual(mock_notify.call_args[0][0], mock_context)
        self.assertEqual(mock_notify.call_args[1]['event_type'],
                         expected_event_type)
        actual_payload = mock_notify.call_args[1]['payload']
        self.assertJsonEqual(expected_payload, actual_payload)
    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_emit_notification(self, mock_notifier):
        # Full event type: object.action.phase
        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        self.notification.emit(mock_context)
        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update.start',
            expected_payload=self.expected_payload)
    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_emit_with_host_and_binary_as_publisher(self, mock_notifier):
        noti = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=self.payload)
        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        noti.emit(mock_context)
        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update',
            expected_payload=self.expected_payload)
    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_emit_event_type_without_phase(self, mock_notifier):
        # Without a phase the event type is just object.action
        noti = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=self.payload)
        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        noti.emit(mock_context)
        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update',
            expected_payload=self.expected_payload)
    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_not_possible_to_emit_if_not_populated(self, mock_notifier):
        # Emitting a payload whose schema was never populated must fail
        # and must not touch the notifier.
        non_populated_payload = self.TestNotificationPayload(
            extra_field='test string')
        noti = self.TestNotification(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=non_populated_payload)
        mock_context = mock.Mock()
        self.assertRaises(AssertionError, noti.emit, mock_context)
        mock_notifier.assert_not_called()
    @mock.patch('mogan.common.rpc.NOTIFIER')
    def test_empty_schema(self, mock_notifier):
        # A payload class without SCHEMA needs no populate_schema call.
        non_populated_payload = self.TestNotificationPayloadEmptySchema(
            extra_field='test string')
        noti = self.TestNotificationEmptySchema(
            event_type=notification.EventType(
                object='test_object',
                action=fields.NotificationAction.UPDATE),
            publisher=notification.NotificationPublisher(
                host='fake-host', binary='mogan-fake'),
            priority=fields.NotificationPriority.INFO,
            payload=non_populated_payload)
        mock_context = mock.Mock()
        mock_context.to_dict.return_value = {}
        noti.emit(mock_context)
        self._verify_notification(
            mock_notifier,
            mock_context,
            expected_event_type='test_object.update',
            expected_payload={
                'mogan_object.name': 'TestNotificationPayloadEmptySchema',
                'mogan_object.data': {'extra_field': u'test string'},
                'mogan_object.version': '1.0',
                'mogan_object.namespace': 'mogan'})
    def test_sample_decorator(self):
        # The @notification_sample decorator accumulates sample file names.
        self.assertEqual(2, len(self.TestNotification.samples))
        self.assertIn('test-update-1.json', self.TestNotification.samples)
        self.assertIn('test-update-2.json', self.TestNotification.samples)
# Expected fingerprints (class name -> "version-hash") of every registered
# notification object; TestNotificationObjectVersions fails when a class
# changes without a corresponding version bump.
notification_object_data = {
    'ServerPayload': '1.0-30fefa8478f1b9b35c66868377fb6dfd',
    'ServerAddressesPayload': '1.0-69caf4c36f36756bb1f6970d093ee1f6',
    'ServerActionPayload': '1.0-8dc4429afa34d86ab92c9387e3ccd0c3',
    'ServerActionNotification': '1.0-20087e599436bd9db62ae1fb5e2dfef2',
    'ExceptionPayload': '1.0-7c31986d8d78bed910c324965c431e18',
    'EventType': '1.0-589894aac7c98fb640eca394f67ad621',
    'NotificationPublisher': '1.0-4b0b0d662b21eeed0b23617f3f11794b'
}
class TestNotificationObjectVersions(test_base.TestCase):
    """Verify notification objects' versions match their fingerprints."""

    def setUp(self):
        # Bug fix: the original called super(test_base.TestCase, self),
        # which resolves to the parent *of* test_base.TestCase and thus
        # silently skipped test_base.TestCase.setUp().  Passing this class
        # runs the whole setUp chain as intended.
        super(TestNotificationObjectVersions, self).setUp()
        base.MoganObjectRegistry.register_notification_objects()

    def test_versions(self):
        """Fail if any registered notification object changed without a
        version bump (fingerprint mismatch)."""
        noti_class = base.MoganObjectRegistry.notification_classes
        classes = {cls.__name__: [cls] for cls in noti_class}
        checker = object_fixture.ObjectVersionChecker(obj_classes=classes)
        # Compute the difference between actual fingerprints and
        # expect fingerprints. expect = actual = {} if there is no change.
        expect, actual = checker.test_hashes(notification_object_data)
        self.assertEqual(expect, actual,
                         "Some objects fields or remotable methods have been "
                         "modified. Please make sure the version of those "
                         "objects have been bumped and then update "
                         "expected_object_fingerprints with the new hashes. ")

    def test_notification_payload_version_depends_on_the_schema(self):
        """A change in SCHEMA alone must change the payload fingerprint."""
        @base.MoganObjectRegistry.register_if(False)
        class TestNotificationPayload(notification.NotificationPayloadBase):
            VERSION = '1.0'
            SCHEMA = {
                'field_1': ('source_field', 'field_1'),
                'field_2': ('source_field', 'field_2'),
            }
            fields = {
                'extra_field': fields.StringField(),  # filled by ctor
                'field_1': fields.StringField(),  # filled by the schema
                'field_2': fields.IntegerField(),  # filled by the schema
            }
        checker = object_fixture.ObjectVersionChecker(
            {'TestNotificationPayload': (TestNotificationPayload,)})
        old_hash = checker.get_hashes(extra_data_func=get_extra_data)
        TestNotificationPayload.SCHEMA['field_3'] = ('source_field',
                                                     'field_3')
        new_hash = checker.get_hashes(extra_data_func=get_extra_data)
        self.assertNotEqual(old_hash, new_hash)
def get_extra_data(obj_class):
    """Return extra data to fold into an object's version fingerprint.

    Notification payload classes contribute their SCHEMA mapping (in a
    deterministic key order) so that schema changes alter the hash; all
    other classes contribute nothing.
    """
    if not issubclass(obj_class, notification.NotificationPayloadBase):
        return ()
    ordered_schema = collections.OrderedDict(sorted(obj_class.SCHEMA.items()))
    return (ordered_schema,)
class TestServerActionNotification(test_base.TestCase):
    """Tests for the server-action notification helper."""
    @mock.patch('mogan.notifications.objects.server.'
                'ServerActionNotification._emit')
    def test_send_version_server_action(self, mock_emit):
        # Make sure that the notification payload chooses the values in
        # server.flavor.$value instead of server.$value
        fake_server_values = db_utils.get_test_server()
        server = server_obj.Server(**fake_server_values)
        notification_base.notify_about_server_action(
            mock.MagicMock(),
            server,
            'test-host',
            fields.NotificationAction.CREATE,
            fields.NotificationPhase.START,
            'mogan-compute')
        # Event type is object.action.phase; publisher is binary:host.
        self.assertEqual('server.create.start',
                         mock_emit.call_args_list[0][1]['event_type'])
        self.assertEqual('mogan-compute:test-host',
                         mock_emit.call_args_list[0][1]['publisher_id'])
        # Every payload field must mirror the source server values.
        payload = mock_emit.call_args_list[0][1]['payload'][
            'mogan_object.data']
        self.assertEqual(fake_server_values['uuid'], payload['uuid'])
        self.assertEqual(fake_server_values['flavor_uuid'],
                         payload['flavor_uuid'])
        self.assertEqual(fake_server_values['status'], payload['status'])
        self.assertEqual(fake_server_values['user_id'], payload['user_id'])
        self.assertEqual(fake_server_values['availability_zone'],
                         payload['availability_zone'])
        self.assertEqual(fake_server_values['name'], payload['name'])
        self.assertEqual(fake_server_values['image_uuid'],
                         payload['image_uuid'])
        self.assertEqual(fake_server_values['project_id'],
                         payload['project_id'])
        self.assertEqual(fake_server_values['description'],
                         payload['description'])
        self.assertEqual(fake_server_values['power_state'],
                         payload['power_state'])
1638728 | import maya.cmds as mc
import data
class SetData( data.Data ):
'''
SetData class object.
Contains functions to save, load and rebuild maya sets.
'''
    def __init__(self,setNode=None):
        '''
        SetData class initializer.

        @param setNode: Optional Maya set to build data from immediately.
        @type setNode: str or None
        '''
        # Execute Super Class Initilizer
        super(SetData, self).__init__()
        # Initialize Default Class Data Members
        self._data['name'] = ''
        self._data['membership'] = []
        # Valid membership modes accepted by rebuild().
        self.mode = ['add','replace']
        # Build Data
        if setNode: self.buildData(setNode)
    def verifySet(self,setNode):
        '''
        Run standard checks on the specified set.
        Raises Exception if the node is missing or not an objectSet.
        @param setNode: Set to verify
        @type setNode: str
        '''
        # Check Set Exists
        if not mc.objExists(setNode):
            raise Exception('Set "'+setNode+'" does not exists!')
        # Check Set Node Type
        if mc.objectType(setNode) != 'objectSet':
            raise Exception('Object "'+setNode+'" is not a vaild "set" node!')
    def buildData(self,setNode):
        '''
        Build setData class.
        Records the set name and its current membership list.
        @param setNode: Set to initialize data for
        @type setNode: str
        @return: The name of the set the data was built from.
        '''
        # ==========
        # - Checks -
        # ==========
        if not setNode:
            raise Exception('Invalid set node! Unable to build setData...')
            # NOTE(review): unreachable — the raise above always fires first.
            return
        self.verifySet(setNode)
        # ==============
        # - Build Data -
        # ==============
        # Start timer
        timer = mc.timerX()
        # Reset Data
        self.reset()
        # Get basic set info
        self._data['name'] = setNode
        self._data['membership'] = mc.sets(setNode,q=True)
        # Print timer result
        buildTime = mc.timerX(st=timer)
        print('SetData: Data build time for set "'+setNode+'": '+str(buildTime))
        # =================
        # - Return Result -
        # =================
        return self._data['name']
def rebuild(self,mode='add',forceMembership=True):
'''
Rebuild the set from the stored setData.
@param mode: Membership mode if the specified set already exists. Accepted values are "add" and "replace".
@type mode: str
@param forceMembership: Forces addition of items to the set. If items are in another set which is in the same partition as the given set, the items will be removed from the other set in order to keep the sets in the partition mutually exclusive with respect to membership.
@type forceMembership: bool
'''
# ==========
# - Checks -
# ==========
# Set Name
if not self._data['name']:
raise Exception('SetData has not been initialized!')
# Member Items
memberList = self._data['membership'] or []
for obj in memberList:
if not mc.objExists(obj):
print('Set member item "'+obj+'" does not exist! Unable to add to set...')
memberList.remove(obj)
# Flatten Membership List
memberList = mc.ls(memberList,fl=True) or []
# Mode
if not mode in self.mode:
raise Exception('Invalid set membership mode "'+mode+'"! Use "add" or "replace"!')
# ===============
# - Rebuild Set -
# ===============
# Start timer
timer = mc.timerX()
# Create Set
setName = self._data['name']
# Delete Set (REPLACE only)
if mc.objExists(setName) and mode == 'replace': mc.delete(setName)
# Create Set
if not mc.objExists(setName): setName = mc.sets(n=setName)
# Add Members
if memberList:
if forceMembership:
for obj in memberList:
try: mc.sets(obj,e=True,fe=setName)
except Exception, e:
print('Error adding item "'+obj+'" to set "'+setName+'"! Skipping')
print(str(e))
else:
for obj in memberList:
try: mc.sets(obj,e=True,add=setName)
except Exception, e:
print('Error adding item "'+obj+'" to set "'+setName+'"! Skipping')
print(str(e))
# Print Timer Result
buildTime = mc.timerX(st=timer)
print('SetData: Rebuild time for set "'+setName+'": '+str(buildTime))
# =================
# - Return Result -
# =================
self.setName = setName
result = {}
result['set'] = setName
result['membership'] = memberList
return result
| StarcoderdataPython |
3391905 | from autovirt.structs import Message
def build_innovations_renewal_list(messages: list[Message]) -> list:
    """Collect every attachment from every message into one flat list."""
    return [attach for message in messages for attach in message.attaches]
| StarcoderdataPython |
27320 | from nndct_shared.utils import registry
| StarcoderdataPython |
3309382 | <reponame>imabackstabber/segment-with-nn<gh_stars>1-10
# coding=utf-8
import unittest
import logging
import tensorflow as tf
from segelectri.data_loader.utils.parse_img_op import parse_img_and_mask
class TestParseImgOp(unittest.TestCase):
    """Smoke tests for parse_img_and_mask."""

    def test_true(self):
        # Sanity check that the test harness itself runs.
        self.assertTrue(True)

    def parse_sample_file(self):
        # Helper (not auto-discovered by unittest): parse one hard-coded
        # image/mask pair on the CPU and return the parsed tensors.
        with tf.device('cpu:0'):
            origin_path = tf.constant('/opt/dataset/ccf-train/2.png')
            mask_path = tf.constant('/opt/dataset/ccf-train/2_class.png')
            parsed = parse_img_and_mask(origin_img_path=origin_path,
                                        mask_img_path=mask_path)
            logging.info('shape of img is {}'.format(parsed[0].shape))
        return parsed
if __name__ == "__main__":
    # Emit INFO-level logs from the tests, then hand control to unittest.
    logging.basicConfig(level=logging.INFO)
    unittest.main()
| StarcoderdataPython |
1648589 | <filename>preprocessing.py
"""
Modulo
Neural Modular Networks in PyTorch
preprocessing.py
"""
import argparse
import os
import pickle
import numpy as np
from itertools import chain
from torch.autograd import Variable
import torch
from torch import LongTensor, FloatTensor
from torch.cuda import LongTensor as CudaLongTensor, \
FloatTensor as CudaFloatTensor
parser = argparse.ArgumentParser()

# Output / runtime options.
parser.add_argument('--outpath', help='Preprocessed pickle path',
                    type=str, default='modulo.pkl')
parser.add_argument('-task_type', required=True, help='Type of training task',
                    type=str, default='vqa', choices={'vqa'})
# NOTE(review): argparse `type=bool` treats any non-empty string as True;
# confirm that --gpu_support behaves as intended when passed on the CLI.
parser.add_argument('--gpu_support', help='Enable GPU support', type=bool,
                    default=False)

# Query sources, one file per split.
parser.add_argument('-query_train', required=True,
                    help='Source file for training queries.Query words are to '
                    'be separated by spaces with one query per line',
                    type=str)
parser.add_argument('-query_valid', required=True,
                    help='Source file for validation queries', type=str)
parser.add_argument('-query_test', required=True,
                    help='Source file for testing queries', type=str)

# Layout sources (Reverse-Polish Notation token sequences), one per split.
parser.add_argument('-layout_train', required=True,
                    help='Source file for training layouts in Reverse-Polish '
                    'Notation. Layout tokens are to be separated by '
                    'spaces with one layout per line', type=str)
parser.add_argument('-layout_valid', required=True,
                    help='Source file for validation queries.', type=str)
parser.add_argument('-layout_test', required=True,
                    help='Source file for testing queries.', type=str)

# Answer sources (binary labels), one per split.
parser.add_argument('-answer_train', required=True,
                    help='Source file for training answers, one per line',
                    type=str)
parser.add_argument('-answer_valid', required=True,
                    help='Source file for validation answers', type=str)
parser.add_argument('-answer_test', required=True,
                    help='Source file for testing answers', type=str)

# Optional image sources, required only for the VQA task (checked in main()).
parser.add_argument('--img_train',
                    help='Source npy file for training images. '
                    'Expects [N, C, H, W]', type=str)
parser.add_argument('--img_valid', help='Source npy file for validation images',
                    type=str)
parser.add_argument('--img_test', help='Source npy file for testing images',
                    type=str)
parser.add_argument('--batch_size',
                    help='To enable batching, the same network must be '
                    'constructed each time, so the layout tokens must be '
                    'the same for every batch_size chunk of each of the '
                    'training, validation and test sets. Otherwise, use '
                    'the default batch size of 1 if the data are shuffled '
                    'or inconsistent', type=int, default=1)
def unique_words(lines):
    """Return the set of distinct whitespace-separated tokens across all lines."""
    return set(chain.from_iterable(line.split() for line in lines))
def read_datasets(query_path, layout_path, answer_path, return_unique=True):
    """Read parallel query/layout/answer files.

    Each file holds one sample per line; queries and layouts are
    whitespace-separated token strings, answers are binary labels ('0'/'1').

    @param return_unique: when True also return the unique word/token sets.
    @return: (queries, layouts, answers) or
             (queries, layouts, answers, unique_words, unique_tokens)
    @raises ValueError: if an answer line is not '0' or '1'.
    """
    with open(query_path) as f:
        query_list = [line.strip() for line in f.readlines()]
    with open(layout_path) as f:
        layout_list = [line.strip() for line in f.readlines()]
    answer_list = []
    with open(answer_path) as f:
        for line in f.readlines():
            answer = line.strip()
            # FIX: raise an explicit exception instead of `assert`, which is
            # silently stripped when Python runs with -O.
            if answer not in ('0', '1'):
                raise ValueError('Only binary classification is supported at this time')
            answer_list.append(int(answer))
    if return_unique:
        word_list = unique_words(query_list)
        token_list = unique_words(layout_list)
        return query_list, layout_list, answer_list, word_list, token_list
    return query_list, layout_list, answer_list
def make_emb_idxs(query_list, layout_list, answer_list, batch_size,
                  stoi_words, stoi_tokens, use_cuda):
    """Convert string queries/layouts into batched embedding-index tensors.

    Queries and layouts within a batch must all have the same token count
    (they are stacked into dense LongTensors). Layouts additionally get a
    one-hot encoding over the token vocabulary.

    @return: (query_batches, layout_batches, onehot_batches), one entry per
             full batch; any trailing partial batch is dropped.
    """
    num_samples = len(answer_list)
    num_tokens = len(stoi_tokens)
    q_batches, l_batches, o_batches = [], [], []
    for batch_idx in range(num_samples // batch_size):
        start = batch_idx * batch_size
        end = start + batch_size
        query_idxs = [[stoi_words[w] for w in query.split()]
                      for query in query_list[start:end]]
        layout_idxs = []
        onehots = []
        for layout in layout_list[start:end]:
            token_idxs = [stoi_tokens[t] for t in layout.split()]
            # One-hot encode the layout token sequence: (seq_len, vocab).
            onehot = np.zeros((len(token_idxs), num_tokens))
            onehot[np.arange(len(token_idxs)), token_idxs] = 1
            if use_cuda:
                onehots.append(Variable(CudaFloatTensor(onehot)))
            else:
                onehots.append(Variable(FloatTensor(onehot)))
            layout_idxs.append(token_idxs)
        if use_cuda:
            q_batches.append(Variable(CudaLongTensor(query_idxs)))
            l_batches.append(Variable(CudaLongTensor(layout_idxs)))
        else:
            q_batches.append(Variable(LongTensor(query_idxs)))
            l_batches.append(Variable(LongTensor(layout_idxs)))
        # Stack to (batch, seq_len, vocab).
        o_batches.append(torch.stack(onehots, dim=2).permute(2, 0, 1))
    return q_batches, l_batches, o_batches
def main():
    """Validate CLI inputs, build batched embedding indices for every split,
    and pickle the resulting state dictionary to --outpath."""
    args = parser.parse_args()
    state_dict = dict()

    state_dict['GPU_SUPPORT'] = args.gpu_support
    if args.gpu_support:
        print('Enabling GPU Support')

    # Every query/layout file must exist for all three splits.
    for file_type in ['query', 'layout']:
        for dataset_type in ['train', 'valid', 'test']:
            arg = '{}_{}'.format(file_type, dataset_type)
            assert os.path.exists(getattr(args, arg)), \
                '{} path is invalid'.format(arg)

    # Record source paths (key order: QUERY_*, LAYOUT_*, ANSWER_*).
    for file_type in ['query', 'layout', 'answer']:
        for dataset_type in ['train', 'valid', 'test']:
            attr = '{}_{}'.format(file_type, dataset_type)
            state_dict[attr.upper()] = getattr(args, attr)
    state_dict['BATCH_SIZE'] = args.batch_size

    if args.task_type == 'vqa':
        # VQA additionally requires image files for every split.
        presence_msgs = {'train': 'Must provide training images for VQA',
                         'valid': 'Must provide validation image for VQA',
                         'test': 'Must provide test images for VQA'}
        for split in ['train', 'valid', 'test']:
            img_path = getattr(args, 'img_{}'.format(split))
            assert img_path, presence_msgs[split]
            assert os.path.exists(img_path), 'img_{} path is invalid'.format(split)
            state_dict['IMG_{}'.format(split.upper())] = img_path

    train_queries, train_layouts, train_answers, train_words, train_tokens = \
        read_datasets(args.query_train, args.layout_train, args.answer_train)
    print('Detected {} vocabulary words'.format(len(train_words)))
    print('Detected {} layout tokens'.format(len(train_tokens)))
    valid_queries, valid_layouts, valid_answers, valid_words, valid_tokens = \
        read_datasets(args.query_valid, args.layout_valid, args.answer_valid)
    test_queries, test_layouts, test_answers, test_words, test_tokens = \
        read_datasets(args.query_test, args.layout_test, args.answer_test)

    # Validation/test splits may not introduce unseen words or layout tokens.
    for split_name, kind, extra in [
            ('Validation set', 'words', valid_words - train_words),
            ('Validation set', 'tokens', valid_tokens - train_tokens),
            ('Test set', 'words', test_words - train_words),
            ('Test set', 'tokens', test_tokens - train_tokens)]:
        assert len(extra) == 0, \
            '{} has the following {} not seen in the training set: {}'.format(
                split_name, kind, extra)

    stoi_words = dict(zip(train_words, range(len(train_words))))
    stoi_tokens = dict(zip(train_tokens, range(len(train_tokens))))

    print('Creating query and layout batches...')
    batches = {}
    for split, queries, layouts, answers in [
            ('TRAIN', train_queries, train_layouts, train_answers),
            ('VALID', valid_queries, valid_layouts, valid_answers),
            ('TEST', test_queries, test_layouts, test_answers)]:
        batches[split] = make_emb_idxs(queries, layouts, answers,
                                       args.batch_size, stoi_words,
                                       stoi_tokens, args.gpu_support)
    print('...done')

    state_dict['VOCAB'] = list(train_words)
    state_dict['TOKENS'] = list(train_tokens)
    state_dict['STOI_WORDS'] = stoi_words
    state_dict['STOI_TOKENS'] = stoi_tokens
    # Key order matches the original: all QBATCHES, then LBATCHES, then OBATCHES.
    for kind_idx, kind in enumerate(['QBATCHES', 'LBATCHES', 'OBATCHES']):
        for split in ['TRAIN', 'VALID', 'TEST']:
            state_dict['{}_{}'.format(split, kind)] = batches[split][kind_idx]

    print('Writing to pickle file...')
    with open(args.outpath, 'wb') as outpath:
        pickle.dump(state_dict, outpath, pickle.HIGHEST_PROTOCOL)
    print('...done!')


if __name__ == '__main__':
    main()
| StarcoderdataPython |
22940 | #%% [markdown]
#
# We will load EEG data from the lab and attemp to build a classifier that distinguishes between learners and non-learners
#%%
import mne
import numpy as np
import os.path
import glob
import re
import pandas as pd
# try to enable cuda support to speed up filtering, make sure the MNE_USE_CUDA environment variable is set to true
mne.cuda.init_cuda()

DATA_DIR = "../../EEGdata/Fish_5Block"

# Trigger codes for stimulus presentation, one per category.
event_dict = {
    "cat": {
        "1": 20,
        "2": 21
    }
}

data_path = os.path.join(DATA_DIR, "Tail/Learner/126670_EXP_FISH.bdf")
# NOTE(review): this is a BioSemi .bdf file -- mne.io.read_raw_bdf would be
# the canonical reader; read_raw_edf is kept as in the original, confirm it
# loads the file correctly.
test_data = mne.io.read_raw_edf(data_path, preload=True)

# find the related behavioral data
participant_number = re.search(r"^(\d+)_EXP_FISH", os.path.basename(data_path))[1]
behav_path = [filename for filename in glob.glob(os.path.join(DATA_DIR, "EXP_fish2_Tomy/Cat_data/*.csv")) if participant_number in filename][0]
behav_df = pd.read_csv(behav_path)
learning_curve = behav_df["Resultat"].rolling(20).mean()  # our in house definition of current learning performance
learning_time = (learning_curve >= 0.8).idxmax()  # using a 80% correct categorization threshold

# Find the event times and give the same code (20) to all stimulus
# presentation events -- we don't differentiate between category 1 and 2.
events = mne.find_events(test_data)
events = np.array(events)
events[events[:, 2] == event_dict["cat"]["2"], 2] = 20
events = events.tolist()

# visualize data
# test_data.plot()

test_data.set_eeg_reference("average", projection=False)
# Basic cleaning: band-pass filter 0.1-50 Hz.
test_data.filter(0.1, 50.0, n_jobs="cuda")
# BUG FIX: the rejection threshold was written as `200-6` (== 194 volts,
# which rejects nothing); the intended value is 200e-6 V, i.e. a 200 uV
# peak-to-peak amplitude rejection criterion.
stim_epochs = mne.Epochs(test_data, events=events, event_id={"stimulus presentation": 20}, tmin=-0.2, tmax=0.8, reject={"eeg": 200e-6})
# Epoching and resampling require the data in memory.
stim_epochs.load_data()
stim_epochs.resample(256)

# building the pytorch model
pass
| StarcoderdataPython |
3312501 | import numpy as np
import torch
from utils import *
class MuRP(torch.nn.Module):
    '''
    Hyperbolic (Poincare-ball) knowledge-graph embedding model (MuRP).

    Scores (subject, relation, object) triples by the squared hyperbolic
    distance between a relation-scaled subject embedding and a
    relation-translated object embedding, plus per-entity biases.
    NOTE(review): relies on p_log_map/p_exp_map/p_sum/artanh imported via
    `from utils import *` -- presumably Poincare-ball/Mobius helpers; confirm.
    '''
    def __init__(self, d, dim):
        # d: dataset object exposing `entities` and `relations` sequences.
        # dim: embedding dimensionality.
        super(MuRP, self).__init__()
        # Entity embeddings; small init keeps points near the ball's origin.
        self.Eh = torch.nn.Embedding(len(d.entities), dim, padding_idx=0)
        self.Eh.weight.data = (1e-3 * torch.randn((len(d.entities), dim), dtype=torch.double, device="cuda"))
        # Per-relation translation vectors.
        self.rvh = torch.nn.Embedding(len(d.relations), dim, padding_idx=0)
        self.rvh.weight.data = (1e-3 * torch.randn((len(d.relations), dim), dtype=torch.double, device="cuda"))
        # Per-relation diagonal scaling, applied in tangent space.
        self.Wu = torch.nn.Parameter(torch.tensor(np.random.uniform(-1, 1, (len(d.relations),
                                     dim)), dtype=torch.double, requires_grad=True, device="cuda"))
        # Per-entity subject/object biases added to the score.
        self.bs = torch.nn.Parameter(torch.zeros(len(d.entities), dtype=torch.double, requires_grad=True, device="cuda"))
        self.bo = torch.nn.Parameter(torch.zeros(len(d.entities), dtype=torch.double, requires_grad=True, device="cuda"))
        self.loss = torch.nn.BCEWithLogitsLoss()

    def forward(self, u_idx, r_idx, v_idx):
        # Score a batch of (subject u, relation r, object v) index triples.
        u = self.Eh.weight[u_idx]
        v = self.Eh.weight[v_idx]
        Ru = self.Wu[r_idx]
        rvh = self.rvh.weight[r_idx]
        # Project any embedding that drifted outside the unit ball back
        # inside it (the -1e-5 keeps it strictly off the boundary).
        u = torch.where(torch.norm(u, 2, dim=-1, keepdim=True) >= 1,
                        u/(torch.norm(u, 2, dim=-1, keepdim=True)-1e-5), u)
        v = torch.where(torch.norm(v, 2, dim=-1, keepdim=True) >= 1,
                        v/(torch.norm(v, 2, dim=-1, keepdim=True)-1e-5), v)
        rvh = torch.where(torch.norm(rvh, 2, dim=-1, keepdim=True) >= 1,
                          rvh/(torch.norm(rvh, 2, dim=-1, keepdim=True)-1e-5), rvh)
        # Scale the subject in tangent space, then map back onto the ball.
        u_e = p_log_map(u)
        u_W = u_e * Ru
        u_m = p_exp_map(u_W)
        # Translate the object by the relation vector.
        v_m = p_sum(v, rvh)
        # Re-project the transformed points inside the ball.
        u_m = torch.where(torch.norm(u_m, 2, dim=-1, keepdim=True) >= 1,
                          u_m/(torch.norm(u_m, 2, dim=-1, keepdim=True)-1e-5), u_m)
        v_m = torch.where(torch.norm(v_m, 2, dim=-1, keepdim=True) >= 1,
                          v_m/(torch.norm(v_m, 2, dim=-1, keepdim=True)-1e-5), v_m)
        # Squared Poincare distance; the clamp keeps artanh away from its poles.
        sqdist = (2.*artanh(torch.clamp(torch.norm(p_sum(-u_m, v_m), 2, dim=-1), 1e-10, 1-1e-5)))**2
        return -sqdist + self.bs[u_idx] + self.bo[v_idx]
class MuRE(torch.nn.Module):
    '''
    Euclidean knowledge-graph embedding model (MuRE).

    Scores (subject, relation, object) triples as
    -||u * R_r - (v + r)||^2 + b_s[u] + b_o[v], i.e. a squared Euclidean
    distance between a relation-scaled subject and a relation-translated
    object, plus per-entity biases.
    '''
    def __init__(self, d, dim):
        # d: dataset object exposing `entities` and `relations` sequences.
        # dim: embedding dimensionality.
        super(MuRE, self).__init__()
        # Entity embeddings, small Gaussian init.
        # FIX: dropped the redundant `self.E.weight.data.double()` casts --
        # the weights were immediately overwritten on the next line anyway.
        self.E = torch.nn.Embedding(len(d.entities), dim, padding_idx=0)
        self.E.weight.data = (1e-3 * torch.randn((len(d.entities), dim), dtype=torch.double, device="cuda"))
        # Per-relation diagonal scaling vectors.
        self.Wu = torch.nn.Parameter(torch.tensor(np.random.uniform(-1, 1, (len(d.relations),
                                     dim)), dtype=torch.double, requires_grad=True, device="cuda"))
        # Per-relation translation vectors.
        self.rv = torch.nn.Embedding(len(d.relations), dim, padding_idx=0)
        self.rv.weight.data = (1e-3 * torch.randn((len(d.relations), dim), dtype=torch.double, device="cuda"))
        # Per-entity subject/object biases.
        self.bs = torch.nn.Parameter(torch.zeros(len(d.entities), dtype=torch.double, requires_grad=True, device="cuda"))
        self.bo = torch.nn.Parameter(torch.zeros(len(d.entities), dtype=torch.double, requires_grad=True, device="cuda"))
        self.loss = torch.nn.BCEWithLogitsLoss()

    def forward(self, u_idx, r_idx, v_idx):
        # Score a batch of (subject, relation, object) index triples.
        u = self.E.weight[u_idx]
        v = self.E.weight[v_idx]
        Ru = self.Wu[r_idx]
        rv = self.rv.weight[r_idx]
        # FIX: removed the unused local `u_size = u.size()`.
        u_W = u * Ru
        sqdist = torch.sum(torch.pow(u_W - (v + rv), 2), dim=-1)
        return -sqdist + self.bs[u_idx] + self.bo[v_idx]
| StarcoderdataPython |
3321325 | '''Given an array of strings strs, group the anagrams together. You can return the answer in any order.
An Anagram is a word or phrase formed by rearranging the letters of a different word or phrase, typically using all the original letters exactly once.
Example 1:
Input: strs = ["eat","tea","tan","ate","nat","bat"]
Output: [["bat"],["nat","tan"],["ate","eat","tea"]]
Example 2:
Input: strs = [""]
Output: [[""]]
'''
class Solution(object):
    def groupAnagrams(self, strs):
        """Group words that are anagrams of each other.

        Uses the alphabetically sorted letters of each word as a canonical
        key, giving a single O(n * k log k) pass (n words of length <= k)
        instead of the original O(n^2) rescan of the word list for every
        unique letter arrangement.

        @param strs: list of words
        @return: list of anagram groups (group order is unspecified,
                 matching the problem statement "in any order")
        """
        groups = {}
        for word in strs:
            groups.setdefault(''.join(sorted(word)), []).append(word)
        return list(groups.values())
| StarcoderdataPython |
3249353 | # -*- coding: utf-8 -*-
"""
Codes for GPU Caculation.
Created on Thu Apr 27 19:47:13 2017
@author: cheny
"""
import numpy as np
import math
def calc_density_gpu(xs, ys, weights, kernel_type, cutoffd=0, sigma=0):
    '''
    Compute a weighted point density for every (x, y) sample on the GPU.

    @param kernel_type: 'GAUSS' -> gaussian kernel with bandwidth `sigma`;
                        any other value -> hard cut-off kernel of radius `cutoffd`
    @return: numpy array of densities, one per input point
    '''
    from numba import cuda, float64, float32

    @cuda.jit
    def calc_density_gauss_cuda(xs, ys, weights, densitys, sigma, n):
        '''
        Gaussian-kernel density: one thread per point i, summing
        exp(-dist^2 / sigma^2) * weight over all points j.
        '''
        i = cuda.grid(1)
        if i < n:
            xi = xs[i]
            yi = ys[i]
            density = float64(0)
            # Points beyond 3*sigma contribute negligibly and are skipped.
            threads_hold = float32((3 * sigma) ** 2)
            for j in range(n):
                xd = xs[j] - xi
                yd = ys[j] - yi
                weightj = weights[j]
                distpow2 = xd ** 2 + yd ** 2
                if distpow2 < threads_hold:
                    density += math.exp(-float64(distpow2) / (sigma ** 2)) * weightj
            densitys[i] = density

    @cuda.jit
    def calc_density_cutoff_cuda(xs, ys, weights, densitys, cutoffd, n):
        '''
        Cut-off kernel density: one thread per point i, summing the weights
        of every point j within radius `cutoffd`.
        '''
        i = cuda.grid(1)
        if i < n:
            xi = xs[i]
            yi = ys[i]
            density = float64(0)
            threads_hold = cutoffd ** 2
            for j in range(n):
                xd = xs[j] - xi
                yd = ys[j] - yi
                weightj = weights[j]
                distpow2 = xd ** 2 + yd ** 2
                if distpow2 < threads_hold:
                    density += weightj
            densitys[i] = density

    # Shift coordinates to the origin and use float32 on the device.
    xs = np.ascontiguousarray((xs - xs.min()).astype(np.float32))
    ys = np.ascontiguousarray((ys - ys.min()).astype(np.float32))
    n = xs.shape[0]
    threadsperblock = 1024
    # BUG FIX: `np.int` was deprecated and removed in NumPy 1.24; use plain
    # integer ceiling division instead.
    blockspergrid = (n + threadsperblock - 1) // threadsperblock
    dev_denss = cuda.device_array(n)
    if kernel_type == 'GAUSS':
        calc_density_gauss_cuda[blockspergrid, threadsperblock](cuda.to_device(xs), cuda.to_device(ys), cuda.to_device(np.ascontiguousarray(weights)), dev_denss, sigma, n)
    else:
        calc_density_cutoff_cuda[blockspergrid, threadsperblock](cuda.to_device(xs), cuda.to_device(ys), cuda.to_device(np.ascontiguousarray(weights)), dev_denss, cutoffd, n)
    return dev_denss.copy_to_host()
def calc_nrst_dist_gpu(gids, xs, ys, densities):
    '''
    For every point, find the nearest point that has a strictly higher
    density (as used in density-peak clustering).

    @param gids: identifier of each geometry/point
    @return: (nearest_distances, parent_gids) -- for points with no
             higher-density neighbour the distance is inf and the parent -1.
    '''
    from numba import cuda, float64, float32

    @cuda.jit
    def calc_nrst_dist_cuda(gids, xs, ys, densities, nrst_dists, parent_gids, n):
        '''
        One thread per point i: scan all points j with densities[j] >
        densities[i] and keep the closest one.
        '''
        i = cuda.grid(1)
        if i < n:
            xi = xs[i]
            yi = ys[i]
            density = densities[i]
            # NOTE: 1e100 overflows float32 to +inf, which still compares
            # correctly as "larger than any real distance".
            nrst_dist = float32(1e100)
            # BUG FIX: was `np.int(-1)`; np.int was removed in NumPy 1.24
            # (and is not needed inside a CUDA kernel anyway).
            parent_gid = -1
            for j in range(n):
                xd = xs[j] - xi
                yd = ys[j] - yi
                gidd = gids[j]
                distpow2 = xd ** 2 + yd ** 2
                if densities[j] > density and distpow2 < nrst_dist:
                    nrst_dist = distpow2
                    parent_gid = gidd
            nrst_dists[i] = math.sqrt(float64(nrst_dist))
            parent_gids[i] = parent_gid

    n = xs.shape[0]
    # Shift coordinates to the origin and use float32 on the device.
    xs = np.ascontiguousarray((xs - xs.min()).astype(np.float32))
    ys = np.ascontiguousarray((ys - ys.min()).astype(np.float32))
    threadsperblock = 1024
    # BUG FIX: `np.int` was removed in NumPy 1.24; use integer ceiling division.
    blockspergrid = (n + threadsperblock - 1) // threadsperblock
    dev_nrst_dists = cuda.device_array(n)
    dev_parent_gids = cuda.device_array_like(gids)
    calc_nrst_dist_cuda[blockspergrid, threadsperblock](cuda.to_device(np.ascontiguousarray(gids))
        , cuda.to_device(xs), cuda.to_device(ys), cuda.to_device(np.ascontiguousarray(densities))
        , dev_nrst_dists, dev_parent_gids, n)
    return (dev_nrst_dists.copy_to_host(), dev_parent_gids.copy_to_host())
| StarcoderdataPython |
3267635 | # 이미지 주변에 padding을 넣고 300*300으로 변환, 저장하기.
import cv2
import numpy as np
from imutils import paths
filepath = "C:\\Users\\haram\\PycharmProjects\\OpenBankProject\\1024data\\1"
imagePaths = list(paths.list_images(filepath))
size = 500
for i, imagePath in enumerate(imagePaths):
save_dicname = imagePath.split('\\')
save_filename = "/".join(save_dicname[:-2]) + '/2/' + save_dicname[-1]
print(save_filename)
# print(path)
img = cv2.imread(imagePath)
# print(img.shape[1], img.shape[0])
print(imagePath)
# 가로와 세로 중 큰 값을 size로 맞추고, 빈 공간에 padding
if(img.shape[1] > size or img.shape[0] > size):
percent = 1
if(img.shape[1] > img.shape[0]) :
percent = size/img.shape[1]
else:
percent = size/img.shape[0]
img = cv2.resize(img, dsize=(0, 0), fx=percent, fy=percent, interpolation=cv2.INTER_LINEAR) # 양 선형 보간법
# 이미지 범위 지정
y, x, h, w = (0, 0, img.shape[0], img.shape[1])
# 그림 주변에 검은색으로 칠하기
w_x = (size-(w-x))/2 # w_x = (size - 그림)을 뺀 나머지 영역 크기 [ 그림나머지/2 [그림] 그림나머지/2 ]
h_y = (size-(h-y))/2
if(w_x < 0): # 크기가 -면 0으로 지정.
w_x = 0
elif(h_y < 0):
h_y = 0
M = np.float32([[1, 0, w_x], [0, 1, h_y]])
img_re = cv2.warpAffine(img, M, (size, size))
# print(img_re)
# print(img_re.shape)
# cv2.imshow("img_re", img_re)
# 이미지 저장하기
cv2.imwrite(save_filename, img_re) | StarcoderdataPython |
1688748 | <reponame>EagleMIT/m-i-d
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
    """Binary focal loss over a 2-channel score map.

    The target is expected one-hot over 2 channels; only channel 1 (the
    foreground indicator) is used as the class index. `alpha` may be a
    scalar (expanded to [alpha, 1 - alpha]), a 2-element list, or None.
    """

    def __init__(self, gamma=0, alpha=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        if isinstance(alpha, (float, int)):
            self.alpha = torch.Tensor([alpha, 1 - alpha])
        elif isinstance(alpha, list):
            self.alpha = torch.Tensor(alpha)
        else:
            self.alpha = alpha
        self.size_average = size_average

    def forward(self, input, target):
        if input.dim() > 2:
            # N,C,H,W -> N,H*W,C -> N*H*W,C
            flat = input.view(input.size(0), input.size(1), -1).transpose(1, 2)
            input = flat.contiguous().view(-1, flat.size(2))
        # Use the foreground channel of the one-hot target as class index.
        target = target[:, 1:].contiguous().view(-1, 1)
        logpt = F.log_softmax(input, -1).gather(1, target.to(torch.int64)).view(-1)
        pt = Variable(logpt.data.exp())
        if self.alpha is not None:
            if self.alpha.type() != input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0, target.data.view(-1).to(torch.int64))
            logpt = logpt * Variable(at)
        loss = -(1 - pt) ** self.gamma * logpt
        return loss.mean() if self.size_average else loss.sum()
def dice_loss(prediction, target):
    """Soft Dice loss on the foreground channel.

    `prediction` holds raw 2-channel logits (softmaxed here); `target` is
    one-hot over the same 2 channels. Returns 1 - Dice coefficient.
    """
    smooth = 1.0
    pred_fg = torch.softmax(prediction, dim=1)[:, 1:].contiguous().view(-1)
    target_fg = target[:, 1:].contiguous().view(-1)
    overlap = (pred_fg * target_fg).sum()
    return 1 - ((2. * overlap + smooth) / (pred_fg.sum() + target_fg.sum() + smooth))
def calc_loss(prediction, target, ce_weight=0.5):
    """Combined segmentation loss: ce_weight * focal + (1 - ce_weight) * dice.

    @param prediction: predicted 2-channel score map
    @param target: one-hot target map
    @param ce_weight: weight of the focal (cross-entropy-like) term
    """
    focal = FocalLoss(gamma=2, alpha=torch.FloatTensor([1., 1.]))
    ce_term = focal(prediction, target)
    dice_term = dice_loss(prediction, target)
    return ce_term * ce_weight + dice_term * (1 - ce_weight)
def dice_score(prediction, target):
    """Soft Dice coefficient between sigmoid(prediction) and target."""
    smooth = 1.0
    probs = torch.sigmoid(prediction).view(-1)
    labels = target.view(-1)
    overlap = (probs * labels).sum()
    return (2. * overlap + smooth) / (probs.sum() + labels.sum() + smooth)
def prediction_map_distillation(y, teacher_scores, T=4):
    """
    basic KD loss function based on "Distilling the Knowledge in a Neural Network"
    https://arxiv.org/abs/1503.02531
    :param y: student score map
    :param teacher_scores: teacher score map
    :param T: softmax temperature
    :return: loss value
    """
    log_p_student = F.log_softmax(y / T, dim=1).view(-1, 2)
    p_teacher = F.softmax(teacher_scores / T, dim=1).view(-1, 2)
    # T^2 rescaling keeps gradient magnitudes comparable across temperatures.
    return F.kl_div(log_p_student, p_teacher, reduction='batchmean') * (T ** 2)
def at(x, exp):
    """
    attention value of a feature map
    :param x: feature
    :return: L2-normalized per-pixel attention vector, shape (batch, H*W)
    """
    attention = x.pow(exp).mean(1)
    return F.normalize(attention.view(x.size(0), -1))


def importance_maps_distillation(s, t, exp=4):
    """
    importance_maps_distillation KD loss, based on "Paying More Attention to Attention:
    Improving the Performance of Convolutional Neural Networks via Attention Transfer"
    https://arxiv.org/abs/1612.03928
    :param exp: exponent
    :param s: student feature maps
    :param t: teacher feature maps
    :return: imd loss value
    """
    # Resample the student map if its spatial size differs from the teacher's.
    if s.shape[2] != t.shape[2]:
        s = F.interpolate(s, t.size()[-2:], mode='bilinear')
    diff = at(s, exp) - at(t, exp)
    return diff.pow(2).sum(dim=1).mean()
def region_contrast(x, gt):
    """
    calculate region contrast value
    :param x: feature map (batch, C, H, W)
    :param gt: two-channel mask (background = channel 0, foreground = channel 1)
    :return: cosine similarity between the two masked region means
    """
    smooth = 1.0
    bg_mask = gt[:, 0].unsqueeze(1)
    fg_mask = gt[:, 1].unsqueeze(1)
    bg_mean = torch.sum(x * bg_mask, dim=(2, 3)) / torch.sum(bg_mask, dim=(2, 3))
    # The +smooth guards against an empty foreground mask.
    fg_mean = torch.sum(x * fg_mask, dim=(2, 3)) / (torch.sum(fg_mask, dim=(2, 3)) + smooth)
    return F.cosine_similarity(bg_mean, fg_mean, dim=1)


def region_affinity_distillation(s, t, gt):
    """
    region affinity distillation KD loss
    :param s: student feature
    :param t: teacher feature
    :return: loss value
    """
    # Resize the mask to the student's spatial resolution.
    gt = F.interpolate(gt, s.size()[2:])
    contrast_gap = region_contrast(s, gt) - region_contrast(t, gt)
    return contrast_gap.pow(2).mean()
| StarcoderdataPython |
4840233 | from typing import List
from collections import defaultdict
class Solution:
    def leastBricks(self, wall: List[List[int]]) -> int:
        """Return the minimum number of bricks a vertical line must cross.

        Counts, for every interior brick edge, how many rows share that
        horizontal offset; the best line passes through the most popular
        edge, crossing `rows - hits` bricks.
        """
        edge_hits = defaultdict(int)
        best = 0
        for row in wall:
            offset = 0
            for brick in row[:-1]:  # skip the last brick: the wall's right edge doesn't count
                offset += brick
                edge_hits[offset] += 1
                if edge_hits[offset] > best:
                    best = edge_hits[offset]
        return len(wall) - best
# TESTS
# Each case pairs a wall layout with the expected minimum crossing count.
for wall, expected in [
    ([[1, 2, 2, 1], [3, 1, 2], [1, 3, 2], [2, 4], [3, 1, 2], [1, 3, 1, 1]], 2),
    ([[3], [3], [3]], 3),
]:
    sol = Solution()
    actual = sol.leastBricks(wall)
    print("The least of crossed bricks in", wall, "->", actual)
    assert actual == expected
| StarcoderdataPython |
41083 | """Checks if any of the latests tests has performed considerably different than
the previous ones. Takes the log directory as an argument."""
import os
import sys
from testsuite_common import Result, processLogLine, bcolors, getLastTwoLines
LOGDIR = sys.argv[1]  # Log directory, passed as the first CLI argument
PERCENTAGE = 5  # Threshold (%) a test result must change by to be reported
if len(sys.argv) == 3:
    # An optional second CLI argument overrides the 5% default.
    PERCENTAGE = float(sys.argv[2])
def printResults(regressed, better, unchanged, firsttime):
    """Pretty print the results in different colours"""
    for res in regressed:
        print(bcolors.RED + "REGRESSION! " + res.testname + " Was: "
              + str(res.previous) + " Is: " + str(res.current) + " Change: "
              + str(abs(res.percentage)) + "%. Revision: " + res.revision
              + bcolors.ENDC)
    if regressed:
        print('\n')
    for res in unchanged:
        print(bcolors.BLUE + "UNCHANGED: " + res.testname + " Revision: "
              + res.revision + bcolors.ENDC)
    if unchanged:
        print('\n')
    for res in better:
        print(bcolors.GREEN + "IMPROVEMENT! " + res.testname + " Was: "
              + str(res.previous) + " Is: " + str(res.current) + " Change: "
              + str(abs(res.percentage)) + "%. Revision: " + res.revision
              + bcolors.ENDC)
    for res in firsttime:
        print(bcolors.PURPLE + "First time test! " + res.testname
              + " Took: " + str(res.real) + " seconds. Revision: "
              + res.revision + bcolors.ENDC)
all_files = os.listdir(LOGDIR)

# Buckets for the four possible outcomes of comparing the last two runs.
regressed = []
better = []
unchanged = []
firsttime = []

# Go through all log files and find which tests have performed better.
for logfile in all_files:
    (line1, line2) = getLastTwoLines(logfile, LOGDIR)
    log1 = processLogLine(line1)
    if line2 == '\n':  # Empty line, only one test ever run
        firsttime.append(log1)
        continue
    log2 = processLogLine(line2)
    res = Result(log1.testname, log1.real, log2.real, log2.revision,\
        log2.branch, log1.revision, log1.branch)
    # NOTE(review): the slowdown branch tests `res.percentage` while the
    # speedup branch tests `res.change` -- confirm in testsuite_common that
    # this asymmetry is intentional.
    if res.percentage < -PERCENTAGE:
        regressed.append(res)
    elif res.change > PERCENTAGE:
        better.append(res)
    else:
        unchanged.append(res)

printResults(regressed, better, unchanged, firsttime)
| StarcoderdataPython |
167036 | import unittest
import pytest
from tfsnippet.utils import BaseRegistry, ClassRegistry
class RegistryTestCase(unittest.TestCase):
    """Tests for tfsnippet BaseRegistry and ClassRegistry."""

    def test_base_registry(self):
        """Register/get round-trips, duplicate and missing keys, with and
        without case-insensitive lookup.

        NOTE: each sub-scenario below is order-dependent -- later assertions
        rely on the registry state built up by earlier ones.
        """
        a = object()
        b = object()

        # test not ignore case
        r = BaseRegistry(ignore_case=False)
        self.assertFalse(r.ignore_case)
        r.register('a', a)
        self.assertIs(r.get('a'), a)
        with pytest.raises(KeyError, match='Object not registered: \'A\''):
            _ = r.get('A')
        self.assertListEqual(list(r), ['a'])
        with pytest.raises(KeyError, match='Object already registered: \'a\''):
            _ = r.register('a', a)
        with pytest.raises(KeyError, match='Object not registered: \'b\''):
            _ = r.get('b')
        # Case-sensitive: 'A' is a distinct key from 'a'.
        r.register('A', b)
        self.assertIs(r.get('A'), b)
        self.assertListEqual(list(r), ['a', 'A'])

        # test ignore case
        r = BaseRegistry(ignore_case=True)
        self.assertTrue(r.ignore_case)
        r.register('a', a)
        self.assertIs(r.get('a'), a)
        self.assertIs(r.get('A'), a)
        self.assertListEqual(list(r), ['a'])
        with pytest.raises(KeyError, match='Object already registered: \'A\''):
            _ = r.register('A', a)
        with pytest.raises(KeyError, match='Object not registered: \'b\''):
            _ = r.get('b')
        # Case-insensitive lookup returns the same object for 'b' and 'B',
        # while iteration preserves the originally registered spelling.
        r.register('B', b)
        self.assertIs(r.get('b'), b)
        self.assertIs(r.get('B'), b)
        self.assertListEqual(list(r), ['a', 'B'])

    def test_class_registry(self):
        """ClassRegistry rejects non-classes and can construct instances."""
        r = ClassRegistry()

        # Only classes may be registered.
        with pytest.raises(TypeError, match='`obj` is not a class: 123'):
            r.register('int', 123)

        class MyClass(object):
            def __init__(self, value, message):
                self.value = value
                self.message = message

        r.register('MyClass', MyClass)
        self.assertIs(r.get('MyClass'), MyClass)
        # construct() forwards positional and keyword args to __init__.
        o = r.construct('MyClass', 123, message='message')
        self.assertIsInstance(o, MyClass)
        self.assertEqual(o.value, 123)
        self.assertEqual(o.message, 'message')
| StarcoderdataPython |
1734848 | <reponame>codelovin/kaggle-utils
class BlenderBase:
def __init__(self, name):
self.name = name
def blend(self, scores, *args):
raise NotImplementedError
class PowerBlend(BlenderBase):
def __init__(self):
super().__init__("")
def blend(self, scores, lin_coefs, exp_coefs):
result = 0
for index, score in enumerate(scores):
result += lin_coefs[index] * np.power(score, exp_coefs[index])
return result
| StarcoderdataPython |
144451 | """Test gates defined in `qibo/core/gates.py`."""
import pytest
import numpy as np
from qibo import gates, K
from qibo.config import raise_error
from qibo.tests.utils import random_state, random_density_matrix
def apply_gates(gatelist, nqubits=None, initial_state=None):
    """Apply ``gatelist`` sequentially to an initial state and return the result.

    When ``initial_state`` is None, |0...0> on ``nqubits`` qubits is used;
    a numpy array is copied and, if needed, ``nqubits`` is inferred from it.
    """
    if initial_state is None:
        state = K.qnp.zeros(2 ** nqubits)
        state[0] = 1
    elif isinstance(initial_state, np.ndarray):
        state = np.copy(initial_state)
        inferred_nqubits = int(np.log2(len(state)))
        if nqubits is None:
            nqubits = inferred_nqubits
        else:  # pragma: no cover
            assert nqubits == inferred_nqubits
    else:  # pragma: no cover
        raise_error(TypeError, "Invalid initial state type {}."
                               "".format(type(initial_state)))
    state = K.cast(state)
    for gate in gatelist:
        state = gate(state)
    return state
def test__control_unitary(backend):
    """``_control_unitary`` embeds a 2x2 matrix in the controlled 4x4 block."""
    matrix = K.cast(np.random.random((2, 2)))
    gate = gates.Unitary(matrix, 0)
    unitary = gate._control_unitary(matrix)
    # Controlled gate: identity on the |0x> block, original matrix on |1x>.
    target_unitary = np.eye(4, dtype=K._dtypes.get('DTYPECPX'))
    target_unitary[2:, 2:] = K.to_numpy(matrix)
    K.assert_allclose(unitary, target_unitary)
    # Anything other than a 2x2 matrix must be rejected.
    with pytest.raises(ValueError):
        unitary = gate._control_unitary(np.random.random((16, 16)))
def test_h(backend):
final_state = apply_gates([gates.H(0), gates.H(1)], nqubits=2)
target_state = np.ones_like(final_state) / 2
K.assert_allclose(final_state, target_state)
def test_x(backend):
final_state = apply_gates([gates.X(0)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[2] = 1.0
K.assert_allclose(final_state, target_state)
def test_y(backend):
final_state = apply_gates([gates.Y(1)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[1] = 1j
K.assert_allclose(final_state, target_state)
def test_z(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.Z(0)], nqubits=2)
target_state = np.ones_like(final_state) / 2.0
target_state[2] *= -1.0
target_state[3] *= -1.0
K.assert_allclose(final_state, target_state)
def test_s(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.S(1)], nqubits=2)
target_state = np.array([0.5, 0.5j, 0.5, 0.5j])
K.assert_allclose(final_state, target_state)
def test_sdg(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.SDG(1)], nqubits=2)
target_state = np.array([0.5, -0.5j, 0.5, -0.5j])
K.assert_allclose(final_state, target_state)
def test_t(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.T(1)], nqubits=2)
target_state = np.array([0.5, (1 + 1j) / np.sqrt(8),
0.5, (1 + 1j) / np.sqrt(8)])
K.assert_allclose(final_state, target_state)
def test_tdg(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.TDG(1)], nqubits=2)
target_state = np.array([0.5, (1 - 1j) / np.sqrt(8),
0.5, (1 - 1j) / np.sqrt(8)])
K.assert_allclose(final_state, target_state)
def test_identity(backend):
gatelist = [gates.H(0), gates.H(1), gates.I(0), gates.I(1)]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(final_state) / 2.0
K.assert_allclose(final_state, target_state)
gatelist = [gates.H(0), gates.H(1), gates.I(0, 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
def test_align(backend):
gate = gates.Align(0, 1)
gatelist = [gates.H(0), gates.H(1), gate]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(final_state) / 2.0
K.assert_allclose(final_state, target_state)
gate_matrix = gate._construct_unitary()
K.assert_allclose(gate_matrix, np.eye(4))
# :class:`qibo.core.cgates.M` is tested seperately in `test_measurement_gate.py`
def test_rx(backend):
theta = 0.1234
final_state = apply_gates([gates.H(0), gates.RX(0, theta=theta)], nqubits=1)
phase = np.exp(1j * theta / 2.0)
gate = np.array([[phase.real, -1j * phase.imag],
[-1j * phase.imag, phase.real]])
target_state = gate.dot(np.ones(2)) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
def test_ry(backend):
theta = 0.1234
final_state = apply_gates([gates.H(0), gates.RY(0, theta=theta)], nqubits=1)
phase = np.exp(1j * theta / 2.0)
gate = np.array([[phase.real, -phase.imag],
[phase.imag, phase.real]])
target_state = gate.dot(np.ones(2)) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("applyx", [True, False])
def test_rz(backend, applyx):
theta = 0.1234
if applyx:
gatelist = [gates.X(0)]
else:
gatelist = []
gatelist.append(gates.RZ(0, theta))
final_state = apply_gates(gatelist, nqubits=1)
target_state = np.zeros_like(final_state)
p = int(applyx)
target_state[p] = np.exp((2 * p - 1) * 1j * theta / 2.0)
K.assert_allclose(final_state, target_state)
def test_u1(backend):
theta = 0.1234
final_state = apply_gates([gates.X(0), gates.U1(0, theta)], nqubits=1)
target_state = np.zeros_like(final_state)
target_state[1] = np.exp(1j * theta)
K.assert_allclose(final_state, target_state)
def test_u2(backend):
phi = 0.1234
lam = 0.4321
initial_state = random_state(1)
final_state = apply_gates([gates.U2(0, phi, lam)], initial_state=initial_state)
matrix = np.array([[np.exp(-1j * (phi + lam) / 2), -np.exp(-1j * (phi - lam) / 2)],
[np.exp(1j * (phi - lam) / 2), np.exp(1j * (phi + lam) / 2)]])
target_state = matrix.dot(initial_state) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
def test_u3(backend):
theta = 0.1111
phi = 0.1234
lam = 0.4321
initial_state = random_state(1)
final_state = apply_gates([gates.U3(0, theta, phi, lam)],
initial_state=initial_state)
cost, sint = np.cos(theta / 2), np.sin(theta / 2)
ep = np.exp(1j * (phi + lam) / 2)
em = np.exp(1j * (phi - lam) / 2)
matrix = np.array([[ep.conj() * cost, - em.conj() * sint],
[em * sint, ep * cost]])
target_state = matrix.dot(initial_state)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("applyx", [False, True])
def test_cnot(backend, applyx):
if applyx:
gatelist = [gates.X(0)]
else:
gatelist = []
gatelist.append(gates.CNOT(0, 1))
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.zeros_like(final_state)
target_state[3 * int(applyx)] = 1.0
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("controlled_by", [False, True])
def test_cz(backend, controlled_by):
initial_state = random_state(2)
matrix = np.eye(4)
matrix[3, 3] = -1
target_state = matrix.dot(initial_state)
if controlled_by:
gate = gates.Z(1).controlled_by(0)
else:
gate = gates.CZ(0, 1)
final_state = apply_gates([gate], initial_state=initial_state)
assert gate.name == "cz"
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("name,params",
[("CRX", {"theta": 0.1}),
("CRY", {"theta": 0.2}),
("CRZ", {"theta": 0.3}),
("CU1", {"theta": 0.1}),
("CU2", {"phi": 0.1, "lam": 0.2}),
("CU3", {"theta": 0.1, "phi": 0.2, "lam": 0.3})])
def test_cun(backend, name, params):
initial_state = random_state(2)
gate = getattr(gates, name)(0, 1, **params)
final_state = apply_gates([gate], initial_state=initial_state)
target_state = np.dot(K.to_numpy(gate.matrix), initial_state)
K.assert_allclose(final_state, target_state)
def test_swap(backend):
final_state = apply_gates([gates.X(1), gates.SWAP(0, 1)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[2] = 1.0
K.assert_allclose(final_state, target_state)
def test_multiple_swap(backend):
gatelist = [gates.X(0), gates.X(2), gates.SWAP(0, 1), gates.SWAP(2, 3)]
final_state = apply_gates(gatelist, nqubits=4)
gatelist = [gates.X(1), gates.X(3)]
target_state = apply_gates(gatelist, nqubits=4)
K.assert_allclose(final_state, target_state)
def test_fsim(backend):
theta = 0.1234
phi = 0.4321
gatelist = [gates.H(0), gates.H(1), gates.fSim(0, 1, theta, phi)]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(K.to_numpy(final_state)) / 2.0
rotation = np.array([[np.cos(theta), -1j * np.sin(theta)],
[-1j * np.sin(theta), np.cos(theta)]])
matrix = np.eye(4, dtype=target_state.dtype)
matrix[1:3, 1:3] = rotation
matrix[3, 3] = np.exp(-1j * phi)
target_state = matrix.dot(target_state)
K.assert_allclose(final_state, target_state)
def test_generalized_fsim(backend):
phi = np.random.random()
rotation = np.random.random((2, 2)) + 1j * np.random.random((2, 2))
gatelist = [gates.H(0), gates.H(1), gates.H(2)]
gatelist.append(gates.GeneralizedfSim(1, 2, rotation, phi))
final_state = apply_gates(gatelist, nqubits=3)
target_state = np.ones_like(K.to_numpy(final_state)) / np.sqrt(8)
matrix = np.eye(4, dtype=target_state.dtype)
matrix[1:3, 1:3] = rotation
matrix[3, 3] = np.exp(-1j * phi)
target_state[:4] = matrix.dot(target_state[:4])
target_state[4:] = matrix.dot(target_state[4:])
K.assert_allclose(final_state, target_state)
def test_generalized_fsim_parameter_setter(backend):
phi = np.random.random()
matrix = np.random.random((2, 2))
gate = gates.GeneralizedfSim(0, 1, matrix, phi)
K.assert_allclose(gate.parameters[0], matrix)
assert gate.parameters[1] == phi
matrix = np.random.random((4, 4))
with pytest.raises(ValueError):
gate = gates.GeneralizedfSim(0, 1, matrix, phi)
@pytest.mark.parametrize("applyx", [False, True])
def test_toffoli(backend, applyx):
if applyx:
gatelist = [gates.X(0), gates.X(1), gates.TOFFOLI(0, 1, 2)]
else:
gatelist = [gates.X(1), gates.TOFFOLI(0, 1, 2)]
final_state = apply_gates(gatelist, nqubits=3)
target_state = np.zeros_like(final_state)
if applyx:
target_state[-1] = 1
else:
target_state[2] = 1
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("nqubits", [2, 3])
def test_unitary(backend, nqubits):
initial_state = np.ones(2 ** nqubits) / np.sqrt(2 ** nqubits)
matrix = np.random.random(2 * (2 ** (nqubits - 1),))
target_state = np.kron(np.eye(2), matrix).dot(initial_state)
gatelist = [gates.H(i) for i in range(nqubits)]
gatelist.append(gates.Unitary(matrix, *range(1, nqubits), name="random"))
final_state = apply_gates(gatelist, nqubits=nqubits)
K.assert_allclose(final_state, target_state)
def test_unitary_initialization(backend):
matrix = np.random.random((4, 4))
gate = gates.Unitary(matrix, 0, 1)
K.assert_allclose(gate.parameters, matrix)
matrix = np.random.random((8, 8))
with pytest.raises(ValueError):
gate = gates.Unitary(matrix, 0, 1)
with pytest.raises(TypeError):
gate = gates.Unitary("abc", 0, 1)
def test_unitary_common_gates(backend):
target_state = apply_gates([gates.X(0), gates.H(1)], nqubits=2)
gatelist = [gates.Unitary(np.array([[0, 1], [1, 0]]), 0),
gates.Unitary(np.array([[1, 1], [1, -1]]) / np.sqrt(2), 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
thetax = 0.1234
thetay = 0.4321
gatelist = [gates.RX(0, theta=thetax), gates.RY(1, theta=thetay),
gates.CNOT(0, 1)]
target_state = apply_gates(gatelist, nqubits=2)
rx = np.array([[np.cos(thetax / 2), -1j * np.sin(thetax / 2)],
[-1j * np.sin(thetax / 2), np.cos(thetax / 2)]])
ry = np.array([[np.cos(thetay / 2), -np.sin(thetay / 2)],
[np.sin(thetay / 2), np.cos(thetay / 2)]])
cnot = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
gatelist = [gates.Unitary(rx, 0), gates.Unitary(ry, 1),
gates.Unitary(cnot, 0, 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
def test_unitary_multiqubit(backend):
gatelist = [gates.H(i) for i in range(4)]
gatelist.append(gates.CNOT(0, 1))
gatelist.append(gates.CNOT(2, 3))
gatelist.extend(gates.X(i) for i in range(4))
h = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
x = np.array([[0, 1], [1, 0]])
cnot = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
matrix = np.kron(np.kron(x, x), np.kron(x, x))
matrix = matrix @ np.kron(cnot, cnot)
matrix = matrix @ np.kron(np.kron(h, h), np.kron(h, h))
unitary = gates.Unitary(matrix, 0, 1, 2, 3)
if K.name == "qibotf":
with pytest.raises(NotImplementedError):
final_state = apply_gates([unitary], nqubits=4)
else:
final_state = apply_gates([unitary], nqubits=4)
target_state = apply_gates(gatelist, nqubits=4)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("nqubits", [5, 6])
def test_variational_layer(backend, nqubits):
theta = 2 * np.pi * np.random.random(nqubits)
gatelist = [gates.RY(i, t) for i, t in enumerate(theta)]
gatelist.extend(gates.CZ(i, i + 1) for i in range(0, nqubits - 1, 2))
target_state = apply_gates(gatelist, nqubits=nqubits)
pairs = list((i, i + 1) for i in range(0, nqubits - 1, 2))
gate = gates.VariationalLayer(range(nqubits), pairs,
gates.RY, gates.CZ,
theta)
final_state = apply_gates([gate], nqubits=nqubits)
K.assert_allclose(target_state, final_state)
def test_variational_layer__construct_unitary(backend):
pairs = list((i, i + 1) for i in range(0, 5, 2))
theta = 2 * np.pi * np.random.random(6)
gate = gates.VariationalLayer(range(6), pairs, gates.RY, gates.CZ, theta)
with pytest.raises(ValueError):
gate._construct_unitary()
def test_flatten(backend):
target_state = np.ones(4) / 2.0
final_state = apply_gates([gates.Flatten(target_state)], nqubits=2)
K.assert_allclose(final_state, target_state)
target_state = np.ones(4) / 2.0
gate = gates.Flatten(target_state)
with pytest.raises(ValueError):
gate._construct_unitary()
def test_callback_gate_errors():
from qibo import callbacks
entropy = callbacks.EntanglementEntropy([0])
gate = gates.CallbackGate(entropy)
with pytest.raises(ValueError):
gate._construct_unitary()
def test_general_channel(backend):
a1 = np.sqrt(0.4) * np.array([[0, 1], [1, 0]])
a2 = np.sqrt(0.6) * np.array([[1, 0, 0, 0], [0, 1, 0, 0],
[0, 0, 0, 1], [0, 0, 1, 0]])
a1, a2 = K.cast(a1), K.cast(a2)
initial_rho = random_density_matrix(2)
gate = gates.KrausChannel([((1,), a1), ((0, 1), a2)])
assert gate.target_qubits == (0, 1)
final_rho = gate(np.copy(initial_rho))
m1 = np.kron(np.eye(2), K.to_numpy(a1))
m2 = K.to_numpy(a2)
target_rho = (m1.dot(initial_rho).dot(m1.conj().T) +
m2.dot(initial_rho).dot(m2.conj().T))
K.assert_allclose(final_rho, target_rho)
def test_krauss_channel_errors(backend):
# bad Kraus matrix shape
a1 = np.sqrt(0.4) * np.array([[0, 1], [1, 0]])
with pytest.raises(ValueError):
gate = gates.KrausChannel([((0, 1), a1)])
# Using KrausChannel on state vectors
channel = gates.KrausChannel([((0,), np.eye(2))])
with pytest.raises(ValueError):
channel._state_vector_call(np.random.random(4))
# Attempt to construct unitary for KrausChannel
with pytest.raises(ValueError):
channel._construct_unitary()
def test_controlled_by_channel_error():
with pytest.raises(ValueError):
gates.PauliNoiseChannel(0, px=0.5).controlled_by(1)
a1 = np.sqrt(0.4) * np.array([[0, 1], [1, 0]])
a2 = np.sqrt(0.6) * np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1],
[0, 0, 1, 0]])
config = [((1,), a1), ((0, 1), a2)]
with pytest.raises(ValueError):
gates.KrausChannel(config).controlled_by(1)
def test_unitary_channel(backend):
a1 = np.array([[0, 1], [1, 0]])
a2 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
probs = [0.4, 0.3]
matrices = [((0,), a1), ((2, 3), a2)]
initial_state = random_density_matrix(4)
gate = gates.UnitaryChannel(probs, matrices)
gate.density_matrix = True
final_state = gate(K.cast(np.copy(initial_state)))
eye = np.eye(2)
ma1 = np.kron(np.kron(a1, eye), np.kron(eye, eye))
ma2 = np.kron(np.kron(eye, eye), a2)
target_state = (0.3 * initial_state
+ 0.4 * ma1.dot(initial_state.dot(ma1))
+ 0.3 * ma2.dot(initial_state.dot(ma2)))
K.assert_allclose(final_state, target_state)
def test_unitary_channel_errors():
"""Check errors raised by ``gates.UnitaryChannel``."""
a1 = np.array([[0, 1], [1, 0]])
a2 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
probs = [0.4, 0.3]
matrices = [((0,), a1), ((2, 3), a2)]
# Invalid probability length
with pytest.raises(ValueError):
gate = gates.UnitaryChannel([0.1, 0.3, 0.2], matrices)
# Probability > 1
with pytest.raises(ValueError):
gate = gates.UnitaryChannel([1.1, 0.2], matrices)
# Probability sum = 0
with pytest.raises(ValueError):
gate = gates.UnitaryChannel([0.0, 0.0], matrices)
def test_pauli_noise_channel(backend):
initial_rho = random_density_matrix(2)
gate = gates.PauliNoiseChannel(1, px=0.3)
gate.density_matrix = True
final_rho = gate(K.cast(np.copy(initial_rho)))
gate = gates.X(1)
gate.density_matrix = True
initial_rho = K.cast(initial_rho)
target_rho = 0.3 * gate(K.copy(initial_rho))
target_rho += 0.7 * initial_rho
K.assert_allclose(final_rho, target_rho)
def test_reset_channel(backend):
initial_rho = random_density_matrix(3)
gate = gates.ResetChannel(0, p0=0.2, p1=0.2)
gate.density_matrix = True
final_rho = gate(K.cast(np.copy(initial_rho)))
dtype = initial_rho.dtype
collapsed_rho = np.copy(initial_rho).reshape(6 * (2,))
collapsed_rho[0, :, :, 1, :, :] = np.zeros(4 * (2,), dtype=dtype)
collapsed_rho[1, :, :, 0, :, :] = np.zeros(4 * (2,), dtype=dtype)
collapsed_rho[1, :, :, 1, :, :] = np.zeros(4 * (2,), dtype=dtype)
collapsed_rho = collapsed_rho.reshape((8, 8))
collapsed_rho /= np.trace(collapsed_rho)
mx = np.kron(np.array([[0, 1], [1, 0]]), np.eye(4))
flipped_rho = mx.dot(collapsed_rho.dot(mx))
target_rho = 0.6 * initial_rho + 0.2 * (collapsed_rho + flipped_rho)
K.assert_allclose(final_rho, target_rho)
@pytest.mark.parametrize("t1,t2,time,excpop",
[(0.8, 0.5, 1.0, 0.4), (0.5, 0.8, 1.0, 0.4)])
def test_thermal_relaxation_channel(backend, t1, t2, time, excpop):
"""Check ``gates.ThermalRelaxationChannel`` on a 3-qubit random density matrix."""
initial_rho = random_density_matrix(3)
gate = gates.ThermalRelaxationChannel(0, t1, t2, time=time,
excited_population=excpop)
gate.density_matrix = True
final_rho = gate(K.cast(np.copy(initial_rho))) # pylint: disable=E1102
exp, p0, p1 = gate.calculate_probabilities(t1, t2, time, excpop)
if t2 > t1:
matrix = np.diag([1 - p1, p0, p1, 1 - p0])
matrix[0, -1], matrix[-1, 0] = exp, exp
matrix = matrix.reshape(4 * (2,))
# Apply matrix using Eq. (3.28) from arXiv:1111.6950
target_rho = np.copy(initial_rho).reshape(6 * (2,))
target_rho = np.einsum("abcd,aJKcjk->bJKdjk", matrix, target_rho)
target_rho = target_rho.reshape(initial_rho.shape)
else:
pz = exp
pi = 1 - pz - p0 - p1
dtype = initial_rho.dtype
collapsed_rho = np.copy(initial_rho).reshape(6 * (2,))
collapsed_rho[0, :, :, 1, :, :] = np.zeros(4 * (2,), dtype=dtype)
collapsed_rho[1, :, :, 0, :, :] = np.zeros(4 * (2,), dtype=dtype)
collapsed_rho[1, :, :, 1, :, :] = np.zeros(4 * (2,), dtype=dtype)
collapsed_rho = collapsed_rho.reshape((8, 8))
collapsed_rho /= np.trace(collapsed_rho)
mx = np.kron(np.array([[0, 1], [1, 0]]), np.eye(4))
mz = np.kron(np.array([[1, 0], [0, -1]]), np.eye(4))
z_rho = mz.dot(initial_rho.dot(mz))
flipped_rho = mx.dot(collapsed_rho.dot(mx))
target_rho = (pi * initial_rho + pz * z_rho + p0 * collapsed_rho +
p1 * flipped_rho)
K.assert_allclose(final_rho, target_rho)
# Try to apply to state vector if t1 < t2
if t1 < t2:
with pytest.raises(ValueError):
gate._state_vector_call(initial_rho) # pylint: disable=no-member
@pytest.mark.parametrize("t1,t2,time,excpop",
[(1.0, 0.5, 1.5, 1.5), (1.0, 0.5, -0.5, 0.5),
(1.0, -0.5, 1.5, 0.5), (-1.0, 0.5, 1.5, 0.5),
(1.0, 3.0, 1.5, 0.5)])
def test_thermal_relaxation_channel_errors(backend, t1, t2, time, excpop):
with pytest.raises(ValueError):
gate = gates.ThermalRelaxationChannel(
0, t1, t2, time, excited_population=excpop)
def test_fused_gate_init(backend):
gate = gates.FusedGate(0)
gate = gates.FusedGate(0, 1)
if K.is_custom:
with pytest.raises(NotImplementedError):
gate = gates.FusedGate(0, 1, 2)
def test_fused_gate_construct_unitary(backend):
gate = gates.FusedGate(0, 1)
gate.add(gates.H(0))
gate.add(gates.H(1))
gate.add(gates.CZ(0, 1))
hmatrix = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
czmatrix = np.diag([1, 1, 1, -1])
target_matrix = czmatrix @ np.kron(hmatrix, hmatrix)
K.assert_allclose(gate.matrix, target_matrix)
| StarcoderdataPython |
4815379 | <reponame>MateusGundel/python-utils
from random import shuffle

# Repeatedly read a sentence and print its words in random order until the
# user answers something other than "S".  The original used the reserved
# keyword ``continue`` as a variable name and wrote ``while continue.upper()``
# without a comparison or colon, so the script did not even parse.
resposta = 'S'
while resposta.upper() == 'S':
    words = input("insira a frase: ").split(" ")
    # One shuffle is enough; the original re-shuffled once per word.
    shuffle(words)
    print(" ".join(words))
    resposta = input('Digite S para continuar')
1723062 | <filename>nrpc/__init__.py<gh_stars>1-10
from .lib import (
parse_subject,
parse_subject_tail,
streamed_reply_request,
streamed_reply_handler,
)
from .exc import ClientError
__all__ = [
"ClientError",
"parse_subject",
"parse_subject_tail",
"streamed_reply_request",
"streamed_reply_handler",
]
| StarcoderdataPython |
89078 | <gh_stars>0
# Print the multiplication table (1 x n ... 10 x n) for a user-supplied number.
n = int(input("Enter the number"))
for i in range(1, 11):
    print(n, "x", i, "=", n * i)
| StarcoderdataPython |
1661080 | <gh_stars>0
"""
Display drag from the app into the graph widget and the event bridge.
This is similar to the hello world sample.
"""
import sys
from PyQt5 import QtWidgets
from PyQt5.QtCore import QSize
from PyQt5.QtGui import QDrag
from PyQt5.QtWidgets import (
QGridLayout,
QMainWindow,
QPushButton,
QVBoxLayout,
QWidget,
)
import qmxgraph.mime
from qmxgraph.widget import EventsBridge, QmxGraph
def create_drag_button(text, qmx_style, parent=None):
    """Build a :class:`DragButton` whose drag payload uses *qmx_style*.

    Args:
        text: button label; also used as the dragged vertex's label.
        qmx_style: mxGraph style string stored as a dynamic property on the
            button (read back by ``DragButton.mousePressEvent``).
        parent: optional parent widget.
    """
    button = DragButton(parent)
    button.setText(text)
    # # You can set an icon to the button with:
    # button.setIcon(...)
    button.setProperty('qmx_style', qmx_style)
    button.setToolTip("Drag me into the graph widget")
    return button
class DragButton(QPushButton):
    """
    Start a drag event with custom data.

    Pressing the button begins a Qt drag whose MIME payload describes a
    single vertex (offset, size, label and mxGraph style).
    """
    def mousePressEvent(self, event):
        # Serialize one vertex; dx/dy are offsets relative to the drop point,
        # the label is the button text and the style comes from the dynamic
        # 'qmx_style' property set by create_drag_button().
        mime_data = qmxgraph.mime.create_qt_mime_data(
            {
                'vertices': [
                    {
                        'dx': 0,
                        'dy': 0,
                        'width': 120,
                        'height': 40,
                        'label': self.text(),
                        'style': self.property('qmx_style'),
                    }
                ]
            }
        )
        drag = QDrag(self)
        drag.setMimeData(mime_data)
        # # You can set icons like the following:
        # w, h = self.property('component_size')
        # # Image displayed while dragging.
        # drag.setPixmap(self.icon().pixmap(w, h))
        # # Position of the image where the mouse is centered.
        # drag.setHotSpot(QPoint(w // 2, h // 2)
        drag.exec_()
class DragAndDropWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setProperty('name', 'adas')
self.setMinimumSize(QSize(640, 480))
self.setWindowTitle("Drag&Drop Styles")
central_widget = QWidget(self)
self.setCentralWidget(central_widget)
self.button_pane = QWidget(self)
self.button_pane.setEnabled(False)
red_button = create_drag_button(
'RED', 'fillColor=#D88', self.button_pane
)
green_button = create_drag_button(
'GREEN', 'fillColor=#8D8', self.button_pane
)
blue_button = create_drag_button(
'BLUE', 'fillColor=#88D', self.button_pane
)
self.graph_widget = QmxGraph(parent=central_widget)
self.graph_widget.loadFinished.connect(self.graph_load_handler)
main_layout = QGridLayout(self)
central_widget.setLayout(main_layout)
main_layout.addWidget(self.graph_widget, 0, 0)
main_layout.addWidget(self.button_pane, 0, 1)
buttons_layout = QVBoxLayout(self.button_pane)
self.button_pane.setLayout(buttons_layout)
buttons_layout.addWidget(red_button)
buttons_layout.addWidget(green_button)
buttons_layout.addWidget(blue_button)
def graph_load_handler(self, is_loaded):
##################################
# Based in `EventsBridge` docstring.
def on_cells_added_handler(cell_ids):
print(f'added {cell_ids}')
qmx = widget.api
for cid in cell_ids:
label = qmx.get_label(cid)
qmx.set_label(cid, f'{label} ({cid})')
def on_terminal_changed_handler(
cell_id, terminal_type, new_terminal_id, old_terminal_id
):
print(
f'{terminal_type} of {cell_id} changed from'
f' {old_terminal_id} to {new_terminal_id}'
)
def on_cells_removed_handler(cell_ids):
print(f'removed {cell_ids}')
events_bridge = EventsBridge()
widget = self.graph_widget
widget.set_events_bridge(events_bridge)
events_bridge.on_cells_added.connect(on_cells_added_handler)
events_bridge.on_cells_removed.connect(on_cells_removed_handler)
events_bridge.on_terminal_changed.connect(on_terminal_changed_handler)
#
##################################
self.button_pane.setEnabled(is_loaded)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mainWin = DragAndDropWindow()
mainWin.show()
sys.exit(app.exec_())
| StarcoderdataPython |
4811470 | <filename>tests/musicxml/elements/note/test_rest.py
from unittest import TestCase
from musicscore.musicxml.elements.fullnote import DisplayStep, DisplayOctave, Rest
class TestRest(TestCase):
def setUp(self):
self.rest = Rest()
def test_rest(self):
result = '''<rest/>
'''
self.assertEqual(self.rest.to_string(), result)
def test_rest_2(self):
self.rest.add_child(DisplayStep('C'))
self.rest.add_child(DisplayOctave(4))
result = '''<rest>
<display-step>C</display-step>
<display-octave>4</display-octave>
</rest>
'''
self.assertEqual(self.rest.to_string(), result)
def test_rest_3(self):
self.rest.add_child(DisplayStep('C'))
self.rest.display_step.value = 'D'
self.rest.add_child(DisplayOctave(7))
result = '''<rest>
<display-step>D</display-step>
<display-octave>7</display-octave>
</rest>
'''
self.assertEqual(self.rest.to_string(), result)
| StarcoderdataPython |
3314733 | import numpy as np
from kalah.agents.randomagent import RandomAgent
from kalah.agents.maxscoreagent import MaxScoreAgent
from kalah.agents.maxscorerepeatagent import MaxScoreRepeatAgent
from kalah.agents.minimaxagent import MinimaxAgent
from kalah.kalahagentfactory import KalahAgentFactory
import unittest
class Test_KalahAgentFactory(unittest.TestCase):
def test_get_random_agent(self):
factory = KalahAgentFactory()
agent = factory.get_random_agent()
self.assertIsNotNone(agent)
agent_classes = np.array([
RandomAgent(),
MaxScoreAgent(),
MaxScoreRepeatAgent(),
MinimaxAgent()
])
weights = np.array([1, 0, 0, 0])
factory = KalahAgentFactory(agent_classes, weights)
agent = factory.get_random_agent()
self.assertEqual(agent.__class__.__name__, RandomAgent.__name__)
weights = np.array([0, 1, 0, 0])
factory = KalahAgentFactory(agent_classes, weights)
agent = factory.get_random_agent()
self.assertEqual(agent.__class__.__name__, MaxScoreAgent.__name__)
weights = np.array([0, 0, 1, 0])
factory = KalahAgentFactory(agent_classes, weights)
agent = factory.get_random_agent()
self.assertEqual(agent.__class__.__name__, MaxScoreRepeatAgent.__name__)
weights = np.array([0, 0, 0, 1])
factory = KalahAgentFactory(agent_classes, weights)
agent = factory.get_random_agent()
self.assertEqual(agent.__class__.__name__, MinimaxAgent.__name__)
def test_fail(self):
agent_classes = np.array([
RandomAgent,
MaxScoreAgent,
MaxScoreRepeatAgent,
MinimaxAgent
])
weights = np.array([1, 0, 0])
with self.assertRaises(ValueError):
KalahAgentFactory(agent_classes, weights)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
class Car(object):
    """Minimal demo of a class attribute shared by all instances."""

    # Class-level default; instances read this until they shadow it.
    condition = "new"


my_car = Car()
print(my_car.condition)
| StarcoderdataPython |
82554 | __all__ = ["euler_gamma_law", "sr_euler_gamma_law", "sr_mhd", "sr_rmhd", "sr_mf"]
| StarcoderdataPython |
1654489 | <reponame>puyomi/gostagram
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models, serializers
class NotifyToUser(APIView):
    """Return the 20 most recent notifications addressed to the caller."""
    def get(self, request, format=None):
        user = request.user
        # Slice relies on the queryset's default ordering — presumably
        # newest-first via the model's Meta; TODO confirm.
        notification = models.Notification.objects.filter(noti_to_user=user)[:20]
        serializer = serializers.NotificationSerializers(notification, many=True)
        return Response(data=serializer.data ,status=status.HTTP_200_OK)
def create_notification(noti_from, noti_to, noti_type, image=None, comment=None):
    """Create and persist a ``Notification`` row.

    Args:
        noti_from: user generating the notification.
        noti_to: user receiving the notification.
        noti_type: notification category string.
        image: optional related image.
        comment: optional related comment.

    Returns:
        the created ``Notification`` instance.
    """
    # ``objects.create`` already INSERTs and saves the row; the original's
    # extra ``notification.save()`` issued a redundant second query.
    return models.Notification.objects.create(
        noti_from_user=noti_from,
        noti_to_user=noti_to,
        noti_type=noti_type,
        noti_image=image,
        noti_comment=comment,
    )
1632944 | <reponame>Princeton21/DSA
class newNode:
    """Binary tree node storing ``data`` plus optional left/right children."""

    def __init__(self, data):
        # Children start empty; callers link subtrees up explicitly.
        self.left = None
        self.right = None
        self.data = data
def verticalSumUtil(root, hd, Map):
    """Accumulate node values into ``Map`` keyed by horizontal distance.

    Args:
        root: current subtree root (or None for an empty subtree).
        hd: horizontal distance of ``root`` from the tree root
            (left child = hd - 1, right child = hd + 1).
        Map: dict mutated in place, mapping hd -> sum of node values.
    """
    if root is None:
        return
    verticalSumUtil(root.left, hd - 1, Map)
    # dict.get replaces the original's `hd in Map.keys()` double lookup.
    Map[hd] = Map.get(hd, 0) + root.data
    verticalSumUtil(root.right, hd + 1, Map)
def verticalSum(root):
    """Print "hd = sum, " for every vertical line of the tree rooted at *root*."""
    sums = {}
    verticalSumUtil(root, 0, sums)
    for hd, total in sums.items():
        print(hd, "=", total, end=", ")
if __name__ == "__main__":
    # The benchmark below is intentionally disabled: the triple-quoted block
    # is a bare string expression, so running this module does nothing.
    """
    from timeit import timeit
    root = newNode(1)
    root.left = newNode(2)
    root.right = newNode(3)
    root.left.left = newNode(4)
    root.left.right = newNode(5)
    root.right.left = newNode(6)
    root.right.right = newNode(7)
    print(timeit(lambda: verticalSum(root), number=10000)) # 0.1557436530056293
    """
| StarcoderdataPython |
3246530 | #!/usr/bin/python3
import sklearn.datasets

# parameters are n, dimension, useful_dimension, noise level
n = 10**4
dim = 30
useful_dim = 25
sigma = 1.0

X, Y = sklearn.datasets.make_regression(n, dim, useful_dim, noise=sigma)

# Largest magnitudes used for normalization.  Note: the original used plain
# max(Y), which is wrong whenever the largest-magnitude target is negative —
# take absolute values so every scaled target lands in [-1, 1].
max_y_norm = max(abs(y) for y in Y)
max_x_norm = max(sum(map(abs, x)) for x in X)

# Emit each sample as "x1 x2 ... xd y", with one-norm-bounded features.
for i in range(len(X)):
    xmap = X[i] / max_x_norm
    ymap = Y[i] / max_y_norm
    print(" ".join(map(str, xmap)), ymap)
| StarcoderdataPython |
4815168 | #!/usr/bin/env python
'''
isobands_matplotlib.py is a script for creating isobands.
Works in a similar way as gdal_contour, but creating polygons
instead of polylines
This version requires matplotlib, but there is another one,
isobands_gdal.py that uses only GDAL python
Originally created by <NAME>, made available via his
blog post
http://geoexamples.blogspot.com.au/2013/08/creating-vectorial-isobands-with-python.html
and on Github at https://github.com/rveciana/geoexamples/tree/master/python/raster_isobands
'''
from numpy import arange
from numpy import meshgrid
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
from math import floor
from math import ceil
from os.path import exists
from os import remove
from argparse import ArgumentParser
import matplotlib.pyplot as plt
def str2bool(v):
    """Interpret common affirmative strings ("yes", "true", "t", "1") as True."""
    return v.lower() in {"yes", "true", "t", "1"}
def isobands(in_file, band, out_file, out_format, layer_name, attr_name,
             offset, interval, min_level=None, upper_val_output=False):
    '''
    Compute filled-contour polygons ("isobands") for one raster band and
    write them to a new vector layer.

    Matplotlib's contourf performs the contouring; every resulting path
    becomes an OGR polygon whose attribute holds the band's lower (or, when
    upper_val_output is true, upper) level value.
    '''
    # --- read raster size and georeferencing -----------------------------
    ds_in = gdal.Open(in_file)
    band_in = ds_in.GetRasterBand(band)
    xsize_in = band_in.XSize
    ysize_in = band_in.YSize
    geotransform_in = ds_in.GetGeoTransform()
    srs = osr.SpatialReference()
    srs.ImportFromWkt(ds_in.GetProjectionRef())

    # --- create the output vector datasource and layer -------------------
    drv = ogr.GetDriverByName(out_format)
    if exists(out_file):
        remove(out_file)
    dst_ds = drv.CreateDataSource(out_file)
    dst_layer = dst_ds.CreateLayer(layer_name, geom_type=ogr.wkbPolygon,
                                   srs=srs)
    fdef = ogr.FieldDefn(attr_name, ogr.OFTReal)
    dst_layer.CreateField(fdef)

    # Pixel coordinates straight from the geotransform; building the lists
    # this way avoids the rounding problems of numpy.arange.
    x_pos = [geotransform_in[0] + geotransform_in[1] * ii
             for ii in range(xsize_in)]
    y_pos = [geotransform_in[3] + geotransform_in[5] * ii
             for ii in range(ysize_in)]
    x_grid, y_grid = meshgrid(x_pos, y_pos)
    raster_values = band_in.ReadAsArray(0, 0, xsize_in, ysize_in)

    # --- choose contour levels -------------------------------------------
    min_value, max_value = band_in.ComputeRasterMinMax()
    if min_level is None:
        min_level = offset + interval * floor((min_value - offset) / interval)
    # One extra level so the top of the data range is always covered.
    max_level = offset + interval * (1 + ceil((max_value - offset) / interval))
    levels = arange(min_level, max_level, interval)

    # --- contour and convert every path into an OGR polygon --------------
    contours = plt.contourf(x_grid, y_grid, raster_values, levels)
    for level in range(len(contours.collections)):
        paths = contours.collections[level].get_paths()
        for path in paths:
            feat_out = ogr.Feature(dst_layer.GetLayerDefn())
            if upper_val_output:
                out_val = contours.levels[level] + interval
            else:
                out_val = contours.levels[level]
            feat_out.SetField(attr_name, out_val)
            pol = ogr.Geometry(ogr.wkbPolygon)
            ring = None
            for i in range(len(path.vertices)):
                point = path.vertices[i]
                # Code 1 (MOVETO) starts a new ring; flush the previous one.
                if path.codes[i] == 1:
                    if ring is not None:
                        pol.AddGeometry(ring)
                    ring = ogr.Geometry(ogr.wkbLinearRing)
                ring.AddPoint_2D(point[0], point[1])
            pol.AddGeometry(ring)
            feat_out.SetGeometry(pol)
            if dst_layer.CreateFeature(feat_out) != 0:
                # Parenthesized single-argument print behaves identically
                # under the Python 2 print statement used by this script.
                print("Failed to create feature in shapefile.\n")
                exit(1)
            feat_out.Destroy()
if __name__ == "__main__":
PARSER = ArgumentParser(
description="Calculates the isobands from a raster into a vector file")
PARSER.add_argument("src_file", help="The raster source file")
PARSER.add_argument("out_file", help="The vectorial out file")
PARSER.add_argument("-b",
help="The band in the source file to process (default 1)",
type=int, default = 1, metavar = 'band')
PARSER.add_argument("-off",
help="The offset to start the isobands (default 0)",
type=float, default = 0.0, metavar = 'offset')
PARSER.add_argument("-i",
help="The interval (default 0)",
type=float, default = 0.0, metavar = 'interval')
PARSER.add_argument("-nln",
help="The out layer name (default bands)",
default = 'bands', metavar = 'layer_name')
PARSER.add_argument("-a",
help="The out layer attribute name (default h)",
default = 'h', metavar = 'attr_name')
PARSER.add_argument("-f",
help="The output file format name (default ESRI Shapefile)",
default = 'ESRI Shapefile', metavar = 'formatname')
PARSER.add_argument("-up",
help="In the output file, whether to use the upper value of an "
"isoband, as value name for polygons, rather than lower.",
default = "False", metavar='upper_val_output')
ARGS = PARSER.parse_args()
isobands(ARGS.src_file, ARGS.b, ARGS.out_file, ARGS.f, ARGS.nln, ARGS.a,
ARGS.off, ARGS.i, upper_val_output=str2bool(ARGS.up))
| StarcoderdataPython |
1709215 | """
Aggregations.
| Copyright 2017-2021, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import numpy as np
import eta.core.utils as etau
from fiftyone.core.expressions import ViewField as F
import fiftyone.core.media as fom
import fiftyone.core.utils as fou
class Aggregation(object):
    """Abstract base class for all aggregations.

    An :class:`Aggregation` represents an aggregation or reduction of a
    :class:`fiftyone.core.collections.SampleCollection` instance.

    Subclasses must implement :meth:`to_mongo`, :meth:`parse_result`, and
    :meth:`default_result`.

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to the field before aggregating
    """

    def __init__(self, field_name, expr=None):
        self._field_name = field_name
        self._expr = expr

    @property
    def field_name(self):
        """The field name being computed on."""
        return self._field_name

    @property
    def expr(self):
        """The :class:`fiftyone.core.expressions.ViewExpression` or MongoDB
        expression applied to the field before aggregating, if any.
        """
        return self._expr

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            the aggregation result
        """
        raise NotImplementedError("subclasses must implement default_result()")

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the aggregation result
        """
        raise NotImplementedError("subclasses must implement parse_result()")

    def to_mongo(self, sample_collection):
        """Returns the MongoDB aggregation pipeline for this aggregation.

        Args:
            sample_collection: the
                :class:`fiftyone.core.collections.SampleCollection` to which
                the aggregation is being applied

        Returns:
            a MongoDB aggregation pipeline (list of dicts)
        """
        raise NotImplementedError("subclasses must implement to_mongo()")

    def _parse_field_and_expr(
        self, sample_collection, auto_unwind=True, allow_missing=False
    ):
        # Delegate to the module-level helper, injecting this aggregation's
        # field name and expression
        return _parse_field_and_expr(
            sample_collection,
            self._field_name,
            self._expr,
            auto_unwind,
            allow_missing,
        )
class AggregationError(Exception):
    """Exception raised when an :class:`Aggregation` fails to execute."""

    pass
class Bounds(Aggregation):
    """Computes the ``(min, max)`` bounds of a numeric field of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or lists
    of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Example::

        aggregation = fo.Bounds("numeric_field")
        bounds = dataset.aggregate(aggregation)  # (min, max)

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to the field before aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``(None, None)``
        """
        return (None, None)

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the ``(min, max)`` bounds
        """
        return (d["min"], d["max"])

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        # Collapse all documents into a single min/max result
        bounds_stage = {
            "$group": {
                "_id": None,
                "min": {"$min": "$" + path},
                "max": {"$max": "$" + path},
            }
        }

        return pipeline + [bounds_stage]
class Count(Aggregation):
    """Counts the number of field values in a collection.

    ``None``-valued fields are ignored.

    If no field is provided, the samples themselves are counted.

    Example::

        aggregation = fo.Count("predictions.detections")
        count = dataset.aggregate(aggregation)

    Args:
        field_name (None): the name of the field to operate on. If none is
            provided, the samples themselves are counted
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to the field before aggregating
    """

    def __init__(self, field_name=None, expr=None):
        super().__init__(field_name, expr=expr)

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the count
        """
        return d["count"]

    def to_mongo(self, sample_collection):
        # With no field, simply count the documents in the collection
        if self._field_name is None:
            return [{"$count": "count"}]

        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        # Skip None-valued fields, except when counting the frames of a
        # video collection
        counting_frames = (
            sample_collection.media_type == fom.VIDEO and path == "frames"
        )
        if not counting_frames:
            pipeline.append({"$match": {"$expr": {"$gt": ["$" + path, None]}}})

        pipeline.append({"$count": "count"})

        return pipeline
class CountValues(Aggregation):
    """Counts the occurrences of field values in a collection.

    This aggregation is typically applied to *countable* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.BooleanField`
    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.StringField`

    Example::

        aggregation = fo.CountValues("predictions.detections.label")
        counts = dataset.aggregate(aggregation)  # dict mapping values to counts

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to the field before aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``{}``
        """
        return {}

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            a dict mapping values to counts
        """
        return {r["k"]: r["count"] for r in d["result"]}

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        # Count occurrences per distinct value, then collect the
        # (value, count) pairs into a single result document
        count_stage = {"$group": {"_id": "$" + path, "count": {"$sum": 1}}}
        collect_stage = {
            "$group": {
                "_id": None,
                "result": {"$push": {"k": "$_id", "count": "$count"}},
            }
        }

        return pipeline + [count_stage, collect_stage]
class Distinct(Aggregation):
    """Computes the distinct values of a field in a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *countable* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.BooleanField`
    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.StringField`

    Example::

        aggregation = fo.Distinct("predictions.detections.label")
        values = dataset.aggregate(aggregation)  # sorted distinct values

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to the field before aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``[]``
        """
        return []

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            a sorted list of distinct values
        """
        return sorted(d["values"])

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        # Drop None-valued fields, then accumulate the set of distinct values
        return pipeline + [
            {"$match": {"$expr": {"$gt": ["$" + path, None]}}},
            {"$group": {"_id": None, "values": {"$addToSet": "$" + path}}},
        ]
class HistogramValues(Aggregation):
    """Computes a histogram of the field values in a collection.

    This aggregation is typically applied to *numeric* field types (or
    lists of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Example::

        aggregation = fo.HistogramValues("numeric_field", bins=50)
        counts, edges, other = dataset.aggregate(aggregation)

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to the field before aggregating
        bins (None): can be either an integer number of bins to generate or a
            monotonically increasing sequence specifying the bin edges to use.
            By default, 10 bins are created. If ``bins`` is an integer and no
            ``range`` is specified, bin edges are automatically computed from
            the bounds of the field
        range (None): a ``(lower, upper)`` tuple specifying a range in which to
            generate equal-width bins. Only applicable when ``bins`` is an
            integer or ``None``
        auto (False): whether to automatically choose bin edges in an attempt
            to evenly distribute the counts in each bin. If this option is
            chosen, ``bins`` will only be used if it is an integer, and the
            ``range`` parameter is ignored
    """

    def __init__(
        self, field_name, expr=None, bins=None, range=None, auto=False
    ):
        super().__init__(field_name, expr=expr)
        self._bins = bins
        self._range = range
        self._auto = auto

        # Derived binning configuration; exactly one of `_num_bins`/`_edges`
        # is populated by `_parse_args()`
        self._num_bins = None
        self._edges = None
        self._edges_last_used = None
        self._parse_args()

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            a ``(counts, edges, other)`` tuple of ``([], [], 0)``
        """
        return [], [], 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            a tuple of

            -   counts: a list of counts in each bin
            -   edges: an increasing list of bin edges of length
                ``len(counts) + 1``. Each bin is ``[lower, upper)``,
                including the rightmost bin
            -   other: the number of items outside the bins
        """
        if self._auto:
            return self._parse_result_auto(d)

        return self._parse_result_edges(d)

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        if self._auto:
            # Let MongoDB choose bucket boundaries that evenly distribute
            # the values across `_num_bins` buckets
            bucket_stage = {
                "$bucketAuto": {
                    "groupBy": "$" + path,
                    "buckets": self._num_bins,
                    "output": {"count": {"$sum": 1}},
                }
            }
        else:
            if self._edges is not None:
                edges = self._edges
            else:
                edges = self._compute_bin_edges(sample_collection)

            # Remember the edges actually used so that `parse_result` can
            # map bucket IDs back to bin indices
            self._edges_last_used = edges
            bucket_stage = {
                "$bucket": {
                    "groupBy": "$" + path,
                    "boundaries": edges,
                    "default": "other",  # counts documents outside of bins
                    "output": {"count": {"$sum": 1}},
                }
            }

        pipeline.append(bucket_stage)
        pipeline.append({"$group": {"_id": None, "bins": {"$push": "$$ROOT"}}})

        return pipeline

    def _parse_args(self):
        bins = 10 if self._bins is None else self._bins

        if self._auto:
            # Auto mode: only an integer bin count is honored
            self._num_bins = bins if etau.is_numeric(bins) else 10
            return

        if not etau.is_numeric(bins):
            # User-provided bin edges
            self._edges = list(bins)
            return

        if self._range is not None:
            # Linearly-spaced bins within `range`
            lower, upper = self._range
            self._edges = list(np.linspace(lower, upper, bins + 1))
        else:
            # Bin edges will be computed from the field's bounds at runtime
            self._num_bins = bins

    def _compute_bin_edges(self, sample_collection):
        bounds = sample_collection.bounds(self._field_name, expr=self._expr)
        if any(b is None for b in bounds):
            bounds = (-1, -1)

        # Pad the upper bound slightly so the max value falls inside the
        # rightmost bin
        return list(
            np.linspace(bounds[0], bounds[1] + 1e-6, self._num_bins + 1)
        )

    def _parse_result_edges(self, d):
        edges_array = np.array(self._edges_last_used)
        edges = list(edges_array)
        counts = [0] * (len(edges) - 1)
        other = 0
        for bucket in d["bins"]:
            left = bucket["_id"]
            if left == "other":
                other = bucket["count"]
            else:
                # Match the bucket's left boundary to the nearest bin edge
                idx = np.abs(edges_array - left).argmin()
                counts[idx] = bucket["count"]

        return counts, edges, other

    def _parse_result_auto(self, d):
        counts = []
        edges = []
        for bucket in d["bins"]:
            counts.append(bucket["count"])
            edges.append(bucket["_id"]["min"])

        # The final edge is the upper bound of the last bucket
        edges.append(bucket["_id"]["max"])

        return counts, edges, 0
class Mean(Aggregation):
    """Computes the arithmetic mean of the field values of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or lists
    of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Example::

        aggregation = fo.Mean("numeric_field")
        mean = dataset.aggregate(aggregation)

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to the field before aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the mean
        """
        return d["mean"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        # `$avg` ignores missing/non-numeric values
        mean_stage = {"$group": {"_id": None, "mean": {"$avg": "$" + path}}}

        return pipeline + [mean_stage]
class Std(Aggregation):
    """Computes the standard deviation of the field values of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or lists
    of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Example::

        aggregation = fo.Std("numeric_field")
        std = dataset.aggregate(aggregation)

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to the field before aggregating
        sample (False): whether to compute the sample standard deviation rather
            than the population standard deviation
    """

    def __init__(self, field_name, expr=None, sample=False):
        super().__init__(field_name, expr=expr)
        self._sample = sample

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the standard deviation
        """
        return d["std"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        # `$stdDevSamp` divides by N - 1; `$stdDevPop` divides by N
        if self._sample:
            op = "$stdDevSamp"
        else:
            op = "$stdDevPop"

        pipeline.append({"$group": {"_id": None, "std": {op: "$" + path}}})

        return pipeline
class Sum(Aggregation):
    """Computes the sum of the field values of a collection.

    ``None``-valued fields are ignored.

    This aggregation is typically applied to *numeric* field types (or lists
    of such types):

    -   :class:`fiftyone.core.fields.IntField`
    -   :class:`fiftyone.core.fields.FloatField`

    Example::

        aggregation = fo.Sum("numeric_field")
        total = dataset.aggregate(aggregation)

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to the field before aggregating
    """

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``0``
        """
        return 0

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the sum
        """
        return d["sum"]

    def to_mongo(self, sample_collection):
        path, pipeline, _ = self._parse_field_and_expr(sample_collection)

        sum_stage = {"$group": {"_id": None, "sum": {"$sum": "$" + path}}}

        return pipeline + [sum_stage]
class Values(Aggregation):
    """Extracts the values of the field from all samples in a collection.

    .. note::

        Unlike other aggregations, :class:`Values` does not automatically
        unwind list fields, which ensures that the returned values match the
        potentially-nested structure of the documents.

        You can opt-in to unwinding specific list fields using the ``[]``
        syntax, or you can pass the optional ``unwind=True`` parameter to
        unwind all supported list fields. See :ref:`aggregations-list-fields`
        for more information.

    Example::

        aggregation = fo.Values("numeric_field")
        values = dataset.aggregate(aggregation)  # one value per sample

    Args:
        field_name: the name of the field to operate on
        expr (None): an optional
            :class:`fiftyone.core.expressions.ViewExpression` or
            `MongoDB expression <https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions>`_
            to apply to the field before aggregating
        missing_value (None): a value to insert for missing or ``None``-valued
            fields
        unwind (False): whether to automatically unwind all recognized list
            fields
    """

    def __init__(
        self,
        field_name,
        expr=None,
        missing_value=None,
        unwind=False,
        _allow_missing=False,
    ):
        # Translate public `id` fields to their private `_id` form
        field_name, found_id_field = _handle_id_fields(field_name)
        super().__init__(field_name, expr=expr)

        self._missing_value = missing_value
        self._unwind = unwind
        self._allow_missing = _allow_missing
        self._found_id_field = found_id_field

        # Populated by `to_mongo()`
        self._found_array_field = None
        self._num_list_fields = None

    def default_result(self):
        """Returns the default result for this aggregation.

        Returns:
            ``[]``
        """
        return []

    def parse_result(self, d):
        """Parses the output of :meth:`to_mongo`.

        Args:
            d: the result dict

        Returns:
            the list of field values
        """
        values = d["values"]

        if self._found_id_field:
            # Serialize ObjectIds to strings, at the appropriate nesting level
            level = 1 + self._num_list_fields
            return _transform_values(values, str, level=level)

        if self._found_array_field:
            # Deserialize any serialized array values
            level = 1 + self._num_list_fields
            return _transform_values(
                values, fou.deserialize_numpy_array, level=level
            )

        return values

    def to_mongo(self, sample_collection):
        path, pipeline, other_list_fields = self._parse_field_and_expr(
            sample_collection,
            auto_unwind=self._unwind,
            allow_missing=self._allow_missing,
        )

        self._found_array_field = sample_collection._is_array_field(path)
        self._num_list_fields = len(other_list_fields)

        pipeline.extend(
            _make_extract_values_pipeline(
                path, other_list_fields, self._missing_value
            )
        )

        return pipeline
def _handle_id_fields(field_name):
    """Translates public ``id`` field names into their private ``_id`` form.

    Returns a ``(field_name, found_id_field)`` tuple, where the flag records
    whether a translation occurred.
    """
    if field_name == "id":
        return "_id", True

    if field_name.endswith(".id"):
        return field_name[: -len(".id")] + "._id", True

    return field_name, False
def _transform_values(values, fcn, level=1):
    """Recursively applies ``fcn`` to the entries of ``values``, which is
    nested ``level`` lists deep. ``None`` values are passed through at every
    level.
    """
    if values is None:
        return None

    if level < 1:
        return fcn(values)

    next_level = level - 1
    return [_transform_values(v, fcn, level=next_level) for v in values]
def _make_extract_values_pipeline(path, list_fields, missing_value):
    """Builds pipeline stages that push the value of ``path`` from every
    document into a single ``values`` list, replacing missing/``None`` leaf
    values with ``missing_value`` while preserving the nesting implied by
    ``list_fields``.
    """
    # The outermost field to rewrite: either the path itself, or the first
    # (outermost) un-unwound list field containing it
    if not list_fields:
        root = path
    else:
        root = list_fields[0]
    # Replace missing/None leaf values with `missing_value`
    expr = (F() != None).if_else(F(), missing_value)
    if list_fields:
        # Map the leaf expression over the innermost list field
        subfield = path[len(list_fields[-1]) + 1 :]
        expr = _extract_list_values(subfield, expr)
    if len(list_fields) > 1:
        # Wrap the expression once per remaining list level, working from
        # the innermost pair of list fields outward
        for list_field1, list_field2 in zip(
            reversed(list_fields[:-1]), reversed(list_fields[1:])
        ):
            inner_list_field = list_field2[len(list_field1) + 1 :]
            expr = _extract_list_values(inner_list_field, expr)
    return [
        {"$set": {root: expr.to_mongo(prefix="$" + root)}},
        {"$group": {"_id": None, "values": {"$push": "$" + root}}},
    ]
def _extract_list_values(subfield, expr):
    """Wraps ``expr`` so that it is mapped over the elements of a list field,
    optionally applying it to the given ``subfield`` of each element.
    """
    map_expr = F(subfield).apply(expr) if subfield else expr
    return F().map(map_expr)
def _parse_field_and_expr(
    sample_collection, field_name, expr, auto_unwind, allow_missing
):
    """Resolves ``field_name``/``expr`` into a MongoDB path and the pipeline
    stages that must run before aggregating it.

    Returns a ``(path, pipeline, other_list_fields)`` tuple, where
    ``other_list_fields`` are the list fields that were deliberately left
    un-unwound.
    """
    if expr is not None:
        # Apply `expr` to the field via a $set stage before aggregating
        pipeline, _ = sample_collection._make_set_field_pipeline(
            field_name, expr
        )
    else:
        pipeline = []
    (
        path,
        is_frame_field,
        unwind_list_fields,
        other_list_fields,
    ) = sample_collection._parse_field_name(
        field_name, auto_unwind=auto_unwind, allow_missing=allow_missing
    )
    if is_frame_field and auto_unwind:
        # Flatten video samples so that each frame becomes a root document
        pipeline.extend(
            [{"$unwind": "$frames"}, {"$replaceRoot": {"newRoot": "$frames"}}]
        )
    for list_field in unwind_list_fields:
        pipeline.append({"$unwind": "$" + list_field})
    if other_list_fields:
        # Don't unroll terminal lists unless explicitly requested
        other_list_fields = [
            lf for lf in other_list_fields if lf != field_name
        ]
    if other_list_fields:
        root = other_list_fields[0]
        leaf = path[len(root) + 1 :]  # NOTE(review): `leaf` is never used
    else:
        root = path
        leaf = None  # NOTE(review): `leaf` is never used
    # Restrict the documents to the field being aggregated
    pipeline.append({"$project": {root: True}})
    return path, pipeline, other_list_fields
| StarcoderdataPython |
4812459 | #
# Copyright (c) 2013,2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""Package containing all services available in the manager.
Services are interfaces accessible externally over the network.
"""
import inspect
import logging
import pkgutil
import mysql.fabric.protocols.xmlrpc as _protocol
from mysql.fabric.protocols.mysqlrpc import FabricMySQLServer
from mysql.fabric.utils import (
Singleton
)
from mysql.fabric.command import (
get_groups,
get_commands,
get_command,
)
from mysql.fabric.errors import (
ServiceError
)
_LOGGER = logging.getLogger(__name__)
def find_commands(config=None):
    """Discover and import all command modules in this package.

    Walks every module/package beneath this one, importing each so that the
    commands they define become registered. If ``config`` is provided, any
    imported module that exposes a ``configure`` function has it called with
    ``config``.
    """
    for importer, name, ispkg in pkgutil.walk_packages(
        __path__, __name__ + "."):
        mod = importer.find_module(name).load_module(name)
        _LOGGER.debug("%s %s has got __name__ %s",
            "Package" if ispkg else "Module", name, mod.__name__
        )
        if config is None:
            continue
        for (mem_name, mem_value) in inspect.getmembers(mod):
            if mem_name == "configure" and inspect.isfunction(mem_value):
                mem_value(config)
def find_client():
    """Return a proxy to access the Fabric server."""
    client = _protocol.MyClient()
    return client
class ServiceManager(Singleton):
    """This is the service manager, which processes service requests.

    The service manager supports XML-RPC and MySQL-RPC using the
    MySQL protocol.

    Services are not automatically loaded when the service manager is
    constructed, so the load_services have to be called explicitly to
    load the services in the package.
    """
    def __init__(self, services, number_threads, ssl=None):
        """Setup all protocol services.

        :param services: Dictionary mapping protocol names
                         ('protocol.xmlrpc', 'protocol.mysql') to
                         'host:port' address strings. Protocols absent
                         from the dictionary are not started.
        :param number_threads: Number of threads given to each server.
        :param ssl: Optional SSL configuration passed to the servers.
        """
        Singleton.__init__(self)
        self.__services = services
        # XMLRPC
        self.__rpc_server = None
        if 'protocol.xmlrpc' in services:
            host, port = services['protocol.xmlrpc'].split(':')
            self.__rpc_server = _protocol.MyServer(
                host, int(port), number_threads, ssl
            )
        # MySQL Protocol
        self.__mysql_server = None
        if 'protocol.mysql' in services:
            host, port = services['protocol.mysql'].split(':')
            self.__mysql_server = FabricMySQLServer(
                host, int(port), number_threads, ssl
            )
    def address(self, protocol=None):
        """Return addresses in use by the service.

        :param protocol: Address in use by a protocol.
        :return: Address as host:port.
        :rtype: String.
        :raises ServiceError: If the requested protocol is not configured.
        """
        # Copy so that callers cannot mutate the internal mapping
        services = self.__services.copy()
        if protocol is None:
            return services
        if protocol in services:
            return {protocol : services[protocol]}
        raise ServiceError("Protocol (%s) is not supported." % (protocol, ))
    def get_number_sessions(self):
        """Return the number of concurrent sessions.

        .. note:: Only the XML-RPC server is consulted; if the XML-RPC
           protocol was not configured, ``__rpc_server`` is ``None`` and
           this raises AttributeError -- TODO confirm intended.
        """
        return self.__rpc_server.get_number_sessions()
    def start(self):
        """Start all services managed by the service manager.

        Each configured server is started in turn; a failure to start one
        server is logged but does not prevent the other from starting.
        """
        if self.__mysql_server:
            try:
                self.__mysql_server.start()
            except Exception as error:
                _LOGGER.error("Error starting thread: (%s).", error)
            finally:
                _LOGGER.debug("MySQL-RPC server thread created")
        if self.__rpc_server:
            try:
                self.__rpc_server.start()
            except Exception as error:
                _LOGGER.error("Error starting thread: (%s).", error)
            finally:
                _LOGGER.debug("XML-RPC server thread created")
    def shutdown(self):
        """Shut down all services managed by the service manager.
        """
        if self.__mysql_server:
            self.__mysql_server.shutdown()
        if self.__rpc_server:
            self.__rpc_server.shutdown()
    def wait(self):
        """Wait until all the sevices are properly finished.

        .. note:: Only the XML-RPC server is waited upon here; the MySQL
           server is not joined -- TODO confirm intended.
        """
        if self.__rpc_server:
            self.__rpc_server.wait()
    def load_services(self, options, config):
        """Load services into each protocol server.

        Every registered command group/command pair that defines an
        ``execute`` attribute is instantiated once per configured server
        and registered with it.

        :param options: The options for the commands that shall be
                        created.
        :param config: The configuration for the commands that shall
                       be created.
        """
        _LOGGER.info("Loading Services.")
        find_commands(config)
        for group_name in get_groups():
            for command_name in get_commands(group_name):
                command = get_command(group_name, command_name)
                if hasattr(command, "execute"):
                    _LOGGER.debug(
                        "Registering %s.", command.group_name + '.' + \
                        command.command_name
                    )
                    if self.__mysql_server:
                        cmd = command()
                        cmd.setup_server(self.__mysql_server, options, config)
                        self.__mysql_server.register_command(cmd)
                    if self.__rpc_server:
                        cmd = command()
                        cmd.setup_server(self.__rpc_server, options, config)
                        self.__rpc_server.register_command(cmd)
| StarcoderdataPython |
65845 | import logging
import os
import numpy as np
import xml.etree.ElementTree as ET
from PIL import Image
from paths import DATASETS_ROOT
log = logging.getLogger()
# The 20 PASCAL VOC object categories, with '__background__' at index 0.
# List position defines the integer class id (see VOCLoader.cats_to_ids).
VOC_CATS = ['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
            'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
            'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
            'tvmonitor']
class VOCLoader():
    """Loader for PASCAL VOC 2007/2012 detection and segmentation data.

    Reads image lists, JPEG images, XML bounding-box annotations and
    (optionally) PNG segmentation masks from a VOCdevkit directory tree
    rooted at DATASETS_ROOT.
    """

    def __init__(self, year, split, segmentation=False, augmented_seg=False):
        """Create a loader for one VOC year/split.

        :param year: '07' or '12'.
        :param split: One of 'train', 'val', 'trainval', 'test'.
        :param segmentation: Load segmentation masks as well.
        :param augmented_seg: Use the augmented segmentation image list
                              (SegmentationAug) and fall back to the
                              SegmentationClassAug mask folder.
        """
        assert year in ['07', '12']
        self.dataset = 'voc'
        self.year = year
        self.root = os.path.join(DATASETS_ROOT, 'VOCdevkit/VOC20%s/' % year)
        self.split = split
        assert split in ['train', 'val', 'trainval', 'test']

        cats = VOC_CATS
        self.cats_to_ids = dict(map(reversed, enumerate(cats)))
        self.ids_to_cats = dict(enumerate(cats))
        self.num_classes = len(cats)
        self.categories = cats[1:]

        self.segmentation = segmentation
        self.augmented_seg = augmented_seg
        # Segmentation masks are only supported for VOC 2012.
        assert not self.segmentation or self.segmentation and self.year == '12'

        if self.augmented_seg:
            filelist = 'ImageSets/SegmentationAug/%s.txt'
        elif self.segmentation:
            filelist = 'ImageSets/Segmentation/%s.txt'
        else:
            filelist = 'ImageSets/Main/%s.txt'
        with open(os.path.join(self.root, filelist % self.split), 'r') as f:
            # Drop the empty string after the trailing newline.
            self.filenames = f.read().split('\n')[:-1]
        log.info("Created a loader VOC%s %s with %i images" % (year, split, len(self.filenames)))

    def load_image(self, name):
        """Return the image *name* as a float32 RGB array scaled to [0, 1]."""
        im = Image.open('%sJPEGImages/%s.jpg' % (self.root, name)).convert('RGB')
        im = np.array(im) / 255.0
        im = im.astype(np.float32)
        return im

    def get_filenames(self):
        """Return the list of image basenames for this split."""
        return self.filenames

    def read_annotations(self, name):
        """Parse the XML annotation for *name*.

        :return: Tuple (gt_bboxes, seg_gt, gt_cats, width, height,
                 difficulty) where gt_bboxes is an (N, 4) array of
                 (x, y, w, h), gt_cats the N category ids, and difficulty
                 the per-object 'difficult' flags.
        """
        bboxes = []
        cats = []

        tree = ET.parse('%sAnnotations/%s.xml' % (self.root, name))
        root = tree.getroot()
        width = int(root.find('size/width').text)
        height = int(root.find('size/height').text)
        difficulty = []
        for obj in root.findall('object'):
            cat = self.cats_to_ids[obj.find('name').text]
            difficult = (int(obj.find('difficult').text) != 0)
            difficulty.append(difficult)
            cats.append(cat)
            bbox_tag = obj.find('bndbox')
            x = int(bbox_tag.find('xmin').text)
            y = int(bbox_tag.find('ymin').text)
            # Convert (xmin, ymin, xmax, ymax) to (x, y, w, h).
            w = int(bbox_tag.find('xmax').text)-x
            h = int(bbox_tag.find('ymax').text)-y
            bboxes.append((x, y, w, h))

        gt_cats = np.array(cats)
        gt_bboxes = np.array(bboxes).reshape((len(bboxes), 4))
        difficulty = np.array(difficulty)

        seg_gt = self.read_segmentations(name, height, width)

        output = gt_bboxes, seg_gt, gt_cats, width, height, difficulty
        return output

    def read_segmentations(self, name, height, width):
        """Return the uint8 segmentation mask of shape (height, width).

        When segmentation is disabled the mask is filled with 255 so the
        tensor layout stays uniform without contributing labels.
        """
        if self.segmentation:
            seg_file = self.root + 'SegmentationClass/' + name + '.png'
            try:
                seg_map = Image.open(seg_file)
            except OSError:
                # Original code used a bare 'except:', which also swallowed
                # KeyboardInterrupt and programming errors; OSError (incl.
                # FileNotFoundError) is what a missing file raises.
                assert self.augmented_seg
                seg_file = self.root + 'SegmentationClassAug/' + name + '.png'
                seg_map = Image.open(seg_file)
            segmentation = np.array(seg_map, dtype=np.uint8)
        else:
            # if there is no segmentation for a particular image we fill the mask
            # with zeros to keep the same amount of tensors but don't learn from it
            segmentation = np.zeros([height, width], dtype=np.uint8) + 255
        return segmentation
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.