hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
76d68279cb8f211b737dc00cf0b12b22eea688f2 | 9,971 | py | Python | vendor/github.com/elastic/beats/x-pack/libbeat/tests/system/test_management.py | PavelStupnitski/examplebeat | 14f9e8d3997f4dfae82281eb8c2b3e66a23faf7b | [
"Apache-2.0"
] | 3 | 2021-08-03T07:54:42.000Z | 2022-02-24T08:14:07.000Z | vendor/github.com/elastic/beats/x-pack/libbeat/tests/system/test_management.py | PavelStupnitski/examplebeat | 14f9e8d3997f4dfae82281eb8c2b3e66a23faf7b | [
"Apache-2.0"
] | null | null | null | vendor/github.com/elastic/beats/x-pack/libbeat/tests/system/test_management.py | PavelStupnitski/examplebeat | 14f9e8d3997f4dfae82281eb8c2b3e66a23faf7b | [
"Apache-2.0"
] | 4 | 2021-07-17T08:38:39.000Z | 2021-09-18T07:23:11.000Z | import sys
import os
import json
import requests
import string
import random
import unittest
import time
from elasticsearch import Elasticsearch
from os import path
from base import BaseTest
# Integration tests only run when INTEGRATION_TESTS is set in the environment
# (any non-empty value enables them; unset falls back to False).
INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
# Maximum time in seconds to wait for log lines / indexed documents to appear.
TIMEOUT = 2 * 60
class TestManagement(BaseTest):
    """Integration tests for enrolling a beat in Kibana Central Management
    and for fetching/applying configuration updates from it.

    These tests are tied to the docker-compose environment used for CM and
    are skipped unless INTEGRATION_TESTS is enabled.
    """

    def setUp(self):
        super(TestManagement, self).setUp()
        # NOTES: These options are linked to the specifics of the docker
        # compose environment for CM.
        # Fix: the env var was misspelled 'ES_POST', so an explicitly
        # configured port was silently ignored and the default always used.
        self.es_host = os.getenv('ES_HOST', 'localhost') + ":" + os.getenv('ES_PORT', '9200')
        self.es_user = "myelastic"
        self.es_pass = "changeme"
        self.es = Elasticsearch([self.get_elasticsearch_url()], verify_certs=True)
        self.keystore_path = self.working_dir + "/data/keystore"
        # Start each test from a clean keystore; enroll() is expected to
        # create a fresh one.
        # Fix: os.Remove does not exist (AttributeError at runtime whenever a
        # keystore was left over from a previous run); use os.remove.
        if path.exists(self.keystore_path):
            os.remove(self.keystore_path)

    @unittest.skipIf(not INTEGRATION_TESTS,
                     "integration tests are disabled, run with INTEGRATION_TESTS=1 to enable them.")
    def test_enroll(self):
        """
        Enroll the beat in Kibana Central Management
        """
        # We don't care about this as it will be replaced by enrollment
        # process:
        config_path = os.path.join(self.working_dir, "mockbeat.yml")
        self.render_config_template("mockbeat", config_path, keystore_path=self.keystore_path)
        with open(config_path, 'r') as f:
            config_content = f.read()

        exit_code = self.enroll(self.es_user, self.es_pass)

        assert exit_code == 0
        assert self.log_contains("Enrolled and ready to retrieve settings")

        # Enroll creates a keystore (to store the access token).
        assert os.path.isfile(os.path.join(
            self.working_dir, "data/keystore"))

        # A new settings file is in place now.
        with open(config_path, 'r') as f:
            new_content = f.read()
        assert config_content != new_content

        # A backup of the original settings has been created.
        assert os.path.isfile(os.path.join(
            self.working_dir, "mockbeat.yml.bak"))
        with open(config_path + ".bak", 'r') as f:
            backup_content = f.read()
        assert config_content == backup_content

    @unittest.skipIf(not INTEGRATION_TESTS,
                     "integration tests are disabled, run with INTEGRATION_TESTS=1 to enable them.")
    def test_enroll_bad_pw(self):
        """
        Try to enroll the beat in Kibana Central Management with a bad password
        """
        # We don't care about this as it will be replaced by enrollment
        # process:
        config_path = os.path.join(self.working_dir, "mockbeat.yml")
        self.render_config_template("mockbeat", config_path, keystore_path=self.keystore_path)
        with open(config_path, 'r') as f:
            config_content = f.read()

        exit_code = self.enroll("not", 'wrong password')

        # Enrollment must fail with the wrong credentials.
        assert exit_code == 1

        # The keystore wasn't created.
        assert not os.path.isfile(os.path.join(
            self.working_dir, "data/keystore"))

        # Settings haven't changed.
        with open(config_path, 'r') as f:
            new_content = f.read()
        assert config_content == new_content

    @unittest.skipIf(not INTEGRATION_TESTS,
                     "integration tests are disabled, run with INTEGRATION_TESTS=1 to enable them.")
    def test_fetch_configs(self):
        """
        Config is retrieved from Central Management and updates are applied
        """
        # Enroll the beat.
        config_path = os.path.join(self.working_dir, "mockbeat.yml")
        self.render_config_template("mockbeat", config_path, keystore_path=self.keystore_path)
        exit_code = self.enroll(self.es_user, self.es_pass)
        assert exit_code == 0

        index = self.random_index()

        # Configure an output.
        self.create_and_assing_tag([
            {
                "type": "output",
                "config": {
                    "_sub_type": "elasticsearch",
                    "hosts": [self.es_host],
                    "username": self.es_user,
                    "password": self.es_pass,
                    "index": index,
                },
                "id": "myconfig",
            }
        ])

        # Start beat with a short CM polling period so updates apply quickly.
        proc = self.start_beat(extra_args=[
            "-E", "management.period=1s",
            "-E", "keystore.path=%s" % self.keystore_path,
        ])

        # Wait for beat to apply the new configuration and publish to it.
        self.wait_log_contains("Applying settings for output")
        self.wait_until(lambda: self.log_contains("PublishEvents: "), max_timeout=TIMEOUT)
        self.wait_documents(index, 1)

        index2 = self.random_index()
        # Update the output configuration to point at a second index.
        self.create_and_assing_tag([
            {
                "type": "output",
                "config": {
                    "_sub_type": "elasticsearch",
                    "hosts": [self.es_host],
                    "username": self.es_user,
                    "password": self.es_pass,
                    "index": index2,
                },
                "id": "myconfig",
            }
        ])
        self.wait_log_contains("Applying settings for output")
        self.wait_until(lambda: self.log_contains("PublishEvents: "), max_timeout=TIMEOUT)
        self.wait_documents(index2, 1)

        proc.check_kill_and_wait()

    @unittest.skipIf(not INTEGRATION_TESTS,
                     "integration tests are disabled, run with INTEGRATION_TESTS=1 to enable them.")
    def test_configs_cache(self):
        """
        Config cache is used if Kibana is not available
        """
        # Enroll the beat.
        config_path = os.path.join(self.working_dir, "mockbeat.yml")
        self.render_config_template("mockbeat", config_path, keystore_path=self.keystore_path)
        exit_code = self.enroll(self.es_user, self.es_pass)
        assert exit_code == 0

        index = self.random_index()

        # Update the output configuration.
        self.create_and_assing_tag([
            {
                "type": "output",
                "config": {
                    "_sub_type": "elasticsearch",
                    "hosts": [self.es_host],
                    "username": self.es_user,
                    "password": self.es_pass,
                    "index": index,
                }
            }
        ])

        # Start beat; this run populates the local configuration cache.
        proc = self.start_beat(extra_args=[
            "-E", "management.period=1s",
            "-E", "keystore.path=%s" % self.keystore_path,
        ])
        self.wait_until(lambda: self.log_contains("PublishEvents: "), max_timeout=TIMEOUT)
        self.wait_documents(index, 1)
        proc.check_kill_and_wait()

        # Remove the index so we can tell new events were published.
        self.es.indices.delete(index)

        # The cache should exist already; start with wrong Kibana settings to
        # prove the cached configuration is used as a fallback.
        proc = self.start_beat(extra_args=[
            "-E", "management.period=1s",
            "-E", "management.kibana.host=wronghost",
            "-E", "management.kibana.timeout=0.5s",
            "-E", "keystore.path=%s" % self.keystore_path,
        ])
        self.wait_until(lambda: self.log_contains("PublishEvents: "), max_timeout=TIMEOUT)
        self.wait_documents(index, 1)
        proc.check_kill_and_wait()

    def enroll(self, user, password):
        """Run the beat's `enroll` subcommand against Kibana.

        The password is handed over via the PASS environment variable
        (``env:PASS``) so it never appears on the command line.
        Returns the subprocess exit code.
        """
        return self.run_beat(
            extra_args=["enroll", self.get_kibana_url(),
                        "--password", "env:PASS", "--username", user, "--force"],
            logging_args=["-v", "-d", "*"],
            env={
                'PASS': password,
            })

    def check_kibana_status(self):
        """Smoke-check the Kibana /api/status endpoint.

        The response is currently not inspected; the call only verifies
        that the endpoint is reachable with the test credentials.
        """
        headers = {
            "kbn-xsrf": "1"
        }
        url = self.get_kibana_url() + "/api/status"
        requests.get(url, headers=headers,
                     auth=(self.es_user, self.es_pass))

    def create_and_assing_tag(self, blocks):
        """Create a CM tag containing ``blocks`` and assign it to this beat.

        NOTE: the method name keeps the historical "assing" typo so any
        existing callers outside this file keep working.
        """
        # Millisecond timestamp keeps tag names unique across quick re-runs.
        tag_name = "test%d" % int(time.time() * 1000)
        headers = {
            "kbn-xsrf": "1"
        }

        # Create the tag.
        url = self.get_kibana_url() + "/api/beats/tag/" + tag_name
        data = {
            "color": "#DD0A73",
            "name": tag_name,
        }
        r = requests.put(url, json=data, headers=headers,
                         auth=(self.es_user, self.es_pass))
        assert r.status_code in (200, 201)

        # Create the configuration blocks under the tag.
        url = self.get_kibana_url() + "/api/beats/configurations"
        for b in blocks:
            b["tag"] = tag_name
        r = requests.put(url, json=blocks, headers=headers,
                         auth=(self.es_user, self.es_pass))
        assert r.status_code in (200, 201)

        # Retrieve the beat ID written by the beat at startup.
        with open(os.path.join(self.working_dir, 'data', 'meta.json'), 'r') as f:
            meta = json.load(f)

        # Assign the tag to the beat.
        data = {"assignments": [{"beatId": meta["uuid"], "tag": tag_name}]}
        url = self.get_kibana_url() + "/api/beats/agents_tags/assignments"
        r = requests.post(url, json=data, headers=headers,
                          auth=(self.es_user, self.es_pass))
        assert r.status_code == 200

    def get_elasticsearch_url(self):
        """Return the Elasticsearch URL with basic-auth credentials embedded.

        Fix: the port default was '5601' (Kibana's port); Elasticsearch
        listens on 9200, which also matches the default used in setUp().
        """
        return 'http://' + self.es_user + ":" + self.es_pass + '@' + \
            os.getenv('ES_HOST', 'localhost') + ':' + os.getenv('ES_PORT', '9200')

    def get_kibana_url(self):
        """Return the Kibana base URL from KIBANA_HOST/KIBANA_PORT env vars."""
        return 'http://' + os.getenv('KIBANA_HOST', 'kibana') + ':' + os.getenv('KIBANA_PORT', '5601')

    def random_index(self):
        """Return a random 10-character lowercase index name."""
        return ''.join(random.choice(string.ascii_lowercase) for _ in range(10))

    def check_document_count(self, index, count):
        """Return True when ``index`` holds at least ``count`` documents.

        Any Elasticsearch/transport error (index not yet created, node not
        ready, ...) is treated as "not there yet" so callers can poll.
        """
        try:
            self.es.indices.refresh(index=index)
            return self.es.search(index=index, body={"query": {"match_all": {}}})['hits']['total'] >= count
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate while polling.
            return False

    def wait_documents(self, index, count):
        """Poll until ``index`` contains at least ``count`` documents."""
        self.wait_until(lambda: self.check_document_count(index, count), max_timeout=TIMEOUT, poll_interval=1)
| 34.863636 | 139 | 0.573664 |
d841c9f25e9b4fe724943ce831ba6bad95b800da | 4,728 | py | Python | src/python_old/boston.py | chdhr-harshal/MCMonitor | 330fc1a8f8cf83620fd6b0e503707c91e97af16d | [
"MIT"
] | 2 | 2020-11-04T20:35:18.000Z | 2021-09-05T09:06:43.000Z | src/python_old/boston.py | chdhr-harshal/MCMonitor | 330fc1a8f8cf83620fd6b0e503707c91e97af16d | [
"MIT"
] | null | null | null | src/python_old/boston.py | chdhr-harshal/MCMonitor | 330fc1a8f8cf83620fd6b0e503707c91e97af16d | [
"MIT"
] | 1 | 2021-09-05T09:10:41.000Z | 2021-09-05T09:10:41.000Z | #!/usr/local/bin/python
import pandas as pd
import numpy as np
import networkx as nx
from MarkovChain import *
import os
# NOTE(review): hard-coded absolute paths tied to one developer's machine —
# consider reading these from the environment before reusing this script.
DATA_DIR = "/Users/harshal/Projects/markov_traffic/data/"
PLOTS_DATA_DIR = "/Users/harshal/Projects/markov_traffic/Plots_data/"
def read_gm_file(filename):
    """Load and return the GML-format graph named ``filename`` from DATA_DIR."""
    return nx.read_gml(DATA_DIR + filename)
def get_mc_attributes(G):
    """Return the directed, row-stochastic version of ``G`` and its
    transition matrix.

    The matrix is returned as a plain 2-D numpy array (squeezed from the
    numpy matrix networkx produces).
    """
    stochastic = nx.stochastic_graph(G.to_directed())
    transition = np.squeeze(np.asarray(nx.to_numpy_matrix(stochastic)))
    return (stochastic, transition)
def get_objective_evolution(mc, problem, object_distribution, method, k, method_name):
    """Run ``method`` on the chain up to budget ``k`` and accumulate rows.

    Each row returned by ``mc.get_evolution`` is annotated in place with the
    experiment parameters, then appended to the module-level
    ``dataframe_rows`` accumulator (later turned into a pandas DataFrame).

    :param mc: Markov-chain object exposing ``get_evolution(method, k)``.
    :param problem: problem label stored on every row (e.g. 'node').
    :param object_distribution: distribution label stored on every row.
    :param method: bound method of ``mc`` to evaluate.
    :param k: maximum budget passed to ``get_evolution``.
    :param method_name: human-readable method label stored on every row.
    """
    global dataframe_rows
    rows = mc.get_evolution(method, k)
    for row in rows:
        # Tag every row with the experiment parameters so rows from all runs
        # can be concatenated into a single dataframe.
        row['problem'] = problem
        row['object_distribution'] = object_distribution
        row['time'] = 0
        row['method'] = method_name
    # Fix: the original assumed ``dataframe_rows`` had already been defined
    # somewhere and raised NameError on the first call (the module never
    # initializes it). Initialize the accumulator lazily instead.
    if 'dataframe_rows' not in globals():
        dataframe_rows = []
    dataframe_rows = dataframe_rows + rows
if __name__ == "__main__":
    # NOTE: this script uses Python 2 print statements throughout.
    bos = read_gm_file("Boston.gml")
    G, tm = get_mc_attributes(bos)
    # Experiment parameters: network size, object count, horizon, max budget.
    num_nodes = len(G.nodes())
    num_objects = 100000
    total_time = 0
    max_k = 50

    # Uniform object distribution
    print "uniform"
    mc1 = MCNodeObjectives(num_nodes, num_objects, total_time, tm, 'uniform', G)
    get_objective_evolution(mc1, 'node', 'uniform', mc1.smart_greedy, max_k, 'greedy')
    get_objective_evolution(mc1, 'node', 'uniform', mc1.random_nodes, max_k, 'random')
    get_objective_evolution(mc1, 'node', 'uniform', mc1.highest_in_probability_nodes, max_k, 'highest_in_probability')
    get_objective_evolution(mc1, 'node', 'uniform', mc1.highest_in_degree_centrality_nodes, max_k, 'highest_in_degree_centrality')
    get_objective_evolution(mc1, 'node', 'uniform', mc1.highest_closeness_centrality_nodes, max_k, 'highest_closeness_centrality')
    get_objective_evolution(mc1, 'node', 'uniform', mc1.highest_item_nodes, max_k, 'highest_item_nodes')

    # Directly proportional to out-degree object distribution
    print "direct"
    mc1 = MCNodeObjectives(num_nodes, num_objects, total_time, tm, 'direct', G)
    get_objective_evolution(mc1, 'node', 'direct', mc1.smart_greedy, max_k, 'greedy')
    get_objective_evolution(mc1, 'node', 'direct', mc1.random_nodes, max_k, 'random')
    get_objective_evolution(mc1, 'node', 'direct', mc1.highest_in_probability_nodes, max_k, 'highest_in_probability')
    get_objective_evolution(mc1, 'node', 'direct', mc1.highest_in_degree_centrality_nodes, max_k, 'highest_in_degree_centrality')
    get_objective_evolution(mc1, 'node', 'direct', mc1.highest_closeness_centrality_nodes, max_k, 'highest_closeness_centrality')
    get_objective_evolution(mc1, 'node', 'direct', mc1.highest_item_nodes, max_k, 'highest_item_nodes')

    # Directly proportional to out-degree object distribution
    # NOTE(review): this section is an exact duplicate of the "direct" block
    # above — possibly a copy/paste slip; confirm whether a different
    # distribution was intended here.
    print "direct"
    mc1 = MCNodeObjectives(num_nodes, num_objects, total_time, tm, 'direct', G)
    get_objective_evolution(mc1, 'node', 'direct', mc1.smart_greedy, max_k, 'greedy')
    get_objective_evolution(mc1, 'node', 'direct', mc1.random_nodes, max_k, 'random')
    get_objective_evolution(mc1, 'node', 'direct', mc1.highest_in_probability_nodes, max_k, 'highest_in_probability')
    get_objective_evolution(mc1, 'node', 'direct', mc1.highest_in_degree_centrality_nodes, max_k, 'highest_in_degree_centrality')
    get_objective_evolution(mc1, 'node', 'direct', mc1.highest_closeness_centrality_nodes, max_k, 'highest_closeness_centrality')
    get_objective_evolution(mc1, 'node', 'direct', mc1.highest_item_nodes, max_k, 'highest_item_nodes')

    # Inversely proportionaly to out-degree object distribution
    print "inverse"
    mc1 = MCNodeObjectives(num_nodes, num_objects, total_time, tm, 'inverse', G)
    get_objective_evolution(mc1, 'node', 'inverse', mc1.smart_greedy, max_k, 'greedy')
    get_objective_evolution(mc1, 'node', 'inverse', mc1.random_nodes, max_k, 'random')
    get_objective_evolution(mc1, 'node', 'inverse', mc1.highest_in_probability_nodes, max_k, 'highest_in_probability')
    get_objective_evolution(mc1, 'node', 'inverse', mc1.highest_in_degree_centrality_nodes, max_k, 'highest_in_degree_centrality')
    get_objective_evolution(mc1, 'node', 'inverse', mc1.highest_closeness_centrality_nodes, max_k, 'highest_closeness_centrality')
    get_objective_evolution(mc1, 'node', 'inverse', mc1.highest_item_nodes, max_k, 'highest_item_nodes')

    # Create a dataframe
    df = pd.DataFrame(dataframe_rows)

    # Export data to a csv in DATA_DIR
    df.to_csv(PLOTS_DATA_DIR + "k_objective_evolution_boston.csv.gz", sep=",", header=True, index=False, compression='gzip')

    # Call the R script to make the plots
    os.system("/Users/harshal/Projects/markov_traffic/src/R/k_variance_plot.r")
| 41.840708 | 136 | 0.741328 |
ecc782bb0e37c16cc1ddaa8854f2dd2b04d3216e | 464 | py | Python | pyxllib/xlcv.py | XLPRUtils/pyxllib | 7d559d0fc1f87ea26bdf44545c16417c9cf76318 | [
"Apache-2.0"
] | 15 | 2020-06-09T07:03:07.000Z | 2022-02-25T06:59:34.000Z | pyxllib/xlcv.py | XLPRUtils/pyxllib | 7d559d0fc1f87ea26bdf44545c16417c9cf76318 | [
"Apache-2.0"
] | 5 | 2020-08-08T07:11:21.000Z | 2020-08-08T07:11:24.000Z | pyxllib/xlcv.py | XLPRUtils/pyxllib | 7d559d0fc1f87ea26bdf44545c16417c9cf76318 | [
"Apache-2.0"
] | 2 | 2020-06-09T07:03:26.000Z | 2020-12-31T06:50:37.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : 陈坤泽
# @Email : 877362867@qq.com
# @Date : 2021/06/03 22:23
from pyxllib.xl import *
from pyxllib.algo.geo import *
from pyxllib.cv.newbie import *
from pyxllib.cv.expert import *
# Embed the custom helper functionality into the PIL.Image.Image class.
# pyxllib.xlcv was designed to trade engineering rigor for convenience.
# If this monkey-patching step gives you a "fright" instead of a "surprise",
# use `from pyxllib.cv.expert import *` instead of `from pyxllib.xlcv import *`,
# and then call xlpil.imsize(im) explicitly rather than using im.imsize, etc.
xlpil.enchant()
| 25.777778 | 69 | 0.713362 |
96b8347a109808d15c066b7f92716279c874e474 | 17,523 | bzl | Python | bazel/grpc_deps.bzl | terracatta/grpc | f7591a3426becbed20dbd1a80beb9c1e2ca1a738 | [
"Apache-2.0"
] | 2 | 2020-03-27T17:01:34.000Z | 2020-03-27T19:38:58.000Z | bazel/grpc_deps.bzl | doc22940/grpc | 6640651bcff343c905d6fefa7ecab0eb1ba5f984 | [
"Apache-2.0"
] | 62 | 2020-02-27T00:53:36.000Z | 2021-02-05T06:10:53.000Z | bazel/grpc_deps.bzl | doc22940/grpc | 6640651bcff343c905d6fefa7ecab0eb1ba5f984 | [
"Apache-2.0"
] | null | null | null | """Load dependencies needed to compile and test the grpc library as a 3rd-party consumer."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@com_github_grpc_grpc//bazel:grpc_python_deps.bzl", "grpc_python_deps")
def grpc_deps():
    """Loads dependencies need to compile and test the grpc library."""

    # Expose well-known //external:<name> aliases for dependencies so gRPC
    # targets can reference them without hard-coding repository names.
    native.bind(
        name = "upb_lib",
        actual = "@upb//:upb",
    )

    native.bind(
        name = "absl",
        actual = "@com_google_absl//absl",
    )

    native.bind(
        name = "absl-base",
        actual = "@com_google_absl//absl/base",
    )

    native.bind(
        name = "absl-time",
        actual = "@com_google_absl//absl/time:time",
    )

    native.bind(
        name = "libssl",
        actual = "@boringssl//:ssl",
    )

    native.bind(
        name = "madler_zlib",
        actual = "@zlib//:zlib",
    )

    native.bind(
        name = "protobuf",
        actual = "@com_google_protobuf//:protobuf",
    )

    native.bind(
        name = "protobuf_clib",
        actual = "@com_google_protobuf//:protoc_lib",
    )

    native.bind(
        name = "protobuf_headers",
        actual = "@com_google_protobuf//:protobuf_headers",
    )

    native.bind(
        name = "protocol_compiler",
        actual = "@com_google_protobuf//:protoc",
    )

    native.bind(
        name = "cares",
        actual = "@com_github_cares_cares//:ares",
    )

    native.bind(
        name = "gtest",
        actual = "@com_github_google_googletest//:gtest",
    )

    native.bind(
        name = "benchmark",
        actual = "@com_github_google_benchmark//:benchmark",
    )

    native.bind(
        name = "gflags",
        actual = "@com_github_gflags_gflags//:gflags",
    )

    native.bind(
        name = "grpc_cpp_plugin",
        actual = "@com_github_grpc_grpc//src/compiler:grpc_cpp_plugin",
    )

    native.bind(
        name = "grpc++_codegen_proto",
        actual = "@com_github_grpc_grpc//:grpc++_codegen_proto",
    )

    native.bind(
        name = "opencensus-context",
        actual = "@io_opencensus_cpp//opencensus/context:context",
    )

    native.bind(
        name = "opencensus-trace",
        actual = "@io_opencensus_cpp//opencensus/trace:trace",
    )

    native.bind(
        name = "opencensus-trace-context_util",
        actual = "@io_opencensus_cpp//opencensus/trace:context_util",
    )

    native.bind(
        name = "opencensus-stats",
        actual = "@io_opencensus_cpp//opencensus/stats:stats",
    )

    native.bind(
        name = "opencensus-stats-test",
        actual = "@io_opencensus_cpp//opencensus/stats:test_utils",
    )

    native.bind(
        name = "opencensus-with-tag-map",
        actual = "@io_opencensus_cpp//opencensus/tags:with_tag_map",
    )

    native.bind(
        name = "opencensus-tags",
        actual = "@io_opencensus_cpp//opencensus/tags:tags",
    )

    native.bind(
        name = "libuv",
        actual = "@libuv//:libuv",
    )

    # Fetch third-party archives pinned by sha256, but only when the
    # consuming WORKSPACE has not already defined a repository of the same
    # name — this lets users override dependency versions.
    if "boringssl" not in native.existing_rules():
        http_archive(
            name = "boringssl",
            # Use github mirror instead of https://boringssl.googlesource.com/boringssl
            # to obtain a boringssl archive with consistent sha256
            sha256 = "a3d4de4f03cb321ef943678d72a045c9a19d26b23d6f4e313f97600c65201a27",
            strip_prefix = "boringssl-1c2769383f027befac5b75b6cedd25daf3bf4dcf",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/google/boringssl/archive/1c2769383f027befac5b75b6cedd25daf3bf4dcf.tar.gz",
                "https://github.com/google/boringssl/archive/1c2769383f027befac5b75b6cedd25daf3bf4dcf.tar.gz",
            ],
        )

    if "zlib" not in native.existing_rules():
        http_archive(
            name = "zlib",
            build_file = "@com_github_grpc_grpc//third_party:zlib.BUILD",
            sha256 = "6d4d6640ca3121620995ee255945161821218752b551a1a180f4215f7d124d45",
            strip_prefix = "zlib-cacf7f1d4e3d44d871b605da3b647f07d718623f",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/madler/zlib/archive/cacf7f1d4e3d44d871b605da3b647f07d718623f.tar.gz",
                "https://github.com/madler/zlib/archive/cacf7f1d4e3d44d871b605da3b647f07d718623f.tar.gz",
            ],
        )

    if "com_google_protobuf" not in native.existing_rules():
        http_archive(
            name = "com_google_protobuf",
            sha256 = "2435b7fb83b8a608c24ca677907aa9a35e482a7f018e65ca69481b3c8c9f7caf",
            strip_prefix = "protobuf-d0bfd5221182da1a7cc280f3337b5e41a89539cf",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/google/protobuf/archive/d0bfd5221182da1a7cc280f3337b5e41a89539cf.tar.gz",
                "https://github.com/google/protobuf/archive/d0bfd5221182da1a7cc280f3337b5e41a89539cf.tar.gz",
            ],
        )

    if "com_github_google_googletest" not in native.existing_rules():
        http_archive(
            name = "com_github_google_googletest",
            sha256 = "443d383db648ebb8e391382c0ab63263b7091d03197f304390baac10f178a468",
            strip_prefix = "googletest-c9ccac7cb7345901884aabf5d1a786cfa6e2f397",
            urls = [
                # 2019-08-19
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/google/googletest/archive/c9ccac7cb7345901884aabf5d1a786cfa6e2f397.tar.gz",
                "https://github.com/google/googletest/archive/c9ccac7cb7345901884aabf5d1a786cfa6e2f397.tar.gz",
            ],
        )

    if "rules_cc" not in native.existing_rules():
        http_archive(
            name = "rules_cc",
            sha256 = "35f2fb4ea0b3e61ad64a369de284e4fbbdcdba71836a5555abb5e194cf119509",
            strip_prefix = "rules_cc-624b5d59dfb45672d4239422fa1e3de1822ee110",
            urls = [
                #2019-08-15
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/rules_cc/archive/624b5d59dfb45672d4239422fa1e3de1822ee110.tar.gz",
                "https://github.com/bazelbuild/rules_cc/archive/624b5d59dfb45672d4239422fa1e3de1822ee110.tar.gz",
            ],
        )

    if "com_github_gflags_gflags" not in native.existing_rules():
        http_archive(
            name = "com_github_gflags_gflags",
            sha256 = "63ae70ea3e05780f7547d03503a53de3a7d2d83ad1caaa443a31cb20aea28654",
            strip_prefix = "gflags-28f50e0fed19872e0fd50dd23ce2ee8cd759338e",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/gflags/gflags/archive/28f50e0fed19872e0fd50dd23ce2ee8cd759338e.tar.gz",
                "https://github.com/gflags/gflags/archive/28f50e0fed19872e0fd50dd23ce2ee8cd759338e.tar.gz",
            ],
        )

    if "com_github_google_benchmark" not in native.existing_rules():
        http_archive(
            name = "com_github_google_benchmark",
            sha256 = "f68aec93154d010324c05bcd8c5cc53468b87af88d87acb5ddcfaa1bba044837",
            strip_prefix = "benchmark-090faecb454fbd6e6e17a75ef8146acb037118d4",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/google/benchmark/archive/090faecb454fbd6e6e17a75ef8146acb037118d4.tar.gz",
                "https://github.com/google/benchmark/archive/090faecb454fbd6e6e17a75ef8146acb037118d4.tar.gz",
            ],
        )

    if "com_github_cares_cares" not in native.existing_rules():
        http_archive(
            name = "com_github_cares_cares",
            build_file = "@com_github_grpc_grpc//third_party:cares/cares.BUILD",
            sha256 = "e8c2751ddc70fed9dc6f999acd92e232d5846f009ee1674f8aee81f19b2b915a",
            strip_prefix = "c-ares-e982924acee7f7313b4baa4ee5ec000c5e373c30",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/c-ares/c-ares/archive/e982924acee7f7313b4baa4ee5ec000c5e373c30.tar.gz",
                "https://github.com/c-ares/c-ares/archive/e982924acee7f7313b4baa4ee5ec000c5e373c30.tar.gz",
            ],
        )

    if "com_google_absl" not in native.existing_rules():
        http_archive(
            name = "com_google_absl",
            sha256 = "f368a8476f4e2e0eccf8a7318b98dafbe30b2600f4e3cf52636e5eb145aba06a",
            strip_prefix = "abseil-cpp-df3ea785d8c30a9503321a3d35ee7d35808f190d",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
                "https://github.com/abseil/abseil-cpp/archive/df3ea785d8c30a9503321a3d35ee7d35808f190d.tar.gz",
            ],
        )

    if "bazel_toolchains" not in native.existing_rules():
        # list of releases is at https://releases.bazel.build/bazel-toolchains.html
        http_archive(
            name = "bazel_toolchains",
            sha256 = "0b36eef8a66f39c8dbae88e522d5bbbef49d5e66e834a982402c79962281be10",
            strip_prefix = "bazel-toolchains-1.0.1",
            urls = [
                "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/1.0.1.tar.gz",
                "https://github.com/bazelbuild/bazel-toolchains/releases/download/1.0.1/bazel-toolchains-1.0.1.tar.gz",
            ],
        )

    if "bazel_skylib" not in native.existing_rules():
        http_archive(
            name = "bazel_skylib",
            urls = [
                "https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
                "https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
            ],
            sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44",
        )

    if "io_opencensus_cpp" not in native.existing_rules():
        http_archive(
            name = "io_opencensus_cpp",
            sha256 = "90d6fafa8b1a2ea613bf662731d3086e1c2ed286f458a95c81744df2dbae41b1",
            strip_prefix = "opencensus-cpp-c9a4da319bc669a772928ffc55af4a61be1a1176",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/census-instrumentation/opencensus-cpp/archive/c9a4da319bc669a772928ffc55af4a61be1a1176.tar.gz",
                "https://github.com/census-instrumentation/opencensus-cpp/archive/c9a4da319bc669a772928ffc55af4a61be1a1176.tar.gz",
            ],
        )

    if "upb" not in native.existing_rules():
        http_archive(
            name = "upb",
            sha256 = "e9c136e56b98c8eb48ad1c9f8df4a6348e99f9f336ee6199c4259a312c2e3598",
            strip_prefix = "upb-d8f3d6f9d415b31f3ce56d46791706c38fa311bc",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/protocolbuffers/upb/archive/d8f3d6f9d415b31f3ce56d46791706c38fa311bc.tar.gz",
                "https://github.com/protocolbuffers/upb/archive/d8f3d6f9d415b31f3ce56d46791706c38fa311bc.tar.gz",
            ],
        )

    if "envoy_api" not in native.existing_rules():
        http_archive(
            name = "envoy_api",
            sha256 = "9150f920abd3e710e0e58519cd769822f13d7a56988f2c34c2008815ec8d9c88",
            strip_prefix = "data-plane-api-8dcc476be69437b505af181a6e8b167fdb101d7e",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/envoyproxy/data-plane-api/archive/8dcc476be69437b505af181a6e8b167fdb101d7e.tar.gz",
                "https://github.com/envoyproxy/data-plane-api/archive/8dcc476be69437b505af181a6e8b167fdb101d7e.tar.gz",
            ],
        )

    if "io_bazel_rules_go" not in native.existing_rules():
        http_archive(
            name = "io_bazel_rules_go",
            sha256 = "a82a352bffae6bee4e95f68a8d80a70e87f42c4741e6a448bec11998fcc82329",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/rules_go/releases/download/0.18.5/rules_go-0.18.5.tar.gz",
                "https://github.com/bazelbuild/rules_go/releases/download/0.18.5/rules_go-0.18.5.tar.gz",
            ],
        )

    if "build_bazel_rules_apple" not in native.existing_rules():
        http_archive(
            name = "build_bazel_rules_apple",
            strip_prefix = "rules_apple-b869b0d3868d78a1d4ffd866ccb304fb68aa12c3",
            sha256 = "bdc8e66e70b8a75da23b79f1f8c6207356df07d041d96d2189add7ee0780cf4e",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/rules_apple/archive/b869b0d3868d78a1d4ffd866ccb304fb68aa12c3.tar.gz",
                "https://github.com/bazelbuild/rules_apple/archive/b869b0d3868d78a1d4ffd866ccb304fb68aa12c3.tar.gz",
            ],
        )

    if "build_bazel_apple_support" not in native.existing_rules():
        http_archive(
            name = "build_bazel_apple_support",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/bazelbuild/apple_support/releases/download/0.7.1/apple_support.0.7.1.tar.gz",
                "https://github.com/bazelbuild/apple_support/releases/download/0.7.1/apple_support.0.7.1.tar.gz",
            ],
            sha256 = "122ebf7fe7d1c8e938af6aeaee0efe788a3a2449ece5a8d6a428cb18d6f88033",
        )

    if "libuv" not in native.existing_rules():
        http_archive(
            name = "libuv",
            build_file = "@com_github_grpc_grpc//third_party:libuv.BUILD",
            sha256 = "dfb4fe1ff0b47340978490a14bf253475159ecfcbad46ab2a350c78f9ce3360f",
            strip_prefix = "libuv-15ae750151ac9341e5945eb38f8982d59fb99201",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/libuv/libuv/archive/15ae750151ac9341e5945eb38f8982d59fb99201.tar.gz",
                "https://github.com/libuv/libuv/archive/15ae750151ac9341e5945eb38f8982d59fb99201.tar.gz",
            ],
        )

    # Python-specific dependencies live in grpc_python_deps.bzl.
    grpc_python_deps()
# TODO: move some dependencies from "grpc_deps" here?
def grpc_test_only_deps():
    """Internal, not intended for use by packages that are consuming grpc.

    Loads dependencies that are only needed to run grpc library's tests."""

    # Aliases for the Python test tooling (fetched below from source archives
    # rather than pip).
    native.bind(
        name = "twisted",
        actual = "@com_github_twisted_twisted//:twisted",
    )

    native.bind(
        name = "yaml",
        actual = "@com_github_yaml_pyyaml//:yaml",
    )

    # Each archive is only fetched when the consuming WORKSPACE has not
    # already defined a repository of the same name.
    if "com_github_twisted_twisted" not in native.existing_rules():
        http_archive(
            name = "com_github_twisted_twisted",
            sha256 = "ca17699d0d62eafc5c28daf2c7d0a18e62ae77b4137300b6c7d7868b39b06139",
            strip_prefix = "twisted-twisted-17.5.0",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/twisted/twisted/archive/twisted-17.5.0.zip",
                "https://github.com/twisted/twisted/archive/twisted-17.5.0.zip",
            ],
            build_file = "@com_github_grpc_grpc//third_party:twisted.BUILD",
        )

    if "com_github_yaml_pyyaml" not in native.existing_rules():
        http_archive(
            name = "com_github_yaml_pyyaml",
            sha256 = "6b4314b1b2051ddb9d4fcd1634e1fa9c1bb4012954273c9ff3ef689f6ec6c93e",
            strip_prefix = "pyyaml-3.12",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/yaml/pyyaml/archive/3.12.zip",
                "https://github.com/yaml/pyyaml/archive/3.12.zip",
            ],
            build_file = "@com_github_grpc_grpc//third_party:yaml.BUILD",
        )

    if "com_github_twisted_incremental" not in native.existing_rules():
        http_archive(
            name = "com_github_twisted_incremental",
            sha256 = "f0ca93359ee70243ff7fbf2d904a6291810bd88cb80ed4aca6fa77f318a41a36",
            strip_prefix = "incremental-incremental-17.5.0",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/twisted/incremental/archive/incremental-17.5.0.zip",
                "https://github.com/twisted/incremental/archive/incremental-17.5.0.zip",
            ],
            build_file = "@com_github_grpc_grpc//third_party:incremental.BUILD",
        )

    if "com_github_zopefoundation_zope_interface" not in native.existing_rules():
        http_archive(
            name = "com_github_zopefoundation_zope_interface",
            sha256 = "e9579fc6149294339897be3aa9ecd8a29217c0b013fe6f44fcdae00e3204198a",
            strip_prefix = "zope.interface-4.4.3",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/zopefoundation/zope.interface/archive/4.4.3.zip",
                "https://github.com/zopefoundation/zope.interface/archive/4.4.3.zip",
            ],
            build_file = "@com_github_grpc_grpc//third_party:zope_interface.BUILD",
        )

    if "com_github_twisted_constantly" not in native.existing_rules():
        http_archive(
            name = "com_github_twisted_constantly",
            sha256 = "2702cd322161a579d2c0dbf94af4e57712eedc7bd7bbbdc554a230544f7d346c",
            strip_prefix = "constantly-15.1.0",
            urls = [
                "https://storage.googleapis.com/grpc-bazel-mirror/github.com/twisted/constantly/archive/15.1.0.zip",
                "https://github.com/twisted/constantly/archive/15.1.0.zip",
            ],
            build_file = "@com_github_grpc_grpc//third_party:constantly.BUILD",
        )
| 43.160099 | 172 | 0.647149 |
3da10af5d47ec011aa200450c2856e53b472d246 | 401 | py | Python | furry/nn/__init__.py | Bill13579/furry | 3336ccd88996494c8bf0b8445bf7e7058a5f3397 | [
"MIT"
] | 1 | 2019-05-11T02:49:38.000Z | 2019-05-11T02:49:38.000Z | furry/nn/__init__.py | Bill13579/furry | 3336ccd88996494c8bf0b8445bf7e7058a5f3397 | [
"MIT"
] | null | null | null | furry/nn/__init__.py | Bill13579/furry | 3336ccd88996494c8bf0b8445bf7e7058a5f3397 | [
"MIT"
] | null | null | null | from furry.nn.batchnorm import BatchNormalization
from furry.nn.conv import Conv1d, Conv2d, Conv3d
from furry.nn.conv_t import ConvTranspose1d, ConvTranspose2d, ConvTranspose3d, DeConv1d, DeConv2d, DeConv3d
from furry.nn.dense import Dense
from furry.nn.recurrent import Recurrent
from furry.nn.gru import GRU
from furry.nn.pool import MaxPool1d, MaxPool2d, MaxPool3d, AvgPool1d, AvgPool2d, AvgPool3d
| 50.125 | 107 | 0.835411 |
ffefc737cd75f986a3716bc22e44dcc08035b9be | 8,582 | py | Python | openshift/installer/vendored/openshift-ansible-3.11.0-0.10.0/roles/lib_vendored_deps/library/oo_azure_rm_publish_image_facts.py | propyless/openshift-tools | 16776b4f343ea3b2018f7679cc3383e616020710 | [
"Apache-2.0"
] | null | null | null | openshift/installer/vendored/openshift-ansible-3.11.0-0.10.0/roles/lib_vendored_deps/library/oo_azure_rm_publish_image_facts.py | propyless/openshift-tools | 16776b4f343ea3b2018f7679cc3383e616020710 | [
"Apache-2.0"
] | null | null | null | openshift/installer/vendored/openshift-ansible-3.11.0-0.10.0/roles/lib_vendored_deps/library/oo_azure_rm_publish_image_facts.py | propyless/openshift-tools | 16776b4f343ea3b2018f7679cc3383e616020710 | [
"Apache-2.0"
] | 2 | 2018-10-16T05:11:13.000Z | 2018-11-07T01:46:29.000Z | #!/usr/bin/env python
# pylint: disable=missing-docstring
# Copyright 2018 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function # noqa: F401
# import httplib
import json
import os
import requests
from ansible.module_utils.basic import AnsibleModule
class AzurePublisherException(Exception):
    """Raised when the Azure publishing portal client encounters an error."""
class AzurePublisher(object):
    '''Client for the Azure Cloud Partner publishing portal
    (https://cloudpartner.azure.com).

    Wraps the small portion of the portal REST API needed to query offers,
    inspect or cancel publishing operations, and create/modify offers.
    '''

    # pylint: disable=too-many-arguments
    def __init__(self,
                 publisher_id,
                 client_info,
                 ssl_verify=True,
                 api_version='2017-10-31',
                 debug=False):
        '''
        :publisher_id: string of the publisher id
        :client_info: a dict containing the tenant_id, client_id and
                      client_secret used to request an access_token
        :ssl_verify: whether TLS certificates are verified on every request
        :api_version: portal API version appended to every request URL
        :debug: verbose-logging flag (currently only stored)
        '''
        self._azure_server = 'https://cloudpartner.azure.com/api/publishers/{}'.format(publisher_id)
        self.client_info = client_info
        self.ssl_verify = ssl_verify
        self.api_version = 'api-version={}'.format(api_version)
        self.debug = debug
        # The OAuth token is fetched lazily on first access of `token`.
        self._access_token = None

    @property
    def server(self):
        '''base url for this publisher on the portal'''
        return self._azure_server

    @property
    def token(self):
        '''OAuth bearer token for cloudpartner.azure.com; fetched once, then cached.

        Equivalent to:
        curl --data-urlencode "client_id=$AZURE_CLIENT_ID" \
             --data-urlencode "client_secret=$AZURE_CLIENT_SECRET" \
             --data-urlencode "grant_type=client_credentials" \
             --data-urlencode "resource=https://cloudpartner.azure.com" \
             https://login.microsoftonline.com/$AZURE_TENANT_ID/oauth2/token
        '''
        if self._access_token is None:
            url = 'https://login.microsoftonline.com/{}/oauth2/token'.format(self.client_info['tenant_id'])
            data = {
                # Plain string value. The previous code wrapped this in a set
                # literal ({...}), which requests form-encodes incorrectly.
                'client_id': self.client_info['client_id'],
                'client_secret': self.client_info['client_secret'],
                'grant_type': 'client_credentials',
                'resource': 'https://cloudpartner.azure.com'
            }

            results = AzurePublisher.request('POST', url, data, {},
                                             ssl_verify=self.ssl_verify)
            jres = results.json()
            self._access_token = jres['access_token']

        return self._access_token

    def get_offers(self, offer=None, version=None, slot=''):
        '''fetch all offers for the publisher, or narrow to an offer/version/slot

        :offer: optional offer name
        :version: optional version of the offer
        :slot: optional slot name (empty string means no slot)
        '''
        url = '/offers'

        if offer is not None:
            url += '/{}'.format(offer)

        if version is not None:
            url += '/versions/{}'.format(version)

        if slot != '':
            url += '/slot/{}'.format(slot)

        url += '?{}'.format(self.api_version)

        return self.prepare_action(url)

    def get_operations(self, offer, operation=None, status=None):
        '''fetch the submission operations for an offer, optionally filtered
        by a single operation id and/or a status
        '''
        url = '/offers/{0}/submissions'.format(offer)

        if operation is not None:
            url += '/operations/{0}'.format(operation)

        if not url.endswith('/'):
            url += '/'

        url += '?{0}'.format(self.api_version)

        if status is not None:
            url += '&status={0}'.format(status)

        return self.prepare_action(url, 'GET')

    def cancel_operation(self, offer):
        '''cancel the in-flight publishing operation for an offer'''
        url = '/offers/{0}/cancel?{1}'.format(offer, self.api_version)

        return self.prepare_action(url, 'POST')

    def go_live(self, offer):
        '''promote an offer to production (go live)'''
        url = '/offers/{0}/golive?{1}'.format(offer, self.api_version)

        return self.prepare_action(url, 'POST')

    def create_or_modify_offer(self, offer, data=None, modify=False):
        '''create or modify an offer

        :offer: offer name
        :data: offer payload sent as the request body
        :modify: when True, send an If-Match header so an existing offer is
                 updated instead of created
        '''
        url = '/offers/{0}?{1}'.format(offer, self.api_version)

        headers = None

        if modify:
            headers = {
                'If-Match': '*',
            }

        return self.prepare_action(url, 'PUT', data=data, add_headers=headers)

    def prepare_action(self, url, action='GET', data=None, add_headers=None):
        '''perform the http request against the portal

        :action: string of the HTTP verb, e.g. GET|POST|PUT
        :data: optional payload; serialized to json when present
        :add_headers: optional extra headers merged over the defaults
        '''
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Authorization': 'Bearer {}'.format(self.token)
        }

        if add_headers is not None:
            headers.update(add_headers)

        if data is None:
            data = ''
        else:
            data = json.dumps(data)

        # Honor the instance's ssl_verify setting; previously it was dropped
        # here and the request always defaulted to verify=True.
        return AzurePublisher.request(action.upper(), self.server + url, data, headers,
                                      ssl_verify=self.ssl_verify)

    def manage_offer(self, params):
        '''create the offer, or modify it in place when it already exists
        and params['force'] is set
        '''
        # fetch the offer to verify it exists:
        results = self.get_offers(offer=params['offer'])

        if results.status_code == 200 and params['force']:
            return self.create_or_modify_offer(offer=params['offer'], data=params['offer_data'], modify=True)

        return self.create_or_modify_offer(offer=params['offer'], data=params['offer_data'])

    @staticmethod
    def request(action, url, data=None, headers=None, ssl_verify=True):
        '''send a single prepared HTTP request and return the requests response'''
        req = requests.Request(action.upper(), url, data=data, headers=headers)
        session = requests.Session()
        req_prep = session.prepare_request(req)
        response = session.send(req_prep, verify=ssl_verify)

        return response

    @staticmethod
    def run_ansible(params):
        '''perform the ansible operations

        :params: validated AnsibleModule params
        :returns: dict with the decoded json payload and the http status code
        '''
        client_info = {
            'tenant_id': params['tenant_id'],
            'client_id': params['client_id'],
            'client_secret': params['client_secret']}

        apc = AzurePublisher(params['publisher'],
                             client_info,
                             debug=params['debug'])

        if params['query'] == 'offer':
            results = apc.get_offers(offer=params['offer'])
        elif params['query'] == 'operation':
            results = apc.get_operations(offer=params['offer'],
                                         operation=params['operation'],
                                         status=params['status'])
        else:
            raise AzurePublisherException('Unsupported query type: {}'.format(params['query']))

        return {'data': results.json(), 'status_code': results.status_code}
def main():
    '''Ansible entry point: validate credentials and run the Azure query.'''
    argument_spec = dict(
        query=dict(default='offer', choices=['offer', 'operation']),
        publisher=dict(default='redhat', type='str'),
        debug=dict(default=False, type='bool'),
        tenant_id=dict(default=os.environ.get('AZURE_TENANT_ID'), type='str'),
        client_id=dict(default=os.environ.get('AZURE_CLIENT_ID'), type='str'),
        client_secret=dict(default=os.environ.get('AZURE_CLIENT_SECRET'), type='str'),
        offer=dict(default=None, type='str'),
        operation=dict(default=None, type='str'),
        status=dict(default=None, type='str'),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    # All three Azure credentials are required, either as module arguments
    # or via the AZURE_* environment variables picked up in the defaults.
    credentials = (module.params['tenant_id'],
                   module.params['client_id'],
                   module.params['client_secret'])
    if any(value is None for value in credentials):
        return module.fail_json(**{'failed': True,
                                   'msg': 'Please specify tenant_id, client_id, and client_secret'})

    rval = AzurePublisher.run_ansible(module.params)

    status = int(rval['status_code'])
    if status == 404:
        rval['msg'] = 'Offer does not exist.'
    elif status >= 300:
        rval['msg'] = 'Error.'
        return module.fail_json(**rval)

    return module.exit_json(**rval)
# Script entry point: run the Ansible module when executed directly.
if __name__ == '__main__':
    main()
| 35.316872 | 119 | 0.605919 |
d5a9aa43275f124471927d4fecdb0fa7b66f7c26 | 1,298 | py | Python | tests/test_toy.py | marinang/probfit | fa5a91b84677d4d73fb47b1b9c4886a5417ef457 | [
"MIT"
] | null | null | null | tests/test_toy.py | marinang/probfit | fa5a91b84677d4d73fb47b1b9c4886a5417ef457 | [
"MIT"
] | null | null | null | tests/test_toy.py | marinang/probfit | fa5a91b84677d4d73fb47b1b9c4886a5417ef457 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib
matplotlib.use('Agg', warn=False)
from probfit.nputil import mid
from probfit.pdf import crystalball, gaussian
from probfit.functor import Normalized
from probfit.toy import gen_toy
from probfit._libstat import compute_chi2
from probfit.nputil import vector_apply
from probfit.costfunc import BinnedLH
def test_gen_toy():
np.random.seed(0)
bound = (-1, 2)
ntoy = 100000
toy = gen_toy(crystalball, ntoy, bound=bound,
alpha=1., n=2., mean=1., sigma=0.3, quiet=False)
assert len(toy) == ntoy
htoy, bins = np.histogram(toy, bins=1000, range=bound)
ncball = Normalized(crystalball, bound)
f = lambda x: ncball(x, 1., 2., 1., 0.3)
expected = vector_apply(f, mid(bins)) * ntoy * (bins[1] - bins[0])
htoy = htoy * 1.0
err = np.sqrt(expected)
chi2 = compute_chi2(htoy, expected, err)
print(chi2, len(bins), chi2 / len(bins))
assert (0.9 < (chi2 / len(bins)) < 1.1)
def test_gen_toy2():
pdf = gaussian
np.random.seed(0)
toy = gen_toy(pdf, 10000, (-5, 5), mean=0, sigma=1)
binlh = BinnedLH(pdf, toy, bound=(-5, 5), bins=100)
lh = binlh(0., 1.)
for x in toy:
assert (x < 5)
assert (x >= -5)
assert len(toy) == 10000
assert lh / 100. < 1.
| 25.45098 | 70 | 0.631741 |
c7564d3bb4e8bc0c51389fb5d242ccf2932fa669 | 3,736 | py | Python | tf3d/instance_segmentation/metric_test.py | wondercha/google-research | 1c3d958e8f99aad52d48a0665bc5e8446ad87d8d | [
"Apache-2.0"
] | 2 | 2021-02-11T19:57:58.000Z | 2021-06-25T18:22:21.000Z | tf3d/instance_segmentation/metric_test.py | wondercha/google-research | 1c3d958e8f99aad52d48a0665bc5e8446ad87d8d | [
"Apache-2.0"
] | 1 | 2021-03-22T17:03:30.000Z | 2021-03-22T17:03:30.000Z | tf3d/instance_segmentation/metric_test.py | wondercha/google-research | 1c3d958e8f99aad52d48a0665bc5e8446ad87d8d | [
"Apache-2.0"
] | 4 | 2021-02-08T10:25:45.000Z | 2021-04-17T14:46:26.000Z | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ...tf3d.instance_segmentation.metric."""
import tensorflow as tf
from tf3d import standard_fields
from tf3d.instance_segmentation import metric
class MetricTest(tf.test.TestCase):
def test_instance_segmentation_metric(self):
label_map = {1: 'car', 2: 'bus', 3: 'sign', 9: 'pedestrian', 12: 'cyclist'}
max_num_gt_objects = 10
max_num_predicted_objects = 20
max_num_voxels = 1000
num_classes = 20
num_frames = 5
inputs = []
outputs = []
for _ in range(num_frames):
num_voxels = tf.random.uniform([],
minval=1,
maxval=max_num_voxels,
dtype=tf.int32)
num_gt_objects = tf.random.uniform([],
minval=1,
maxval=max_num_gt_objects,
dtype=tf.int32)
num_predicted_objects = tf.random.uniform(
[], minval=1, maxval=max_num_predicted_objects, dtype=tf.int32)
inputs_i = {
standard_fields.InputDataFields.objects_class:
tf.random.uniform([num_gt_objects, 1],
minval=1,
maxval=num_classes,
dtype=tf.int32),
standard_fields.InputDataFields.object_instance_id_voxels:
tf.random.uniform([num_voxels, 1],
minval=0,
maxval=num_gt_objects,
dtype=tf.int32),
}
inputs.append(inputs_i)
outputs_i = {
standard_fields.DetectionResultFields.objects_score:
tf.random.uniform([num_predicted_objects, 1],
minval=0.0,
maxval=1.0,
dtype=tf.float32),
standard_fields.DetectionResultFields.objects_class:
tf.random.uniform([num_predicted_objects, 1],
minval=1,
maxval=num_classes,
dtype=tf.int32),
standard_fields.DetectionResultFields.instance_segments_voxel_mask:
tf.random.uniform([num_predicted_objects, num_voxels],
minval=0.0,
maxval=1.0,
dtype=tf.float32),
}
outputs.append(outputs_i)
iou_threshold = 0.5
m = metric.InstanceSegmentationMetric(
iou_threshold=iou_threshold,
num_classes=num_classes,
label_map=label_map,
eval_prefix='eval')
for i in range(num_frames):
m.update_state(inputs[i], outputs[i])
metrics_dict = m.get_metric_dictionary()
for object_name in ['car', 'bus', 'sign', 'pedestrian', 'cyclist']:
self.assertIn('eval_IOU{}_AP/{}'.format(iou_threshold, object_name),
metrics_dict)
self.assertIn('eval_IOU{}_AP/avg'.format(iou_threshold), metrics_dict)
# Run the TensorFlow test runner when executed directly.
if __name__ == '__main__':
  tf.test.main()
| 40.172043 | 79 | 0.567184 |
3892415b67d5868d3ebb2be51b154b8a7bf65036 | 91 | py | Python | teste_quiz.py | Fabio-Coder/100_days_of_python | 74f10dae5b0bb2f9d32d5c9c779652b46fa258cf | [
"MIT"
] | null | null | null | teste_quiz.py | Fabio-Coder/100_days_of_python | 74f10dae5b0bb2f9d32d5c9c779652b46fa258cf | [
"MIT"
] | null | null | null | teste_quiz.py | Fabio-Coder/100_days_of_python | 74f10dae5b0bb2f9d32d5c9c779652b46fa258cf | [
"MIT"
] | null | null | null | def all_aboard(a, *args, **kw):
print(a, args, kw)
all_aboard(4, 7, 3, 0, x=10, y=64) | 18.2 | 34 | 0.571429 |
772ac86bf91e79082281fdc34b7cc7488a1996df | 23,043 | py | Python | tensorflow/python/data/experimental/kernel_tests/copy_to_device_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/data/experimental/kernel_tests/copy_to_device_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/data/experimental/kernel_tests/copy_to_device_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.copy_to_device()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import prefetching_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat as util_compat
# TODO(b/117581999): add eager coverage when supported.
class CopyToDeviceTest(test_base.DatasetTestBase):
  @test_util.deprecated_graph_mode_only
  def testCopyToDevice(self):
    """copy_to_device moves a range dataset to /cpu:1 and preserves structure."""
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:1"))
    # The iterator is created inside the target device's scope.
    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(host_dataset),
            dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)
    self.assertEqual([], next_element.shape)
    # Two CPU devices are configured so /cpu:1 exists in the session.
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceInt32(self):
    """copy_to_device preserves an int32 vector element's dtype and shape."""
    host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:1"))
    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(host_dataset),
            dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int32, next_element.dtype)
    self.assertEqual((4,), next_element.shape)
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToSameDevice(self):
    """Copying a dataset to the device it already lives on (/cpu:0) works."""
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:0"))
    with ops.device("/cpu:0"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(host_dataset),
            dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)
    self.assertEqual([], next_element.shape)
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceWithPrefetch(self):
    """copy_to_device chained with prefetch(1) yields the same elements."""
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(host_dataset),
            dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)
    self.assertEqual([], next_element.shape)
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyDictToDevice(self):
    """copy_to_device preserves dict-structured elements."""
    host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:1"))
    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(host_dataset),
            dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element["a"].dtype)
    self.assertEqual([], next_element["a"].shape)
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        self.assertEqual({"a": i}, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyDictToDeviceWithPrefetch(self):
    """Dict-structured elements survive copy_to_device plus prefetch(1)."""
    host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(host_dataset),
            dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element["a"].dtype)
    self.assertEqual([], next_element["a"].shape)
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        self.assertEqual({"a": i}, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopySparseTensorsToDevice(self):
    """Sparse tensor elements survive copy_to_device intact."""
    def make_tensor(i):
      # Length-1 values tensor equal to i at index [0, 0] of a 2x2 shape.
      return sparse_tensor.SparseTensorValue(
          indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])
    host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:1"))
    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(host_dataset),
            dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        actual = self.evaluate(next_element)
        self.assertAllEqual([i], actual.values)
        self.assertAllEqual([[0, 0]], actual.indices)
        self.assertAllEqual([2, 2], actual.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopySparseTensorsToDeviceWithPrefetch(self):
    """Sparse tensor elements survive copy_to_device plus prefetch(1)."""
    def make_tensor(i):
      # Length-1 values tensor equal to i at index [0, 0] of a 2x2 shape.
      return sparse_tensor.SparseTensorValue(
          indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])
    host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_one_shot_iterator(device_dataset)
      next_element = iterator.get_next()
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(host_dataset),
            dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      for i in range(10):
        actual = self.evaluate(next_element)
        self.assertAllEqual([i], actual.values)
        self.assertAllEqual([[0, 0]], actual.indices)
        self.assertAllEqual([2, 2], actual.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceGpu(self):
    """copy_to_device onto /gpu:0 works without soft device placement."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0"))
    with ops.device("/gpu:0"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()
    # allow_soft_placement=False makes any wrong device assignment fail hard.
    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      self.evaluate(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceGpuWithPrefetch(self):
    """copy_to_device onto /gpu:0 chained with prefetch(1) works."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
    with ops.device("/gpu:0"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()
    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      self.evaluate(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceGpuWithMap(self):
    """map_on_gpu applied after copy_to_device transforms elements on /gpu:0."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    def generator():
      for i in range(10):
        yield i, float(i), str(i)
    host_dataset = dataset_ops.Dataset.from_generator(
        generator, output_types=(dtypes.int32, dtypes.float32, dtypes.string))
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0"))
    def gpu_map_func(x, y, z):
      # Numeric components are squared; the string passes through unchanged.
      return math_ops.square(x), math_ops.square(y), z
    device_dataset = device_dataset.apply(
        prefetching_ops.map_on_gpu(gpu_map_func))
    options = dataset_ops.Options()
    options.experimental_optimization.autotune = False
    device_dataset = device_dataset.with_options(options)
    with ops.device("/gpu:0"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()
    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      self.evaluate(iterator.initializer)
      for i in range(10):
        x, y, z = self.evaluate(next_element)
        self.assertEqual(i**2, x)
        self.assertEqual(float(i**2), y)
        self.assertEqual(util_compat.as_bytes(str(i)), z)
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceGpuInt32(self):
    """An int32 vector element is copied to /gpu:0 unchanged."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0"))
    with ops.device("/gpu:0"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()
    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      self.evaluate(iterator.initializer)
      self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceGpuInt32AndPrefetch(self):
    """An int32 vector element survives copy_to_device plus prefetch(1)."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
    with ops.device("/gpu:0"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()
    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      self.evaluate(iterator.initializer)
      self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceGpuStrings(self):
    """A string vector element is copied to /gpu:0 unchanged."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0"))
    with ops.device("/gpu:0"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()
    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      self.evaluate(iterator.initializer)
      self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
@test_util.deprecated_graph_mode_only
def testCopyToDeviceGpuStringsAndPrefetch(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
device_dataset = host_dataset.apply(
prefetching_ops.copy_to_device("/gpu:0"))
with ops.device("/gpu:0"):
iterator = dataset_ops.make_initializable_iterator(device_dataset)
next_element = iterator.get_next()
with self.cached_session(
config=config_pb2.ConfigProto(allow_soft_placement=False)):
self.evaluate(iterator.initializer)
self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDevicePingPongCPUGPU(self):
    """A dataset copied CPU -> GPU -> CPU still yields the original elements."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    with compat.forward_compatibility_horizon(2018, 8, 4):
      host_dataset = dataset_ops.Dataset.range(10)
      device_dataset = host_dataset.apply(
          prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0"))
      back_to_cpu_dataset = device_dataset.apply(
          prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0"))
      with ops.device("/cpu:0"):
        iterator = dataset_ops.make_initializable_iterator(back_to_cpu_dataset)
        next_element = iterator.get_next()
      with self.cached_session(
          config=config_pb2.ConfigProto(allow_soft_placement=False)):
        self.evaluate(iterator.initializer)
        for i in range(10):
          self.assertEqual(i, self.evaluate(next_element))
        with self.assertRaises(errors.OutOfRangeError):
          self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceWithReInit(self):
    """Re-initializing the iterator mid-stream restarts the copied dataset."""
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:1"))
    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(host_dataset),
            dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)
    self.assertEqual([], next_element.shape)
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      # Consume half the elements, re-initialize, then consume everything.
      self.evaluate(iterator.initializer)
      for i in range(5):
        self.assertEqual(i, self.evaluate(next_element))
      self.evaluate(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceWithReInitAndPrefetch(self):
    """Re-initialization restarts the copied dataset even with prefetch(1)."""
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)
    with ops.device("/cpu:1"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()
    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(host_dataset),
            dataset_ops.get_structure(device_dataset)))
    self.assertEqual(dtypes.int64, next_element.dtype)
    self.assertEqual([], next_element.shape)
    worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
    with self.test_session(config=worker_config):
      # Consume half the elements, re-initialize, then consume everything.
      self.evaluate(iterator.initializer)
      for i in range(5):
        self.assertEqual(i, self.evaluate(next_element))
      self.evaluate(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceGpuWithReInit(self):
    """Re-initializing a GPU-side iterator restarts the copied dataset."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0"))
    with ops.device("/gpu:0"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()
    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      # Consume half the elements, re-initialize, then consume everything.
      self.evaluate(iterator.initializer)
      for i in range(5):
        self.assertEqual(i, self.evaluate(next_element))
      self.evaluate(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testCopyToDeviceGpuWithReInitAndPrefetch(self):
    """GPU-side re-initialization restarts the dataset even with prefetch(1)."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)
    with ops.device("/gpu:0"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_element = iterator.get_next()
    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      # Consume half the elements, re-initialize, then consume everything.
      self.evaluate(iterator.initializer)
      for i in range(5):
        self.assertEqual(i, self.evaluate(next_element))
      self.evaluate(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)
  @test_util.deprecated_graph_mode_only
  def testIteratorGetNextAsOptionalOnGPU(self):
    """Exercises get_next_as_optional() against a GPU-placed iterator:
    the optional must fail before initialization, report has_value()=True
    during iteration, and has_value()=False after exhaustion."""
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.range(3)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0"))
    with ops.device("/gpu:0"):
      iterator = dataset_ops.make_initializable_iterator(device_dataset)
      next_elem = iterator_ops.get_next_as_optional(iterator)
      elem_has_value_t = next_elem.has_value()
      elem_value_t = next_elem.get_value()
    # Soft placement is disabled so a misplaced op fails loudly instead of
    # silently running on another device.
    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      # Before initializing the iterator, evaluating the optional fails with
      # a FailedPreconditionError.
      with self.assertRaises(errors.FailedPreconditionError):
        self.evaluate(elem_has_value_t)
      with self.assertRaises(errors.FailedPreconditionError):
        self.evaluate(elem_value_t)
      # For each element of the dataset, assert that the optional evaluates to
      # the expected value.
      self.evaluate(iterator.initializer)
      for i in range(3):
        elem_has_value, elem_value = self.evaluate(
            [elem_has_value_t, elem_value_t])
        self.assertTrue(elem_has_value)
        self.assertEqual(i, elem_value)
      # After exhausting the iterator, `next_elem.has_value()` will evaluate to
      # false, and attempting to get the value will fail.
      for _ in range(2):
        self.assertFalse(self.evaluate(elem_has_value_t))
        with self.assertRaises(errors.InvalidArgumentError):
          self.evaluate(elem_value_t)
# Standard TensorFlow test entry point when the file is run as a script.
if __name__ == "__main__":
  test.main()
| 39.255537 | 81 | 0.70069 |
57483345c77a965e2fd67940c92cff50d627404c | 17,076 | py | Python | netbox/extras/views.py | letic/netbox | 0930745e16330edf00da081150b079d5ed6ecc02 | [
"Apache-2.0"
] | 2 | 2021-07-08T03:58:12.000Z | 2022-02-11T21:50:46.000Z | netbox/extras/views.py | letic/netbox | 0930745e16330edf00da081150b079d5ed6ecc02 | [
"Apache-2.0"
] | 25 | 2019-09-17T19:40:50.000Z | 2022-03-11T04:01:55.000Z | netbox/extras/views.py | letic/netbox | 0930745e16330edf00da081150b079d5ed6ecc02 | [
"Apache-2.0"
] | 1 | 2022-02-11T21:50:58.000Z | 2022-02-11T21:50:58.000Z | from django import template
from django.contrib import messages
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.http import Http404, HttpResponseForbidden
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import View
from django_rq.queues import get_connection
from django_tables2 import RequestConfig
from rq import Worker
from netbox.views import generic
from utilities.forms import ConfirmationForm
from utilities.paginator import EnhancedPaginator, get_paginate_count
from utilities.utils import copy_safe_request, count_related, shallow_compare_dict
from utilities.views import ContentTypePermissionRequiredMixin
from . import filters, forms, tables
from .choices import JobResultStatusChoices
from .models import ConfigContext, ImageAttachment, ObjectChange, JobResult, Tag, TaggedItem
from .reports import get_report, get_reports, run_report
from .scripts import get_scripts, run_script
#
# Tags
#
class TagListView(generic.ObjectListView):
    """List all Tags, annotated with the number of objects tagged by each."""
    queryset = Tag.objects.annotate(
        items=count_related(TaggedItem, 'tag')
    )
    filterset = filters.TagFilterSet
    filterset_form = forms.TagFilterForm
    table = tables.TagTable
class TagEditView(generic.ObjectEditView):
    """Create or edit a single Tag."""
    queryset = Tag.objects.all()
    model_form = forms.TagForm
    template_name = 'extras/tag_edit.html'
class TagDeleteView(generic.ObjectDeleteView):
    """Delete a single Tag."""
    queryset = Tag.objects.all()
class TagBulkImportView(generic.BulkImportView):
    """Import Tags in bulk from CSV data."""
    queryset = Tag.objects.all()
    model_form = forms.TagCSVForm
    table = tables.TagTable
class TagBulkEditView(generic.BulkEditView):
    """Edit multiple Tags at once."""
    queryset = Tag.objects.annotate(
        items=count_related(TaggedItem, 'tag')
    )
    table = tables.TagTable
    form = forms.TagBulkEditForm
class TagBulkDeleteView(generic.BulkDeleteView):
    """Delete multiple Tags at once."""
    queryset = Tag.objects.annotate(
        items=count_related(TaggedItem, 'tag')
    )
    table = tables.TagTable
#
# Config contexts
#
class ConfigContextListView(generic.ObjectListView):
    """List all ConfigContexts with filtering support."""
    queryset = ConfigContext.objects.all()
    filterset = filters.ConfigContextFilterSet
    filterset_form = forms.ConfigContextFilterForm
    table = tables.ConfigContextTable
    # Bulk import/export is not supported for config contexts; only "add".
    action_buttons = ('add',)
class ConfigContextView(generic.ObjectView):
    """Display a single ConfigContext, rendered as JSON or YAML."""
    queryset = ConfigContext.objects.all()

    def get_extra_context(self, request, instance):
        # Resolve the preferred rendering format: an explicit query parameter
        # wins (and is persisted for authenticated users); otherwise fall back
        # to the stored user preference, defaulting to JSON.
        requested = request.GET.get('format')
        if requested in ('json', 'yaml'):
            format = requested
            if request.user.is_authenticated:
                request.user.config.set('extras.configcontext.format', format, commit=True)
        else:
            if request.user.is_authenticated:
                format = request.user.config.get('extras.configcontext.format', 'json')
            else:
                format = 'json'
        return {
            'format': format,
        }
class ConfigContextEditView(generic.ObjectEditView):
    """Create or edit a single ConfigContext."""
    queryset = ConfigContext.objects.all()
    model_form = forms.ConfigContextForm
    template_name = 'extras/configcontext_edit.html'
class ConfigContextBulkEditView(generic.BulkEditView):
    """Edit multiple ConfigContexts at once."""
    queryset = ConfigContext.objects.all()
    filterset = filters.ConfigContextFilterSet
    table = tables.ConfigContextTable
    form = forms.ConfigContextBulkEditForm
class ConfigContextDeleteView(generic.ObjectDeleteView):
    """Delete a single ConfigContext."""
    queryset = ConfigContext.objects.all()
class ConfigContextBulkDeleteView(generic.BulkDeleteView):
    """Delete multiple ConfigContexts at once."""
    queryset = ConfigContext.objects.all()
    table = tables.ConfigContextTable
class ObjectConfigContextView(generic.ObjectView):
    """Render the computed (merged) config context of an arbitrary object."""
    base_template = None
    template_name = 'extras/object_configcontext.html'

    def get_extra_context(self, request, instance):
        source_contexts = ConfigContext.objects.restrict(request.user, 'view').get_for_object(instance)

        # Resolve the preferred output format: explicit query parameter first
        # (persisted for logged-in users), then the saved preference, then JSON.
        requested = request.GET.get('format')
        if requested in ('json', 'yaml'):
            format = requested
            if request.user.is_authenticated:
                request.user.config.set('extras.configcontext.format', format, commit=True)
        elif request.user.is_authenticated:
            format = request.user.config.get('extras.configcontext.format', 'json')
        else:
            format = 'json'

        return {
            'rendered_context': instance.get_config_context(),
            'source_contexts': source_contexts,
            'format': format,
            'base_template': self.base_template,
            'active_tab': 'config-context',
        }
#
# Change logging
#
class ObjectChangeListView(generic.ObjectListView):
    """Global change log: list all recorded ObjectChanges."""
    queryset = ObjectChange.objects.all()
    filterset = filters.ObjectChangeFilterSet
    filterset_form = forms.ObjectChangeFilterForm
    table = tables.ObjectChangeTable
    template_name = 'extras/objectchange_list.html'
    # Change records are read-only; only export is offered.
    action_buttons = ('export',)
class ObjectChangeView(generic.ObjectView):
    """Detail view for a single ObjectChange record, with a field-level diff."""
    queryset = ObjectChange.objects.all()

    def get_extra_context(self, request, instance):
        # Other changes recorded within the same request (excluding this one).
        sibling_changes = ObjectChange.objects.restrict(request.user, 'view').filter(
            request_id=instance.request_id
        ).exclude(
            pk=instance.pk
        )
        related_changes_table = tables.ObjectChangeTable(
            data=sibling_changes[:50],
            orderable=False
        )

        # Full change history of the affected object, used to locate the
        # chronologically adjacent records for prev/next navigation.
        history = ObjectChange.objects.restrict(request.user, 'view').filter(
            changed_object_type=instance.changed_object_type,
            changed_object_id=instance.changed_object_id,
        )
        next_change = history.filter(time__gt=instance.time).order_by('time').first()
        prev_change = history.filter(time__lt=instance.time).order_by('-time').first()

        if not prev_change:
            # No previous change; this is the initial change that added the object
            diff_added = diff_removed = instance.object_data
        else:
            diff_added = shallow_compare_dict(
                prev_change.object_data,
                instance.object_data,
                exclude=['last_updated'],
            )
            # For each changed key, show what the value was before this change.
            diff_removed = {key: prev_change.object_data.get(key) for key in diff_added}

        return {
            'diff_added': diff_added,
            'diff_removed': diff_removed,
            'next_change': next_change,
            'prev_change': prev_change,
            'related_changes_table': related_changes_table,
            'related_changes_count': sibling_changes.count()
        }
class ObjectChangeLogView(View):
    """
    Present a history of changes made to a particular object.
    base_template: The name of the template to extend. If not provided, "<app>/<model>.html" will be used.
    """
    base_template = None
    def get(self, request, model, **kwargs):
        # Handle QuerySet restriction of parent object if needed
        if hasattr(model.objects, 'restrict'):
            obj = get_object_or_404(model.objects.restrict(request.user, 'view'), **kwargs)
        else:
            obj = get_object_or_404(model, **kwargs)
        # Gather all changes for this object (and its related objects)
        content_type = ContentType.objects.get_for_model(model)
        objectchanges = ObjectChange.objects.restrict(request.user, 'view').prefetch_related(
            'user', 'changed_object_type'
        ).filter(
            Q(changed_object_type=content_type, changed_object_id=obj.pk) |
            Q(related_object_type=content_type, related_object_id=obj.pk)
        )
        objectchanges_table = tables.ObjectChangeTable(
            data=objectchanges,
            orderable=False
        )
        # Apply the request context (pagination preferences)
        paginate = {
            'paginator_class': EnhancedPaginator,
            'per_page': get_paginate_count(request)
        }
        RequestConfig(request, paginate).configure(objectchanges_table)
        # Default to using "<app>/<model>.html" as the template, if it exists. Otherwise,
        # fall back to using base.html.
        if self.base_template is None:
            self.base_template = f"{model._meta.app_label}/{model._meta.model_name}.html"
        # TODO: This can be removed once an object view has been established for every model.
        try:
            template.loader.get_template(self.base_template)
        except template.TemplateDoesNotExist:
            self.base_template = 'base.html'
        return render(request, 'extras/object_changelog.html', {
            'object': obj,
            'table': objectchanges_table,
            'base_template': self.base_template,
            'active_tab': 'changelog',
        })
#
# Image attachments
#
class ImageAttachmentEditView(generic.ObjectEditView):
    """Create or edit an ImageAttachment bound to a parent object."""
    queryset = ImageAttachment.objects.all()
    model_form = forms.ImageAttachmentForm

    def alter_obj(self, imageattachment, request, args, kwargs):
        # On creation (no PK yet), resolve the parent object from the URL.
        if imageattachment.pk:
            return imageattachment
        parent_model = kwargs.get('model')
        imageattachment.parent = get_object_or_404(parent_model, pk=kwargs['object_id'])
        return imageattachment

    def get_return_url(self, request, imageattachment):
        # Return to the page of the object the image is attached to.
        return imageattachment.parent.get_absolute_url()
class ImageAttachmentDeleteView(generic.ObjectDeleteView):
    """Delete an ImageAttachment, then return to its parent object's page."""
    queryset = ImageAttachment.objects.all()
    def get_return_url(self, request, imageattachment):
        # Return to the page of the object the image was attached to.
        return imageattachment.parent.get_absolute_url()
#
# Reports
#
class ReportListView(ContentTypePermissionRequiredMixin, View):
    """
    Retrieve all of the available reports from disk and the recorded JobResult (if any) for each.
    """
    def get_required_permission(self):
        return 'extras.view_report'

    def get(self, request):
        report_content_type = ContentType.objects.get(app_label='extras', model='report')

        # Map each completed report run by name for quick lookup below.
        finished_runs = JobResult.objects.filter(
            obj_type=report_content_type,
            status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES
        ).defer('data')
        results = {run.name: run for run in finished_runs}

        # Annotate every discovered report with its latest finished run.
        report_modules = []
        for module, report_list in get_reports():
            annotated_reports = []
            for report in report_list:
                report.result = results.get(report.full_name, None)
                annotated_reports.append(report)
            report_modules.append((module, annotated_reports))

        return render(request, 'extras/report_list.html', {
            'reports': report_modules,
        })
class ReportView(ContentTypePermissionRequiredMixin, View):
    """
    Display a single Report and its associated JobResult (if any).
    """
    def get_required_permission(self):
        return 'extras.view_report'
    def get(self, request, module, name):
        report = get_report(module, name)
        if report is None:
            raise Http404
        report_content_type = ContentType.objects.get(app_label='extras', model='report')
        # Attach the most recent completed run of this report, if any exists.
        report.result = JobResult.objects.filter(
            obj_type=report_content_type,
            name=report.full_name,
            status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES
        ).first()
        return render(request, 'extras/report.html', {
            'report': report,
            'run_form': ConfirmationForm(),
        })
    def post(self, request, module, name):
        # Permissions check
        if not request.user.has_perm('extras.run_report'):
            return HttpResponseForbidden()
        report = get_report(module, name)
        if report is None:
            raise Http404
        # Allow execution only if RQ worker process is running
        if not Worker.count(get_connection('default')):
            messages.error(request, "Unable to run report: RQ worker process not running.")
            return render(request, 'extras/report.html', {
                'report': report,
            })
        # Run the Report. A new JobResult is created.
        report_content_type = ContentType.objects.get(app_label='extras', model='report')
        job_result = JobResult.enqueue_job(
            run_report,
            report.full_name,
            report_content_type,
            request.user
        )
        return redirect('extras:report_result', job_result_pk=job_result.pk)
class ReportResultView(ContentTypePermissionRequiredMixin, View):
    """
    Display a JobResult pertaining to the execution of a Report.
    """
    def get_required_permission(self):
        return 'extras.view_report'

    def get(self, request, job_result_pk):
        report_content_type = ContentType.objects.get(app_label='extras', model='report')
        jobresult = get_object_or_404(
            JobResult.objects.all(), pk=job_result_pk, obj_type=report_content_type)

        # Re-load the Report class referenced by the JobResult and attach the
        # stored result so the template can render both together.
        module, report_name = jobresult.name.split('.')
        report = get_report(module, report_name)
        report.result = jobresult

        return render(request, 'extras/report_result.html', {
            'report': report,
            'result': jobresult,
        })
#
# Scripts
#
class GetScriptMixin:
    """Mixin that resolves a registered custom script class by module/name."""

    def _get_script(self, name, module=None):
        """
        Return an instance of the requested script.

        Accepts either separate ``module``/``name`` arguments or a single
        dotted ``"module.name"`` string (when ``module`` is None).

        Raises:
            Http404: if no script is registered under that module/name.
        """
        if module is None:
            module, name = name.split('.', 1)
        scripts = get_scripts()
        try:
            # Keep the try body limited to the lookup itself so a KeyError
            # raised by a script's own __init__ is not silently converted
            # into a 404 (the original instantiated inside the try).
            script_class = scripts[module][name]
        except KeyError:
            raise Http404
        return script_class()
class ScriptListView(ContentTypePermissionRequiredMixin, View):
    """List all discovered custom scripts, each with its latest finished run."""
    def get_required_permission(self):
        return 'extras.view_script'
    def get(self, request):
        scripts = get_scripts(use_names=True)
        script_content_type = ContentType.objects.get(app_label='extras', model='script')
        # Map completed runs by script name so each script can be annotated.
        results = {
            r.name: r
            for r in JobResult.objects.filter(
                obj_type=script_content_type,
                status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES
            ).defer('data')
        }
        for _scripts in scripts.values():
            for script in _scripts.values():
                script.result = results.get(script.full_name)
        return render(request, 'extras/script_list.html', {
            'scripts': scripts,
        })
class ScriptView(ContentTypePermissionRequiredMixin, GetScriptMixin, View):
    """Display a custom script's input form (GET) and enqueue its execution (POST)."""
    def get_required_permission(self):
        return 'extras.view_script'
    def get(self, request, module, name):
        script = self._get_script(name, module)
        # Pre-populate form fields from query parameters, if any were given.
        form = script.as_form(initial=request.GET)
        # Look for a pending JobResult (use the latest one by creation timestamp)
        script_content_type = ContentType.objects.get(app_label='extras', model='script')
        script.result = JobResult.objects.filter(
            obj_type=script_content_type,
            name=script.full_name,
        ).exclude(
            status__in=JobResultStatusChoices.TERMINAL_STATE_CHOICES
        ).first()
        return render(request, 'extras/script.html', {
            'module': module,
            'script': script,
            'form': form,
        })
    def post(self, request, module, name):
        # Permissions check
        if not request.user.has_perm('extras.run_script'):
            return HttpResponseForbidden()
        script = self._get_script(name, module)
        form = script.as_form(request.POST, request.FILES)
        # Allow execution only if RQ worker process is running
        if not Worker.count(get_connection('default')):
            messages.error(request, "Unable to run script: RQ worker process not running.")
        elif form.is_valid():
            # '_commit' is a form control, not script input; pop it before
            # passing the remaining cleaned data to the script.
            commit = form.cleaned_data.pop('_commit')
            script_content_type = ContentType.objects.get(app_label='extras', model='script')
            job_result = JobResult.enqueue_job(
                run_script,
                script.full_name,
                script_content_type,
                request.user,
                data=form.cleaned_data,
                request=copy_safe_request(request),
                commit=commit
            )
            return redirect('extras:script_result', job_result_pk=job_result.pk)
        # Worker missing or form invalid: re-render with errors/messages.
        return render(request, 'extras/script.html', {
            'module': module,
            'script': script,
            'form': form,
        })
class ScriptResultView(ContentTypePermissionRequiredMixin, GetScriptMixin, View):
    """Display the JobResult of a previously executed custom script."""

    def get_required_permission(self):
        return 'extras.view_script'

    def get(self, request, job_result_pk):
        result = get_object_or_404(JobResult.objects.all(), pk=job_result_pk)

        # Only JobResults produced by scripts may be shown by this view.
        script_content_type = ContentType.objects.get(app_label='extras', model='script')
        if result.obj_type != script_content_type:
            raise Http404

        script = self._get_script(result.name)
        return render(request, 'extras/script_result.html', {
            'script': script,
            'result': result,
            'class_name': type(script).__name__
        })
| 33.22179 | 110 | 0.659932 |
225da33902d662c9cc49dbbc4f0e1566550f3c4b | 24,109 | py | Python | domonic/xml/x3d.py | code-review-doctor/domonic | fee5704ab051d40c7b3fec5488a44d3ab1ee027c | [
"MIT"
] | null | null | null | domonic/xml/x3d.py | code-review-doctor/domonic | fee5704ab051d40c7b3fec5488a44d3ab1ee027c | [
"MIT"
] | null | null | null | domonic/xml/x3d.py | code-review-doctor/domonic | fee5704ab051d40c7b3fec5488a44d3ab1ee027c | [
"MIT"
] | null | null | null | """
domonic.x3d
====================================
Generate x3d with python 3
"""
from domonic.dom import Element
from domonic.html import closed_tag
# Core X3D/X3DOM scene-graph nodes. Each tag is bound under two module-level
# names: the lowercase form (as it appears in X3D markup) and a capitalized
# alias. Note this first group passes the lowercase spelling as the class
# name to type(), unlike the CamelCase tags defined below.
x3d = X3D = type('x3d', (Element,), {'name': 'x3d'})
scene = Scene = type('scene', (Element,), {'name': 'scene'})
material = Material = type('material', (Element,), {'name': 'material'})
appearance = Appearance = type('appearance', (Element,), {'name': 'appearance'})
sphere = Sphere = type('sphere', (Element,), {'name': 'sphere'})
shape = Shape = type('shape', (Element,), {'name': 'shape'})
transform = Transform = type('transform', (Element,), {'name': 'transform'})
timeSensor = TimeSensor = type('timeSensor', (Element,), {'name': 'timeSensor'})
inline = Inline = type('inline', (Element,), {'name': 'inline'})
box = Box = type('box', (Element,), {'name': 'box'})
plane = Plane = type('plane', (Element,), {'name': 'plane'})
# TODO - go through examples to find which tags conventionally appear in
# lowercase (i.e. before mixed case was allowed).


def _make_x3d_node(tag_name):
    """Build an Element subclass representing the X3D tag *tag_name*."""
    return type(tag_name, (Element,), {'name': tag_name})


# All remaining X3D node tags, in their canonical CamelCase spelling and in
# the original definition order.
# NOTE: the original hand-written list defined PositionInterpolator twice
# (a redundant duplicate assignment that rebound the name to a second,
# identical class object); it appears here exactly once.
_X3D_TAGS = (
    'PositionInterpolator', 'Route', 'Anchor', 'Arc2D', 'ArcClose2D',
    'AudioClip', 'Background', 'BallJoint', 'Billboard', 'BinaryGeometry',
    'BlendedVolumeStyle', 'BlendMode', 'Block',
    'BoundaryEnhancementVolumeStyle', 'BufferAccessor', 'BufferGeometry',
    'BufferView', 'CADAssembly', 'CADFace', 'CADLayer', 'CADPart',
    'CartoonVolumeStyle', 'Circle2D', 'ClipPlane', 'CollidableShape',
    'Collision', 'CollisionCollection', 'CollisionSensor', 'Color',
    'ColorChaser', 'ColorDamper', 'ColorInterpolator', 'ColorMaskMode',
    'ColorRGBA', 'CommonSurfaceShader', 'ComposedCubeMapTexture',
    'ComposedShader', 'ComposedTexture3D', 'ComposedVolumeStyle', 'Cone',
    'Coordinate', 'CoordinateDamper', 'CoordinateDouble',
    'CoordinateInterpolator', 'Cylinder', 'CylinderSensor', 'DepthMode',
    'DirectionalLight', 'Dish', 'Disk2D', 'DoubleAxisHingeJoint',
    'DynamicLOD', 'EdgeEnhancementVolumeStyle', 'ElevationGrid',
    'Environment', 'Extrusion', 'Field', 'FloatVertexAttribute', 'Fog',
    'FontStyle', 'GeneratedCubeMapTexture', 'GeoCoordinate',
    'GeoElevationGrid', 'GeoLocation', 'GeoLOD', 'GeoMetadata', 'GeoOrigin',
    'GeoPositionInterpolator', 'GeoTransform', 'GeoViewpoint', 'Group',
    'HAnimDisplacer', 'HAnimHumanoid', 'HAnimJoint', 'HAnimSegment',
    'HAnimSite', 'ImageTexture', 'ImageTexture3D', 'ImageTextureAtlas',
    'IndexedFaceSet', 'IndexedLineSet', 'IndexedQuadSet',
    'IndexedTriangleSet', 'IndexedTriangleStripSet', 'IsoSurfaceVolumeData',
    'LineProperties', 'LineSet', 'LOD', 'MatrixTextureTransform',
    'MatrixTransform', 'Mesh', 'MetadataBoolean', 'MetadataDouble',
    'MetadataFloat', 'MetadataInteger', 'MetadataSet', 'MetadataString',
    'MotorJoint', 'MovieTexture', 'MPRPlane', 'MPRVolumeStyle',
    'MultiTexture', 'MultiTextureCoordinate', 'NavigationInfo', 'Normal',
    'NormalInterpolator', 'Nozzle', 'OpacityMapVolumeStyle',
    'OrientationChaser', 'OrientationDamper', 'OrientationInterpolator',
    'OrthoViewpoint', 'Param', 'ParticleSet', 'PhysicalEnvironmentLight',
    'PhysicalMaterial', 'PixelTexture', 'PixelTexture3D', 'PlaneSensor',
    'PointLight', 'PointSet', 'Polyline2D', 'Polypoint2D', 'PopGeometry',
    'PopGeometryLevel', 'PositionChaser', 'PositionChaser2D',
    'PositionDamper', 'PositionDamper2D', 'PositionInterpolator2D',
    'ProjectionVolumeStyle', 'Pyramid', 'QuadSet', 'RadarVolumeStyle',
    'Rectangle2D', 'RectangularTorus', 'RefinementTexture',
    'RemoteSelectionGroup', 'RenderedTexture', 'RigidBody',
    'RigidBodyCollection', 'ScalarChaser', 'ScalarDamper',
    'ScalarInterpolator', 'SegmentedVolumeData', 'ShadedVolumeStyle',
    'ShaderPart', 'SilhouetteEnhancementVolumeStyle', 'SingleAxisHingeJoint',
    'SliderJoint', 'SlopedCylinder', 'Snout', 'SolidOfRevolution', 'Sound',
    'SphereSegment', 'SphereSensor', 'SplinePositionInterpolator',
    'SpotLight', 'StaticGroup', 'StippleVolumeStyle', 'SurfaceShaderTexture',
    'Switch', 'TexCoordDamper2D', 'Text', 'Texture', 'TextureCoordinate',
    'TextureCoordinate3D',
)

# Bind every tag class under both its CamelCase name (e.g. ``Anchor``) and
# its leading-lowercase alias (e.g. ``anchor``), exactly as the original
# hand-written per-line assignments did.
for _tag in _X3D_TAGS:
    _node = _make_x3d_node(_tag)
    globals()[_tag] = _node
    globals()[_tag[0].lower() + _tag[1:]] = _node
del _tag, _node
textureCoordinateGenerator = TextureCoordinateGenerator = type('TextureCoordinateGenerator', (Element,), {'name': 'TextureCoordinateGenerator'})
textureProperties = TextureProperties = type('TextureProperties', (Element,), {'name': 'TextureProperties'})
textureTransform = TextureTransform = type('TextureTransform', (Element,), {'name': 'TextureTransform'})
textureTransform3D = TextureTransform3D = type('TextureTransform3D', (Element,), {'name': 'TextureTransform3D'})
textureTransformMatrix3D = TextureTransformMatrix3D = type('TextureTransformMatrix3D', (Element,), {'name': 'TextureTransformMatrix3D'})
toneMappedVolumeStyle = ToneMappedVolumeStyle = type('ToneMappedVolumeStyle', (Element,), {'name': 'ToneMappedVolumeStyle'})
torus = Torus = type('Torus', (Element,), {'name': 'Torus'})
touchSensor = TouchSensor = type('TouchSensor', (Element,), {'name': 'TouchSensor'})
triangleSet = TriangleSet = type('TriangleSet', (Element,), {'name': 'TriangleSet'})
triangleSet2D = TriangleSet2D = type('TriangleSet2D', (Element,), {'name': 'TriangleSet2D'})
twoSidedMaterial = TwoSidedMaterial = type('TwoSidedMaterial', (Element,), {'name': 'TwoSidedMaterial'})
uniform = Uniform = type('Uniform', (Element,), {'name': 'Uniform'})
universalJoint = UniversalJoint = type('UniversalJoint', (Element,), {'name': 'UniversalJoint'})
viewfrustum = Viewfrustum = type('Viewfrustum', (Element,), {'name': 'Viewfrustum'})
viewpoint = Viewpoint = type('Viewpoint', (Element,), {'name': 'Viewpoint'})
volumeData = VolumeData = type('VolumeData', (Element,), {'name': 'VolumeData'})
worldInfo = WorldInfo = type('WorldInfo', (Element,), {'name': 'WorldInfo'})
x3DAppearanceChildNode = X3DAppearanceChildNode = type('X3DAppearanceChildNode', (Element,), {'name': 'X3DAppearanceChildNode'})
x3DAppearanceNode = X3DAppearanceNode = type('X3DAppearanceNode', (Element,), {'name': 'X3DAppearanceNode'})
x3DBackgroundNode = X3DBackgroundNode = type('X3DBackgroundNode', (Element,), {'name': 'X3DBackgroundNode'})
x3DBinaryContainerGeometryNode = X3DBinaryContainerGeometryNode = type('X3DBinaryContainerGeometryNode', (Element,), {'name': 'X3DBinaryContainerGeometryNode'})
x3DBindableNode = X3DBindableNode = type('X3DBindableNode', (Element,), {'name': 'X3DBindableNode'})
x3DBoundedObject = X3DBoundedObject = type('X3DBoundedObject', (Element,), {'name': 'X3DBoundedObject'})
x3DChaserNode = X3DChaserNode = type('X3DChaserNode', (Element,), {'name': 'X3DChaserNode'})
x3DChildNode = X3DChildNode = type('X3DChildNode', (Element,), {'name': 'X3DChildNode'})
x3DColorNode = X3DColorNode = type('X3DColorNode', (Element,), {'name': 'X3DColorNode'})
x3DComposableVolumeRenderStyleNode = X3DComposableVolumeRenderStyleNode = type('X3DComposableVolumeRenderStyleNode', (Element,), {'name': 'X3DComposableVolumeRenderStyleNode'})
x3DComposedGeometryNode = X3DComposedGeometryNode = type('X3DComposedGeometryNode', (Element,), {'name': 'X3DComposedGeometryNode'})
x3DCoordinateNode = X3DCoordinateNode = type('X3DCoordinateNode', (Element,), {'name': 'X3DCoordinateNode'})
x3DDamperNode = X3DDamperNode = type('X3DDamperNode', (Element,), {'name': 'X3DDamperNode'})
x3DDragSensorNode = X3DDragSensorNode = type('X3DDragSensorNode', (Element,), {'name': 'X3DDragSensorNode'})
x3DEnvironmentNode = X3DEnvironmentNode = type('X3DEnvironmentNode', (Element,), {'name': 'X3DEnvironmentNode'})
x3DEnvironmentTextureNode = X3DEnvironmentTextureNode = type('X3DEnvironmentTextureNode', (Element,), {'name': 'X3DEnvironmentTextureNode'})
x3DFogNode = X3DFogNode = type('X3DFogNode', (Element,), {'name': 'X3DFogNode'})
x3DFollowerNode = X3DFollowerNode = type('X3DFollowerNode', (Element,), {'name': 'X3DFollowerNode'})
x3DFontStyleNode = X3DFontStyleNode = type('X3DFontStyleNode', (Element,), {'name': 'X3DFontStyleNode'})
x3DGeometricPropertyNode = X3DGeometricPropertyNode = type('X3DGeometricPropertyNode', (Element,), {'name': 'X3DGeometricPropertyNode'})
x3DGeometryNode = X3DGeometryNode = type('X3DGeometryNode', (Element,), {'name': 'X3DGeometryNode'})
x3DGroupingNode = X3DGroupingNode = type('X3DGroupingNode', (Element,), {'name': 'X3DGroupingNode'})
x3DInfoNode = X3DInfoNode = type('X3DInfoNode', (Element,), {'name': 'X3DInfoNode'})
x3DInterpolatorNode = X3DInterpolatorNode = type('X3DInterpolatorNode', (Element,), {'name': 'X3DInterpolatorNode'})
x3DLightNode = X3DLightNode = type('X3DLightNode', (Element,), {'name': 'X3DLightNode'})
x3DLODNode = X3DLODNode = type('X3DLODNode', (Element,), {'name': 'X3DLODNode'})
x3DMaterialNode = X3DMaterialNode = type('X3DMaterialNode', (Element,), {'name': 'X3DMaterialNode'})
x3DMetadataObject = X3DMetadataObject = type('X3DMetadataObject', (Element,), {'name': 'X3DMetadataObject'})
x3DNavigationInfoNode = X3DNavigationInfoNode = type('X3DNavigationInfoNode', (Element,), {'name': 'X3DNavigationInfoNode'})
x3DNBodyCollidableNode = X3DNBodyCollidableNode = type('X3DNBodyCollidableNode', (Element,), {'name': 'X3DNBodyCollidableNode'})
x3DNode = X3DNode = type('X3DNode', (Element,), {'name': 'X3DNode'})
x3DPlanarGeometryNode = X3DPlanarGeometryNode = type('X3DPlanarGeometryNode', (Element,), {'name': 'X3DPlanarGeometryNode'})
x3DPointingDeviceSensorNode = X3DPointingDeviceSensorNode = type('X3DPointingDeviceSensorNode', (Element,), {'name': 'X3DPointingDeviceSensorNode'})
x3DRigidJointNode = X3DRigidJointNode = type('X3DRigidJointNode', (Element,), {'name': 'X3DRigidJointNode'})
x3DSensorNode = X3DSensorNode = type('X3DSensorNode', (Element,), {'name': 'X3DSensorNode'})
x3DShaderNode = X3DShaderNode = type('X3DShaderNode', (Element,), {'name': 'X3DShaderNode'})
x3DShapeNode = X3DShapeNode = type('X3DShapeNode', (Element,), {'name': 'X3DShapeNode'})
x3DSoundNode = X3DSoundNode = type('X3DSoundNode', (Element,), {'name': 'X3DSoundNode'})
x3DSoundSourceNode = X3DSoundSourceNode = type('X3DSoundSourceNode', (Element,), {'name': 'X3DSoundSourceNode'})
x3DSpatialGeometryNode = X3DSpatialGeometryNode = type('X3DSpatialGeometryNode', (Element,), {'name': 'X3DSpatialGeometryNode'})
x3DTexture3DNode = X3DTexture3DNode = type('X3DTexture3DNode', (Element,), {'name': 'X3DTexture3DNode'})
x3DTextureCoordinateNode = X3DTextureCoordinateNode = type('X3DTextureCoordinateNode', (Element,), {'name': 'X3DTextureCoordinateNode'})
x3DTextureNode = X3DTextureNode = type('X3DTextureNode', (Element,), {'name': 'X3DTextureNode'})
x3DTextureTransformNode = X3DTextureTransformNode = type('X3DTextureTransformNode', (Element,), {'name': 'X3DTextureTransformNode'})
x3DTimeDependentNode = X3DTimeDependentNode = type('X3DTimeDependentNode', (Element,), {'name': 'X3DTimeDependentNode'})
x3DTouchSensorNode = X3DTouchSensorNode = type('X3DTouchSensorNode', (Element,), {'name': 'X3DTouchSensorNode'})
x3DTransformNode = X3DTransformNode = type('X3DTransformNode', (Element,), {'name': 'X3DTransformNode'})
x3DVertexAttributeNode = X3DVertexAttributeNode = type('X3DVertexAttributeNode', (Element,), {'name': 'X3DVertexAttributeNode'})
x3DViewpointNode = X3DViewpointNode = type('X3DViewpointNode', (Element,), {'name': 'X3DViewpointNode'})
x3DVolumeDataNode = X3DVolumeDataNode = type('X3DVolumeDataNode', (Element,), {'name': 'X3DVolumeDataNode'})
x3DVolumeRenderStyleNode = X3DVolumeRenderStyleNode = type('X3DVolumeRenderStyleNode', (Element,), {'name': 'X3DVolumeRenderStyleNode'})
| 92.726923 | 176 | 0.729479 |
669d46e027f3f8e88ee81dcd7d00f32d56eefdd2 | 1,280 | py | Python | bundle-workflow/src/test_workflow/perf_test/perf_test_suite.py | aditjind/opensearch-build | aa98ab8be40890b830ab2974cd7ca9030d9b3135 | [
"Apache-2.0"
] | 1 | 2022-01-11T17:47:01.000Z | 2022-01-11T17:47:01.000Z | bundle-workflow/src/test_workflow/perf_test/perf_test_suite.py | aditjind/opensearch-build | aa98ab8be40890b830ab2974cd7ca9030d9b3135 | [
"Apache-2.0"
] | null | null | null | bundle-workflow/src/test_workflow/perf_test/perf_test_suite.py | aditjind/opensearch-build | aa98ab8be40890b830ab2974cd7ca9030d9b3135 | [
"Apache-2.0"
] | null | null | null | import os
import subprocess
from system.working_directory import WorkingDirectory
class PerfTestSuite:
"""
Represents a performance test suite. This class runs rally test on the deployed cluster with the provided IP.
"""
def __init__(self, bundle_manifest, endpoint, security, current_workspace):
self.manifest = bundle_manifest
self.work_dir = 'mensor/'
self.endpoint = endpoint
self.security = security
self.current_workspace = current_workspace
self.command = f'pipenv run python test_config.py -i {self.endpoint} -b {self.manifest.build.id}'\
f' -a {self.manifest.build.architecture} -p {self.current_workspace}'
def execute(self):
try:
with WorkingDirectory(self.work_dir):
dir = os.getcwd()
subprocess.check_call('python3 -m pipenv install', cwd=dir, shell=True)
subprocess.check_call('pipenv install', cwd=dir, shell=True)
if self.security:
subprocess.check_call(f'{self.command} -s', cwd=dir, shell=True)
else:
subprocess.check_call(f'{self.command}', cwd=dir, shell=True)
finally:
os.chdir(self.current_workspace)
| 37.647059 | 113 | 0.628906 |
a153476159f0c7357cda6c1bce02148f29055a9d | 535 | py | Python | tramatego/src/tramatego/transforms/__init__.py | kvsaurav/QRadio | 53299f5bd57b60f76596ed05ba7f1f65b255114d | [
"Apache-2.0"
] | 95 | 2016-03-04T18:34:51.000Z | 2021-08-30T03:43:17.000Z | tramatego/src/tramatego/transforms/__init__.py | netwrkspider/QRadio | 53299f5bd57b60f76596ed05ba7f1f65b255114d | [
"Apache-2.0"
] | null | null | null | tramatego/src/tramatego/transforms/__init__.py | netwrkspider/QRadio | 53299f5bd57b60f76596ed05ba7f1f65b255114d | [
"Apache-2.0"
] | 21 | 2016-03-10T12:19:59.000Z | 2020-05-09T18:54:00.000Z | #!/usr/bin/env python
# Package metadata for the tramatego.transforms package.
__author__ = 'Zappus'
__copyright__ = 'Copyright 2016, Tramatego Project'
__credits__ = []
__license__ = 'GPL'
__version__ = '0.1'
__maintainer__ = 'Zappus'
__email__ = ''
__status__ = 'Development'
# Transform modules re-exported via ``from tramatego.transforms import *``.
__all__ = [
    'ipv4_to_hash',
    'ipv4_to_blacklist',
    'ipv4_to_score',
    'ipv4_to_domain',
    'hash_to_url',
    'hash_to_score',
    'hash_to_ipv4',
    'hash_to_domain',
    'domain_to_url',
    'domain_to_score',
    'domain_to_ipv4',
    'domain_to_hash',
    'domain_to_blacklist',
    'common'
]
20dee652cd31bb0ec3d6cf15e440e04cec763d64 | 384 | py | Python | ovc5/firmware/sandbox/blink/nmigen/sim.py | alexvonduar/ovc | ce4ecebd49208a69b1402763adda0f6b80584eac | [
"Apache-2.0"
] | 174 | 2018-03-27T23:30:27.000Z | 2022-03-28T13:48:22.000Z | ovc5/firmware/sandbox/blink/nmigen/sim.py | memoo55/ovc | 0980904627669691e2012d2fcfc2390d432c27e4 | [
"Apache-2.0"
] | 41 | 2018-03-30T16:25:39.000Z | 2022-03-28T09:00:36.000Z | ovc5/firmware/sandbox/blink/nmigen/sim.py | memoo55/ovc | 0980904627669691e2012d2fcfc2390d432c27e4 | [
"Apache-2.0"
] | 50 | 2018-04-01T01:44:03.000Z | 2022-03-11T05:58:30.000Z | #!/usr/bin/env python3
from blink import *
from nmigen.back.pysim import *
'''
def wait_led(state):
while True:
yield Tick()
if (yield dut.led) == state:
return
'''
def test():
    """Simulation process: idle for 10000 clock cycles (yields once per cycle)."""
    for _ in range(10000):
        yield
# Build the design under test and simulate it with a 1 MHz clock,
# driving the idle process above and dumping waveforms to test.vcd.
dut = Blink()
sim = Simulator(dut)
sim.add_clock(1e-6)
sim.add_sync_process(test)
with sim.write_vcd("test.vcd"):
    sim.run()
| 14.769231 | 36 | 0.604167 |
f132d9a4e44906632aab9691e4ffd0860397ad79 | 3,336 | py | Python | samples/gmd/plot_wavelength_2.py | aluque/CloudScat.jl | 6691533eb8c4bbed683e052409a49d6fc5f96804 | [
"MIT"
] | 8 | 2020-06-30T10:40:35.000Z | 2022-02-27T20:16:45.000Z | samples/gmd/plot_wavelength_2.py | aluque/CloudScat.jl | 6691533eb8c4bbed683e052409a49d6fc5f96804 | [
"MIT"
] | 4 | 2020-07-16T01:37:24.000Z | 2021-08-30T08:56:57.000Z | samples/gmd/plot_wavelength_2.py | aluque/CloudScat.jl | 6691533eb8c4bbed683e052409a49d6fc5f96804 | [
"MIT"
] | null | null | null | import sys
import string
from itertools import product
import scipy.constants as co
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from scipy import stats
import h5py
# Global matplotlib/LaTeX styling applied to every panel.
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'\usepackage[varg]{txfonts}')
plt.rc('axes', titlesize=54)
plt.rc('font', family='serif', size=14)
# Output file for the composed figure.
FOUT = "wavelength.pdf"
def main():
    """Compose the 5-panel wavelength comparison figure and save it to FOUT."""
    plt.figure(figsize=(7, 9))
    plt.subplots_adjust(top=0.98, bottom=0.1, hspace=0.075)
    # Four (cloud-top altitude, droplet radius) combinations plus the
    # extended-cloud case, encoded as h == 0 (see plot_panel/plot_line).
    lst = list(product([10, 12], [10, 20])) + [[0, 20]]
    for i, (h, R) in enumerate(lst):
        ax = plt.subplot(5, 1, i + 1)
        if i == 4:
            # The last panel is shifted down to visually separate it.
            move_down(ax)
        plot_panel(ax, h, R, letter=string.ascii_lowercase[i])
        if i != 4:
            noxticks(ax)
        else:
            ax.set_xlabel("pixel")
        if i == 0:
            ax.legend(["777 nm", "337 nm"])
        ax.set_ylabel("brightness (a.u.)")
        ax.set_xlim([472, 537])
        # Reference line at the source pixel column.
        ax.axvline(512, color='k', lw=0.75)
        ax.grid()
    plt.savefig(FOUT)
def plot_panel(ax, h, R, letter):
    """Draw one figure panel: both wavelength profiles plus two inset maps."""
    # Line profiles for both emission lines (777 nm red, 337 nm blue).
    for wavelength, line_color in ((777, '#ff7777'), (337, '#7799bb')):
        plot_line(ax, h, R, wavelength, color=line_color)
    altitude_label = f"{h} km" if h > 0 else "10-12 km"
    title = f"\\Large{{{{\\bf {letter}.}} {altitude_label}, {R} µm}}"
    ax.text(0.02, 0.85, title, transform=plt.gca().transAxes)
    # Small inset images of the source region, one per wavelength.
    for inset_left, wavelength in ((0.025, 777), (0.18, 337)):
        inset_ax = ax.inset_axes([inset_left, 0.1, 0.15, 0.6])
        plot_map(inset_ax, h, R, wavelength)
def plot_line(ax, h, R, lmbd, **kwargs):
    """Plot the normalized horizontal mid-row brightness profile for one run.

    :param h: cloud-top altitude in km; 0 selects the extended-cloud file
    :param R: droplet radius in µm
    :param lmbd: wavelength in nm (selects the input file)
    """
    if h != 0:
        fname = f"wavelength_2_{lmbd}nm_{h}km_{R}um.h5"
    else:
        fname = f"wavelength_2_extended_{lmbd}nm_{R}um.h5"
    obs = 1
    # Context manager ensures the HDF5 file is closed (the original leaked it).
    with h5py.File(fname, "r") as fp:
        # Note that the image is transposed wrt the julia array.
        img = np.array(fp[f"obs{obs:05d}/image"])
    width, height = img.shape
    x = np.arange(width)
    v = img[:, height // 2]
    ax.plot(x, v / np.amax(v), **kwargs)
def plot_map(ax, h, R, lmbd):
    """Render a 40x40 pixel inset map of the source region for one run.

    Same file-selection convention as plot_line (h == 0 -> extended cloud).
    """
    if h != 0:
        fname = f"wavelength_2_{lmbd}nm_{h}km_{R}um.h5"
    else:
        fname = f"wavelength_2_extended_{lmbd}nm_{R}um.h5"
    obs = 1
    # Context manager ensures the HDF5 file is closed (the original leaked it).
    with h5py.File(fname, "r") as fp:
        # Note that the image is transposed wrt the julia array.
        img = np.array(fp[f"obs{obs:05d}/image"])
    ax.pcolormesh(img[492:532, 492:532], cmap="gnuplot2", rasterized=True)
    noxticks(ax)
    noyticks(ax)
    ax.tick_params('both', length=2, width=0.5, which='major')
    # Horizontal marker at the source row (512) within the 492-based crop.
    ax.axhline(512 - 492, lw=0.75, c="#777777")
    ax.text(0.03, 0.05, f"{lmbd} nm", color="w",
            transform=ax.transAxes)
def move_down(ax):
    """Shift *ax* down by 0.04 figure units (separates the detached last panel)."""
    left, bottom, width, height = ax.get_position().bounds
    ax.set_position([left, bottom - 0.04, width, height])
def noxticks(ax):
    """Blank out all x tick labels of *ax* without changing tick positions."""
    ax.set_xticklabels([''] * len(ax.get_xticks()))
def noyticks(ax):
    """Blank out all y tick labels of *ax* without changing tick positions."""
    ax.set_yticklabels([''] * len(ax.get_yticks()))
if __name__ == '__main__':
    # Script entry point: build and save the figure.
    main()
| 26.267717 | 74 | 0.565647 |
4874c213b50026d6b6b8c21b105c26ad383d66b7 | 935 | py | Python | src/ggrc/migrations/versions/20190429_9d89d2061961_add_review_status_for_risk.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-01-12T23:46:00.000Z | 2019-01-12T23:46:00.000Z | src/ggrc/migrations/versions/20190429_9d89d2061961_add_review_status_for_risk.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc/migrations/versions/20190429_9d89d2061961_add_review_status_for_risk.py | MikalaiMikalalai/ggrc-core | f0f83b3638574bb64de474f3b70ed27436ca812a | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add review status for Risk
Create Date: 2019-04-29 10:47:20.217525
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '9d89d2061961'
down_revision = '5de274e87318'
def upgrade():
    """Upgrade database schema and/or data, creating a new revision."""
    # Both review columns share the same definition; add them in one pass,
    # preserving the original order (review_status first).
    for column_name in ('review_status', 'review_status_display_name'):
        op.add_column(
            'risks',
            sa.Column(column_name, sa.String(length=250), nullable=True),
        )
def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
    # Reverting this migration is intentionally unsupported.
    message = "Downgrade is not supported"
    raise NotImplementedError(message)
| 24.605263 | 79 | 0.71123 |
68a79a1a6534946659a46c0e643ca224fd2af0e1 | 769 | py | Python | catalog/bindings/gmd/point_array_property_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/point_array_property_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/point_array_property_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass, field
from typing import List
from bindings.gmd.point import Point
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class PointArrayPropertyType:
    """gml:PointArrayPropertyType is a container for an array of points.

    The elements are always contained inline in the array property;
    referencing geometry elements or arrays of geometry elements via
    XLinks is not supported.
    """

    # Inline gml:Point members of the array (zero or more).
    point: List[Point] = field(
        default_factory=list,
        metadata={
            "name": "Point",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
    # XML 'owns' attribute; False when absent from the document.
    owns: bool = field(
        default=False,
        metadata={
            "type": "Attribute",
        },
    )
6ad0fe246df623ee396d8f7f402dce90fe31c59f | 10,799 | py | Python | tests/test_02_dxf_graphics/test_236_mleader.py | mozman/ezdxf | a6442b808baf6504f27034bde5a7a74d1e605003 | [
"MIT"
] | 515 | 2017-01-25T05:46:52.000Z | 2022-03-29T09:52:27.000Z | tests/test_02_dxf_graphics/test_236_mleader.py | triroakenshield/ezdxf | 82e964a574bcb86febc677bd63f1626318f51caf | [
"MIT"
] | 417 | 2017-01-25T10:01:17.000Z | 2022-03-29T09:22:04.000Z | tests/test_02_dxf_graphics/test_236_mleader.py | triroakenshield/ezdxf | 82e964a574bcb86febc677bd63f1626318f51caf | [
"MIT"
] | 149 | 2017-02-01T15:52:02.000Z | 2022-03-17T10:33:38.000Z | # Copyright (c) 2018-2020 Manfred Moitzi
# License: MIT License
import pytest
import ezdxf
from ezdxf.layouts import VirtualLayout
from ezdxf import colors
from ezdxf.lldxf import const
from ezdxf.lldxf.tags import Tags
from ezdxf.lldxf.extendedtags import ExtendedTags
from ezdxf.math import Matrix44
# noinspection PyProtectedMember
from ezdxf.entities.mleader import (
LeaderLine,
Leader,
compile_context_tags,
MultiLeaderContext,
MultiLeader,
BlockData,
)
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
@pytest.fixture
def msp():
    # Fresh virtual modelspace per test; avoids the cost of a full document.
    return VirtualLayout()
# todo: real MLEADER tests
def test_generic_mleader(msp):
    """A generic MLEADER entity is created with no style handle assigned."""
    entity = msp.new_entity("MLEADER", {})
    assert entity.dxftype() == "MLEADER"
    assert entity.dxf.style_handle is None
def test_synonym_multileader(msp):
    """MULTILEADER is accepted as a synonym entity type for MLEADER."""
    entity = msp.new_entity("MULTILEADER", {})
    assert entity.dxftype() == "MULTILEADER"
    assert entity.dxf.style_handle is None
# todo: real MLEADERSTYLE tests
def test_standard_mleader_style():
    """A new R2007 document provides the 'Standard' MLEADERSTYLE table entry."""
    document = ezdxf.new("R2007")
    style = document.mleader_styles.get("Standard")
    assert style.dxftype() == "MLEADERSTYLE"
    assert style.dxf.content_type == 2
class TestLeaderLine:
    """Load/export round-trip tests for the LEADER_LINE{...} sub-structure."""

    @pytest.fixture(scope="class")
    def tags(self):
        # Raw tag stream of a single leader line (fixture LEADER_LINE_1 below).
        return Tags.from_text(LEADER_LINE_1)

    def test_parse(self, tags):
        line = LeaderLine.load(tags)
        assert len(line.vertices) == 1
        assert len(line.breaks) == 3
        assert line.index == 0
        assert line.color == colors.BY_BLOCK_RAW_VALUE

    def test_export_dxf(self, tags):
        # Export must reproduce the source tags exactly (lossless round trip).
        expected = basic_tags_from_text(LEADER_LINE_1)
        line = LeaderLine.load(tags)
        collector = TagCollector()
        line.export_dxf(collector)
        assert collector.tags == expected
LEADER_LINE_1 = """304
LEADER_LINE{
10
181.0
20
176.0
30
0.0
90
0
11
204.0
21
159.0
31
0.0
12
206.0
22
158.0
32
0.0
91
0
92
-1056964608
305
}
"""
class TestLeader:
    """Load/export round-trip tests for the LEADER{...} sub-structure."""

    @pytest.fixture(scope="class")
    def tags(self):
        # Raw tag stream of one leader with a single line (fixture LEADER_1).
        return Tags.from_text(LEADER_1)

    def test_parse(self, tags):
        # 303 is the group code terminating the LEADER context.
        ctx = compile_context_tags(tags, 303)
        leader = Leader.load(ctx)
        assert len(leader.lines) == 1
        assert leader.has_last_leader_line == 1
        assert leader.has_dogleg_vector == 1
        assert leader.last_leader_point == (213.9, 199.1, 0)
        assert leader.dogleg_vector == (1, 0, 0)
        assert len(leader.breaks) == 2
        assert leader.dogleg_length == 8.0
        assert leader.index == 0

    def test_export_dxf(self, tags):
        # Export must reproduce the source tags exactly (lossless round trip).
        expected = basic_tags_from_text(LEADER_1)
        ctx = compile_context_tags(tags, 303)
        leader = Leader.load(ctx)
        collector = TagCollector()
        leader.export_dxf(collector)
        assert collector.tags == expected
LEADER_1 = """302
LEADER{
290
1
291
1
10
213.9
20
199.1
30
0.0
11
1.0
21
0.0
31
0.0
12
215.2
22
199.1
32
0.0
13
219.0
23
199.1
33
0.0
90
0
40
8.0
304
LEADER_LINE{
10
195.8
20
176.1
30
0.0
91
0
92
-1056964608
305
}
271
0
303
}
"""
class MLeaderTesting:
    """Shared fixtures/tests for MULTILEADER context data.

    Subclasses provide a ``text`` fixture returning the raw DXF text.
    """

    @pytest.fixture(scope="class")
    def tags(self, text):
        # Context data tags extracted from the full entity tag stream.
        tags = Tags.from_text(text)
        return MultiLeader.extract_context_data(tags)

    @pytest.fixture(scope="class")
    def ctx(self, tags):
        # 301 is the group code terminating the CONTEXT_DATA section.
        return MultiLeaderContext.load(compile_context_tags(tags, 301))

    @pytest.fixture(scope="class")
    def mleader(self, text):
        return MultiLeader.load(ExtendedTags.from_text(text))

    def test_context_attribs_definition(self, ctx):
        # Every mapped DXF attribute must exist on the context object.
        for name in ctx.ATTRIBS.values():
            assert hasattr(ctx, name) is True

    def test_mleader_export_dxf(self, text, mleader):
        # Full-entity export must reproduce the source tags exactly.
        expected = basic_tags_from_text(text)
        collector = TagCollector(dxfversion=const.DXF2010)
        mleader.export_dxf(collector)
        assert collector.tags == expected
class TestMTextContext(MLeaderTesting):
    """Context-data tests for a MULTILEADER with MTEXT content."""

    @pytest.fixture(scope="class")
    def text(self):
        return MTEXT_MLEADER_R2010

    def test_mtext_data_attribs_definition(self, ctx):
        # Every mapped DXF attribute must exist on the MTEXT data object.
        mtext = ctx.mtext
        for name in mtext.ATTRIBS.values():
            assert hasattr(mtext, name) is True

    def test_load_mtext_context(self, ctx):
        # Leader() class is tested in TestLeader():
        assert len(ctx.leaders) == 2
        assert ctx.scale == 1
        assert ctx.base_point == (187.4, 185, 0)
        assert ctx.text_height == 5
        assert ctx.arrowhead_size == 3
        assert ctx.landing_gap_size == 2.5
        assert ctx.left_attachment == 1
        assert ctx.right_attachment == 1
        assert ctx.attachment_type == 0
        assert ctx.mtext is not None  # see test_mtext_data()
        assert ctx.block is None
        assert ctx.plane_origin == (1, 2, 3)
        assert ctx.plane_x_axis == (0, 1, 0)
        assert ctx.plane_y_axis == (1, 0, 0)
        assert ctx.plane_normal_reversed == 1
        assert ctx.top_attachment == 8
        assert ctx.bottom_attachment == 8

    def test_mtext_data(self, ctx):
        mtext = ctx.mtext
        assert mtext.default_content == "MTEXT-DATA-CONTENT"
        assert mtext.normal_direction == (1, 0, 0)
        assert mtext.style_handle == "FEFE"  # handle of TextStyle() table entry
        assert mtext.location == (236.6, 187.0, 0)
        assert mtext.direction == (0, 1, 0)
        assert mtext.rotation == 0.2  # in radians!
        assert mtext.boundary_width == 104.6
        assert mtext.line_space_factor == 1.5
        assert mtext.line_space_style == 1
        assert mtext.color == colors.BY_BLOCK_RAW_VALUE
        assert mtext.alignment == 3
        assert mtext.flow_direction == 1
        assert mtext.bg_color == -939524096  # use window background color?
        assert mtext.bg_scale_factor == 2
        assert mtext.bg_transparency == 0
        assert mtext.has_bg_color == 0
        assert mtext.has_bg_fill == 0
        assert mtext.column_type == 0
        assert mtext.use_auto_height == 0
        assert mtext.column_width == 0.0
        assert mtext.column_gutter_width == 0.0
        assert mtext.column_flow_reversed == 0
        assert len(mtext.column_sizes) == 0
        assert mtext.use_word_break == 0
MTEXT_MLEADER_R2010 = """0
MULTILEADER
5
98
330
1F
100
AcDbEntity
8
0
100
AcDbMLeader
270
2
300
CONTEXT_DATA{
40
1.0
10
187.4
20
185.0
30
0.0
41
5.0
140
3.0
145
2.5
174
1
175
1
176
2
177
0
290
1
304
MTEXT-DATA-CONTENT
11
1.0
21
0.0
31
0.0
340
FEFE
12
236.6
22
187.0
32
0.0
13
0.0
23
1.0
33
0.0
42
0.2
43
104.6
44
0.0
45
1.5
170
1
90
-1056964608
171
3
172
1
91
-939524096
141
2.0
92
0
291
0
292
0
173
0
293
0
142
0.0
143
0.0
294
0
295
0
296
0
110
1.0
120
2.0
130
3.0
111
0.0
121
1.0
131
0.0
112
1.0
122
0.0
132
0.0
297
1
302
LEADER{
290
1
291
1
10
246.6
20
185.0
30
0.0
11
-1.0
21
0.0
31
0.0
90
0
40
8.0
304
LEADER_LINE{
10
287.3
20
220.5
30
0.0
91
0
92
-1056964608
305
}
271
0
303
}
302
LEADER{
290
1
291
1
10
179.4
20
185.0
30
0.0
11
1.0
21
0.0
31
0.0
90
1
40
8.0
304
LEADER_LINE{
10
146.5
20
149.0
30
0.0
91
1
92
-1056964608
305
}
271
0
303
}
272
8
273
8
301
}
340
6D
90
330752
170
1
91
-1056964608
341
14
171
-2
290
1
291
1
41
8.0
42
4.0
172
2
343
11
173
1
95
1
174
1
175
0
92
-1056964608
292
0
93
-1056964608
10
1.0
20
1.0
30
1.0
43
0.0
176
0
293
0
294
0
178
0
179
1
45
1.0
271
0
272
9
273
9
"""
class TestBlockContext(MLeaderTesting):
    """Context-data tests for a MULTILEADER with BLOCK content."""

    @pytest.fixture(scope="class")
    def text(self):
        return BLOCK_MLEADER_R2010

    def test_block_data_attribs_definition(self, ctx):
        # Every mapped DXF attribute must exist on the block data object.
        block = ctx.block
        for name in block.ATTRIBS.values():
            assert hasattr(block, name) is True

    def test_load_block_context(self, ctx):
        # Leader() class is tested in TestLeader():
        assert len(ctx.leaders) == 1
        assert ctx.scale == 1
        assert ctx.base_point == (8.42, 0.70, 0)
        assert ctx.text_height == 5
        assert ctx.arrowhead_size == 3
        assert ctx.landing_gap_size == 2.5
        assert ctx.left_attachment == 1
        assert ctx.right_attachment == 1
        assert ctx.attachment_type == 0
        assert ctx.mtext is None
        assert ctx.block is not None  # see test_block_data()
        assert ctx.plane_origin == (1, 2, 3)
        assert ctx.plane_x_axis == (0, 1, 0)
        assert ctx.plane_y_axis == (1, 0, 0)
        assert ctx.plane_normal_reversed == 1
        assert ctx.top_attachment == 8
        assert ctx.bottom_attachment == 8

    def test_block_data(self, ctx):
        block = ctx.block
        assert block.block_record_handle == "FEFE"
        assert block.normal_direction == (0, 0, 1)
        assert block.location == (18.42, 0.70, 0)
        assert block.scale == (1.0, 2.0, 3.0)
        assert block.rotation == 0.2
        assert block.color == colors.BY_BLOCK_RAW_VALUE

    def test_get_transformation_matrix(self, ctx):
        # The transformation matrix is stored in transposed order
        # of ezdxf.math.Matrix44()!
        assert ctx.block._matrix == [
            1, 0, 0, 18.42,
            0, 1, 0, 0.70,
            0, 0, 1, 0,
            0, 0, 0, 1,
        ]
        assert ctx.block.matrix44.get_row(3) == (18.42, 0.70, 0, 1)

    def test_set_transformation_matrix(self):
        m = Matrix44()
        m.set_row(3, (4, 3, 2, 1))
        block = BlockData()
        block.matrix44 = m
        # The transformation matrix is stored in transposed order
        # of ezdxf.math.Matrix44()!
        assert block._matrix == [
            1, 0, 0, 4,
            0, 1, 0, 3,
            0, 0, 1, 2,
            0, 0, 0, 1,
        ]
BLOCK_MLEADER_R2010 = """ 0
MULTILEADER
5
B5
330
1F
100
AcDbEntity
8
0
100
AcDbMLeader
270
2
300
CONTEXT_DATA{
40
1.0
10
8.42
20
0.70
30
0.0
41
5.0
140
3.0
145
2.5
174
1
175
1
176
0
177
0
290
0
296
1
341
FEFE
14
0.0
24
0.0
34
1.0
15
18.42
25
0.70
35
0.0
16
1.0
26
2.0
36
3.0
46
0.2
93
-1056964608
47
1.0
47
0.0
47
0.0
47
18.42
47
0.0
47
1.0
47
0.0
47
0.70
47
0.0
47
0.0
47
1.0
47
0.0
47
0.0
47
0.0
47
0.0
47
1.0
110
1.0
120
2.0
130
3.0
111
0.0
121
1.0
131
0.0
112
1.0
122
0.0
132
0.0
297
1
302
LEADER{
290
1
291
1
10
9.42
20
0.70
30
0.0
11
1.0
21
0.0
31
0.0
90
0
40
8.0
304
LEADER_LINE{
10
1.15
20
-10.40
30
0.0
91
0
92
-1056964608
305
}
271
0
303
}
272
8
273
8
301
}
340
6D
90
6816768
170
1
91
-1056964608
341
14
171
-2
290
1
291
1
41
8.0
42
4.0
172
1
343
11
173
1
95
1
174
1
175
0
92
-1056964608
292
0
344
94
93
-1056964608
10
1.0
20
1.0
30
1.0
43
0.0
176
0
293
0
330
A3
177
1
44
0.0
302
B
294
0
178
0
179
1
45
1.0
271
0
272
9
273
9
"""
| 12.441244 | 80 | 0.625799 |
188a8c05b6eed708a1a001fcbbf2316a2c47b70c | 1,842 | py | Python | version.py | orange-kao/rpm-s3-mirror | 4a08cdb47de33045c5e5bc8be1c5ee17bc169d56 | [
"Apache-2.0"
] | null | null | null | version.py | orange-kao/rpm-s3-mirror | 4a08cdb47de33045c5e5bc8be1c5ee17bc169d56 | [
"Apache-2.0"
] | null | null | null | version.py | orange-kao/rpm-s3-mirror | 4a08cdb47de33045c5e5bc8be1c5ee17bc169d56 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 Aiven, Helsinki, Finland. https://aiven.io/
import imp
import importlib.util
import os
import subprocess
def save_version(new_ver, old_ver, version_file):
    """Persist *new_ver* into *version_file* (relative to this module).

    Nothing is written when the stored version already equals *new_ver*.
    Returns True when *new_ver* is usable, False when it is empty/None.
    """
    if not new_ver:
        return False
    target = os.path.join(os.path.dirname(__file__), version_file)
    version_changed = not old_ver or new_ver != old_ver
    if version_changed:
        with open(target, "w") as fp:
            fp.write("__version__ = '{}'\n".format(new_ver))
    return True
def get_project_version(version_file):
    """Return the project version, refreshing *version_file* as a side effect.

    Resolution order:
      1. ``git describe`` (preferred; persisted back into *version_file*)
      2. ``short_ver`` parsed from the Makefile (persisted back as well)
      3. the version already stored in *version_file*
    Raises Exception when no source yields a version.
    """
    version_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), version_file)
    try:
        # importlib replaces the deprecated 'imp' module (removed in Python 3.12).
        spec = importlib.util.spec_from_file_location("verfile", version_file)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        file_ver = module.__version__
    except IOError:
        # Missing version file (FileNotFoundError is an IOError/OSError alias).
        file_ver = None

    os.chdir(os.path.dirname(__file__) or ".")
    try:
        git_out = subprocess.check_output(["git", "describe", "--always"],
                                          stderr=getattr(subprocess, "DEVNULL", None))
    except (OSError, subprocess.CalledProcessError):
        pass
    else:
        git_ver = git_out.splitlines()[0].strip().decode("utf-8")
        if "." not in git_ver:
            # Bare commit hash (no tags yet): synthesize a describe-like string.
            git_ver = "0.0.1-0-unknown-{}".format(git_ver)
        if save_version(git_ver, file_ver, version_file):
            return git_ver

    makefile = os.path.join(os.path.dirname(__file__), "Makefile")
    if os.path.exists(makefile):
        with open(makefile, "r") as fp:
            lines = fp.readlines()
        short_ver = [line.split("=", 1)[1].strip() for line in lines if line.startswith("short_ver")][0]
        if save_version(short_ver, file_ver, version_file):
            return short_ver
    if not file_ver:
        raise Exception("version not available from git or from file {!r}".format(version_file))
    return file_ver
if __name__ == "__main__":
    # CLI usage: python version.py <version_file>
    import sys
    get_project_version(sys.argv[1])
| 32.892857 | 104 | 0.634093 |
96779d58b7cf3776c06333a82dfeeea454d7aa16 | 1,120 | py | Python | pyvisdk/do/cannot_move_vm_with_native_delta_disk.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/cannot_move_vm_with_native_delta_disk.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/do/cannot_move_vm_with_native_delta_disk.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def CannotMoveVmWithNativeDeltaDisk(vim, *args, **kwargs):
    '''This fault is thrown when an attempt is made to migrate a virtual machine with
    native delta disks to different datastores.'''

    obj = vim.client.factory.create('{urn:vim25}CannotMoveVmWithNativeDeltaDisk')

    # Validate that all 4 required properties were supplied (positionally or
    # by keyword).  Bug fix: the message previously said "at least 5" while
    # the check is `< 4`, and it reported only len(args), ignoring kwargs.
    if (len(args) + len(kwargs)) < 4:
        raise IndexError('Expected at least 4 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'dynamicProperty', 'dynamicType', 'faultCause', 'faultMessage' ]
    optional = [ ]

    # Positional args fill the required/optional property slots in order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword args must name a known property; anything else is rejected.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| 32.941176 | 124 | 0.625 |
e828f4adfe1c7454d82ebcf22f171a73d4535a36 | 5,576 | py | Python | ialgebra/operations/operator.py | huashen218/pytorch-ialgebra | f498fb2c91c5a48204c66ad5e6dc118cbec69641 | [
"MIT"
] | 2 | 2021-02-01T20:07:13.000Z | 2021-02-10T17:15:45.000Z | build/lib/ialgebra/operations/operator.py | huashen218/pytorch-ialgebra | f498fb2c91c5a48204c66ad5e6dc118cbec69641 | [
"MIT"
] | null | null | null | build/lib/ialgebra/operations/operator.py | huashen218/pytorch-ialgebra | f498fb2c91c5a48204c66ad5e6dc118cbec69641 | [
"MIT"
] | null | null | null | from ialgebra.utils.utils_interpreter import *
from ialgebra.utils.utils_data import to_numpy, to_tensor
# Maps the public identity name (as passed to Operator) to the corresponding
# interpreter class name inside the `ialgebra.interpreters` package.
name2identity = {
    'grad_cam': 'GradCam',
    'grad_saliency': 'GradSaliency',
    'guided_backprop_grad': 'GuidedBackpropGrad',
    'guided_backprop_smoothgrad': 'GuidedBackpropSmoothGrad',
    'mask': 'Mask',
    'smoothgrad': 'SmoothGrad'
}
class Operator(object):
    """
    Applies i-Algebra operators (projection, selection, join, anti-join) to
    saliency maps produced by a configured interpreter identity.

    Inputs to the operators:
        bx / bx_list: input image tensor(s), shape [B*C*H*W]
        by / by_list: target label tensor(s)
        model / model_list: model(s) to interpret

    Returns (per operator):
        heatmap(s): saliency map, type numpy.ndarray
        heatmap+img overlay(s): saliency map blended with the input image
    """

    def __init__(self, identity_name=None, dataset=None, target_layer=None, device=None):
        self.identity = identity_name
        self.dataset = dataset
        self.target_layer = target_layer
        # Bug fix: `device` was accepted but never stored, while selection()
        # referenced an undefined global `device` (NameError at runtime).
        self.device = device
        # Resolve the interpreter class for the requested identity name.
        self._identity_class = getattr(getattr(__import__("ialgebra"), "interpreters"), name2identity[self.identity])

    def projection(self, bx, by, model):
        """Interpret (bx, by) on `model`; return (heatmap, heatmap+img)."""
        identity_interpreter = self._identity_class(model, self.dataset)
        heatmap, heatmapimg = identity_interpreter(bx, by)
        return heatmap, heatmapimg

    def selection(self, bx, by, model, region):
        """
        Interpret only a user-selected region of the input.

        region: (row_start, row_end, col_start, col_end); pixels outside the
        region are zeroed before interpretation.
        Assumes bx is a single [C*H*W] image — TODO confirm with callers.
        """
        identity_interpreter = self._identity_class(model, self.dataset)
        pos0, pos1, pos2, pos3 = region[0], region[1], region[2], region[3]
        img = bx
        mat = torch.zeros(img.shape)
        roi = img[:, int(pos0):int(pos1), int(pos2):int(pos3)]
        mat[:, int(pos0):int(pos1), int(pos2):int(pos3)] = roi
        # Bug fix: previously `mat.to(device)` with an undefined global.
        if self.device is not None:
            mat = mat.to(self.device)
        heatmap, heatmapimg = identity_interpreter(mat, by)
        return heatmap, heatmapimg

    def join(self, bx_list, by_list, model):
        """
        Join: average the saliency maps of two same-class inputs to surface
        their most informative common features.

        Returns (common heatmap, overlay on x1, overlay on x2).
        """
        bx1, by1, bx2, by2 = bx_list[0], by_list[0], bx_list[1], by_list[1]
        # Normalize to batched shapes: bx -> (1,C,H,W), by -> (1,)
        [bx1, bx2] = [b.unsqueeze(0) if len(b.size()) == 3 else b for b in (bx1, bx2)]
        [by1, by2] = [b.unsqueeze(0) if len(b.size()) == 0 else b for b in (by1, by2)]
        identity_interpreter = self._identity_class(model, self.dataset)
        heatmap1, heatmapimg1 = identity_interpreter(bx1, by1)
        heatmap2, heatmapimg2 = identity_interpreter(bx2, by2)
        heatmap = 0.5 * (heatmap1 + heatmap2)
        # Re-blend the common heatmap onto each input and renormalize.
        heatmapimg1 = heatmap + np.float32(to_numpy(bx1))
        heatmapimg1 = (heatmapimg1 / np.max(heatmapimg1)).squeeze(0)
        heatmapimg2 = heatmap + np.float32(to_numpy(bx2))
        heatmapimg2 = (heatmapimg2 / np.max(heatmapimg2)).squeeze(0)
        return heatmap, heatmapimg1, heatmapimg2

    def antijoin(self, bx_list, by_list, model_list, model_diff=False):
        """
        Anti-join: contrast two classes to surface the most discriminative
        features.

        model_diff=True : one input interpreted under two different models.
        model_diff=False: two inputs interpreted under one model pair entry.
        Returns (heatmap1, overlay1, heatmap2, overlay2).
        """
        bx1, by1, bx2, by2 = bx_list[0], by_list[0], bx_list[1], by_list[1]
        # Normalize to batched shapes: bx -> (1,C,H,W), by -> (1,)
        [bx1, bx2] = [b.unsqueeze(0) if len(b.size()) == 3 else b for b in (bx1, bx2)]
        [by1, by2] = [b.unsqueeze(0) if len(b.size()) == 0 else b for b in (by1, by2)]
        model1, model2 = model_list[0], model_list[1]
        identity_interpreter1 = self._identity_class(model1, self.dataset)
        identity_interpreter2 = self._identity_class(model2, self.dataset)
        if model_diff:
            # Case 1: one input, both models, both class targets.
            heatmap1_1, heatmapimg1_1 = identity_interpreter1(bx1, by1)  # interpreter1_cls1
            heatmap1_2, heatmapimg1_2 = identity_interpreter2(bx1, by1)  # interpreter2_cls1
            heatmap2_1, heatmapimg2_1 = identity_interpreter1(bx1, by2)  # interpreter1_cls2
            heatmap2_2, heatmapimg2_2 = identity_interpreter2(bx1, by2)  # interpreter2_cls2
        else:
            # Case 2: two inputs, each interpreted under both class targets.
            heatmap1_1, heatmapimg1_1 = identity_interpreter1(bx1, by1)  # interpreter1_cls1_input1
            heatmap1_2, heatmapimg1_2 = identity_interpreter2(bx1, by2)  # interpreter2_cls2_input1
            heatmap2_1, heatmapimg2_1 = identity_interpreter1(bx2, by1)  # interpreter1_cls1_input2
            heatmap2_2, heatmapimg2_2 = identity_interpreter2(bx2, by2)  # interpreter2_cls2_input2
        heatmap1 = 0.5 * (heatmap1_1 + heatmap2_1)
        heatmapimg1 = heatmap1 + np.float32(to_numpy(bx1))
        heatmapimg1 = (heatmapimg1 / np.max(heatmapimg1)).squeeze(0)
        heatmap2 = 0.5 * (heatmap1_2 + heatmap2_2)
        heatmapimg2 = heatmap2 + np.float32(to_numpy(bx2))
        heatmapimg2 = (heatmapimg2 / np.max(heatmapimg2)).squeeze(0)
        return heatmap1, heatmapimg1, heatmap2, heatmapimg2
| 37.422819 | 160 | 0.638271 |
8fd938d0d02e157dbcef0132d057eec11e96d6df | 3,928 | py | Python | grpc/__main__.py | kaustubhhiware/swift | 7089c4e05fd831b864de46ee37b681a6249909e3 | [
"MIT"
] | null | null | null | grpc/__main__.py | kaustubhhiware/swift | 7089c4e05fd831b864de46ee37b681a6249909e3 | [
"MIT"
] | null | null | null | grpc/__main__.py | kaustubhhiware/swift | 7089c4e05fd831b864de46ee37b681a6249909e3 | [
"MIT"
] | null | null | null | """
Each node starts with broadcasting on a port.
Every node responds back with the requesting IP whose download is going on, if any
"""
import argparse
import asyncio
import pickle
import socket
import sys
import threading
import constants
import utils
from node import Node
from message import Message
# shift to constants later
NODE_SEND_PORT = 8192
NODE_RECV_PORT = 8193
TIMEOUT = 5
BUFFER_SIZE = 1048576
DISCOVERY_PORT = 4444
# async send_to_peer(msg, )
# shift function to utils later - maybe
def broadcast_to_peers(msg_type, content, peers):
    '''
    Broadcast a message of *msg_type* to every peer in *peers*.

    For 'ID_REQUEST' messages, returns a dict mapping each peer to the id it
    reported; other message types are not handled yet and return None.
    '''
    if msg_type != 'ID_REQUEST':
        return None
    # TODO: replace the placeholder id with a real network round-trip
    # (e.g. an async send_to_peer per peer, possibly one thread each).
    return {peer: 2 for peer in peers}
def get_id_from_neighbors(self_ip, neighbors):
    '''
    Broadcast own IP to all neighbors, asking each for its id.

    Returns the dict of {neighbor IP: reported id} responses.
    '''
    # Bug fixes: `content={ip: self_ip}` referenced an undefined name `ip`
    # (now a string key), and the function returned the undefined name
    # `response_dict` instead of `responses` (NameError).
    responses = broadcast_to_peers(msg_type='ID_REQUEST', content={'ip': self_ip}, peers=neighbors)
    return responses
def discover_nodes(discovery_ip):
    '''
    Connect to the discovery server at *discovery_ip* and return the list of
    neighbor IPs it reports.
    '''
    # SECURITY NOTE(review): pickle.loads on data received over the network
    # can execute arbitrary code; this is only safe if the discovery server
    # is fully trusted.  Consider JSON instead.
    # NOTE(review): the socket is never closed; consider a `with`/close().
    s = socket.socket()
    s.connect((discovery_ip, DISCOVERY_PORT))
    iplist = pickle.loads(s.recv(BUFFER_SIZE))
    return iplist
if __name__ == '__main__':
    # Parse the required discovery-server address from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('-d','--discovery', action="store", dest="discovery_ip", type=str, help="Specify discovery server IP")
    args = parser.parse_args()
    utils.print_prompt()
    if args.discovery_ip:
        discovery_ip = args.discovery_ip
    else:
        print('Need to provide discovery server IP !')
        exit(0)
    self_ip = utils.getNetworkIp()
    utils.print_log('Starting connection from ' + self_ip)
    # First contact discovery server tro obtain list of neighbours connected
    iplist = discover_nodes(discovery_ip)
    id_ = len(iplist) + 1
    iplist = list(set(iplist))
    utils.print_log('Received neighbors ' +str(iplist))
    # NOTE(review): this exit(0) makes everything below unreachable -- it
    # looks like leftover debug scaffolding; confirm before removing.
    exit(0)
    # get an id assigned
    # send all neighbors in iplist a message enquiring their id
    # active_neighbors = get_id_from_neighbors(self_ip=self_ip, neighbors=iplist)
    # idlist = active_neighbors.keys()
    # assign id
    # id_ = 1
    # if idlist is not None:
    #     id_ = max(active_neighbors.values()) + 1
    node = Node(ip=self_ip, id=id_, requesting_id=[],
                requesting_file=None, dl_queue=None, neighbors=iplist)
    # Listen for incoming peer messages and dispatch on message type.
    msg_socket = socket.socket()
    msg_socket.bind(('', NODE_RECV_PORT))
    msg_socket.listen(5)
    while True:
        connection, client_address = msg_socket.accept()
        # Drain the connection in BUFFER_SIZE chunks until EOF.
        data_list = []
        data = connection.recv(BUFFER_SIZE)
        while data:
            data_list.append(data)
            data = connection.recv(BUFFER_SIZE)
        data = b''.join(data_list)
        # SECURITY NOTE(review): unpickling network data can execute
        # arbitrary code; only safe among fully trusted peers.
        msg = pickle.loads(data)
        # NOTE(review): only `Message` is imported (`from message import
        # Message`), so `message.Message` here would raise NameError.
        assert isinstance(msg, message.Message), "Received object on socket not of type Message."
        # LOT NEEDS TO BE WRITTEN HERE
        # when this node is collaborator
        if msg.msg_type == 'HEARTBEAT' and node.ip != node.requesting_id[0]:
            # do something
            pass
        # collaborator / manager agnostic
        elif msg.msg_type == 'DOWNLOAD_REQUEST':
            # handle case if requested file is same as currently downloading file
            # do something
            pass
        # collaborator / manager agnostic
        elif msg.msg_type == 'ID_REQUEST':
            pass
        elif msg.msg_type == 'PAUSE_NEW':
            # new node joined, pause all downloads
            # this can be sent only by the temp server
            pass
        # when this node is temporary server
        # send back HEARTBEAT
| 25.019108 | 123 | 0.723778 |
b168b179ade54dc24a210b2b3b37438fc8ed6b09 | 59 | py | Python | code/10-12-pytest/day11/test_weather.py | llamafarmer/100_days_of_code | 6af973157aa4c77cd6f88bf1f0fa5e60a375339c | [
"MIT"
] | 1 | 2018-08-04T00:41:32.000Z | 2018-08-04T00:41:32.000Z | code/10-12-pytest/day11/test_weather.py | llamafarmer/100_days_of_code | 6af973157aa4c77cd6f88bf1f0fa5e60a375339c | [
"MIT"
] | null | null | null | code/10-12-pytest/day11/test_weather.py | llamafarmer/100_days_of_code | 6af973157aa4c77cd6f88bf1f0fa5e60a375339c | [
"MIT"
] | null | null | null | import pytest
from weather import weather
@patch.object() | 11.8 | 27 | 0.79661 |
ffead0df733697a9582c2d4daa4dae2c0d0b4927 | 1,436 | py | Python | python/bracket-push/bracket_push.py | rootulp/exercism | 312a053ad1d375752acf0fce062ee7b9c643a149 | [
"MIT"
] | 41 | 2015-02-09T18:08:45.000Z | 2022-03-06T15:23:32.000Z | python/bracket-push/bracket_push.py | DucChuyenSoftwareEngineer/exercism | fb7820a1ba162b888a39f1b86cbe5d3ca3b15d4f | [
"MIT"
] | 21 | 2019-12-28T17:47:06.000Z | 2021-02-27T19:43:00.000Z | python/bracket-push/bracket_push.py | DucChuyenSoftwareEngineer/exercism | fb7820a1ba162b888a39f1b86cbe5d3ca3b15d4f | [
"MIT"
] | 18 | 2016-04-29T14:35:12.000Z | 2021-06-23T07:32:29.000Z | def is_empty(stack):
return stack == []
class CheckBrackets:
    """Validates that brackets in a string are balanced and properly nested."""

    BRACKETS = {'{': '}',
                '[': ']',
                '(': ')'}
    OPENING_BRACKET = set(BRACKETS.keys())
    CLOSING_BRACKET = set(BRACKETS.values())

    def __init__(self, inp):
        self.inp = inp

    def is_paired(self):
        """Return True when every bracket in the input is matched in order."""
        pending = []
        for symbol in self.get_brackets(self.inp):
            if self.is_opening_bracket(symbol):
                pending.append(symbol)
                continue
            if self.is_closing_bracket(symbol) and self.closes_existing_bracket(symbol, pending):
                pending.pop()
                continue
            return False  # closing bracket with no matching opener
        return pending == []  # leftover openers mean unbalanced input

    def closes_existing_bracket(self, char, stack):
        return stack and self.matching_brackets(stack[-1], char)

    def matching_brackets(self, opener, closer):
        return self.BRACKETS[opener] == closer

    def get_brackets(self, string):
        return [symbol for symbol in string if self.is_bracket(symbol)]

    def is_opening_bracket(self, bracket):
        return bracket in self.OPENING_BRACKET

    def is_closing_bracket(self, bracket):
        return bracket in self.CLOSING_BRACKET

    def is_bracket(self, char):
        return self.is_opening_bracket(char) or self.is_closing_bracket(char)
def is_paired(inp):
    """Convenience wrapper: check *inp* via a throwaway CheckBrackets."""
    checker = CheckBrackets(inp)
    return checker.is_paired()
| 29.916667 | 99 | 0.632312 |
b302bc769779f476d5e3d80ca2f453e589637440 | 724 | py | Python | merchandises/urls.py | it-teaching-abo-akademi/webshop-project-arnelimperial | 98fc7bd8ce031a50c0bd83a6c5c48ed93030e528 | [
"Unlicense"
] | null | null | null | merchandises/urls.py | it-teaching-abo-akademi/webshop-project-arnelimperial | 98fc7bd8ce031a50c0bd83a6c5c48ed93030e528 | [
"Unlicense"
] | null | null | null | merchandises/urls.py | it-teaching-abo-akademi/webshop-project-arnelimperial | 98fc7bd8ce031a50c0bd83a6c5c48ed93030e528 | [
"Unlicense"
] | null | null | null | from .views import (
merchandise_list_view,
merchandise_detail_view,
merchandise_create_view,
merchandise_update_view,
merchandise_delete_view,
)
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
app_name = 'merchandises'
urlpatterns = [
path('', view=merchandise_list_view, name='merchandise_list'),
path('<slug:slug>', view=merchandise_detail_view, name='merchandise_detail'),
path('create/', view=merchandise_create_view, name='merchandise_create'),
path('<slug:slug>/update/', view=merchandise_update_view, name='merchandise_update'),
path('<slug:slug>/delete/', view=merchandise_delete_view, name='merchandise_delete'),
] | 38.105263 | 89 | 0.765193 |
06b0eafe70d6017f03cb7cebffe652c6048d8980 | 8,530 | py | Python | ant.py | twardzikf/aco-in-urban-transport | 89228ced89b425400a240a455d9585d0f7ef1861 | [
"MIT"
] | null | null | null | ant.py | twardzikf/aco-in-urban-transport | 89228ced89b425400a240a455d9585d0f7ef1861 | [
"MIT"
] | null | null | null | ant.py | twardzikf/aco-in-urban-transport | 89228ced89b425400a240a455d9585d0f7ef1861 | [
"MIT"
] | 1 | 2021-06-04T02:17:01.000Z | 2021-06-04T02:17:01.000Z | import numpy as np
class Ant:
    """
    Single ant in the ant-colony optimization simulation.

    Operational attributes:
        number: ordinal number of the ant.
        node_memory: current edge as [previous_node, current_node].
        src_node: start node (and next target after a completed pass).
        dst_node: current target node.

    Statistical attributes:
        passes: per-pass cost totals, one entry per reached destination.
        cost_sum: accumulated edge cost of the pass in progress.

    Methods:
        findNext(graph_data, parameters) -> next_node
        depositPheromones(graph_data, parameters) -> graph_data
        move(graph_data, parameters) -> graph_data
        showState() / showFindNext(...) -> debugging printouts
    """

    def __init__(self, src_node, dst_node, number):
        self.number = number
        # The ant starts on the degenerate edge (src, src).
        self.node_memory = [src_node, src_node]
        self.src_node = src_node
        self.dst_node = dst_node
        self.passes = []
        self.cost_sum = 0.0

    def findNext(self, graph_data, parameters):
        """
        Choose the next node to move to, favoring edges with more pheromones
        (probability proportional to pheromone**parameters['alpha']).

        The node visited on the previous step is excluded unless it is the
        only way out.  Returns the id of the chosen node.
        """
        current = self.node_memory[-1]
        candidates = list(graph_data[current]['pheromones'].keys())
        pheromones = list(graph_data[current]['pheromones'].values())
        # Index of the previously visited node (to avoid backtracking), or -1
        # when the ant is in its initial state or sitting on a target node.
        if self.node_memory[0] != current and current != self.dst_node:
            prev_index = candidates.index(self.node_memory[0])
        else:
            prev_index = -1
        probs = []
        for i, level in enumerate(pheromones):
            if len(pheromones) == 1:
                probs.append(1)      # only one way out: take it
            elif i == prev_index:
                probs.append(0)      # never immediately backtrack
            else:
                probs.append(pow(level, parameters['alpha']))
        probs = np.asarray(probs, dtype=float)
        total = probs.sum()
        if total > 0:
            probs = probs / total
        else:
            # Robustness fix: if every candidate edge carries zero pheromone,
            # fall back to a uniform choice instead of dividing by zero.
            probs = np.full(len(probs), 1.0 / len(probs))
        return np.random.choice(candidates, p=probs)

    def depositPheromones(self, graph_data, parameters):
        """
        Deposit pheromones on the edge the ant just traversed and add its
        cost to the running pass total.

        The deposited amount is (pheromone/(cost+1)) scaled by the free
        capacity fraction (max_vol-cur_vol)/(max_vol+1) and by
        parameters['enhancement_rate'].  Dead ends and over-capacity edges
        receive no deposit.
        """
        prev, cur = self.node_memory[0], self.node_memory[-1]
        cur_vol = graph_data[prev]['cur_vol'][cur]
        max_vol = graph_data[prev]['max_vol'][cur]
        neighbors = len(graph_data[cur]['cur_vol'])
        self.cost_sum += graph_data[prev]['costs'][cur]
        if cur_vol <= max_vol and neighbors > 1:
            pheromone = graph_data[prev]['pheromones'][cur]
            cost = graph_data[prev]['costs'][cur] + 1
            growth = (pheromone / cost) * ((max_vol - cur_vol) / (max_vol + 1)) * parameters["enhancement_rate"]
            # Pheromones are kept symmetric on both directions of the edge.
            graph_data[prev]['pheromones'][cur] += growth
            graph_data[cur]['pheromones'][prev] += growth
        return graph_data

    def move(self, graph_data, parameters):
        """
        Execute one step: pick the next node via findNext(), update edge
        volumes and pheromones, and handle arrival at the destination.

        Arguments:
            graph_data: all data about nodes & edges as a dict.
            parameters: steering parameters dict ('alpha',
                'enhancement_rate', 'verbose', ...).

        Return:
            graph_data with updated pheromone and current volume values.
        """
        next_node = self.findNext(graph_data, parameters)
        prev, cur = self.node_memory[0], self.node_memory[-1]
        # Leave the current edge (skip the degenerate initial edge and
        # never drive a volume negative).
        if cur != prev and graph_data[prev]['cur_vol'][cur] > 0:
            graph_data[prev]['cur_vol'][cur] -= 1
            graph_data[cur]['cur_vol'][prev] -= 1
        self.node_memory.append(next_node)
        self.node_memory.pop(0)
        graph_data = self.depositPheromones(graph_data, parameters)
        # Enter the new edge (volumes are symmetric).
        graph_data[self.node_memory[0]]['cur_vol'][self.node_memory[-1]] += 1
        graph_data[self.node_memory[-1]]['cur_vol'][self.node_memory[0]] += 1
        if self.node_memory[-1] == self.dst_node:
            # Destination reached: turn around and log the pass cost.
            self.src_node, self.dst_node = self.dst_node, self.src_node
            self.passes.append(self.cost_sum)
            self.cost_sum = 0.0
        if parameters['verbose']:
            print("Ant ", self.number, " - took (", self.node_memory[-1], ",", self.node_memory[0], ")")
        return graph_data

    def showState(self):
        """Print the current state of the ant (debugging aid)."""
        print("[ant]: current state of the ant...............................")
        print(" ant nr: ", self.number, " node_memory: ", self.node_memory, " src: ", self.src_node, " dst: ", self.dst_node)

    def showFindNext(self, graph_data, probs, next_node):
        """
        Print everything relevant to a findNext() decision: the current edge,
        each candidate with its pheromone level, cost, volumes and
        probability, and the node that was finally chosen.
        """
        probs = list(probs)
        cur = self.node_memory[-1]
        pheromones = list(graph_data[cur]['pheromones'].values())
        costs = list(graph_data[cur]['costs'].values())
        cur_vol = list(graph_data[cur]['cur_vol'].values())
        max_vol = list(graph_data[cur]['max_vol'].values())
        print("[ant]: I ( ant nr", self.number, ")am at edge (", self.node_memory[0], ",", cur, ") and I can choose between: ")
        for i in list(graph_data[cur]['pheromones'].keys()):
            print(" edge (", cur, ",", i, ") phe: ", pheromones.pop(0), " cost: ", costs.pop(0), "cur_vol: ", cur_vol.pop(0), " max_vol: ", max_vol.pop(0), " prob: ", probs.pop(0))
        print(" I decided to take edge (", cur, ",", next_node, ")")
f44ea6424f462ef05c04b15d236fa19743502985 | 4,513 | py | Python | test/utils/barrier_test.py | lbusoni/plico | e4bab48fcc7767a50dcac13644b5e1d6175ca5f0 | [
"MIT"
] | null | null | null | test/utils/barrier_test.py | lbusoni/plico | e4bab48fcc7767a50dcac13644b5e1d6175ca5f0 | [
"MIT"
] | 7 | 2021-08-30T17:18:34.000Z | 2022-03-25T22:42:20.000Z | test/utils/barrier_test.py | lbusoni/plico | e4bab48fcc7767a50dcac13644b5e1d6175ca5f0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import unittest
from plico.utils.barrier import Predicate, Barrier, BarrierTimeout,\
FunctionPredicate
from plico.utils.decorator import override
__version__="$Id: barrier_test.py 26 2018-01-26 19:06:25Z lbusoni $"
class Dummy:
    """Test double whose isTrue() yields False three times, then True forever."""

    def __init__(self):
        self._isTrueInvocations= 0

    def isTrue(self):
        if self._isTrueInvocations >= 3:
            return True
        self._isTrueInvocations+= 1
        return False

    def getIsTrueInvocations(self):
        return self._isTrueInvocations
class MyTimeModule():
    """Test double for the time module returning scripted timestamps."""
    def __init__(self):
        self._count= 0
        self._returnValues= None
    def setReturnValues(self, returnValues):
        # Bug fix: this previously assigned to `self._returneValues` (typo),
        # so time() kept reading the stale None and raised a TypeError
        # instead of returning the scripted timestamps.
        self._returnValues= returnValues
        self._count= 0
    def time(self):
        # Return the next scripted timestamp in order.
        ret= self._returnValues[self._count]
        self._count+= 1
        return ret
class MyPredicate(Predicate):
    """Scriptable Predicate double: cycles through preset isFullfilled values."""
    def __init__(self):
        self._errorMessage= None
        self._isFullfilledValues= [False]
        self._count= 0
    def setErrorMessage(self, msg):
        self._errorMessage= msg
    @override
    def errorMessage(self):
        return self._errorMessage
    def setFullfilledValues(self, fullfilledValues):
        self._isFullfilledValues= fullfilledValues
        self._count= 0
    @override
    def isFullfilled(self):
        # Cycle through the configured values, wrapping around when polled
        # more times than values were supplied.
        nElements= len(self._isFullfilledValues)
        ret= self._isFullfilledValues[self._count % nElements]
        self._count+= 1
        return ret
class BarrierStaticMethodTest(unittest.TestCase):
    """Tests for the static Barrier.waitUntil() entry point."""
    def trueFunction(self):
        return True
    def falseFunction(self):
        return False
    def test_should_return_True(self):
        # An immediately-true condition must return without timing out.
        Barrier.waitUntil(self.trueFunction, 0.5, 0.15)
    def test_should_raise_timeout_exception(self):
        # The fake time module reports 0 then TIMEOUT_IN_SEC, so the barrier
        # sees the full timeout elapse after the first poll.
        TIMEOUT_IN_SEC= 0.5
        timeModule= MyTimeModule()
        timeModule.setReturnValues([0, TIMEOUT_IN_SEC])
        self.assertRaises(Exception, Barrier.waitUntil,
                          self.falseFunction,
                          TIMEOUT_IN_SEC, 0.15, timeModule)
    def test_invocations(self):
        # Dummy turns true on the 4th call; it must be polled exactly 3
        # unsuccessful times before the barrier returns.
        dummy= Dummy()
        Barrier.waitUntil(dummy.isTrue, 2, 0.01)
        self.assertTrue(dummy.getIsTrueInvocations() == 3)
class BarrierDynamicMethodTest(unittest.TestCase):
    """Tests for Barrier instances driving a Predicate via waitFor()."""
    def setUp(self):
        # Fake clock advancing 0.1s per query, with a no-op sleep, so the
        # tests run instantly while the barrier still observes time passing.
        class TimeFakeModule(object):
            def __init__(self):
                self._currentTime= 0
            def time(self):
                self._currentTime+= 0.1
                return self._currentTime
            def sleep(self, sleepDurationInSec):
                pass
        self.predicate= MyPredicate()
        self.predicate.setErrorMessage("predicate failure")
        self.barrier= Barrier(
            timeoutSec=3, pollingPeriodSec=0.1,
            timeModule=TimeFakeModule())
    def test_should_wait_for_predicate(self):
        # Predicate becomes true on the second poll; waitFor must return.
        self.predicate= MyPredicate()
        self.predicate.setFullfilledValues([False, True])
        self.barrier.waitFor(self.predicate)
    def test_should_detect_timeout(self):
        # Predicate never becomes true; the barrier must time out and the
        # raised BarrierTimeout must carry the predicate's error message.
        self.predicate= MyPredicate()
        self.predicate.setFullfilledValues([False])
        self.predicate.setErrorMessage("Tux failure")
        exceptionThrown= False
        try:
            self.barrier.waitFor(self.predicate)
        except BarrierTimeout as e:
            exceptionThrown= True
            self.assertTrue("Tux failure" in str(e))
        self.assertTrue(exceptionThrown)
    def test_should_forward_predicate_failures(self):
        # An exception raised inside the predicate must propagate unchanged,
        # not be converted into a BarrierTimeout.
        class FailingPredicate(Predicate):
            @override
            def isFullfilled(self):
                raise Exception("test")
            @override
            def errorMessage(self):
                assert False
        try:
            self.barrier.waitFor(FailingPredicate())
        except BarrierTimeout:
            self.fail("No barrier timeout expected")
        except Exception as e:
            self.assertEqual("test", str(e))
            return
        self.fail("Exception expected")
class FunctionPredicateTest(unittest.TestCase):
    """Tests FunctionPredicate.create() wrapping a plain function."""
    def test_creation(self):
        def foo(a, b):
            return a == b
        pred= FunctionPredicate.create(foo, 3, 4)
        self.assertFalse(pred.isFullfilled())
        pred= FunctionPredicate.create(foo, 1, 1)
        self.assertTrue(pred.isFullfilled())
        # The error message should mention the wrapped function's name.
        # NOTE(review): the print below looks like leftover debug output.
        print("pred:", pred.errorMessage())
        self.assertTrue("foo" in pred.errorMessage())
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| 24.005319 | 68 | 0.629293 |
041087f68731db3350a77e9b513718ee3f7de281 | 10,126 | py | Python | tools/clang/scripts/update.py | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 5 | 2019-05-24T01:25:34.000Z | 2020-04-06T05:07:01.000Z | tools/clang/scripts/update.py | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | null | null | null | tools/clang/scripts/update.py | domenic/mojo | 53dda76fed90a47c35ed6e06baf833a0d44495b8 | [
"BSD-3-Clause"
] | 5 | 2016-12-23T04:21:10.000Z | 2020-06-18T13:52:33.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Windows can't run .sh files, so this is a Python implementation of
update.sh. This script should replace update.sh on all platforms eventually."""
import os
import re
import shutil
import subprocess
import stat
import sys
import time
# Do NOT CHANGE this if you don't know what you're doing -- see
# https://code.google.com/p/chromium/wiki/UpdatingClang
# Reverting problematic clang rolls is safe, though.
# Note: this revision is only used for Windows. Other platforms use update.sh.
LLVM_WIN_REVISION = 'HEAD'
# ASan on Windows is useful enough to use it even while the clang/win is still
# in bringup. Use a pinned revision to make it slightly more stable.
if (re.search(r'\b(asan)=1', os.environ.get('GYP_DEFINES', '')) and
not 'LLVM_FORCE_HEAD_REVISION' in os.environ):
LLVM_WIN_REVISION = '217738'
# Path constants. (All of these should be absolute paths.)
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
LLVM_DIR = os.path.join(CHROMIUM_DIR, 'third_party', 'llvm')
LLVM_BUILD_DIR = os.path.join(CHROMIUM_DIR, 'third_party', 'llvm-build',
'Release+Asserts')
COMPILER_RT_BUILD_DIR = os.path.join(LLVM_BUILD_DIR, '32bit-compiler-rt')
CLANG_DIR = os.path.join(LLVM_DIR, 'tools', 'clang')
LLD_DIR = os.path.join(LLVM_DIR, 'tools', 'lld')
COMPILER_RT_DIR = os.path.join(LLVM_DIR, 'projects', 'compiler-rt')
STAMP_FILE = os.path.join(LLVM_BUILD_DIR, 'cr_build_revision')
LLVM_REPO_URL='https://llvm.org/svn/llvm-project'
if 'LLVM_REPO_URL' in os.environ:
LLVM_REPO_URL = os.environ['LLVM_REPO_URL']
def ReadStampFile():
  """Return the contents of the stamp file, or '' if it doesn't exist."""
  try:
    with open(STAMP_FILE, 'r') as f:
      return f.read()
  except IOError:
    # Missing stamp file simply means no build has been recorded yet.
    return ''
def WriteStampFile(s):
  """Write s to the stamp file."""
  # Create the build directory on first use so open() below cannot fail.
  if not os.path.exists(LLVM_BUILD_DIR):
    os.makedirs(LLVM_BUILD_DIR)
  with open(STAMP_FILE, 'w') as f:
    f.write(s)
def RmTree(dir):
  """Delete dir, forcing removal of read-only files."""
  def ChmodAndRetry(func, path, _):
    # Subversion can leave read-only files around.
    if not os.access(path, os.W_OK):
      os.chmod(path, stat.S_IWUSR)
      return func(path)
    # File was already writable: re-raise the original rmtree error.
    raise
  shutil.rmtree(dir, onerror=ChmodAndRetry)
def ClobberChromiumBuildFiles():
  """Clobber Chromium build files (delete the out/ directory, if present)."""
  print 'Clobbering Chromium build files...'
  out_dir = os.path.join(CHROMIUM_DIR, 'out')
  if os.path.isdir(out_dir):
    RmTree(out_dir)
    print 'Removed Chromium out dir: %s.' % (out_dir)
def RunCommand(command, fail_hard=True):
  """Run command and return success (True) or failure; or if fail_hard is
  True, exit on failure."""
  print 'Running %s' % (str(command))
  # NOTE(review): shell=True combined with a list argument relies on
  # Windows-specific subprocess behavior; confirm this path is Windows-only.
  if subprocess.call(command, shell=True) == 0:
    return True
  print 'Failed.'
  if fail_hard:
    sys.exit(1)
  return False
def CopyFile(src, dst):
  """Copy a file from src to dst."""
  # shutil.copy copies data and permission mode (not other metadata).
  shutil.copy(src, dst)
  print "Copying %s to %s" % (src, dst)
def CopyDirectoryContents(src, dst, filename_filter=None):
  """Copy the files from directory src to dst
  with an optional filename filter (regex matched against the basename)."""
  if not os.path.exists(dst):
    os.makedirs(dst)
  for root, _, files in os.walk(src):
    for f in files:
      # Skip files not matching the (start-anchored) regex filter, if given.
      if filename_filter and not re.match(filename_filter, f):
        continue
      CopyFile(os.path.join(root, f), dst)
def Checkout(name, url, dir):
  """Checkout the SVN module at url into dir. Use name for the log message."""
  print "Checking out %s r%s into '%s'" % (name, LLVM_WIN_REVISION, dir)
  command = ['svn', 'checkout', '--force', url + '@' + LLVM_WIN_REVISION, dir]
  if RunCommand(command, fail_hard=False):
    return
  # First attempt failed: wipe any partial working copy and retry once,
  # exiting on failure this time (RunCommand's default fail_hard=True).
  if os.path.isdir(dir):
    print "Removing %s." % (dir)
    RmTree(dir)
  print "Retrying."
  RunCommand(command)
def AddCMakeToPath():
  """Look for CMake and add it to PATH if it's not there already."""
  try:
    # First check if cmake is already on PATH.
    subprocess.call(['cmake', '--version'])
    return
  except OSError as e:
    # ENOENT just means cmake wasn't found; anything else is unexpected.
    if e.errno != os.errno.ENOENT:
      raise
  # Fall back to the standard Windows install locations.
  cmake_locations = ['C:\\Program Files (x86)\\CMake\\bin',
                     'C:\\Program Files (x86)\\CMake 2.8\\bin']
  for d in cmake_locations:
    if os.path.isdir(d):
      os.environ['PATH'] = os.environ.get('PATH', '') + os.pathsep + d
      return
  print 'Failed to find CMake!'
  sys.exit(1)
# Memoized result of GetVSVersion().
vs_version = None
def GetVSVersion():
  """Return (and cache) the Visual Studio 2013 version object from gyp."""
  global vs_version
  if vs_version:
    return vs_version

  # Try using the toolchain in depot_tools.
  # This sets environment variables used by SelectVisualStudioVersion below.
  sys.path.append(os.path.join(CHROMIUM_DIR, 'build'))
  import vs_toolchain
  vs_toolchain.SetEnvironmentAndGetRuntimeDllDirs()

  # Use gyp to find the MSVS installation, either in depot_tools as per above,
  # or a system-wide installation otherwise.
  sys.path.append(os.path.join(CHROMIUM_DIR, 'tools', 'gyp', 'pylib'))
  import gyp.MSVSVersion
  vs_version = gyp.MSVSVersion.SelectVisualStudioVersion('2013')
  return vs_version
def SubversionCmakeArg():
  """Return a '-DSubversion_SVN_EXECUTABLE=...' cmake flag for the first svn
  binary found on PATH, or '' when none is found.

  cmake's find_program can only locate .exe/.com, so svn.bat shipped by
  depot_tools would be ignored; search the common executable suffixes here.
  """
  executable_suffixes = ('.com', '.exe', '.bat', '.cmd')
  for directory in os.environ.get('PATH', '').split(os.pathsep):
    for suffix in executable_suffixes:
      svn_path = os.path.join(directory, 'svn' + suffix)
      if os.path.isfile(svn_path):
        return '-DSubversion_SVN_EXECUTABLE=%s' % svn_path
  return ''
def UpdateClang():
    """Sync LLVM/Clang at LLVM_WIN_REVISION, build it, and stage the ASan runtime.

    Returns 0 on success (the stamp file records the built revision so that
    re-runs at the same revision are no-ops).
    """
    print 'Updating Clang to %s...' % (LLVM_WIN_REVISION)
    if LLVM_WIN_REVISION != 'HEAD' and ReadStampFile() == LLVM_WIN_REVISION:
        print 'Already up to date.'
        return 0

    AddCMakeToPath()
    ClobberChromiumBuildFiles()

    # Reset the stamp file in case the build is unsuccessful.
    WriteStampFile('')

    Checkout('LLVM', LLVM_REPO_URL + '/llvm/trunk', LLVM_DIR)
    Checkout('Clang', LLVM_REPO_URL + '/cfe/trunk', CLANG_DIR)
    Checkout('LLD', LLVM_REPO_URL + '/lld/trunk', LLD_DIR)
    Checkout('compiler-rt', LLVM_REPO_URL + '/compiler-rt/trunk', COMPILER_RT_DIR)

    if not os.path.exists(LLVM_BUILD_DIR):
        os.makedirs(LLVM_BUILD_DIR)
    os.chdir(LLVM_BUILD_DIR)

    # Configure and build everything with the x64 toolchain (Release + assertions).
    RunCommand(GetVSVersion().SetupScript('x64') +
               ['&&', 'cmake', '-GNinja', '-DCMAKE_BUILD_TYPE=Release',
                '-DLLVM_ENABLE_ASSERTIONS=ON', SubversionCmakeArg(), LLVM_DIR])
    RunCommand(GetVSVersion().SetupScript('x64') + ['&&', 'ninja', 'all'])

    # Do an x86 build of compiler-rt to get the 32-bit ASan run-time.
    # TODO(hans): Remove once the regular build above produces this.
    if not os.path.exists(COMPILER_RT_BUILD_DIR):
        os.makedirs(COMPILER_RT_BUILD_DIR)
    os.chdir(COMPILER_RT_BUILD_DIR)
    RunCommand(GetVSVersion().SetupScript('x86') +
               ['&&', 'cmake', '-GNinja', '-DCMAKE_BUILD_TYPE=Release',
                '-DLLVM_ENABLE_ASSERTIONS=ON', LLVM_DIR])
    RunCommand(GetVSVersion().SetupScript('x86') + ['&&', 'ninja', 'compiler-rt'])

    # Stage the 32-bit ASan DLLs/libs produced by the x86 build into the
    # x64 toolchain's output tree.
    asan_rt_bin_src_dir = os.path.join(COMPILER_RT_BUILD_DIR, 'bin')
    asan_rt_bin_dst_dir = os.path.join(LLVM_BUILD_DIR, 'bin')
    CopyDirectoryContents(asan_rt_bin_src_dir, asan_rt_bin_dst_dir,
                          r'^.*-i386\.dll$')

    # TODO(hans): Make this (and the .gypi file) version number independent.
    asan_rt_lib_src_dir = os.path.join(COMPILER_RT_BUILD_DIR, 'lib', 'clang',
                                       '3.6.0', 'lib', 'windows')
    asan_rt_lib_dst_dir = os.path.join(LLVM_BUILD_DIR, 'lib', 'clang',
                                       '3.6.0', 'lib', 'windows')
    CopyDirectoryContents(asan_rt_lib_src_dir, asan_rt_lib_dst_dir,
                          r'^.*-i386\.lib$')

    CopyFile(os.path.join(asan_rt_lib_src_dir, '..', '..', 'asan_blacklist.txt'),
             os.path.join(asan_rt_lib_dst_dir, '..', '..'))

    # Make an extra copy of the sanitizer headers, to be put on the include path
    # of the fallback compiler.
    sanitizer_include_dir = os.path.join(LLVM_BUILD_DIR, 'lib', 'clang', '3.6.0',
                                         'include', 'sanitizer')
    aux_sanitizer_include_dir = os.path.join(LLVM_BUILD_DIR, 'lib', 'clang',
                                             '3.6.0', 'include_sanitizer',
                                             'sanitizer')
    if not os.path.exists(aux_sanitizer_include_dir):
        os.makedirs(aux_sanitizer_include_dir)
    for _, _, files in os.walk(sanitizer_include_dir):
        for f in files:
            CopyFile(os.path.join(sanitizer_include_dir, f),
                     aux_sanitizer_include_dir)

    WriteStampFile(LLVM_WIN_REVISION)
    print 'Clang update was successful.'
    return 0
def main():
    """Entry point: dispatch to update.sh off-Windows, else build via UpdateClang()."""
    if not sys.platform in ['win32', 'cygwin']:
        # For non-Windows, fall back to update.sh.
        # TODO(hans): Make update.py replace update.sh completely.
        #
        # This script is called by gclient. gclient opens its hooks subprocesses
        # with (stdout=subprocess.PIPE, stderr=subprocess.STDOUT) and then does
        # custom output processing that breaks printing '\r' characters for
        # single-line updating status messages as printed by curl and wget.
        # Work around this by setting stderr of the update.sh process to stdin (!):
        # gclient doesn't redirect stdin, and while stdin itself is read-only, a
        # dup()ed sys.stdin is writable, try
        #   fd2 = os.dup(sys.stdin.fileno()); os.write(fd2, 'hi')
        # TODO: Fix gclient instead, http://crbug.com/95350
        return subprocess.call(
            [os.path.join(os.path.dirname(__file__), 'update.sh')] + sys.argv[1:],
            stderr=os.fdopen(os.dup(sys.stdin.fileno())))

    # Only build clang when the gyp configuration actually asks for it.
    if not re.search(r'\b(clang|asan)=1', os.environ.get('GYP_DEFINES', '')):
        print 'Skipping Clang update (clang=1 was not set in GYP_DEFINES).'
        return 0

    # A custom clang dir means the user provides their own toolchain.
    if re.search(r'\b(make_clang_dir)=', os.environ.get('GYP_DEFINES', '')):
        print 'Skipping Clang update (make_clang_dir= was set in GYP_DEFINES).'
        return 0

    return UpdateClang()
# Script entry point (also invoked as a gclient hook); exit with main()'s code.
if __name__ == '__main__':
    sys.exit(main())
| 35.529825 | 80 | 0.674501 |
a3e335f40dd653ac30e8d7d5c3e8b98621916f12 | 552 | py | Python | adv/summer_norwin.py | dl-stuff/dl | 185cc8a16339c47ed873768ff30804f8d06090a2 | [
"Apache-2.0"
] | 22 | 2020-04-04T17:34:16.000Z | 2021-09-25T00:22:23.000Z | adv/summer_norwin.py | dl-stuff/dl | 185cc8a16339c47ed873768ff30804f8d06090a2 | [
"Apache-2.0"
] | 92 | 2020-04-04T15:30:34.000Z | 2022-03-24T01:43:11.000Z | adv/summer_norwin.py | dl-stuff/dl | 185cc8a16339c47ed873768ff30804f8d06090a2 | [
"Apache-2.0"
] | 37 | 2020-04-16T02:47:07.000Z | 2021-03-28T23:18:50.000Z | from core.advbase import *
class Summer_Norwin(Adv):
    # Adventurer logic for Summer Norwin: tracks "doleful" stacks that are
    # cleared by s1's "d" variant and built up (at an HP cost) by s2's "d"
    # variant.

    def prerun(self):
        # no stacks at the start of a run; both skills begin on variant "a"
        self.doleful = 0
        for skill_key in ("s1", "s2"):
            self.current_s[skill_key] = "a"

    def s1_before(self, e):
        # only the "d" variant interacts with the stack mechanic
        if e.group != "d":
            return
        self.doleful = 0
        self.energy.unset_disabled("doleful")

    def s2_proc(self, e):
        if e.group != "d":
            return
        # pay 20% of current HP per held stack, then gain a stack (max 4)
        self.add_hp(-self.hp * self.doleful * 0.20)
        self.doleful = min(self.doleful + 1, 4)
        self.energy.set_disabled("doleful")
# Registry of adventurer variants for this module (None = default variant).
variants = {None: Summer_Norwin}
| 24 | 55 | 0.548913 |
bd40f4116cc124fe18225653ab443e9febd9f242 | 8,095 | py | Python | jina/jaml/helper.py | properGrammar/jina | b483002989f642eb33791a0444dc6728929704b2 | [
"Apache-2.0"
] | null | null | null | jina/jaml/helper.py | properGrammar/jina | b483002989f642eb33791a0444dc6728929704b2 | [
"Apache-2.0"
] | null | null | null | jina/jaml/helper.py | properGrammar/jina | b483002989f642eb33791a0444dc6728929704b2 | [
"Apache-2.0"
] | null | null | null | import collections
import json
import os
from typing import Union, TextIO, Dict, Tuple, Optional
from yaml import MappingNode
from yaml.composer import Composer
from yaml.constructor import FullConstructor, ConstructorError
from yaml.parser import Parser
from yaml.reader import Reader
from yaml.resolver import Resolver
from yaml.scanner import Scanner
from jina.excepts import BadConfigSource
from jina.helper import is_yaml_filepath
from jina.importer import PathImporter
class JinaConstructor(FullConstructor):
"""Convert List into tuple when doing hashing."""
def get_hashable_key(self, key):
"""
Get the hash value of key.
:param key: key value to be hashed.
:return: Hash value of key.
"""
try:
hash(key)
except:
if isinstance(key, list):
for i in range(len(key)):
if not isinstance(key[i], collections.abc.Hashable):
key[i] = self.get_hashable_key(key[i])
key = tuple(key)
return key
raise ValueError(f'unhashable key: {key}')
return key
def construct_mapping(self, node, deep=True):
"""
Build the mapping from node.
:param node: the node to traverse
:param deep: required param from YAML constructor
:return: Mapped data
"""
if isinstance(node, MappingNode):
self.flatten_mapping(node)
return self._construct_mapping(node, deep=deep)
def _construct_mapping(self, node, deep=True):
if not isinstance(node, MappingNode):
raise ConstructorError(
None,
None,
'expected a mapping node, but found %s' % node.id,
node.start_mark,
)
mapping = {}
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=True)
if not isinstance(key, collections.abc.Hashable):
try:
key = self.get_hashable_key(key)
except Exception as exc:
raise ConstructorError(
'while constructing a mapping',
node.start_mark,
'found unacceptable key (%s)' % exc,
key_node.start_mark,
)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
class JinaResolver(Resolver):
    """Remove `on|On|ON` as bool resolver."""

    # No overrides needed here: the implicit resolvers keyed on 'o'/'O' are
    # popped at module level (right after JinaLoader is defined).
    pass
class JinaLoader(Reader, Scanner, Parser, Composer, JinaConstructor, JinaResolver):
    """
    The Jina loader which should be able to load YAML safely.

    Assembles the standard PyYAML pipeline stages with Jina's own constructor
    and resolver subclasses.

    :param stream: the stream to load.
    """

    def __init__(self, stream):
        # Each mixin is initialised explicitly, mirroring PyYAML's own
        # loader classes (no cooperative super() chain exists here).
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)
        Composer.__init__(self)
        JinaConstructor.__init__(self)
        JinaResolver.__init__(self)
# remove on|On|ON resolver
# (drops PyYAML's implicit bool resolution for scalars starting with 'o'/'O',
# presumably so bare `on` keys/values in Jina YAML stay strings — see the
# JinaResolver docstring)
JinaResolver.yaml_implicit_resolvers.pop('o')
JinaResolver.yaml_implicit_resolvers.pop('O')
def parse_config_source(
    path: Union[str, TextIO, Dict],
    allow_stream: bool = True,
    allow_yaml_file: bool = True,
    allow_builtin_resource: bool = True,
    allow_raw_yaml_content: bool = True,
    allow_class_type: bool = True,
    allow_dict: bool = True,
    allow_json: bool = True,
    *args,
    **kwargs,
) -> Tuple[TextIO, Optional[str]]:
    """
    Check if the text or text stream is valid.

    .. # noqa: DAR401
    :param path: the multi-kind source of the configs.
    :param allow_stream: flag
    :param allow_yaml_file: flag
    :param allow_builtin_resource: flag
    :param allow_raw_yaml_content: flag
    :param allow_class_type: flag
    :param allow_dict: flag
    :param allow_json: flag
    :param args: unused
    :param kwargs: unused
    :return: a tuple, the first element is the text stream, the second element is the file path associate to it
        if available.
    """
    import io
    from pkg_resources import resource_filename

    # NOTE: the elif order below defines the resolution priority — dict,
    # readable stream, yaml file path, builtin resource, raw YAML, class
    # name, then JSON text. Do not reorder casually.
    if not path:
        raise BadConfigSource
    elif allow_dict and isinstance(path, dict):
        from . import JAML

        tmp = JAML.dump(path)
        return io.StringIO(tmp), None
    elif allow_stream and hasattr(path, 'read'):
        # already a readable stream
        return path, None
    elif allow_yaml_file and is_yaml_filepath(path):
        comp_path = complete_path(path)
        return open(comp_path, encoding='utf8'), comp_path
    elif (
        allow_builtin_resource
        and path.lstrip().startswith('_')
        and os.path.exists(
            resource_filename('jina', '/'.join(('resources', f'executors.{path}.yml')))
        )
    ):
        # NOTE: this returns a binary stream
        comp_path = resource_filename(
            'jina', '/'.join(('resources', f'executors.{path}.yml'))
        )
        return open(comp_path, encoding='utf8'), comp_path
    elif allow_raw_yaml_content and path.lstrip().startswith(('!', 'jtype')):
        # possible YAML content
        path = path.replace('|', '\n with: ')
        return io.StringIO(path), None
    elif allow_class_type and path.isidentifier():
        # possible class name
        return io.StringIO(f'!{path}'), None
    elif allow_json and isinstance(path, str):
        try:
            from . import JAML

            # round-trip JSON text through JAML so callers always get YAML
            tmp = json.loads(path)
            tmp = JAML.dump(tmp)
            return io.StringIO(tmp), None
        except json.JSONDecodeError:
            raise BadConfigSource(path)
    else:
        raise BadConfigSource(
            f'{path} can not be resolved, it should be a readable stream,'
            ' or a valid file path, or a supported class name.'
        )
def complete_path(path: str, extra_search_paths: Optional[Tuple[str]] = None) -> str:
    """
    Complete the path of file via searching in abs and relative paths.

    :param path: path of file.
    :param extra_search_paths: extra paths to conduct search
    :return: Completed (absolute) file path.
    :raises FileNotFoundError: if the file cannot be located anywhere.
    """
    # os.path.exists already covers both absolute and relative paths;
    # otherwise fall back to scanning PATH / the call stack.
    candidate = path if os.path.exists(path) else _search_file_in_paths(path, extra_search_paths)
    if not candidate:
        raise FileNotFoundError(f'can not find {path}')
    return os.path.abspath(candidate)
def _search_file_in_paths(path, extra_search_paths: Optional[Tuple[str]] = None):
    """
    Search in all dirs of the PATH environment variable and all dirs of files used in the call stack.

    :param path: the path to search for
    :param extra_search_paths: any extra locations to search for
    :return: the path (if found), otherwise None (implicit).
    """
    import inspect

    # search order: explicit extras, then call-stack file dirs, then PATH
    search_paths = []
    if extra_search_paths:
        search_paths.extend((v for v in extra_search_paths))

    frame = inspect.currentframe()

    # iterates over the call stack
    while frame:
        search_paths.append(os.path.dirname(inspect.getfile(frame)))
        frame = frame.f_back
    # NOTE(review): raises KeyError if the PATH env var is unset — confirm
    # whether that can happen in supported environments
    search_paths += os.environ['PATH'].split(os.pathsep)

    # return first occurrence of path. If it does not exist, return None.
    for p in search_paths:
        _p = os.path.join(p, path)
        if os.path.exists(_p):
            return _p
def load_py_modules(d: Dict, extra_search_paths: Optional[Tuple[str]] = None) -> None:
    """
    Find 'py_modules' in the dict recursively and then load them.

    :param d: the dictionary to traverse
    :param extra_search_paths: any extra paths to search
    """
    mod = []

    def _finditem(obj, key='py_modules'):
        # collect value(s) stored under `key` (str or list/tuple of str),
        # then recurse into every nested dict
        value = obj.get(key, [])
        if isinstance(value, str):
            mod.append(value)
        elif isinstance(value, (list, tuple)):
            mod.extend(value)
        for k, v in obj.items():
            if isinstance(v, dict):
                _finditem(v, key)

    _finditem(d)
    if mod:
        # resolve each module path before handing off to the importer
        mod = [complete_path(m, extra_search_paths) for m in mod]
        PathImporter.add_modules(*mod)
| 31.254826 | 111 | 0.619271 |
cb6d4eb1ae04510d94c3c051d7b193b12522b04a | 23,658 | py | Python | naslib/utils/utils.py | saiprasadbarke/NASLib_Exercise_DLLab21 | b8ceff881ce0028611ff373c7c6a546ed74ec916 | [
"Apache-2.0"
] | null | null | null | naslib/utils/utils.py | saiprasadbarke/NASLib_Exercise_DLLab21 | b8ceff881ce0028611ff373c7c6a546ed74ec916 | [
"Apache-2.0"
] | null | null | null | naslib/utils/utils.py | saiprasadbarke/NASLib_Exercise_DLLab21 | b8ceff881ce0028611ff373c7c6a546ed74ec916 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import sys
import logging
import argparse
import torchvision.datasets as dset
from torch.utils.data import Dataset
from sklearn import metrics
from scipy import stats
from copy import copy
from collections import OrderedDict
import random
import os
import os.path
import shutil
from functools import wraps, partial
from pathlib import Path
import numpy as np
import torch
import torchvision.transforms as transforms
import yaml
from fvcore.common.checkpoint import Checkpointer as fvCheckpointer
from fvcore.common.config import CfgNode
cat_channels = partial(torch.cat, dim=1)
logger = logging.getLogger(__name__)
def get_project_root() -> Path:
    """
    Returns the root path of the project (two levels above this file).
    """
    return Path(__file__).parents[1]
def iter_flatten(iterable):
    """
    Flatten a potentially deeply nested python list/tuple structure,
    yielding the leaf elements in depth-first order.
    """
    # taken from https://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
    for element in iterable:
        if isinstance(element, (list, tuple)):
            yield from iter_flatten(element)
        else:
            yield element
def default_argument_parser():
    """
    Returns the argument parser with the default options.

    Inspired by the implementation of FAIR's detectron2
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # parser.add_argument("--config-file", default="{}/benchmarks/predictors/predictor_config.yaml".format(get_project_root()), metavar="FILE", help="path to config file")
    parser.add_argument("--config-file",
                        default="{}/benchmarks/nas_predictors/discrete_config.yaml".format(get_project_root()), metavar="FILE", help="path to config file")
    parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
    parser.add_argument("--seed", default=0, help="random seed")
    parser.add_argument("--resume", action="store_true", help="Resume from last checkpoint")
    parser.add_argument("--model-path", type=str, default=None, help="Path to saved model weights")
    # distributed-training knobs (consumed by the evaluation pipeline)
    parser.add_argument('--world-size', default=1, type=int, help='number of nodes for distributed training')
    parser.add_argument('--rank', default=0, type=int, help='node rank for distributed training')
    parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
    parser.add_argument('--dist-url', default='tcp://127.0.0.1:8888',
                        type=str, help='url used to set up distributed training')
    parser.add_argument('--dist-backend', default='nccl', type=str, help='distributed backend')
    parser.add_argument('--multiprocessing-distributed', action='store_true',
                        help='Use multi-processing distributed training to launch '
                             'N processes per node, which has N GPUs. This is the '
                             'fastest way to use PyTorch for either single node or '
                             'multi node data parallel training')
    # trailing "key value" pairs collected for config overrides
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
def parse_args(parser=None, args=None):
    """
    Parse command line arguments.

    :param parser: argparse parser to use; defaults to ``default_argument_parser()``.
        (Now evaluated lazily at call time — the old ``parser=default_argument_parser()``
        default was built once at import time and shared across all calls.)
    :param args: argument list to parse; defaults to the current ``sys.argv[1:]``
        at call time (previously frozen at import time).
    :return: the parsed argument namespace.
    """
    if parser is None:
        parser = default_argument_parser()
    if args is None:
        args = sys.argv[1:]
    if '-f' in args:
        # drop the first two arguments, presumably Jupyter's
        # "-f <connection-file>" pair — TODO confirm this assumption
        args = args[2:]
    return parser.parse_args(args)
def pairwise(iterable):
    """
    Iterate over non-overlapping pairs: s -> (s0, s1), (s2, s3), (s4, s5), ...

    A trailing odd element is dropped (zip stops at the shorter side).
    from https://stackoverflow.com/questions/5389507/iterating-over-every-two-elements-in-a-list
    """
    iterator = iter(iterable)
    # zip pulls two items per step from the SAME iterator, yielding pairs
    return zip(iterator, iterator)
def get_config_from_args(args=None, config_type='nas'):
    """
    Parses command line arguments and merges them with the defaults
    from the config file.
    Prepares experiment directories.

    Args:
        args: args from a different argument parser than the default one.
    """
    # pick the default yaml matching the experiment type
    if config_type == 'nas':
        # load the default base
        with open(os.path.join(get_project_root(), 'defaults', 'darts_defaults.yaml')) as f:
            config = CfgNode.load_cfg(f)
    elif config_type == 'predictor':
        # load the default base
        with open(os.path.join(get_project_root(), 'benchmarks/predictors', 'predictor_config.yaml')) as f:
            config = CfgNode.load_cfg(f)
    elif config_type == 'nas_predictor':
        # load the default base
        #with open(os.path.join(get_project_root(), 'benchmarks/nas_predictors', 'nas_predictor_config.yaml')) as f:
        with open(os.path.join(get_project_root(), 'benchmarks/nas_predictors', 'discrete_config.yaml')) as f:
            config = CfgNode.load_cfg(f)
    elif config_type == 'oneshot':
        with open(os.path.join(get_project_root(), 'benchmarks/nas_predictors', 'nas_predictor_config.yaml')) as f:
            config = CfgNode.load_cfg(f)

    if args is None:
        args = parse_args()
    else:
        parser = default_argument_parser()
        args = parser.parse_args(args)
        print(args)
    logger.info("Command line args: {}".format(args))

    # load config file
    #with open(args.config_file, 'r') as f:
    #config = AttrDict(yaml.safe_load(f))
    #for k, v in config.items():
    #if isinstance(v, dict):
    #config[k] = AttrDict(v)

    # Override file args with ones from command line
    try:
        for arg, value in pairwise(args.opts):
            if '.' in arg:
                arg1, arg2 = arg.split('.')
                # coerce the string override to the existing value's type
                config[arg1][arg2] = type(config[arg1][arg2])(value)
            else:
                config[arg] = value

        config.eval_only = args.eval_only
        config.resume = args.resume
        config.model_path = args.model_path

        if config_type != 'nas_predictor':
            config.seed = args.seed

        # load config file
        config.merge_from_file(args.config_file)
        config.merge_from_list(args.opts)
    except AttributeError:
        # args did not come from argparse (e.g. a raw list): treat args[1]
        # as a config-file path instead
        with open(args[1]) as f:
            config = CfgNode.load_cfg(f)

    # prepare the output directories
    if config_type == 'nas':
        #config.seed = args.seed
        config.search.seed = config.seed
        #config.optimizer = args.optimizer
        config.evaluation.world_size = args.world_size
        config.gpu = config.search.gpu = config.evaluation.gpu = args.gpu
        config.evaluation.rank = args.rank
        config.evaluation.dist_url = args.dist_url
        config.evaluation.dist_backend = args.dist_backend
        config.evaluation.multiprocessing_distributed = args.multiprocessing_distributed
        config.save = '{}/{}/{}/{}'.format(config.out_dir, config.dataset, config.optimizer, config.seed)

    elif config_type == 'predictor':
        if config.predictor == 'lcsvr' and config.experiment_type == 'vary_train_size':
            config.save = '{}/{}/{}/{}_train/{}'.format(config.out_dir, config.dataset, 'predictors', config.predictor, config.seed)
        elif config.predictor == 'lcsvr' and config.experiment_type == 'vary_fidelity':
            config.save = '{}/{}/{}/{}_fidelity/{}'.format(config.out_dir, config.dataset, 'predictors', config.predictor, config.seed)
        else:
            config.save = '{}/{}/{}/{}/{}'.format(config.out_dir, config.dataset, 'predictors', config.predictor, config.seed)

    elif config_type == 'nas_predictor':
        config.search.seed = config.seed
        config.save = '{}/{}/{}/{}/{}/{}'.format(config.out_dir, config.dataset, 'nas_predictors',
                                                 config.search_space,
                                                 config.search.predictor_type,
                                                 config.seed)

    elif config_type == 'oneshot':
        config.save = '{}/{}/{}/{}/{}/{}'.format(config.out_dir, config.dataset, 'nas_predictors',
                                                 config.search_space,
                                                 config.search.predictor_type,
                                                 config.seed)

    else:
        print('invalid config type in utils/utils.py')

    config.data = "{}/data".format(get_project_root())

    create_exp_dir(config.save)
    create_exp_dir(config.save + "/search")  # required for the checkpoints
    create_exp_dir(config.save + "/eval")

    return config
def get_train_val_loaders(config, mode):
    """
    Constructs the dataloaders and transforms for training, validation and test data.

    Returns (train_queue, valid_queue, test_queue, train_transform, valid_transform).
    """
    data = config.data
    dataset = config.dataset
    seed = config.search.seed
    # from here on, use the search- or evaluation-specific sub-config
    config = config.search if mode=='train' else config.evaluation
    if dataset == 'cifar10':
        train_transform, valid_transform = _data_transforms_cifar10(config)
        train_data = dset.CIFAR10(root=data, train=True, download=True, transform=train_transform)
        test_data = dset.CIFAR10(root=data, train=False, download=True, transform=valid_transform)
    elif dataset == 'cifar100':
        train_transform, valid_transform = _data_transforms_cifar100(config)
        train_data = dset.CIFAR100(root=data, train=True, download=True, transform=train_transform)
        test_data = dset.CIFAR100(root=data, train=False, download=True, transform=valid_transform)
    elif dataset == 'svhn':
        train_transform, valid_transform = _data_transforms_svhn(config)
        train_data = dset.SVHN(root=data, split='train', download=True, transform=train_transform)
        test_data = dset.SVHN(root=data, split='test', download=True, transform=valid_transform)
    elif dataset == 'ImageNet16-120':
        from naslib.utils.DownsampledImageNet import ImageNet16
        train_transform, valid_transform = _data_transforms_ImageNet_16_120(config)
        data_folder = f'{data}/{dataset}'
        train_data = ImageNet16(root=data_folder, train=True, transform=train_transform, use_num_of_class_only=120)
        test_data = ImageNet16(root=data_folder, train=False, transform=valid_transform, use_num_of_class_only=120)
    else:
        raise ValueError("Unknown dataset: {}".format(dataset))

    # split the official training set into train/validation by train_portion
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(config.train_portion * num_train))

    # NOTE(review): worker_init_fn=np.random.seed(seed) calls np.random.seed
    # immediately and passes its return value (None) as the init fn — likely
    # unintended (harmless while num_workers=0); confirm before changing.
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=config.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=True, num_workers=0, worker_init_fn=np.random.seed(seed))

    valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=config.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=True, num_workers=0, worker_init_fn=np.random.seed(seed))

    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=config.batch_size, shuffle=False,
        pin_memory=True, num_workers=0, worker_init_fn=np.random.seed(seed))

    return train_queue, valid_queue, test_queue, train_transform, valid_transform
def _data_transforms_cifar10(args):
    """Return (train, valid) torchvision transforms for CIFAR-10.

    Train: random crop + flip + normalize (+ optional Cutout); valid: normalize only.
    """
    CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
    CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length,
                                                 args.cutout_prob))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    return train_transform, valid_transform
def _data_transforms_svhn(args):
    """Return (train, valid) torchvision transforms for SVHN (same recipe as CIFAR-10)."""
    SVHN_MEAN = [0.4377, 0.4438, 0.4728]
    SVHN_STD = [0.1980, 0.2010, 0.1970]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(SVHN_MEAN, SVHN_STD),
    ])
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length,
                                                 args.cutout_prob))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(SVHN_MEAN, SVHN_STD),
    ])
    return train_transform, valid_transform
def _data_transforms_cifar100(args):
    """Return (train, valid) torchvision transforms for CIFAR-100."""
    CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
    CIFAR_STD = [0.2673, 0.2564, 0.2762]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length,
                                                 args.cutout_prob))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
    ])
    return train_transform, valid_transform
def _data_transforms_ImageNet_16_120(args):
    """Return (train, valid) transforms for the 16x16 downsampled ImageNet-16-120."""
    IMAGENET16_MEAN = [x / 255 for x in [122.68, 116.66, 104.01]]
    IMAGENET16_STD = [x / 255 for x in [63.22, 61.26 , 65.09]]

    train_transform = transforms.Compose([
        # 16x16 images: crop padding is scaled down accordingly (2 vs 4)
        transforms.RandomCrop(16, padding=2),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET16_MEAN, IMAGENET16_STD),
    ])
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length,
                                                 args.cutout_prob))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET16_MEAN, IMAGENET16_STD),
    ])
    return train_transform, valid_transform
class TensorDatasetWithTrans(Dataset):
    """
    TensorDataset with support of transforms.

    `tensors` is a sequence of tensors sharing the same first dimension;
    an optional `transform` is applied to the first tensor's samples only.
    """

    def __init__(self, tensors, transform=None):
        first_len = tensors[0].size(0)
        assert all(t.size(0) == first_len for t in tensors)
        self.tensors = tensors
        self.transform = transform

    def __getitem__(self, index):
        sample = self.tensors[0][index]
        if self.transform:
            sample = self.transform(sample)
        label = self.tensors[1][index]
        return sample, label

    def __len__(self):
        return self.tensors[0].size(0)
def set_seed(seed):
    """
    Seed every random number generator used (python random, numpy, torch),
    and configure cuDNN for deterministic behaviour when CUDA is present.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
        # deterministic cuDNN: disable autotuner, force deterministic kernels
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
def get_last_checkpoint(config, search=True):
    """
    Finds the latest checkpoint in the experiment directory.

    Args:
        config (AttrDict): The config from config file.
        search (bool): Search or evaluation checkpoint

    Returns:
        (str): The path to the latest checkpoint file, or "" if it cannot be
        determined (e.g. no `last_checkpoint` file has been written yet).
    """
    try:
        path = os.path.join(config.save, "search" if search else "eval", "last_checkpoint")
        with open(path, 'r') as f:
            checkpoint_name = f.readline()
        return os.path.join(config.save, "search" if search else "eval", checkpoint_name)
    except Exception:
        # best effort: a missing/unreadable file just means "no checkpoint yet".
        # `except Exception` (instead of the old bare `except:`) keeps that
        # behaviour while letting KeyboardInterrupt/SystemExit propagate.
        return ""
def accuracy(output, target, topk=(1,)):
    """
    Compute top-k accuracies (in percent) given softmax/logit output and
    integer class targets. Returns one tensor per requested k.
    """
    max_k = max(topk)
    num_samples = target.size(0)

    # rows: rank (1..max_k), columns: samples; True where prediction == target
    _, top_pred = output.topk(max_k, 1, True, True)
    top_pred = top_pred.t()
    hits = top_pred.eq(target.view(1, -1).expand_as(top_pred))

    return [hits[:k].reshape(-1).float().sum(0).mul_(100.0 / num_samples)
            for k in topk]
def count_parameters_in_MB(model):
    """
    Returns the number of model parameters in millions ("MB" as used in the
    NAS literature), excluding any auxiliary-head parameters.
    """
    # builtin sum instead of np.sum: calling np.sum on a generator is
    # deprecated in numpy and falls back to the builtin anyway
    return sum(np.prod(v.size()) for name, v in model.named_parameters() if
               "auxiliary" not in name) / 1e6
def log_args(args):
    """
    Log the args in a nice way.
    """
    # pad each name with dots so the values line up in a ~50-char column
    for arg, val in args.items():
        logger.info(arg + '.' * (50 - len(arg) - len(str(val))) + str(val))
def create_exp_dir(path):
    """
    Create the experiment directories.
    """
    # exist_ok guards against a concurrent process creating it between the
    # exists() check and makedirs()
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
    logger.info('Experiment dir : {}'.format(path))
def cross_validation(xtrain, ytrain, predictor, split_indices, score_metric='kendalltau'):
    """
    Evaluate a predictor by cross validation.

    Args:
        xtrain, ytrain: the full training data.
        predictor: object exposing fit(x, y) and query(x).
        split_indices: iterable of (train_indices, validation_indices) pairs,
            e.g. from generate_kfold().
        score_metric: one of 'pearson', 'mae', 'rmse', 'spearman',
            'kendalltau', 'kt_2dec', 'kt_1dec'.

    Returns:
        The mean validation score over all folds.
    """
    validation_score = []

    for train_indices, validation_indices in split_indices:
        xtrain_i = [xtrain[j] for j in train_indices]
        ytrain_i = [ytrain[j] for j in train_indices]
        # bugfix: the validation fold must be indexed with validation_indices
        # (it previously reused train_indices, so the predictor was scored on
        # its own training data)
        xval_i = [xtrain[j] for j in validation_indices]
        yval_i = [ytrain[j] for j in validation_indices]

        predictor.fit(xtrain_i, ytrain_i)
        ypred_i = predictor.query(xval_i)
        # If the predictor is an ensemble, take the mean
        if len(ypred_i.shape) > 1:
            ypred_i = np.mean(ypred_i, axis=0)

        # use Pearson correlation to be the metric -> maximise Pearson correlation
        if score_metric == 'pearson':
            score_i = np.abs(np.corrcoef(yval_i, ypred_i)[1, 0])
        elif score_metric == 'mae':
            score_i = np.mean(abs(ypred_i - yval_i))
        elif score_metric == 'rmse':
            score_i = metrics.mean_squared_error(yval_i, ypred_i, squared=False)
        elif score_metric == 'spearman':
            score_i = stats.spearmanr(yval_i, ypred_i)[0]
        elif score_metric == 'kendalltau':
            score_i = stats.kendalltau(yval_i, ypred_i)[0]
        elif score_metric == 'kt_2dec':
            score_i = stats.kendalltau(yval_i, np.round(ypred_i, decimals=2))[0]
        elif score_metric == 'kt_1dec':
            score_i = stats.kendalltau(yval_i, np.round(ypred_i, decimals=1))[0]

        validation_score.append(score_i)

    return np.mean(validation_score)
def generate_kfold(n, k):
    '''
    Split range(n) into k cross-validation folds.

    Input:
        n: number of training examples
        k: number of folds
    Returns:
        kfold_indices: a list of len k. Each entry takes the form
        (training indices, validation indices)
    '''
    assert k >= 2

    indices = np.arange(n)
    fold_size = n // k
    # the first k-1 folds hold fold_size entries; the last absorbs the remainder
    folds = [indices[i * fold_size:(i + 1) * fold_size] for i in range(k - 1)]
    folds.append(indices[(k - 1) * fold_size:])

    kfold_indices = []
    for i, validation_fold in enumerate(folds):
        training_folds = [fold for j, fold in enumerate(folds) if j != i]
        kfold_indices.append((np.concatenate(training_folds), validation_fold))
    return kfold_indices
class AttrDict(dict):
    """A dict whose items are also reachable as attributes (d.key == d['key'])."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # alias the attribute namespace to the mapping itself, so attribute
        # writes and item writes stay in sync
        self.__dict__ = self
class AverageMeterGroup:
    """Average meter group for multiple average meters, ported from Naszilla repo."""

    def __init__(self):
        # name -> NamedAverageMeter, in insertion order
        self.meters = OrderedDict()

    def update(self, data, n=1):
        # lazily create a meter per key the first time it is seen
        for k, v in data.items():
            if k not in self.meters:
                self.meters[k] = NamedAverageMeter(k, ":4f")
            self.meters[k].update(v, n=n)

    def __getattr__(self, item):
        # allow group.loss as an alias for group.meters['loss']
        return self.meters[item]

    def __getitem__(self, item):
        return self.meters[item]

    def __str__(self):
        return " ".join(str(v) for v in self.meters.values())

    def summary(self):
        return " ".join(v.summary() for v in self.meters.values())
class NamedAverageMeter:
    """Computes and stores the average and current value, ported from naszilla repo"""

    def __init__(self, name, fmt=':f'):
        """
        Initialization of AverageMeter

        Parameters
        ----------
        name : str
            Name to display.
        fmt : str
            Format string to print the values.
        """
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)

    def summary(self):
        return ('{name}: {avg' + self.fmt + '}').format(**self.__dict__)
class AverageMeter(object):
    """Minimal running-average accumulator (observations weighted by count)."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero the running statistics."""
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        """Add `val` observed `n` times and refresh the mean."""
        self.sum += val * n
        self.cnt += n
        self.avg = self.sum / self.cnt
class Cutout(object):
    """Randomly zero out a square patch of an image tensor (data augmentation)."""

    def __init__(self, length, prob=1.0):
        # length: side length of the square cut-out region (pixels)
        # prob: probability of applying the cut-out to a given image
        self.length = length
        self.prob = prob

    def __call__(self, img):
        # img: CxHxW tensor; returns the same tensor, possibly with a zeroed patch.
        # NOTE: the multiply below is in-place — the input tensor is modified.
        if np.random.binomial(1, self.prob):
            h, w = img.size(1), img.size(2)
            mask = np.ones((h, w), np.float32)
            # pick a random centre; the patch is clipped at the image borders
            y = np.random.randint(h)
            x = np.random.randint(w)

            y1 = np.clip(y - self.length // 2, 0, h)
            y2 = np.clip(y + self.length // 2, 0, h)
            x1 = np.clip(x - self.length // 2, 0, w)
            x2 = np.clip(x + self.length // 2, 0, w)

            mask[y1: y2, x1: x2] = 0.
            mask = torch.from_numpy(mask)
            mask = mask.expand_as(img)
            img *= mask
        return img
from typing import Any, Dict, Iterable, List, NamedTuple, Optional, Tuple
from fvcore.common.file_io import PathManager
import os
class Checkpointer(fvCheckpointer):
    """fvcore Checkpointer with a more tolerant `load`: resolves non-file paths
    through PathManager and skips (rather than aborts on) checkpointables that
    fail to restore."""

    def load(self, path: str, checkpointables: Optional[List[str]] = None) -> object:
        """
        Load from the given checkpoint. When path points to network file, this
        function has to be called on all ranks.

        Args:
            path (str): path or url to the checkpoint. If empty, will not load
                anything.
            checkpointables (list): List of checkpointable names to load. If not
                specified (None), will load all the possible checkpointables.
        Returns:
            dict:
                extra data loaded from the checkpoint that has not been
                processed. For example, those saved with
                :meth:`.save(**extra_data)`.
        """
        if not path:
            # no checkpoint provided
            self.logger.info("No checkpoint found. Initializing model from scratch")
            return {}
        self.logger.info("Loading checkpoint from {}".format(path))
        if not os.path.isfile(path):
            path = PathManager.get_local_path(path)
            assert os.path.isfile(path), "Checkpoint {} not found!".format(path)

        checkpoint = self._load_file(path)
        incompatible = self._load_model(checkpoint)
        if (
            incompatible is not None
        ):  # handle some existing subclasses that returns None
            self._log_incompatible_keys(incompatible)

        for key in self.checkpointables if checkpointables is None else checkpointables:
            if key in checkpoint:  # pyre-ignore
                self.logger.info("Loading {} from {}".format(key, path))
                obj = self.checkpointables[key]
                try:
                    obj.load_state_dict(checkpoint.pop(key))  # pyre-ignore
                except Exception:
                    # was a bare `except: print("exception loading")`: log the
                    # full traceback instead, and let KeyboardInterrupt /
                    # SystemExit propagate
                    self.logger.exception(
                        "Unable to load checkpointable '%s' from %s", key, path
                    )

        # return any further checkpoint data
        return checkpoint
| 35.10089 | 171 | 0.627103 |
320d7f5e4d58711563baf620fbe738d9bf282f26 | 685 | py | Python | examples/modes/occurences.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | examples/modes/occurences.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | examples/modes/occurences.py | haesleinhuepf/pyqode.core | 88b9bab081fd580d4de86f3d926a9f0b19146d28 | [
"MIT"
] | null | null | null | """
Minimal example showing the use of the OccurrencesHighlighterMode.
"""
import logging
logging.basicConfig(level=logging.DEBUG)
import sys
from qtpy import QtWidgets
from pyqode.core.api import CodeEdit
from pyqode.core.backend import server
from pyqode.core.modes import OccurrencesHighlighterMode
if __name__ == '__main__':
    # Minimal demo window for OccurrencesHighlighterMode.
    app = QtWidgets.QApplication(sys.argv)
    editor = CodeEdit()
    # start the pyqode backend process using the stock server module
    editor.backend.start(server.__file__)
    editor.resize(800, 600)
    # modes.append returns the installed mode instance; printed for the demo
    print(editor.modes.append(OccurrencesHighlighterMode()))
    editor.setPlainText(
        'mode\nmode\nmodes\npanel\nmode', '', '')
    editor.show()
    app.exec_()
    # explicit teardown so the backend process is stopped deterministically
    editor.close()
    del editor
    del app
b7640f19cb64a2316b6dcd152216b9ba51ac393a | 2,087 | py | Python | tests/test_openeye_patterns.py | flc/chem-fingerprints | 8c1b27374355f08105c5332db376b3428b6e30b2 | [
"MIT"
] | null | null | null | tests/test_openeye_patterns.py | flc/chem-fingerprints | 8c1b27374355f08105c5332db376b3428b6e30b2 | [
"MIT"
] | null | null | null | tests/test_openeye_patterns.py | flc/chem-fingerprints | 8c1b27374355f08105c5332db376b3428b6e30b2 | [
"MIT"
] | null | null | null | import sys
import unittest2
import support
try:
from openeye.oechem import OEGraphMol, OEParseSmiles
skip_oechem = False
except ImportError:
skip_oechem = support.can_skip("oe")
else:
from chemfp import openeye_patterns
import test_patterns
def parse_smiles(smiles):
    """Return a new OEGraphMol parsed from the given SMILES string."""
    molecule = OEGraphMol()
    OEParseSmiles(molecule, smiles)
    return molecule
def _count(it):
return sum(1 for item in it)
class ReferenceMixin(object):
    """Shared checks for pattern matchers.

    Concrete subclasses must define ``reference_class`` (matcher factory
    taking a max count), ``reference_cases`` (list of (smiles, expected
    count) pairs) and ``reference_limit`` (largest useful max count).
    """

    def test_reference_data_set(self):
        # cap the matcher's max count at the largest expected value
        largest = min(self.reference_limit, max(v for (k, v) in self.reference_cases))
        matcher = self.reference_class(largest)
        for (smiles, expected) in self.reference_cases:
            mol = parse_smiles(smiles)
            # SingleMatch is compared against the truthiness of the count
            self.assertEquals(matcher.SingleMatch(mol), bool(expected), smiles)
            expected = min(expected, largest)
            self.assertGreaterEqual(_count(matcher.Match(mol)), expected, smiles)

    def test_match_limit(self):
        # verify the matcher honours its max-count construction argument
        largest = min(5, self.reference_limit)
        for max_count in range(1, largest + 1):
            matcher = self.reference_class(max_count)
            for (smiles, expected) in self.reference_cases:
                mol = parse_smiles(smiles)
                expected = min(expected, max_count)
                self.assertGreaterEqual(_count(matcher.Match(mol)), expected, smiles)
class TestHydrogenMatcher(ReferenceMixin, unittest2.TestCase):
    # class attributes are only defined when OEChem is importable;
    # otherwise the whole case is skipped below
    if not skip_oechem:
        reference_class = openeye_patterns.HydrogenMatcher
        reference_cases = test_patterns.hydrogen_test_cases
        reference_limit = 100

# skip the entire TestCase when the OEChem toolkit is unavailable
TestHydrogenMatcher = unittest2.skipIf(skip_oechem, "OEChem not installed")(
    TestHydrogenMatcher)
class TestAromaticRingMatcher(ReferenceMixin, unittest2.TestCase):
    # class attributes are only defined when OEChem is importable;
    # otherwise the whole case is skipped below
    if not skip_oechem:
        reference_class = openeye_patterns.AromaticRings
        reference_cases = test_patterns.aromatic_ring_cases
        reference_limit = 2

# skip the entire TestCase when the OEChem toolkit is unavailable
TestAromaticRingMatcher = unittest2.skipIf(skip_oechem, "OEChem not installed")(
    TestAromaticRingMatcher)
if __name__ == "__main__":
    # allow running this module directly as a test script
    unittest2.main()
| 31.149254 | 85 | 0.70436 |
4cecd069017434a0875d2f030fed50f2432fe587 | 923 | py | Python | web_dynamic/3-hbnb.py | Zevrov/AirBnB_clone_v3 | 92a1863e4395404da5b548d3cab10627610e64a9 | [
"MIT"
] | 1 | 2021-03-03T17:29:11.000Z | 2021-03-03T17:29:11.000Z | web_dynamic/3-hbnb.py | Zevrov/AirBnB_clone_v4 | 92a1863e4395404da5b548d3cab10627610e64a9 | [
"MIT"
] | null | null | null | web_dynamic/3-hbnb.py | Zevrov/AirBnB_clone_v4 | 92a1863e4395404da5b548d3cab10627610e64a9 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
'''Flask app to generate complete html page containing location/amenity
dropdown menus and rental listings
'''
from flask import Flask, render_template
from models import storage
import uuid
app = Flask('web_dynamic')
app.url_map.strict_slashes = False
@app.route('/3-hbnb')
def display_hbnb():
    '''Generate page with popdown menu of states/cities'''
    context = {
        'states': storage.all('State'),
        'amenities': storage.all('Amenity'),
        'places': storage.all('Place'),
        # fresh UUID per request; used by the template as a cache buster
        'cached_id': uuid.uuid4(),
    }
    return render_template('3-hbnb.html', **context)
@app.teardown_appcontext
def teardown_db(*args, **kwargs):
    '''Release the storage engine at the end of each request context.'''
    # close the DB session / file storage so each request starts fresh
    storage.close()
if __name__ == '__main__':
    # listen on all interfaces so the page is reachable from outside the host
    app.run(host='0.0.0.0', port=5000)
| 27.147059 | 71 | 0.63597 |
4be8e0c7ad0316d949bf38c07cfc57aae6cf7325 | 505 | py | Python | project/settings_production.py | pmoran13800/rhgamestation-manager | dd5ca1190ae92455fce10a3475a9a14e684024d8 | [
"MIT"
] | null | null | null | project/settings_production.py | pmoran13800/rhgamestation-manager | dd5ca1190ae92455fce10a3475a9a14e684024d8 | [
"MIT"
] | null | null | null | project/settings_production.py | pmoran13800/rhgamestation-manager | dd5ca1190ae92455fce10a3475a9a14e684024d8 | [
"MIT"
] | null | null | null | from .settings import *
# Update SITE infos to use the common port 80 to publish the webapp
SITE_FIXED = {
    'name': "RH Gamestation Manager",
    # If 'None' find the ip automatically. Use a string to define another ip/hostname
    'ip': None,
    # If 'None' no port is added to hostname, so the server has to be reachable from port 80
    'port': None,
}

# Production path to the RHGamestation logs file
RHGAMESTATION_LOGFILE_PATH = "/root/rhgamestation.log"

# Use packaged (pre-built) assets in production
ASSETS_PACKAGED = True
| 33.666667 | 107 | 0.734653 |
ddbafa3d2554a4cf863210c9daba66ea2da30499 | 102,965 | py | Python | cvpysdk/schedules.py | Jayesh-Jain/SDK | 5fe5130b1eeacad9944ba34714e583c6c743482f | [
"Apache-2.0"
] | null | null | null | cvpysdk/schedules.py | Jayesh-Jain/SDK | 5fe5130b1eeacad9944ba34714e583c6c743482f | [
"Apache-2.0"
] | null | null | null | cvpysdk/schedules.py | Jayesh-Jain/SDK | 5fe5130b1eeacad9944ba34714e583c6c743482f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""Main file for performing schedule related operations for client/agent/backupset/subclient.
SchedulePattern: Class for creating the necessary pattern for a schedule creation/modification
SchedulePattern:
__init__(class_object) -- initialise object of the SchedulePattern
class
_time_converter(_time_string, time_format) -- converts utc to epoch and vice versa
_pattern_json(pattern_option_dict) -- forms the pattern json based on the
dict provided
_one_time(pattern_dict) -- sets the one time schedule pattern
_daily(pattern_dict) -- sets the daily schedule pattern
_weekly(pattern_dict) -- sets the weekly schedule pattern
_monthly(pattern_dict) -- sets the monthly schedule pattern
_monthly_relative(pattern_dict) -- set the monthly_relative schedule pattern
_yearly(pattern_dict) -- sets the yearly schedule pattern
_yearly_relative(pattern_dict) -- sets the yearly_relative schedule pattern
_continuous(pattern_dict) -- sets the continuous schedule pattern
_automatic(pattern_dict) -- sets the automatic schedule pattern
_after_job_completes(pattern_dict) -- sets the after job completes schedule pattern
create_schedule_pattern(pattern_dict) -- creates a schedule pattern for the user
given pattern
create_schedule(task_req,pattern_dict) -- creates a scheduling request from the
pattern provided
Schedules: Initializes instance of all schedules for a commcell entity.
Schedules:
__init__(class_object) -- initialise object of the Schedules class
__str__() -- string of all schedules associated with the commcell entity
__repr__() -- returns the string for the instance of the Schedules class
_get_sch_id_from_task_id() -- gets the schedule id from the provided task id
_get_schedule_id() -- gets the schedule if with the provided inputs
_get_schedules() -- gets all the schedules associated with the commcell entity
has_schedule(schedule_name) -- checks if schedule exists for the commcell entity or not
delete(schedule_name) -- deletes the given schedule
refresh() -- refresh the schedules associated with the commcell entity
Schedule: Class for performing operations for a specific Schedule.
Schedule:
__init__(class_object) -- initialise object of the Schedule class
_get_schedule_properties -- get all schedule properties
schedule_freq_type -- gets the schedule frequency type
one_time -- gets the one time schedule pattern dict
one_time(pattern_dict) -- sets the one time schedule pattern
daily -- gets the daily schedule pattern
daily(pattern_dict) -- sets the daily schedule pattern
weekly -- gets the weekly schedule pattern
weekly(pattern_dict) -- sets the weekly schedule pattern
monthly -- gets the monthly schedule pattern
monthly(pattern_dict) -- gets the monthly schedule pattern
monthly_relative -- gets the monthly_relative schedule pattern
monthly_relative(pattern_dict) -- set the monthly_relative schedule pattern
yearly -- gets the yearly schedule pattern
yearly(pattern_dict) -- sets the yearly schedule pattern
yearly_relative -- gets the yearly_relative schedule pattern
yearly_relative(pattern_dict) -- sets the yearly_relative schedule pattern
continuous -- gets the continuous schedule pattern
continuous(pattern_dict) -- sets the continuous schedule pattern
automatic -- gets the automatic schedule pattern
automatic(pattern_dict) -- sets the automatic schedule pattern
active_start_date -- gets the start date of schedule pattern
active_start_date(active_start_date) -- sets the start date of schedule pattern
active_start_time -- gets the start time of schedule pattern
active_start_time(active_start_time) -- sets the start time of schedule pattern
enable() -- enables the schedule
disable() -- disables the schedule
run_now() -- Triggers the schedule immediately
_modify_task_properties -- modifies the schedule properties
based on the setters
_process_schedule_update_response -- processes the response and
gives the error_code and message
refresh() -- refresh the properties of the schedule
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import datetime
from past.builtins import basestring
import calendar
from .exception import SDKException
class OperationType:
    """Operation types supported when fetching schedules of a particular kind."""

    # report schedules
    REPORTS = 'Reports'
    # data-aging schedules
    DATA_AGING = 'DATA_AGING'
class SchedulePattern:
    """Class for getting the schedule pattern"""

    # weekday name keyed by its bitmask weight; _weekly sums the weights of
    # the selected days to build freq_interval
    _days_to_run = {
        2: 'monday',
        4: 'tuesday',
        8: 'wednesday',
        16: 'thursday',
        32: 'friday',
        64: 'saturday',
        1: 'sunday',
    }

    # relative weekday codes used by monthly_relative / yearly_relative
    _relative_weekday = {
        1: 'sunday',
        2: 'monday',
        3: 'tuesday',
        4: 'wednesday',
        5: 'thursday',
        6: 'friday',
        7: 'saturday',
        8: 'days',
        9: 'weekday',
        10: 'weekend_day'
    }

    # relative ordinal codes ('first' Monday, 'last' Friday, ...)
    _relative_day = {
        1: 'first',
        2: 'second',
        3: 'third',
        4: 'fourth',
        5: 'last'
    }

    def __init__(self, schedule_pattern=None):
        """initialise object of the SchedulePattern class

        Args:
            schedule_pattern (dict) -- existing pattern to edit; when None a
                                       fresh pattern is started ('Init' marks
                                       that no frequency has been chosen yet)
        """
        if not schedule_pattern:
            self._pattern = {'freq_type': 'Init'}
        else:
            self._pattern = schedule_pattern
@staticmethod
def _time_converter(_time, time_format, utc_to_epoch=True):
"""
converts a time string to epoch time based on the time format provided
Args:
_time (str/int) -- UTC time or EPOCH time
time_format (str) -- format of the time you need process
Raises:
SDKException if time format is wrong
"""
try:
if utc_to_epoch:
date_time = datetime.strptime(_time, time_format)
return int(
(date_time - datetime.utcfromtimestamp(0)).total_seconds())
utc_time = datetime.utcfromtimestamp(_time)
return utc_time.strftime(time_format)
except ValueError:
raise SDKException(
'Schedules',
'102',
"Incorrect data format, should be {0}".format(time_format))
    def _pattern_json(self, pattern_option_dict):
        """
        forms a pattern json and set the class variable

        Args:
            pattern_option_dict (dict) -- dictionary with the parameters needed for
                                          forming the corresponding pattern
                {'freq_type',
                 'active_start_date',
                 'active_start_time',
                 'freq_recurrence_factor',
                 'freq_interval'}
        """
        # Same freq_type (or none given): merge the options into the existing
        # pattern. Different freq_type: rebuild the pattern from scratch.
        if ('freq_type' not in pattern_option_dict) or (
                pattern_option_dict['freq_type'] == self._pattern['freq_type']):
            for key, value in pattern_option_dict.items():
                if key in ('active_start_date', 'active_end_date'):
                    # dates are converted to midnight-UTC epoch seconds
                    self._pattern[key] = self._time_converter(
                        pattern_option_dict[key] + ' 00:00', '%m/%d/%Y %H:%M')
                elif key in ('active_start_time', 'active_end_time'):
                    # times are stored as seconds since midnight (epoch day 0)
                    self._pattern[key] = self._time_converter(
                        '1/1/1970 ' + pattern_option_dict[key], '%m/%d/%Y %H:%M')
                else:
                    self._pattern[key] = value
        else:
            # one-time schedules default to "now"; recurring ones to 09:00
            if pattern_option_dict['freq_type'] == 'One_Time':
                default_start_time = str(datetime.now().strftime('%H:%M'))
            else:
                default_start_time = '09:00'
            _active_start_date = pattern_option_dict.get(
                'active_start_date', str(datetime.now().strftime('%m/%d/%Y')))
            _active_start_time = pattern_option_dict.get(
                'active_start_time', default_start_time)
            self._pattern = {
                'freq_type': pattern_option_dict['freq_type'],
                'active_start_date': self._time_converter(
                    _active_start_date + ' 00:00',
                    '%m/%d/%Y %H:%M'),
                'active_start_time': self._time_converter(
                    '1/1/1970 ' + _active_start_time,
                    '%m/%d/%Y %H:%M'),
                'freq_recurrence_factor': pattern_option_dict.get(
                    'freq_recurrence_factor',
                    0),
                'freq_interval': pattern_option_dict.get(
                    'freq_interval',
                    0),
                'freq_relative_interval': pattern_option_dict.get(
                    'freq_relative_interval',
                    0),
                'timeZone': {
                    'TimeZoneName': pattern_option_dict.get(
                        'time_zone',
                        '')}}

        # optional extras, applied on top of either branch above
        if "active_end_date" in pattern_option_dict:
            self._pattern["active_end_date"] = self._time_converter(
                pattern_option_dict["active_end_date"] + ' 00:00', '%m/%d/%Y %H:%M')

        if "exception_dates" in pattern_option_dict:
            # exception dates travel as a single day-of-month bitmask
            self._pattern["repeatPattern"] = [{"exception": True,
                                               "onDayNumber": self.exception_dates(
                                                   pattern_option_dict["exception_dates"])}
                                              ]

        if "end_after" in pattern_option_dict:
            self._pattern["active_end_occurence"] = pattern_option_dict["end_after"]

        if "repeat_every" in pattern_option_dict:
            self._pattern.update(self._repeat_pattern(pattern_option_dict))
@staticmethod
def _repeat_pattern(pattern_dict):
"""
Forms repeat pattern json based on the pattern dict provided
Args:
pattern_dict (dict) -- Dictionary containing repeat_every and repeat_end parameters
{
"repeat_every": "08:00",
"repeat_end": "23:00"
}
Returns:
Dict with subdayinterval and endtime information to plug into the pattern json
"""
_repeat_time = datetime.strptime(
pattern_dict.get(
"repeat_every", "08:00"), "%H:%M")
_freq_subday = (_repeat_time.hour * 3600 + _repeat_time.minute * 60)
return {'freq_subday_interval': _freq_subday,
'active_end_time': SchedulePattern._time_converter(
'1/1/1970 ' + pattern_dict["repeat_end"], '%m/%d/%Y %H:%M')
}
def _one_time(self, pattern_dict):
"""
sets the pattern type as one time with the parameters provided,
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictonary with the schedule pattern
{
"active_start_date": date_in_%m/%d/%y (str),
"active_start_time": time_in_%h:%m (str)
}
"""
pattern_dict['freq_type'] = 1
self._pattern_json(pattern_dict)
def _daily(self, pattern_dict):
"""
sets the pattern type as daily with the parameters provided
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"repeat_days": days_to_repeat (int)
}
"""
_repeat_days = 1
if self._pattern['freq_type'] == 4:
_repeat_days = self._pattern['freq_recurrence_factor']
_freq_recurrence_factor = pattern_dict.get('repeat_days', _repeat_days)
pattern_dict['freq_type'] = 4
pattern_dict['freq_recurrence_factor'] = 1 if not isinstance(
_freq_recurrence_factor, int) else _freq_recurrence_factor
self._pattern_json(pattern_dict)
def _weekly(self, pattern_dict):
"""
sets the pattern type as weekly with the parameters provided
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"repeat_weeks": weeks_to_repeat (int)
"weekdays": list of weekdays ['Monday','Tuesday']
}
"""
try:
_repeat_weeks = 1
_freq_interval = 0
if self._pattern['freq_type'] == 8:
_repeat_weeks = self._pattern['freq_recurrence_factor']
_freq_interval = self._pattern['freq_interval']
pattern_dict['freq_type'] = 8
# encoding
if 'weekdays' in pattern_dict:
_freq_interval_list = pattern_dict['weekdays']
for weekday in _freq_interval_list:
_freq_interval += (
list(
self._days_to_run.keys())[
list(
self._days_to_run.values()).index(
weekday.lower())])
elif _freq_interval == 0:
o_str = 'Weekdays need to be specified'
raise SDKException('Schedules', '102', o_str)
_freq_recurrence_factor = pattern_dict.get(
'_repeat_weeks', _repeat_weeks)
pattern_dict['freq_interval'] = _freq_interval
pattern_dict['freq_recurrence_factor'] = 1 if not isinstance(
_freq_recurrence_factor, int) else _freq_recurrence_factor
self._pattern_json(pattern_dict)
except ValueError:
raise SDKException('Schedules', '102',
"Incorrect weekday specified")
def _monthly(self, pattern_dict):
"""
sets the pattern type as monthly with the parameters provided
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"repeat_months": months_to_repeat (int)
"on_day": Day to run schedule (int)
}
"""
_repeat_months = 1
_on_day = 10
if self._pattern['freq_type'] == 16:
_repeat_months = self._pattern['freq_recurrence_factor']
_on_day = self._pattern['freq_interval']
_freq_recurrence_factor = pattern_dict.get(
'repeat_months', _repeat_months)
_freq_interval = pattern_dict.get('on_day', _on_day)
pattern_dict['freq_recurrence_factor'] = 1 if not isinstance(
_freq_recurrence_factor, int) else _freq_recurrence_factor
pattern_dict['freq_interval'] = 1 if not isinstance(
_freq_interval, int) else _freq_interval
pattern_dict['freq_type'] = 16
self._pattern_json(pattern_dict)
    def _monthly_relative(self, pattern_dict):
        """
        sets the pattern type as monthly_relative with the parameters provided
        send only required keys to change only those values

        Args:
            pattern_dict (dict) -- Dictionary with the schedule pattern
                {
                    "active_start_time": time_in_%H/%S (str),
                    "relative_time": relative day of the schedule (str) 'first',
                                                                        'second',..
                    "relative_weekday": Day to run schedule (str) 'sunday','monday'...
                    "repeat_months": months_to_repeat
                }
        """
        # defaults for a schedule that is not yet monthly_relative
        _freq_recurrence_factor = 1
        _freq_interval = 1
        _freq_relative_interval = 1
        try:
            if self._pattern['freq_type'] == 32:
                # already monthly_relative: carry forward current values
                _freq_recurrence_factor = self._pattern['freq_recurrence_factor']
                _freq_interval = self._pattern['freq_interval']
                _freq_relative_interval = self._pattern['freq_relative_interval']

            # reverse lookup: map the ordinal name back to its numeric code
            # (.index raises ValueError for unknown names -> SDKException below)
            if 'relative_time' in pattern_dict:
                _freq_relative_interval = (
                    list(
                        self._relative_day.keys())[
                        list(
                            self._relative_day.values()).index(
                            pattern_dict['relative_time'].lower())])

            # reverse lookup for the weekday code
            if 'relative_weekday' in pattern_dict:
                _freq_interval = (
                    list(
                        self._relative_weekday.keys())[
                        list(
                            self._relative_weekday.values()).index(
                            pattern_dict['relative_weekday'].lower())])

            _freq_recurrence_factor = pattern_dict.get(
                'repeat_months', _freq_recurrence_factor)
            pattern_dict['freq_recurrence_factor'] = 1 if not isinstance(
                _freq_recurrence_factor, int) else _freq_recurrence_factor
            pattern_dict['freq_interval'] = _freq_interval
            pattern_dict['freq_relative_interval'] = _freq_relative_interval
            pattern_dict['freq_type'] = 32
            self._pattern_json(pattern_dict)

        except ValueError as v_error:
            raise SDKException('Schedules', '102',
                               str(v_error))
def _yearly(self, pattern_dict):
"""
sets the pattern type as monthly with the parameters provided
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"on_month": month to run schedule (str) January, Febuary...
"on_day": Day to run schedule (int)
}
"""
try:
_freq_recurrence_factor = 1
_freq_interval = 10
if self._pattern['freq_type'] == 64:
_freq_recurrence_factor = self._pattern['freq_recurrence_factor']
_freq_interval = self._pattern['freq_interval']
if 'on_month' in pattern_dict:
_freq_recurrence_factor = list(
calendar.month_name).index(
pattern_dict['on_month'].title())
_freq_interval = pattern_dict.get('on_day', _freq_interval)
pattern_dict['freq_recurrence_factor'] = _freq_recurrence_factor
pattern_dict['freq_interval'] = 1 if not isinstance(
_freq_interval, int) else _freq_interval
pattern_dict['freq_type'] = 64
self._pattern_json(pattern_dict)
except ValueError as ve:
raise SDKException('Schedules', '102',
str(ve))
    def _yearly_relative(self, pattern_dict):
        """
        sets the pattern type as yearly_relative with the parameters provided
        send only required keys to change only those values

        Args:
            pattern_dict (dict) -- Dictionary with the schedule pattern
                {
                    "active_start_time": time_in_%H/%S (str),
                    "relative_time": relative day of the schedule (str) 'first',
                                                                        'second',..
                    "relative_weekday": Day to run schedule (str) 'sunday','monday'...
                    "on_month": month to run the schedule(str) January, February...
                }
        """
        # defaults for a schedule that is not yet yearly_relative
        _freq_recurrence_factor = 1
        _freq_interval = 1
        _freq_relative_interval = 1
        try:
            if self._pattern['freq_type'] == 128:
                # already yearly_relative: carry forward current values
                _freq_recurrence_factor = self._pattern['freq_recurrence_factor']
                _freq_interval = self._pattern['freq_interval']
                _freq_relative_interval = self._pattern['freq_relative_interval']

            # reverse lookup: ordinal name -> numeric code
            # (.index raises ValueError for unknown names -> SDKException below)
            if 'relative_time' in pattern_dict:
                _freq_relative_interval = (
                    list(
                        self._relative_day.keys())[
                        list(
                            self._relative_day.values()).index(
                            pattern_dict['relative_time'].lower())])

            # reverse lookup for the weekday code
            if 'relative_weekday' in pattern_dict:
                _freq_interval = (
                    list(
                        self._relative_weekday.keys())[
                        list(
                            self._relative_weekday.values()).index(
                            pattern_dict['relative_weekday'].lower())])

            # month name -> 1..12
            if 'on_month' in pattern_dict:
                _freq_recurrence_factor = list(
                    calendar.month_name).index(
                    pattern_dict['on_month'].title())

            pattern_dict['freq_recurrence_factor'] = _freq_recurrence_factor
            pattern_dict['freq_interval'] = _freq_interval
            pattern_dict['freq_relative_interval'] = _freq_relative_interval
            pattern_dict['freq_type'] = 128
            self._pattern_json(pattern_dict)

        except ValueError as ve:
            raise SDKException('Schedules', '102',
                               str(ve))
def _continuous(self, pattern_dict):
"""
sets the pattern type as one time with the parameters provided,
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
job_interval: interval between jobs in mins(int)
}
"""
_freq_recurrence_factor = pattern_dict.get('job_interval', 30)
pattern_dict['freq_interval'] = 30 if not isinstance(
_freq_recurrence_factor, int) else _freq_recurrence_factor
pattern_dict['freq_type'] = 4096
self._pattern_json(pattern_dict)
    def _automatic(self, pattern_dict):
        """
        sets the pattern type as automatic with the parameters provided,
        send only required keys to change only those values
        (the docstring previously said "one time" -- a copy/paste error)

        Args:
            pattern_dict (dict) -- Dictionary with the schedule pattern
                {
                    min_interval_hours: minimum hours between jobs(int)
                    min_interval_minutes: minimum minutes between jobs(int)
                    max_interval_hours: maximum hours between jobs(int)
                    max_interval_minutes: maximum minutes between jobs(int)
                    min_sync_interval_hours: minimum sync hours
                                                            between jobs(int)
                    min_sync_interval_minutes: minimum sync minutes
                                                            between jobs(int)
                    ignore_opwindow_past_maxinterval: (bool)
                    wired_network_connection: (bool)
                    min_network_bandwidth: (int) kbps
                    specific_network: (dict){ip_address:(str),subnet:(int)}
                    dont_use_metered_network: (bool)
                    ac_power: (bool)
                    stop_if_on_battery: (bool)
                    stop_sleep_if_runningjob: (bool)
                    cpu_utilization_below : (int)%
                    cpu_utilization_above : (int)%
                }
        """
        # Every entry prefers the caller-supplied key and falls back to the
        # value already present in self._pattern (or a hard default), so a
        # partial dict edits only the mentioned options.
        automatic_pattern = {
            "maxBackupInterval": pattern_dict.get("max_interval_hours",
                                                  self._pattern.get("maxBackupInterval", 72)),
            "ignoreOpWindowPastMaxInterval": pattern_dict.get("ignore_opwindow_past_maxinterval",
                                                              self._pattern.get(
                                                                  "ignoreOpWindowPastMaxInterval",
                                                                  False)),
            "minBackupIntervalMinutes": pattern_dict.get("min_interval_minutes",
                                                         self._pattern.get(
                                                             "minBackupIntervalMinutes", 15)),
            "maxBackupIntervalMinutes": pattern_dict.get("max_interval_minutes",
                                                         self._pattern.get(
                                                             "maxBackupIntervalMinutes", 0)),
            "minSyncInterval": pattern_dict.get("min_sync_interval_hours",
                                                self._pattern.get("minSyncInterval", 0)),
            "minBackupInterval": pattern_dict.get("min_interval_hours",
                                                  self._pattern.get("minBackupInterval", 0)),
            "minSyncIntervalMinutes": pattern_dict.get("min_sync_interval_minutes",
                                                       self._pattern.get("minSyncIntervalMinutes",
                                                                         2)),
            "stopIfOnBattery": {
                "enabled": pattern_dict.get("stop_if_on_battery",
                                            self._pattern.get("stopIfOnBattery",
                                                              {'enabled': False})['enabled'])
            },
            "acPower": {
                "enabled": pattern_dict.get("ac_power",
                                            self._pattern.get("acPower",
                                                              {'enabled': False})['enabled'])
            },
            # NOTE: "specfificNetwork" is misspelt but is the literal key the
            # server expects here -- do not "fix" the spelling
            "specfificNetwork": {
                "enabled": True if 'specific_network' in pattern_dict
                           else (self._pattern.get("specfificNetwork",
                                                   {'enabled': False})['enabled']),
                "ipAddress": {
                    "family": 32,
                    "address": pattern_dict.get('specific_network',
                                                {"ip_address": "0.0.0.0"})["ip_address"],
                    "subnet": pattern_dict.get('specific_network',
                                               {"subnet": 24})["subnet"],
                }
            },
            "stopSleepIfBackUp": {
                "enabled": pattern_dict.get("stop_sleep_if_runningjob",
                                            self._pattern.get("stopSleepIfBackUp",
                                                              {'enabled': False})['enabled'])
            },
            "emergencyBackup": {
                "emergencyBackupCommandName": "",
                "emergencyBackup": {
                    "enabled": False
                }
            },
            "cpuUtilization": {
                "enabled": True if 'cpu_utilization_below' in pattern_dict
                           else (self._pattern.get("cpuUtilization",
                                                   {'enabled': False})['enabled']),
                "threshold": pattern_dict.get("cpu_utilization_below",
                                              self._pattern.get("cpuUtilization",
                                                                {'threshold': 10})['threshold'])
            },
            "dontUseMeteredNetwork": {
                "enabled": pattern_dict.get("dont_use_metered_network",
                                            self._pattern.get("dontUseMeteredNetwork",
                                                              {'enabled': False})['enabled'])
            },
            "cpuUtilizationAbove": {
                "enabled": True if 'cpu_utilization_above' in pattern_dict
                           else (self._pattern.get("cpuUtilizationAbove",
                                                   {'enabled': False})['enabled']),
                "threshold": pattern_dict.get("cpu_utilization_above",
                                              self._pattern.get("cpuUtilizationAbove",
                                                                {'threshold': 10})['threshold'])
            },
            "wiredNetworkConnection": {
                "enabled": pattern_dict.get("wired_network_connection",
                                            self._pattern.get("wiredNetworkConnection",
                                                              {'enabled': False})['enabled'])
            },
            "minNetworkBandwidth": {
                "enabled": True if 'min_network_bandwidth' in pattern_dict
                           else (self._pattern.get("minNetworkBandwidth",
                                                   {'enabled': False})['enabled']),
                "threshold": pattern_dict.get("min_network_bandwidth",
                                              self._pattern.get("minNetworkBandwidth",
                                                                {'threshold': 128})['threshold'])
            },
            "sweepStartTime": pattern_dict.get("sweep_start_time",
                                               self._pattern.get("sweepStartTime", 3600)
                                               ),
            "useStorageSpaceFromMA": pattern_dict.get("use_storage_space_ma",
                                                      self._pattern.get("useStorageSpaceFromMA", False)
                                                      )
        }

        self._pattern = automatic_pattern
    def _after_job_completes(self, pattern_dict):
        """
        sets the pattern type as after job completes with the parameters provided,
        send only required keys to change only those values

        Args:
            pattern_dict (dict) -- Dictionary with the schedule pattern
                {
                    "active_start_date": date_in_%m/%d/%y (str),
                    "active_start_time": time_in_%H/%S (str),
                    "repeat_days": days_to_repeat (int)
                }
        """
        # unlike the other setters, freq_type here is a string resolved by
        # _pattern_json
        pattern_dict['freq_type'] = 'After_Job_Completes'
        # NOTE(review): the 4096 fallback for 'repeat_days' matches the
        # continuous freq_type constant -- looks like a copy/paste default;
        # confirm the intended value before relying on it
        pattern_dict['freq_recurrence_factor'] = pattern_dict.get('repeat_days', 4096)
        self._pattern_json(pattern_dict)
@staticmethod
def exception_dates(day_list):
"""
Provided a Scheduler version of exception as an on day number
Args:
day_list (list) -- List of exception dates [1,2,3]
Returns (int) -- on_day number for the pattern json
"""
on_day = 0
for value in day_list:
on_day |= (1 << (value - 1))
return on_day
def create_schedule_pattern(self, pattern_dict):
"""
calls the required type of schedule module and forms the pattern json
Args:
pattern_dict (Dict) --
freq_type is mandatory, all other fields specified below can be skipped and system
defaults will be set
for reference on pattern_dict check create_schedule
Returns:
pattern which can be plugged into the create or modify task request to
create or modify schedules
"""
if 'freq_type' not in pattern_dict:
raise SDKException('Schedules', '102',
"Frequency type is required to create pattern")
try:
getattr(
self,
'_' +
pattern_dict['freq_type'].lower())(pattern_dict)
return self._pattern
except AttributeError:
raise SDKException('Schedules', '102',
"freq_type specified is wrong")
    def create_schedule(self, task_req, pattern_dict, schedule_id=None):
        """
        returns a schedule task_req after including pattern

        Args:
            task_req: task_req for immediate job operation to be converted to a schedule

            pattern_dict: freq_type is mandatory, all other fields specified below can
                          be skipped and system defaults will be set

                with the same dict, pass
                time_zone: Time Zone Name (default is taken as CommServe Time Zone)
                Common Time Zones examples -- 'CommServe Time Zone', 'Client Time Zone', 'UTC'

                for one_time: {
                                "freq_type": 'one_time',
                                "active_start_date": date_in_%m/%d/%y (str),
                                "active_start_time": time_in_%h:%m (str)
                              }

                for daily: {
                             "freq_type": 'daily',
                             "active_start_time": time_in_%H/%S (str),
                             "repeat_days": days_to_repeat (int)
                           }

                for weekly: {
                              "freq_type": 'weekly',
                              "active_start_time": time_in_%H/%S (str),
                              "repeat_weeks": weeks_to_repeat (int)
                              "weekdays": list of weekdays ['Monday','Tuesday']
                            }

                for monthly: {
                               "freq_type": 'monthly',
                               "active_start_time": time_in_%H/%S (str),
                               "repeat_months": months_to_repeat (int)
                               "on_day": Day to run schedule (int)
                             }

                for monthly_relative: {
                                        "active_start_time": time_in_%H/%S (str),
                                        "relative_time": relative day of the schedule
                                            (str) 'first', 'second',..
                                        "relative_weekday": Day to run schedule
                                            (str) 'sunday','monday'...
                                        "repeat_months": months_to_repeat
                                      }

                for yearly: {
                              "active_start_time": time_in_%H/%S (str),
                              "on_month": month to run schedule (str) January, February...
                              "on_day": Day to run schedule (int)
                            }

                for yearly_relative: {
                                       "active_start_time": time_in_%H/%S (str),
                                       "relative_time": relative day of the schedule
                                           (str) 'first', 'second',..
                                       "relative_weekday": Day to run schedule
                                           (str) 'sunday','monday'...
                                       "on_month": month to run the schedule
                                           (str) January, February...
                                     }

                for continuous: {
                                  job_interval: interval between jobs in mins(int)
                                }

                for automatic: {
                                 min_interval_hours / min_interval_minutes,
                                 max_interval_hours / max_interval_minutes,
                                 min_sync_interval_hours / min_sync_interval_minutes,
                                 ignore_opwindow_past_maxinterval: (bool)
                                 wired_network_connection: (bool)
                                 min_network_bandwidth: (int) kbps
                                 specific_network: (dict){ip_address:(str),subnet:(int)}
                                 dont_use_metered_network: (bool)
                                 ac_power: (bool)
                                 stop_if_on_battery: (bool)
                                 stop_sleep_if_runningjob: (bool)
                                 cpu_utilization_below : (int)%
                                 cpu_utilization_above : (int)%
                               }

                for after_job_completes: {
                                           "freq_type": 'after_job_completes',
                                           "active_start_date": date_in_%m/%d/%y (str),
                                           "active_start_time": time_in_%H/%S (str),
                                           "repeat_days": days_to_repeat (int)
                                         }

                Sample usage inside an individual operation function: add a
                schedule_pattern parameter and, before the sdk make_request call:
                    if schedule_pattern:
                        request_json = SchedulePattern().create_schedule(
                            task_req, schedule_pattern)

            schedule_id: when given, only the matching subtask receives the
                         pattern; otherwise every subtask is renamed and patterned

        Returns:
            Schedule task request
        """
        _automatic_pattern = {}
        # automatic schedules split in two: a stub pattern (1024) on the
        # subtask plus the detailed options under commonOpts
        if pattern_dict["freq_type"] == 'automatic':
            _pattern = {"freq_type": 1024}
            _automatic_pattern = self.create_schedule_pattern(pattern_dict)
        else:
            _pattern = self.create_schedule_pattern(pattern_dict)

        _task_info = task_req["taskInfo"]
        if _task_info.get("task"):
            # taskType 2 == scheduled task
            _task_info["task"]["taskType"] = 2

        for subtask in _task_info['subTasks']:
            if schedule_id:
                # modifying an existing schedule: touch only the matching subtask
                if subtask["subTask"]['subTaskId'] != schedule_id:
                    continue
            else:
                subtask["subTask"]['subTaskName'] = pattern_dict.get(
                    'schedule_name', '')
            subtask["pattern"] = _pattern

            if pattern_dict["freq_type"] == 'automatic':
                if 'options' in subtask:
                    _task_options = subtask['options']
                    if 'commonOpts' in _task_options:
                        _task_options["commonOpts"]["automaticSchedulePattern"] = _automatic_pattern
                    else:
                        _task_options["commonOpts"] = \
                            {"automaticSchedulePattern": _automatic_pattern}

                    if 'run_synthetic_full' in pattern_dict:
                        synthetic_pattern = pattern_dict['run_synthetic_full']
                        # interval only applies to the 'every_x_days' mode
                        if synthetic_pattern == 'every_x_days':
                            synthetic_interval = pattern_dict.get(
                                'days_between_synthetic_full', 30)
                        else:
                            synthetic_interval = 30

                        _data_opt = {
                            'autoCopy': True,
                            'daysBetweenSyntheticBackup': synthetic_interval,
                            'useAutomaticIntervalForSyntheticFull': (
                                synthetic_pattern == 'extended_retention'),
                            'enableRunFullConsolidationBackup': (
                                synthetic_pattern == 'space_reclaim')
                        }

                        if 'backupOpts' in _task_options:
                            if 'dataOpt' in _task_options["backupOpts"]:
                                _task_options['backupOpts']['dataOpt'].update(_data_opt)
                            else:
                                _task_options['backupOpts']['dataOpt'] = _data_opt
                        else:
                            _task_options['backupOpts'] = {
                                'dataOpt': _data_opt
                            }
                else:
                    subtask['options'] = {
                        'commonOpts': {
                            'automaticSchedulePattern': _automatic_pattern}}

        task_req["taskInfo"] = _task_info
        return task_req
class Schedules:
    """Collection of the schedules associated with a Commcell entity.

    The entity may be a Commcell, Client, Agent, Instance, Backupset or
    Subclient object; the schedules are read from the matching REST endpoint.
    """

    def __init__(self, class_object, operation_type=None):
        """Initialise the Schedules class instance.

        Args:
            class_object (object) -- instance of client/agent/backupset/subclient/CommCell class

            operation_type        -- required when commcell object is passed
                                     refer OperationType class for supported op types

        Returns:
            object - instance of the Schedules class

        Raises:
            SDKException:
                if class object does not belong to any of the Client or Agent or Backupset or
                Subclient class
        """
        # imports inside the __init__ method definition to avoid cyclic imports
        from .commcell import Commcell
        from .client import Client
        from .agent import Agent
        from .backupset import Backupset
        from .subclient import Subclient
        from .instance import Instance

        self.class_object = class_object
        self._repr_str = ""

        # pick the service endpoint that matches the entity level
        if isinstance(class_object, Commcell):
            self._commcell_object = class_object
            if operation_type == OperationType.REPORTS:
                self._SCHEDULES = class_object._services['REPORT_SCHEDULES']
                self._repr_str = "Reports in Commcell: {0}".format(
                    class_object.commserv_name)
            elif operation_type == OperationType.DATA_AGING:
                self._SCHEDULES = class_object._services['OPTYPE_SCHEDULES'] % (
                    operation_type)
                self._repr_str = "Dataging in Commcell: {0}".format(
                    class_object.commserv_name)
            elif not operation_type:
                self._SCHEDULES = class_object._services['COMMCELL_SCHEDULES']
                self._repr_str = "Schedules in Commcell: {0}".format(
                    class_object.commserv_name)
            else:
                # unsupported operation type for a Commcell-level collection
                raise SDKException('Schedules', '103')

        elif isinstance(class_object, Client):
            self._SCHEDULES = class_object._commcell_object._services['CLIENT_SCHEDULES'] % (
                class_object.client_id)
            self._repr_str = "Client: {0}".format(class_object.client_name)
            self._commcell_object = class_object._commcell_object

        elif isinstance(class_object, Agent):
            self._SCHEDULES = class_object._commcell_object._services['AGENT_SCHEDULES'] % (
                class_object._client_object.client_id, class_object.agent_id)
            self._repr_str = "Agent: {0}".format(class_object.agent_name)
            self._commcell_object = class_object._commcell_object

        elif isinstance(class_object, Instance):
            self._SCHEDULES = class_object._commcell_object._services['INSTANCE_SCHEDULES'] % (
                class_object._agent_object._client_object.client_id,
                class_object._agent_object.agent_id,
                class_object.instance_id
            )
            self._repr_str = "Instance: {0}".format(
                class_object.instance_name)
            self._commcell_object = class_object._commcell_object

        elif isinstance(class_object, Backupset):
            self._SCHEDULES = class_object._commcell_object._services['BACKUPSET_SCHEDULES'] % (
                class_object._agent_object._client_object.client_id,
                class_object._agent_object.agent_id,
                class_object.backupset_id
            )
            self._repr_str = "Backupset: {0}".format(
                class_object.backupset_name)
            self._commcell_object = class_object._commcell_object

        elif isinstance(class_object, Subclient):
            self._SCHEDULES = class_object._commcell_object._services['SUBCLIENT_SCHEDULES'] % (
                class_object._backupset_object._agent_object._client_object.client_id,
                class_object._backupset_object._agent_object.agent_id,
                class_object._backupset_object.backupset_id,
                class_object.subclient_id
            )
            self._repr_str = "Subclient: {0}".format(
                class_object.subclient_name)
            self._commcell_object = class_object._commcell_object

        else:
            raise SDKException('Schedules', '101')

        self.schedules = None
        self.refresh()

    def __str__(self):
        """Representation string consisting of all schedules of the commcell entity.

        Returns:
            str - string of all the schedules associated with the commcell entity
                  (one row per schedule id; ids are the dict keys)
        """
        if self.schedules:
            representation_string = '{:^5}\t{:^20}\n\n'.format(
                'S. No.', 'Schedule')
            for index, schedule in enumerate(self.schedules):
                sub_str = '{:^5}\t{:20}\n'.format(index + 1, schedule)
                representation_string += sub_str
        else:
            representation_string = 'No Schedules are associated to this Commcell Entity'
        return representation_string.strip()

    def __repr__(self):
        """Representation string for the instance of the Schedules class."""
        return "Schedules class instance for {0}".format(self._repr_str)

    def _get_schedules(self):
        """Gets the schedules associated with the input commcell entity.
        Client / Agent / Backupset / Subclient

        Returns:
            dict - schedules keyed by schedule (subtask) id:
                {
                    schedule_id: {
                        'task_id': task_id,
                        'schedule_name': schedule_name,
                        'description': description
                    },
                    ...
                }

        Raises:
            SDKException:
                if response is not success
        """
        flag, response = self._commcell_object._cvpysdk_object.make_request(
            'GET', self._SCHEDULES)
        if flag:
            if response.json() and 'taskDetail' in response.json():
                subtask_dict = {}
                for schedule in response.json()['taskDetail']:
                    task_id = schedule['task']['taskId']
                    # the description carries over between subtasks of the
                    # same task until a new one is seen
                    description = ''
                    if 'subTasks' in schedule:
                        for subtask in schedule['subTasks']:
                            schedule_id = subtask['subTask']['subTaskId']

                            # BUGFIX: the membership test previously looked at
                            # subtask['subTask'] while the value was read from
                            # subtask['pattern'], which could raise KeyError.
                            # Test and read the same dictionary.
                            if 'description' in subtask.get('pattern', {}):
                                description = subtask['pattern']['description'].lower()

                            # prefer the explicit name; fall back to the
                            # description, then to the stringified id
                            if 'subTaskName' in subtask['subTask']:
                                subtask_name = subtask['subTask']['subTaskName'].lower()
                            elif description:
                                subtask_name = description
                            else:
                                subtask_name = str(schedule_id)

                            # keyed by schedule (subtask) id
                            subtask_dict[schedule_id] = {
                                'task_id': task_id,
                                'schedule_name': subtask_name,
                                'description': description
                            }
                return subtask_dict
            else:
                return {}
        else:
            response_string = self._commcell_object._update_response_(
                response.text)
            raise SDKException('Response', '101', response_string)

    def _get_sch_id_from_task_id(self, task_id):
        """Gets the schedule id from the task id.

        Args:
            task_id (int): task id of the schedule

        Returns:
            (int) schedule id of the schedule

        Raises:
            SDKException: if no schedule has the given task id
        """
        task_ids = [k for k, v in self.schedules.items() if v['task_id'] == task_id]
        if task_ids:
            return task_ids[0]
        else:
            raise SDKException('Schedules', '102', 'Schedule id not found for corresponding task id')

    def _get_schedule_id(self, schedule_name=None, schedule_id=None, task_id=None):
        """Gets the schedule id from the provided inputs.

        Args:
            schedule_name (str) -- name of the schedule
            schedule_id (int)   -- id of the schedule
            task_id (int)       -- task id of the schedule

        Returns:
            (int) schedule id of the schedule, or None when no match is found

        Raises:
            SDKException: if no identifier is given or an identifier has the wrong type
        """
        if not task_id and not schedule_name and not schedule_id:
            raise SDKException(
                'Schedules',
                '102',
                'Either Schedule Name or Schedule Id is needed')

        if schedule_name and not isinstance(schedule_name, basestring):
            raise SDKException('Schedules', '102')

        if schedule_id and not isinstance(schedule_id, int):
            raise SDKException('Schedules', '102')

        if task_id and not isinstance(task_id, int):
            raise SDKException('Schedules', '102')

        if schedule_name:
            schedule_name = schedule_name.lower()
            # NOTE: when several schedules share a name, the last match wins
            for subtask_id, subtask_dict in self.schedules.items():
                if subtask_dict['schedule_name'] == schedule_name:
                    schedule_id = subtask_id
        elif task_id:
            schedule_id = self._get_sch_id_from_task_id(task_id)

        if self.schedules and schedule_id in self.schedules:
            return schedule_id

    def has_schedule(self, schedule_name=None, schedule_id=None, task_id=None):
        """Checks if a schedule exists for the commcell entity with the input schedule name.

        Args:
            schedule_name (str) -- name of the schedule
            schedule_id (int)   -- id of the schedule
            task_id (int)       -- task id of the schedule

        Returns:
            bool - boolean output whether the schedule exists for the commcell entity or not

        Raises:
            SDKException:
                if type of the schedule name argument is not string
        """
        if self._get_schedule_id(schedule_name, schedule_id, task_id):
            return True
        return False

    def get(self, schedule_name=None, schedule_id=None, task_id=None):
        """Returns a schedule object of the specified schedule name.

        Args:
            schedule_name (str) -- name of the Schedule
            schedule_id (int)   -- id of the schedule
            task_id (int)       -- task id of the schedule

        Returns:
            object - instance of the Schedule class for the given schedule name

        Raises:
            SDKException:
                if type of the schedule name argument is not string
                if no schedule exists with the given name
        """
        schedule_id = self._get_schedule_id(schedule_name, schedule_id, task_id)
        if schedule_id:
            return Schedule(self.class_object,
                            schedule_id=schedule_id,
                            task_id=self.schedules[schedule_id]['task_id'])
        raise SDKException('Schedules', '105')

    def delete(self, schedule_name=None, schedule_id=None, task_id=None):
        """Deletes the specified schedule.

        Args:
            schedule_name (str) -- name of the Schedule
            schedule_id (int)   -- id of the schedule
            task_id (int)       -- task id of the schedule

        Raises:
            SDKException:
                if type of the schedule name argument is not string
                if no schedule exists with the given name
                if the server reports a non-zero error code
        """
        schedule_id = self._get_schedule_id(schedule_name, schedule_id, task_id)
        if schedule_id:
            # opType 3 == delete; the entity type 68 identifies a subtask
            request_json = {
                "TMMsg_TaskOperationReq":
                    {
                        "opType": 3,
                        "subtaskEntity":
                            [
                                {
                                    "_type_": 68,
                                    "subtaskId": schedule_id
                                }
                            ]
                    }
            }

            modify_schedule = self._commcell_object._services['EXECUTE_QCOMMAND']

            flag, response = self._commcell_object._cvpysdk_object.make_request(
                'POST', modify_schedule, request_json)

            if flag:
                if response.json():
                    if 'errorCode' in response.json():
                        if response.json()['errorCode'] == 0:
                            # deletion succeeded; re-fetch the collection
                            self.refresh()
                        else:
                            raise SDKException(
                                'Schedules', '102', response.json()['errorMessage'])
                else:
                    raise SDKException('Response', '102')
            else:
                response_string = self._commcell_object._update_response_(
                    response.text)
                exception_message = 'Failed to delete schedule\nError: "{0}"'.format(
                    response_string)
                raise SDKException('Schedules', '102', exception_message)
        else:
            raise SDKException('Schedules', '105')

    def refresh(self):
        """Refresh the Schedules associated with the Client / Agent / Backupset / Subclient."""
        self.schedules = self._get_schedules()
class Schedule:
"""Class for performing operations for a specific Schedule."""
    def __init__(self, class_object, schedule_name=None, schedule_id=None, task_id=None):
        """Initialise the Schedule class instance.

        Args:
            class_object (object) -- instance of Class Object

            schedule_name (str)   -- name of the Schedule

            schedule_id (int)     -- task ids of the Schedule

            task_id (int)         -- task id of the schedule; looked up from the
                                     Schedules collection when not supplied

        Returns:
            object - instance of the Schedule class

        Raises:
            SDKException: if neither schedule_name nor schedule_id is given
        """
        # import here to avoid a cyclic import with the commcell module
        from .commcell import Commcell
        self.class_object = class_object

        # the commcell object is either the entity itself or reachable from it
        if isinstance(class_object, Commcell):
            self._commcell_object = class_object
        else:
            self._commcell_object = class_object._commcell_object

        self.schedule_name = ''

        if not schedule_name and not schedule_id:
            raise SDKException(
                'Schedules',
                '102',
                'Either Schedule Name or Schedule Id is needed')

        if schedule_name:
            # names are matched case-insensitively throughout this module
            self.schedule_name = schedule_name.lower()

        if schedule_id:
            self.schedule_id = schedule_id
        else:
            # resolve the id from the name via a fresh Schedules collection
            self.schedule_id = self._get_schedule_id()

        if task_id:
            self.task_id = task_id
        else:
            self.task_id = self._get_task_id()

        # per-schedule REST endpoints
        self._SCHEDULE = self._commcell_object._services['SCHEDULE'] % (
            self.task_id)
        self._MODIFYSCHEDULE = self._commcell_object._services['EXECUTE_QCOMMAND']

        # server-side freq_type code -> readable frequency name
        self._freq_type = {
            1: 'One_Time',
            2: 'On_Demand',
            4: 'Daily',
            8: 'Weekly',
            16: 'Monthly',
            32: 'Monthly_Relative',
            64: 'Yearly',
            128: 'Yearly_Relative',
            1024: 'Automatic',
            4096: 'Continuous'
        }

        # ordinal week-of-month labels used by relative patterns
        self._week_of_month = {
            '1': 'First',
            '2': 'Second',
            '3': 'Third',
            '4': 'Fourth',
            '5': 'Last'
        }

        # server-side operation type code -> readable operation name
        self.task_operation_type = {
            1: 'ALL_BACKUP_JOBS',
            2: 'BACKUP',
            1001: ' RESTORE',
            2000: 'ADMIN',
            2001: 'WORK_FLOW',
            4002: 'DRBACKUP',
            4003: 'AUX_COPY',
            4004: 'REPORT',
            4018: 'DATA_AGING',
            4019: 'DOWNLOAD_UPDATES',
            4020: 'INSTALL_UPDATES'
        }

        # state populated by refresh()/_get_schedule_properties()
        self._criteria = {}
        self._pattern = {}
        self._task_options = {}
        self._associations_json = {}
        self._description = None
        self._alert_type = None
        self._sub_task_option = None
        self._automatic_pattern = {}
        self.refresh()
@property
def subtask_id(self):
"""
Property which returns subtask id of the schedule
Returns (int) -- Subtask id
"""
return self.schedule_id
def _get_schedule_id(self):
"""
Gets a schedule ID dict for the schedule
Returns (int) -- schedule ID
"""
schedules_obj = Schedules(self.class_object)
return schedules_obj.get(self.schedule_name).schedule_id
def _get_task_id(self):
"""
Gets a schedule ID dict for the schedule
Returns (int) -- schedule ID
"""
schedules_obj = Schedules(self.class_object)
return schedules_obj.schedules.get(self.schedule_id).get('task_id')
    def _get_schedule_properties(self):
        """Gets the properties of this Schedule.

        Fetches the task detail from the server and caches the pattern,
        task options and (for automatic schedules) the automatic pattern
        for the subtask whose id matches ``self.schedule_id``.

        Returns:
            dict - dictionary consisting of the properties of this Schedule

        Raises:
            SDKException:
                if response is empty

                if response is not success
        """
        flag, response = self._commcell_object._cvpysdk_object.make_request(
            'GET', self._SCHEDULE)

        if flag:
            if response.json() and 'taskInfo' in response.json():
                _task_info = response.json()['taskInfo']

                if 'associations' in _task_info:
                    self._associations_json = _task_info['associations']

                if 'task' in _task_info:
                    self._task_json = _task_info['task']

                for subtask in _task_info['subTasks']:
                    self._sub_task_option = subtask['subTask']
                    # only the subtask matching this schedule is inspected;
                    # all others are skipped by the continues below
                    if self._sub_task_option['subTaskId'] == self.schedule_id:
                        self.schedule_name = self._sub_task_option['subTaskName']

                        if 'operationType' in subtask['subTask']:
                            self.operation_type = subtask['subTask']['operationType']
                        else:
                            continue

                        if 'pattern' in subtask:
                            self._pattern = subtask['pattern']
                        else:
                            continue

                        if 'options' in subtask:
                            self._task_options = subtask['options']
                            if 'commonOpts' in self._task_options:
                                if 'automaticSchedulePattern' in self._task_options["commonOpts"]:
                                    # automatic schedules store their pattern
                                    # under commonOpts instead of 'pattern'
                                    self._automatic_pattern = self._task_options[
                                        "commonOpts"]['automaticSchedulePattern']

                            if 'backupOpts' in self._task_options:
                                if 'dataOpt' in self._task_options['backupOpts']:
                                    if isinstance(self._automatic_pattern, dict):
                                        # fold synthetic-full settings into the
                                        # cached automatic pattern
                                        _data_opt = self._task_options['backupOpts']['dataOpt']
                                        self._automatic_pattern.update(_data_opt)

            else:
                raise SDKException('Response', '102')
        else:
            response_string = self._commcell_object._update_response_(
                response.text)
            raise SDKException('Response', '101', response_string)
@property
def schedule_freq_type(self):
"""
get the schedule frequency type
Returns:
(str) the schedule frequency type
"""
return self._freq_type[self._pattern['freq_type']]
@property
def one_time(self):
"""
gets the one time schedule pattern
Returns:
(dict) The schedule pattern
{
"active_start_date": date_in_%m/%d/%y (str),
"active_start_time": time_in_%h:%m (str)
}
False: if schedule type is wrong
"""
if self.schedule_freq_type == 'One_Time':
return {
'active_start_date': SchedulePattern._time_converter(
self._pattern['active_start_date'],
'%m/%d/%Y', False),
'active_start_time': SchedulePattern._time_converter(
self._pattern['active_start_time'],
'%H:%M', False)
}
return False
@one_time.setter
def one_time(self, pattern_dict):
"""
sets the pattern type as one time with the parameters provided,
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictonary with the schedule pattern
{
"active_start_date": date_in_%m/%d/%y (str),
"active_start_time": time_in_%h:%m (str)
}
"""
if isinstance(pattern_dict, bool):
pattern_dict = {}
pattern_dict['freq_type'] = 'one_time'
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def daily(self):
"""
gets the daily schedule
Returns: (dict) -- The schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"repeat_days": days_to_repeat (int)
}
False: if schedule type is wrong
"""
if self.schedule_freq_type == 'Daily':
return {'active_start_time': SchedulePattern._time_converter(
self._pattern['active_start_time'], '%H:%M', False),
'repeat_days': self._pattern['freq_recurrence_factor']
}
return False
@daily.setter
def daily(self, pattern_dict):
"""
sets the pattern type as daily with the parameters provided
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"repeat_days": days_to_repeat (int)
}
"""
if isinstance(pattern_dict, bool):
pattern_dict = {}
pattern_dict['freq_type'] = 'daily'
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def weekly(self):
"""
gets the weekly schedule
Returns (dict) -- The schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"repeat_weeks": weeks_to_repeat (int)
"weekdays": list of weekdays ['Monday','Tuesday']
}
False: if schedule type is wrong
"""
if self.schedule_freq_type == 'Weekly':
_freq = self._pattern['freq_interval']
return {
'active_start_time': SchedulePattern._time_converter(
self._pattern['active_start_time'],
'%H:%M',
False),
'repeat_weeks': self._pattern['freq_recurrence_factor'],
'weekdays': [
SchedulePattern._days_to_run[x] for x in list(
SchedulePattern._days_to_run.keys()) if _freq & x > 0]}
return False
@weekly.setter
def weekly(self, pattern_dict):
"""
sets the pattern type as weekly with the parameters provided
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"repeat_weeks": weeks_to_repeat (int)
"weekdays": list of weekdays ['Monday','Tuesday']
}
"""
if isinstance(pattern_dict, bool):
pattern_dict = {}
pattern_dict['freq_type'] = 'weekly'
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def monthly(self):
"""
gets the monthly schedule
Returns: (dict) -- the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"repeat_months": months_to_repeat (int)
"on_day": Day to run schedule (int)
}
False: if schedule type is wrong
"""
if self.schedule_freq_type == 'Monthly':
return {
'active_start_time': SchedulePattern._time_converter(
self._pattern['active_start_time'],
'%H:%M',
False),
'repeat_months': self._pattern['freq_recurrence_factor'],
'on_day': self._pattern['freq_interval']}
return False
@monthly.setter
def monthly(self, pattern_dict):
"""
sets the pattern type as monthly with the parameters provided
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"repeat_months": months_to_repeat (int)
"on_day": Day to run schedule (int)
}
"""
if isinstance(pattern_dict, bool):
pattern_dict = {}
pattern_dict['freq_type'] = 'monthly'
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def monthly_relative(self):
"""
gets the monthly_relative schedule
Returns: (dict) -- The schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"relative_time": relative day of the schedule (str)'first','second',..
"relative_weekday": Day to run schedule (str) 'sunday','monday'...
"repeat_months": months_to_repeat
}
False: if schedule type is wrong
"""
if self.schedule_freq_type == 'Monthly_Relative':
return {
'active_start_time': SchedulePattern._time_converter(
self._pattern['active_start_time'],
'%H:%M',
False),
'relative_time': SchedulePattern._relative_day[
self._pattern['freq_relative_interval']],
'relative_weekday': SchedulePattern._relative_weekday[
self._pattern['freq_interval']],
'repeat_months': self._pattern['freq_recurrence_factor']}
return False
@monthly_relative.setter
def monthly_relative(self, pattern_dict):
"""
sets the pattern type as monthly_relative with the parameters provided
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"relative_time": relative day of the schedule (str) 'first',
'second',..
"relative_weekday": Day to run schedule (str) 'sunday','monday'...
"repeat_months": months_to_repeat
}
"""
if isinstance(pattern_dict, bool):
pattern_dict = {}
pattern_dict['freq_type'] = 'monthly_relative'
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def yearly(self):
"""
gets the yearly schedule
Returns: (dict) -- The schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"on_month": month to run schedule (str) January, Febuary...
"on_day": Day to run schedule (int)
}
False: if schedule type is wrong
"""
if self.schedule_freq_type == 'Yearly':
return {'active_start_time':
SchedulePattern._time_converter(self._pattern['active_start_time'],
'%H:%M', False),
'on_month': calendar.month_name[self._pattern['freq_recurrence_factor']],
'on_day': self._pattern['freq_interval']
}
return False
@yearly.setter
def yearly(self, pattern_dict):
"""
sets the pattern type as monthly with the parameters provided
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"on_month": month to run schedule (str) January, Febuary...
"on_day": Day to run schedule (int)
}
"""
if isinstance(pattern_dict, bool):
pattern_dict = {}
pattern_dict['freq_type'] = 'yearly'
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def yearly_relative(self):
"""
gets the yearly_relative schedule
Returns: (dict) The schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"relative_time": relative day of the schedule (str)'first','second',..
"relative_weekday": Day to run schedule (str) 'sunday','monday'...
"on_month": month to run the schedule(str) January, Febuary...
}
False: if schedule type is wrong
"""
if self.schedule_freq_type == 'Yearly_Relative':
return {'active_start_time':
SchedulePattern._time_converter(self._pattern['active_start_time'],
'%H:%M', False),
'relative_time': SchedulePattern._relative_day
[self._pattern['freq_relative_interval']],
'relative_weekday': SchedulePattern._relative_weekday
[self._pattern['freq_interval']],
'on_month': calendar.month_name[self._pattern['freq_recurrence_factor']]
}
return False
@yearly_relative.setter
def yearly_relative(self, pattern_dict):
"""
sets the pattern type as monthly_relative with the parameters provided
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
"active_start_time": time_in_%H/%S (str),
"relative_time": relative day of the schedule (str) 'first',
'second',..
"relative_weekday": Day to run schedule (str) 'sunday','monday'...
"on_month": month to run the schedule(str) January, February...
}
"""
if isinstance(pattern_dict, bool):
pattern_dict = {}
pattern_dict['freq_type'] = 'yearly_relative'
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def continuous(self):
"""
gets the continuous schedule
Returns: (dict) -- The schedule pattern
{
job_interval: interval between jobs in mins(int)
}
False: if schedule type is wrong
"""
if self.schedule_freq_type == 'Continuous':
return {
'job_interval': self._pattern['freq_interval']
}
return False
@continuous.setter
def continuous(self, pattern_dict):
"""
sets the pattern type as one time with the parameters provided,
send only required keys to change only those values
Args:
pattern_dict (dict) -- Dictionary with the schedule pattern
{
job_interval: interval between jobs in mins(int)
}
"""
if isinstance(pattern_dict, bool):
pattern_dict = {}
pattern_dict['freq_type'] = 'continuous'
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
    @property
    def automatic(self):
        """
        gets the automatic schedule

        Returns: (dict) -- The schedule pattern
                {
                     min_interval_hours: minimum hours between jobs(int)
                     min_interval_minutes: minimum minutes between jobs(int)
                     max_interval_hours: maximum hours between jobs(int)
                     max_interval_minutes: maximum minutes between jobs(int)
                     min_sync_interval_hours: minimum sync hours
                                                             between jobs(int)
                     min_sync_interval_minutes: minimum sync minutes
                                                             between jobs(int)
                     ignore_opwindow_past_maxinterval: (bool)
                     wired_network_connection: (bool)
                     min_network_bandwidth: (int) kbps
                     specific_network: (dict){ip_address:(str),subnet:(int)}
                     dont_use_metered_network: (bool)
                     ac_power: (bool)
                     stop_if_on_battery: (bool)
                     stop_sleep_if_runningjob: (bool)
                     cpu_utilization_below : (int)%
                     cpu_utilization_above : (int)%
                     run_synthetic_full : (str: every_x_days/extended_retention/
                                                                space_reclaim)
                     days_between_synthetic_full : (int)
                }

                False: if schedule type is wrong

        NOTE(review): several entries below (min_network_bandwidth,
        specific_network, cpu_utilization_*) expose only the nested
        'enabled' flag, not the configured value — presumably intentional,
        but verify against callers before relying on the values.
        """
        if self.schedule_freq_type == 'Automatic':
            pattern = {
                "min_interval_hours": self._automatic_pattern['minBackupInterval'],
                "min_interval_minutes": self._automatic_pattern['minBackupIntervalMinutes'],
                "max_interval_hours": self._automatic_pattern['maxBackupInterval'],
                "max_interval_minutes": self._automatic_pattern['maxBackupIntervalMinutes'],
                "min_sync_interval_hours": self._automatic_pattern['minSyncInterval'],
                "min_sync_interval_minutes": self._automatic_pattern['minSyncIntervalMinutes'],
                "ignore_opwindow_past_maxinterval": self._automatic_pattern['ignoreOpWindowPastMaxInterval'],
                "wired_network_connection": self._automatic_pattern.get('wiredNetworkConnection',
                                                                        {'enabled': False})['enabled'],
                "min_network_bandwidth": self._automatic_pattern.get('minNetworkBandwidth',
                                                                     {'enabled': False})['enabled'],
                # NOTE(review): 'specfificNetwork' looks misspelled but
                # presumably mirrors the server-side JSON key — confirm
                # against the API before "fixing" the spelling.
                "specific_network": self._automatic_pattern.get('specfificNetwork',
                                                                {'enabled': False})['enabled'],
                "dont_use_metered_network": self._automatic_pattern.get('dontUseMeteredNetwork',
                                                                        {'enabled': False})['enabled'],
                "ac_power": self._automatic_pattern.get('acPower',
                                                        {'enabled': False})['enabled'],
                "stop_if_on_battery": self._automatic_pattern.get('stopIfOnBattery',
                                                                  {'enabled': False})['enabled'],
                "stop_sleep_if_runningjob": self._automatic_pattern.get('stopSleepIfBackUp',
                                                                        {'enabled': False})['enabled'],
                "cpu_utilization_below": self._automatic_pattern.get('cpuUtilization',
                                                                     {'enabled': False})['enabled'],
                "cpu_utilization_above": self._automatic_pattern.get('cpuUtilizationAbove',
                                                                     {'enabled': False})['enabled'],
                # default synthetic-full mode; overridden by the flags below
                "run_synthetic_full": 'every_x_days'
            }

            if ('useAutomaticIntervalForSyntheticFull' in self._automatic_pattern and
                    self._automatic_pattern['useAutomaticIntervalForSyntheticFull']):
                pattern['run_synthetic_full'] = 'extended_retention'

            if ('enableRunFullConsolidationBackup' in self._automatic_pattern and
                    self._automatic_pattern['enableRunFullConsolidationBackup']):
                pattern['run_synthetic_full'] = 'space_reclaim'

            if ('daysBetweenSyntheticBackup' in self._automatic_pattern and
                    self._automatic_pattern['daysBetweenSyntheticBackup']):
                pattern['days_between_synthetic_full'] = self._automatic_pattern[
                    'daysBetweenSyntheticBackup']

            return pattern
        return False
    @automatic.setter
    def automatic(self, pattern_dict):
        """
        sets the pattern type as automatic with the parameters provided,
        send only required keys to change only those values

        Args:
            pattern_dict (dict) -- Dictionary with the schedule pattern
                {
                     min_interval_hours: minimum hours between jobs(int)
                     min_interval_minutes: minimum minutes between jobs(int)
                     max_interval_hours: maximum hours between jobs(int)
                     max_interval_minutes: maximum minutes between jobs(int)
                     min_sync_interval_hours: minimum sync hours
                                                             between jobs(int)
                     min_sync_interval_minutes: minimum sync minutes
                                                             between jobs(int)
                     ignore_opwindow_past_maxinterval: (bool)
                     wired_network_connection: (bool)
                     min_network_bandwidth: (int) kbps
                     specific_network: (dict){ip_address:(str),subnet:(int)}
                     dont_use_metered_network: (bool)
                     ac_power: (bool)
                     stop_if_on_battery: (bool)
                     stop_sleep_if_runningjob: (bool)
                     cpu_utilization_below : (int)%
                     cpu_utilization_above : (int)%
                     run_synthetic_full : (str: every_x_days/extended_retention/
                                                                space_reclaim)
                     days_between_synthetic_full : (int)
                }
        """
        if isinstance(pattern_dict, bool):
            pattern_dict = {}
        pattern_dict['freq_type'] = 'automatic'
        schedule_pattern = SchedulePattern(self._automatic_pattern)
        # automatic schedules keep a fixed freq_type in 'pattern'; the real
        # settings live under commonOpts/automaticSchedulePattern
        self._pattern = {"freq_type": 1024}

        if 'commonOpts' in self._task_options:
            self._task_options["commonOpts"]["automaticSchedulePattern"] = \
                schedule_pattern.create_schedule_pattern(pattern_dict)
        else:
            self._task_options["commonOpts"] = \
                {"automaticSchedulePattern": schedule_pattern.create_schedule_pattern(
                    pattern_dict)}

        if 'run_synthetic_full' in pattern_dict:
            synthetic_pattern = pattern_dict['run_synthetic_full']
            # interval only applies to the every_x_days mode; 30 is the default
            if synthetic_pattern == 'every_x_days':
                synthetic_interval = pattern_dict.get(
                    'days_between_synthetic_full', 30)
            else:
                synthetic_interval = 30

            _data_opt = {
                'autoCopy': True,
                'daysBetweenSyntheticBackup': synthetic_interval,
                'useAutomaticIntervalForSyntheticFull': (
                    synthetic_pattern == 'extended_retention'),
                'enableRunFullConsolidationBackup': (
                    synthetic_pattern == 'space_reclaim')
            }

            # merge into any existing backup/data options rather than replace
            if 'backupOpts' in self._task_options:
                if 'dataOpt' in self._task_options["backupOpts"]:
                    self._task_options['backupOpts']['dataOpt'].update(_data_opt)
                else:
                    self._task_options['backupOpts']['dataOpt'] = _data_opt
            else:
                self._task_options['backupOpts'] = {
                    'dataOpt': _data_opt
                }

        self._modify_task_properties()
@property
def active_start_date(self):
"""
gets the start date of the schedule
Returns: (str) -- date in %m/%d/%Y
"""
return SchedulePattern._time_converter(
self._pattern['active_start_date'], '%m/%d/%Y', False)
@active_start_date.setter
def active_start_date(self, active_start_date):
"""
sets the start date of the schedule
Args:
active_start_date (str) -- date in %m/%d/%Y
"""
pattern_dict = dict()
pattern_dict['freq_type'] = self.schedule_freq_type
pattern_dict['active_start_date'] = active_start_date
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def active_start_time(self):
"""
gets the start time of the schedule
Returns: (str) -- time in %H/%S
"""
return SchedulePattern._time_converter(
self._pattern['active_start_time'], '%H:%M', False)
@active_start_time.setter
def active_start_time(self, active_start_time):
"""
sets the start time of the schedule
Args:
active_start_time (str) -- time in %H/%S
"""
pattern_dict = dict()
pattern_dict['freq_type'] = self.schedule_freq_type
pattern_dict['active_start_time'] = active_start_time
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def active_end_date(self):
"""
gets the end date of the schedule if present
Returns: (str) -- date in %m/%d/%Y
"""
if "active_end_date" in self._pattern:
if self._pattern["active_end_date"]:
return SchedulePattern._time_converter(
self._pattern['active_end_date'], '%m/%d/%Y', False)
return False
@active_end_date.setter
def active_end_date(self, active_start_date):
"""
sets the end date for the schedule
Args:
active_start_date (str) -- date in %m/%d/%Y
"""
pattern_dict = dict()
pattern_dict['freq_type'] = self.schedule_freq_type
pattern_dict['active_end_date'] = active_start_date
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def exception_dates(self):
"""
returns a list of exception days if present
Returns:
(list) -- exception days in a schedule
"""
if "repeatPattern" in self._pattern:
for repeat_pattern in self._pattern["repeatPattern"]:
if repeat_pattern.get("exception"):
_on_day_number = repeat_pattern.get("onDayNumber")
day = 1
exceptions = []
while day <= 31 and _on_day_number != 0:
if _on_day_number & 1 == 1:
exceptions.append(day)
_on_day_number = _on_day_number >> 1
day += 1
return exceptions
return False
@exception_dates.setter
def exception_dates(self, day_list):
"""
sets exception days provided as input for the schedule
Args:
day_list: (list) -- exception days to set for the schedule
"""
pattern_dict = dict()
pattern_dict['freq_type'] = self.schedule_freq_type
pattern_dict['exception_dates'] = day_list
schedule_pattern = SchedulePattern(self._pattern)
self._pattern = schedule_pattern.create_schedule_pattern(pattern_dict)
self._modify_task_properties()
@property
def end_after(self):
"""
gets the maximum occurence of the schedule if present
Returns: (int) -- end occurence
"""
return self._pattern.get("active_end_occurence", False)
@end_after.setter
def end_after(self, end_after):
    """Limit how many times the schedule may run.

    Args:
        end_after (int) -- number of times the schedule should run
    """
    updates = {
        'freq_type': self.schedule_freq_type,
        'end_after': end_after,
    }
    self._pattern = SchedulePattern(self._pattern).create_schedule_pattern(updates)
    self._modify_task_properties()
@property
def repeat_pattern(self):
    """Sub-day repeat pattern of the schedule, if present.

    Returns:
        dict -- {"repeat_every": "H:MM", "repeat_end": "HH:MM"} when a
                sub-day interval is configured
        bool -- False otherwise
    """
    if self._pattern.get("freq_subday_interval", 0):
        _subday_interval = self._pattern["freq_subday_interval"]
        # BUG FIX: the old expression used Python-3 true division,
        # `int((i / 60) - ((i / 3600) * 60))`, which is identically zero,
        # so the minutes component was always reported as 0.  Derive the
        # hour/minute split with integer arithmetic instead, and zero-pad
        # minutes properly ("1:05" rather than the old "1:05"/"1:030" mix).
        hours, remainder = divmod(_subday_interval, 3600)
        minutes = remainder // 60
        repeat_every = "{0}:{1:02d}".format(int(hours), int(minutes))
        repeat_end = SchedulePattern._time_converter(
            self._pattern["active_end_time"], "%H:%M", utc_to_epoch=False)
        return {
            "repeat_every": repeat_every,
            "repeat_end": repeat_end
        }
    return False
@repeat_pattern.setter
def repeat_pattern(self, pattern_json):
    """Set a sub-day repeat pattern for the schedule.

    Args:
        pattern_json (dict) -- contains the repeat_every and repeat_end
            parameters (the dict is annotated in place with the freq type)
    """
    pattern_json['freq_type'] = self.schedule_freq_type
    self._pattern = SchedulePattern(self._pattern).create_schedule_pattern(pattern_json)
    self._modify_task_properties()
def run_now(self):
    """Trigger the schedule to run immediately.

    Returns:
        str -- job id of the triggered job

    Raises:
        SDKException:
            if the request failed at the HTTP level
            if the response is empty
            if no job id is found in the response
    """
    request_json = {
        "TMMsg_TaskOperationReq":
            {
                "opType": 5,
                "subtaskEntity":
                    [
                        {
                            "_type_": 68,
                            "subtaskId": self.subtask_id,
                            "taskName": "",
                            "subtaskName": self.schedule_name,
                            "taskId": self.task_id
                        }
                    ],
                "taskIds":
                    [
                        self.task_id
                    ]
            }
    }
    flag, response = self._commcell_object._cvpysdk_object.make_request(
        'POST', self._MODIFYSCHEDULE, request_json
    )
    # Previously the `flag` from make_request was ignored; check it first,
    # consistent with enable()/disable()/_process_schedule_update_response().
    if not flag:
        response_string = self._commcell_object._update_response_(
            response.text)
        raise SDKException('Response', '101', response_string)
    if not response.json():
        raise SDKException('Response', '102')
    if "jobIds" not in response.json():
        raise SDKException('Response', '102', 'JobID not found in response')
    return str(response.json()["jobIds"][0])
def _modify_task_properties(self):
    """Push the cached task/subtask/pattern state to the server.

    Raises:
        SDKException: if modification of the schedule failed
    """
    task_info = {
        'associations': self._associations_json,
        'task': self._task_json,
        'subTasks':
            [
                {
                    'subTask': self._sub_task_option,
                    'pattern': self._pattern,
                    'options': self._task_options
                }
            ]
    }
    request_json = {'TMMsg_ModifyTaskReq': {'taskInfo': task_info}}
    flag, response = self._commcell_object._cvpysdk_object.make_request(
        'POST', self._MODIFYSCHEDULE, request_json
    )
    success, _, error_message = self._process_schedule_update_response(
        flag, response)
    # re-read server state regardless of outcome
    self.refresh()
    if not success:
        raise SDKException(
            'Schedules', '102',
            'Failed to update properties of Schedule\nError: "{0}"'.format(
                error_message))
def enable(self):
    """Enable a schedule.

    Raises:
        SDKException:
            if failed to enable schedule
            if response is empty
            if response is not success
    """
    enable_request = self._commcell_object._services['ENABLE_SCHEDULE']
    request_text = "taskId={0}".format(self.task_id)
    flag, response = self._commcell_object._cvpysdk_object.make_request(
        'POST', enable_request, request_text)
    # HTTP-level failure
    if not flag:
        response_string = self._commcell_object._update_response_(
            response.text)
        raise SDKException('Response', '101', response_string)
    if not response.json():
        raise SDKException('Response', '102')
    if str(response.json()['errorCode']) == "0":
        return
    error_message = ""
    if 'errorMessage' in response.json():
        error_message = response.json()['errorMessage']
    if error_message:
        raise SDKException(
            'Schedules',
            '102',
            'Failed to enable Schedule\nError: "{0}"'.format(error_message))
    raise SDKException('Schedules', '102', "Failed to enable Schedule")
def disable(self):
    """Disable a Schedule.

    Raises:
        SDKException:
            if failed to disable Schedule
            if response is empty
            if response is not success
    """
    disable_request = self._commcell_object._services['DISABLE_SCHEDULE']
    request_text = "taskId={0}".format(self.task_id)
    flag, response = self._commcell_object._cvpysdk_object.make_request(
        'POST', disable_request, request_text)
    # HTTP-level failure
    if not flag:
        response_string = self._commcell_object._update_response_(
            response.text)
        raise SDKException('Response', '101', response_string)
    if not response.json():
        raise SDKException('Response', '102')
    if str(response.json()['errorCode']) == "0":
        return
    error_message = ""
    if 'errorMessage' in response.json():
        error_message = response.json()['errorMessage']
    if error_message:
        raise SDKException(
            'Schedules',
            '102',
            'Failed to disable Schedule\nError: "{0}"'.format(error_message))
    raise SDKException('Schedules', '102', "Failed to disable Schedule")
def _process_schedule_update_response(self, flag, response):
    """
    processes the response received post update request
    Args:
        flag: (bool) -- True or false based on response
        response: (dict) response from modify request
    Returns:
        flag: (Bool) -- based on success and failure
        error_code: (int) -- error_code from response
        error_message: (str) -- error_message from the response if any
    """
    task_id = None
    if flag:
        if response.json():
            if "taskId" in response.json():
                task_id = str(response.json()["taskId"])
                if task_id:
                    # a task id in the response indicates the update succeeded
                    return True, "0", ""
            elif "errorCode" in response.json():
                error_code = str(response.json()['errorCode'])
                error_message = response.json()['errorMessage']
                if error_code == "0":
                    return True, "0", ""
                if error_message:
                    return False, error_code, error_message
                else:
                    return False, error_code, ""
            else:
                # neither a task id nor an error code -- unexpected payload
                raise SDKException('Response', '102')
        else:
            raise SDKException('Response', '102')
    # reached when the HTTP request failed (flag is False) or the returned
    # taskId string was empty
    response_string = self._commcell_object._update_response_(
        response.text)
    raise SDKException('Response', '101', response_string)
def refresh(self):
    """Refresh the properties of the Schedule.

    Delegates to ``_get_schedule_properties`` to re-read the cached
    schedule state.
    """
    self._get_schedule_properties()
| 43.536998 | 120 | 0.496499 |
d8f951456e41a03a6f33077aa7ed7706db62d289 | 6,254 | py | Python | src/python/grpcio/grpc/_common.py | warlock135/grpc | 81e13e4fa9c0cdf7dc131ce548e1604c895b738c | [
"Apache-2.0"
] | 36,552 | 2015-02-26T17:30:13.000Z | 2022-03-31T22:41:33.000Z | src/python/grpcio/grpc/_common.py | SanjanaSingh897/grpc | 2d858866eb95ce5de8ccc8c35189a12733d8ca79 | [
"Apache-2.0"
] | 23,536 | 2015-02-26T17:50:56.000Z | 2022-03-31T23:39:42.000Z | src/python/grpcio/grpc/_common.py | SanjanaSingh897/grpc | 2d858866eb95ce5de8ccc8c35189a12733d8ca79 | [
"Apache-2.0"
] | 11,050 | 2015-02-26T17:22:10.000Z | 2022-03-31T10:12:35.000Z | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared implementation."""
import logging
import time
import grpc
from grpc._cython import cygrpc
import six
_LOGGER = logging.getLogger(__name__)

# Maps Cython-layer connectivity states to the public grpc.ChannelConnectivity enum.
CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY = {
    cygrpc.ConnectivityState.idle:
        grpc.ChannelConnectivity.IDLE,
    cygrpc.ConnectivityState.connecting:
        grpc.ChannelConnectivity.CONNECTING,
    cygrpc.ConnectivityState.ready:
        grpc.ChannelConnectivity.READY,
    cygrpc.ConnectivityState.transient_failure:
        grpc.ChannelConnectivity.TRANSIENT_FAILURE,
    cygrpc.ConnectivityState.shutdown:
        grpc.ChannelConnectivity.SHUTDOWN,
}

# Maps Cython-layer status codes to the public grpc.StatusCode enum.
CYGRPC_STATUS_CODE_TO_STATUS_CODE = {
    cygrpc.StatusCode.ok: grpc.StatusCode.OK,
    cygrpc.StatusCode.cancelled: grpc.StatusCode.CANCELLED,
    cygrpc.StatusCode.unknown: grpc.StatusCode.UNKNOWN,
    cygrpc.StatusCode.invalid_argument: grpc.StatusCode.INVALID_ARGUMENT,
    cygrpc.StatusCode.deadline_exceeded: grpc.StatusCode.DEADLINE_EXCEEDED,
    cygrpc.StatusCode.not_found: grpc.StatusCode.NOT_FOUND,
    cygrpc.StatusCode.already_exists: grpc.StatusCode.ALREADY_EXISTS,
    cygrpc.StatusCode.permission_denied: grpc.StatusCode.PERMISSION_DENIED,
    cygrpc.StatusCode.unauthenticated: grpc.StatusCode.UNAUTHENTICATED,
    cygrpc.StatusCode.resource_exhausted: grpc.StatusCode.RESOURCE_EXHAUSTED,
    cygrpc.StatusCode.failed_precondition: grpc.StatusCode.FAILED_PRECONDITION,
    cygrpc.StatusCode.aborted: grpc.StatusCode.ABORTED,
    cygrpc.StatusCode.out_of_range: grpc.StatusCode.OUT_OF_RANGE,
    cygrpc.StatusCode.unimplemented: grpc.StatusCode.UNIMPLEMENTED,
    cygrpc.StatusCode.internal: grpc.StatusCode.INTERNAL,
    cygrpc.StatusCode.unavailable: grpc.StatusCode.UNAVAILABLE,
    cygrpc.StatusCode.data_loss: grpc.StatusCode.DATA_LOSS,
}

# Inverse of the table above (public enum -> Cython-layer code).
STATUS_CODE_TO_CYGRPC_STATUS_CODE = {
    grpc_code: cygrpc_code for cygrpc_code, grpc_code in six.iteritems(
        CYGRPC_STATUS_CODE_TO_STATUS_CODE)
}

# Upper bound (seconds) on any single blocking wait, so signal handlers on the
# main thread never wait longer than this before getting a chance to run.
MAXIMUM_WAIT_TIMEOUT = 0.1

_ERROR_MESSAGE_PORT_BINDING_FAILED = 'Failed to bind to address %s; set ' \
    'GRPC_VERBOSITY=debug environment variable to see detailed error message.'
def encode(s):
    """Return *s* as UTF-8 bytes; bytes objects pass through untouched."""
    return s if isinstance(s, bytes) else s.encode('utf8')
def decode(b):
    """Return *b* as text, decoding bytes as UTF-8 with replacement chars."""
    if not isinstance(b, bytes):
        return b
    return b.decode('utf-8', 'replace')
def _transform(message, transformer, exception_message):
    """Apply *transformer* to *message*.

    A ``None`` transformer is the identity. Any exception raised by the
    transformer is logged with *exception_message* and mapped to ``None``.
    """
    if transformer is None:
        return message
    try:
        return transformer(message)
    except Exception:  # pylint: disable=broad-except
        _LOGGER.exception(exception_message)
        return None


def serialize(message, serializer):
    """Serialize *message* with *serializer*; ``None`` serializer passes through."""
    return _transform(message, serializer, 'Exception serializing message!')


def deserialize(serialized_message, deserializer):
    """Deserialize *serialized_message*; ``None`` deserializer passes through."""
    return _transform(serialized_message, deserializer,
                      'Exception deserializing message!')
def fully_qualified_method(group, method):
    """Return the full gRPC method path ``/<group>/<method>``."""
    return '/{0}/{1}'.format(group, method)
def _wait_once(wait_fn, timeout, spin_cb):
    """Perform one bounded wait, then invoke the spin callback if given."""
    wait_fn(timeout=timeout)
    if spin_cb is not None:
        spin_cb()


def wait(wait_fn, wait_complete_fn, timeout=None, spin_cb=None):
    """Blocks waiting for an event without blocking the thread indefinitely.

    See https://github.com/grpc/grpc/issues/19464 for full context. CPython's
    `threading.Event.wait` and `threading.Condition.wait` methods, if invoked
    without a timeout kwarg, may block the calling thread indefinitely. If the
    call is made from the main thread, this means that signal handlers may not
    run for an arbitrarily long period of time.

    This wrapper calls the supplied wait function with an arbitrary short
    timeout to ensure that no signal handler has to wait longer than
    MAXIMUM_WAIT_TIMEOUT before executing.

    Args:
      wait_fn: A callable accepting a single float-valued kwarg named
        `timeout`. This function is expected to be one of
        `threading.Event.wait` or `threading.Condition.wait`.
      wait_complete_fn: A callable taking no arguments and returning a bool.
        When this function returns true, it indicates that waiting should
        cease.
      timeout: An optional float-valued number of seconds after which the
        wait should cease.
      spin_cb: An optional Callable taking no arguments and returning
        nothing. This callback will be called on each iteration of the spin.
        This may be used for, e.g. work related to forking.

    Returns:
      True if a timeout was supplied and it was reached. False otherwise.
    """
    deadline = None if timeout is None else time.time() + timeout
    while not wait_complete_fn():
        if deadline is None:
            _wait_once(wait_fn, MAXIMUM_WAIT_TIMEOUT, spin_cb)
        else:
            remaining = min(deadline - time.time(), MAXIMUM_WAIT_TIMEOUT)
            if remaining < 0:
                return True
            _wait_once(wait_fn, remaining, spin_cb)
    return False
def validate_port_binding_result(address, port):
    """Validates if the port binding succeeded.

    Core signals a failed bind by returning port 0, without a detailed
    reason; the best we can do is raise to prevent further confusion.

    Args:
      address: The address string to be bound.
      port: An int returned by core.

    Returns:
      The bound port when non-zero.

    Raises:
      RuntimeError: when core reported the bind failed (port == 0).
    """
    if port != 0:
        return port
    raise RuntimeError(_ERROR_MESSAGE_PORT_BINDING_FAILED % address)
| 37.005917 | 80 | 0.731532 |
1b99f6486e850674169de5d480aa11ac677cd161 | 3,948 | py | Python | src/core/settings.py | aminul91/tutorsitelinks | 990a48056fc148c34da32586677e6f56801a6ec8 | [
"MIT"
] | null | null | null | src/core/settings.py | aminul91/tutorsitelinks | 990a48056fc148c34da32586677e6f56801a6ec8 | [
"MIT"
] | null | null | null | src/core/settings.py | aminul91/tutorsitelinks | 990a48056fc148c34da32586677e6f56801a6ec8 | [
"MIT"
] | null | null | null | # flake8: noqa
"""
Django settings for core project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from decouple import config
from pathlib import Path
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATE_DIR = BASE_DIR / 'templates'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('PARAM')
# SECURITY WARNING: don't run with debug turned on in production!
# True for development server, false for production server.
try:
DEBUG = config('DEBUG', cast=bool)
except Exception:
DEBUG = True
try:
DEV = config('DEV', cast=bool)
except Exception:
DEV = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'app',
'rest_framework.authtoken'
]
MIDDLEWARE = [
# Part of base
'django.middleware.security.SecurityMiddleware',
# From WhiteNoise to serve static files in Heroku from Django
# Remove this if you are using S3
'whitenoise.middleware.WhiteNoiseMiddleware',
# Part of base
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'core.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
if DEV:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
else:
DATABASES = {}
DATABASES['default'] = dj_database_url.config(conn_max_age=600)
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [BASE_DIR / 'static_files']
STATIC_ROOT = BASE_DIR / 'static'
# Media files (Uploaded by the users)
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR / 'media'
| 25.470968 | 91 | 0.694022 |
fa1523800c88d601543fc5cf43ab857484c45c2c | 7,179 | py | Python | framework/SupervisedLearning/ScikitLearn/DiscriminantAnalysis/LinearDiscriminantAnalysis.py | dylanjm/raven | 7262bc3564da08dbb7bd76892b6435d9ce48256b | [
"Apache-2.0"
] | 159 | 2017-03-24T21:07:06.000Z | 2022-03-20T13:44:40.000Z | framework/SupervisedLearning/ScikitLearn/DiscriminantAnalysis/LinearDiscriminantAnalysis.py | idaholab/raven | e989b5f0b14b0e6f0cde20c22f24b76c5136c3eb | [
"Apache-2.0"
] | 1,667 | 2017-03-27T14:41:22.000Z | 2022-03-31T19:50:06.000Z | framework/SupervisedLearning/ScikitLearn/DiscriminantAnalysis/LinearDiscriminantAnalysis.py | wanghy-anl/raven | ef1372364a2776385931763f2b28fdf2930c77b9 | [
"Apache-2.0"
] | 95 | 2017-03-24T21:05:03.000Z | 2022-03-08T17:30:22.000Z | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jan 21, 2020
@author: alfoa, wangc
LinearDiscriminantAnalysis
Classifier implementing Discriminant Analysis (Linear) classification
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from SupervisedLearning.ScikitLearn import ScikitLearnBase
from utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class LinearDiscriminantAnalysisClassifier(ScikitLearnBase):
    """
    LinearDiscriminantAnalysisClassifier
    Classifier with a linear decision boundary, generated by fitting class
    conditional densities to the data and using Bayes' rule.
    """
    # ROM metadata consumed by the framework: problem type and whether the
    # inputs require normalization before training.
    info = {'problemtype': 'classification', 'normalize': False}

    def __init__(self):
        """
        Constructor that will appropriately initialize a supervised learning object
        @ In, None
        @ Out, None
        """
        super().__init__()
        import sklearn
        import sklearn.discriminant_analysis
        import sklearn.multioutput
        # we wrap the model with the multi output classifier (for multitarget)
        self.model = sklearn.multioutput.MultiOutputClassifier(sklearn.discriminant_analysis.LinearDiscriminantAnalysis())

    @classmethod
    def getInputSpecification(cls):
        """
        Method to get a reference to a class that specifies the input data for
        class cls.
        @ In, cls, the class for which we are retrieving the specification
        @ Out, inputSpecification, InputData.ParameterInput, class to use for
          specifying input of cls.
        """
        specs = super(LinearDiscriminantAnalysisClassifier, cls).getInputSpecification()
        specs.description = r"""The \xmlNode{LinearDiscriminantAnalysisClassifier} is a classifier with a linear decision boundary,
                            generated by fitting class conditional densities to the data and using Bayes' rule.
                            The model fits a Gaussian density to each class, assuming that all classes share the same covariance matrix.
                            The fitted model can also be used to reduce the dimensionality of the input by projecting it to the most discriminative
                            directions, using the transform method.
                            \zNormalizationNotPerformed{LinearDiscriminantAnalysisClassifier}
                            """
        specs.addSub(InputData.parameterInputFactory("solver", contentType=InputTypes.StringType,
                                                     descr=r"""Solver to use, possible values:
                                                     \begin{itemize}
                                                       \item svd: Singular value decomposition (default). Does not compute the covariance matrix,
                                                             therefore this solver is recommended for data with a large number of features.
                                                       \item lsqr: Least squares solution. Can be combined with shrinkage or custom covariance estimator.
                                                       \item eigen: Eigenvalue decomposition. Can be combined with shrinkage or custom covariance estimator.
                                                     \end{itemize}
                                                     """, default='svd'))
        specs.addSub(InputData.parameterInputFactory("Shrinkage", contentType=InputTypes.FloatOrStringType,
                                                     descr=r"""Shrinkage parameter, possible values: 1) None: no shrinkage (default),
                                                     2) `auto': automatic shrinkage using the Ledoit-Wolf lemma,
                                                     3) float between 0 an d1: fixed shrinkage parameter.
                                                     This should be left to None if covariance_estimator is used. Note that shrinkage works
                                                     only with `lsqr' and `eigen' solvers.""", default=None))
        specs.addSub(InputData.parameterInputFactory("priors", contentType=InputTypes.FloatListType,
                                                     descr=r"""The class prior probabilities. By default, the class proportions are inferred from the training data.""", default=None))
        specs.addSub(InputData.parameterInputFactory("n_components", contentType=InputTypes.IntegerType,
                                                     descr=r"""Number of components (<= min(n\_classes - 1, n\_features)) for dimensionality reduction.
                                                     If None, will be set to min(n\_classes - 1, n\_features). This parameter only affects the transform
                                                     method.""", default=None))
        specs.addSub(InputData.parameterInputFactory("store_covariance", contentType=InputTypes.BoolType,
                                                     descr=r"""If True, explicitely compute the weighted within-class covariance matrix when solver
                                                     is `svd'. The matrix is always computed and stored for the other solvers.""", default=False))
        specs.addSub(InputData.parameterInputFactory("tol", contentType=InputTypes.FloatType,
                                                     descr=r"""Absolute threshold for a singular value of X to be considered significant, used to estimate the rank of X.
                                                     Dimensions whose singular values are non-significant are discarded. Only used if solver is `svd'.""", default=1.0e-4))
        specs.addSub(InputData.parameterInputFactory("covariance_estimator", contentType=InputTypes.IntegerType,
                                                     descr=r"""covariance estimator (not supported)""", default=None))
        return specs

    def _handleInput(self, paramInput):
        """
        Function to handle the common parts of the distribution parameter input.
        @ In, paramInput, ParameterInput, the already parsed input.
        @ Out, None
        """
        super()._handleInput(paramInput)
        settings, notFound = paramInput.findNodesAndExtractValues(['solver', 'Shrinkage', 'priors',
                                                                   'n_components', 'store_covariance', 'tol', 'covariance_estimator'])
        # notFound must be empty
        assert(not notFound)
        self.initializeModel(settings)
| 63.530973 | 179 | 0.594512 |
daf0714eeeed6a3c655fafa2c6c20d37df278671 | 463 | py | Python | tests/local/unit/test_wheel_build.py | git-akihakune/pilapse | 2e2cb99e074b5b234c3d8816d421e3d24909e2e6 | [
"MIT"
] | null | null | null | tests/local/unit/test_wheel_build.py | git-akihakune/pilapse | 2e2cb99e074b5b234c3d8816d421e3d24909e2e6 | [
"MIT"
] | null | null | null | tests/local/unit/test_wheel_build.py | git-akihakune/pilapse | 2e2cb99e074b5b234c3d8816d421e3d24909e2e6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import pytest
# to make sure this runs from root directory
def test_cwd():
    """Fail early when the suite is not executed from the project root."""
    current_path = os.getcwd()
    # Get the name of current directory only (portable, unlike rfind('/'))
    current_dir = os.path.basename(current_path)
    if current_dir != "pilapse" or "pilapse" not in os.listdir(current_path):
        # BUG FIX: pytest.raises() expects an exception type, not a message;
        # pytest.fail() is the API that aborts the test with a message.
        pytest.fail("Please execute tests from project's root directory.")
def test_wheel_build():
    """Build the project wheel and assert that pip exited successfully."""
    # os.system returns the command's exit status; the old code discarded it,
    # so the test could never fail even when the wheel build broke.
    assert os.system('pip wheel . -w wheels') == 0
216e766df7da67e48f71ad2560a0e4cdc2c370cc | 4,990 | py | Python | src/nonogram_solver/basic/brute_force.py | MikeJongen/nonogram-solver | b174fe6c693c1ace3918bf19a9110165a5b42db9 | [
"MIT"
] | null | null | null | src/nonogram_solver/basic/brute_force.py | MikeJongen/nonogram-solver | b174fe6c693c1ace3918bf19a9110165a5b42db9 | [
"MIT"
] | null | null | null | src/nonogram_solver/basic/brute_force.py | MikeJongen/nonogram-solver | b174fe6c693c1ace3918bf19a9110165a5b42db9 | [
"MIT"
] | null | null | null | from nonogram_solver.nonogram import Nonogram
from nonogram_solver.nonogram import Row
class BruteForceSolver(Nonogram):
    """Brute force solver class

    Performance heavy algorithm, use as last option
    """

    def init_row_solvers(self):
        # install the brute-force row solver for every row/column
        Nonogram.init_row_solvers(self, BruteForceRowSolver)

    def solve(self):
        """
        Run one brute-force pass over all rows/columns.
        Returns True if anything was changed to the solution.
        """
        return self.solve_single_iteration(BruteForceRowSolver.solve_brute_force)
class BruteForceRowSolver(Row):
    """Row solver that enumerates every candidate filling of a single row.

    Cell encoding (as used by ``self.values``): 1 = filled, -1 = empty,
    0 = unknown.
    """

    def solve_brute_force(self, maximum_solutions=100000):
        """
        RowSolver for BruteForceSolver class
        Tests every possible solution.
        maximum_solutions : int
            only solve if total number of solutions is lower than this.
        returnvalue : Bool
            A bool to indicate if the row has been changed. (True if changed)
            Note: implicitly returns None (falsy) when no candidate solution
            is compatible with the currently known cells.
        """
        if self.solved:
            return False
        number_of_solutions = self._get_number_of_solutions()
        if number_of_solutions > maximum_solutions:
            # enumeration would be too expensive -- leave the row untouched
            return False
        all_solutions = self._get_all_solutions()
        new_solution = None
        for solution in all_solutions:
            if self._check_solution(solution):
                if new_solution is None:
                    new_solution = solution
                    continue
                # keep only the cells that every compatible solution agrees on
                new_solution = self._get_matching_solution(
                    new_solution, solution)
        if self.values == new_solution:
            return False
        if new_solution is not None:
            self.values = new_solution
            return True

    def _get_number_of_solutions(self):
        """
        Returns the number of possible solutions for the row.
        """
        # slack cells left after placing all clue blocks (with gaps)
        movement_space = self.size - self.clue_size
        return self._number_of_solutions(movement_space, len(self.clues))

    def _number_of_solutions(self, movement_space, no_clues):
        # counts the ways to distribute `movement_space` slack cells among
        # the gaps around `no_clues` clue blocks (stars-and-bars, recursive)
        if no_clues == 1:
            return movement_space + 1
        elif no_clues == 0:
            return 1
        else:
            solutions = 0
            for i in range(movement_space + 1):
                solutions += self._number_of_solutions(i, no_clues - 1)
            return solutions

    def _get_all_solutions(self):
        """
        Returns a list with all possible solutions for the row
        """
        solutions_list = []
        return self._list_of_solutions(solutions_list, [], self.clues, self.size)

    def _list_of_solutions(self, total, start, clue, size):
        # `total` accumulates complete solutions; `start` is the prefix built
        # so far; `clue`/`size` describe the remaining suffix to fill.
        if len(clue) == 0:
            # empty row. Return one solution with all empty cells
            total.append([-1] * size)
            return total
        elif len(clue) == 1:
            # one clue left. Check empty cell count, and move clue over these cells
            empty_cells = size - clue[0] + 1
            for empty_start_cells in range(empty_cells):
                solution = start[:]
                solution += [-1] * empty_start_cells
                solution += [1] * clue[0]
                solution += [-1] * (size - clue[0] - empty_start_cells)
                total.append(solution)
            return total
        else:
            # Multiple clues left. Check empty cell count, and move first clue over these cells
            # Then, recursively call this function to find the possible positions of the other clues
            empty_cells = size - clue[0] + 1
            for empty_start_cells in range(empty_cells):
                solution = start[:]
                solution += [-1] * empty_start_cells
                solution += [1] * clue[0]
                solution += [-1]
                new_size = size - clue[0] - empty_start_cells - 1
                total = self._list_of_solutions(total, solution,
                                                clue[1:], new_size)
            return total

    def _check_solution(self, solution):
        """
        Checks if the solution given fits the current solution of the row.
        Assumes solution has no unknown values.
        Does not check if solution fits clues.
        Returns True if solution fits (no differences in known values)
        Returns False if solution does not fit
        """
        for new_value, current_value in zip(solution, self.values):
            if current_value == 0:
                continue
            elif new_value == current_value:
                continue
            return False
        return True

    def _get_matching_solution(self, solution1, solution2):
        """
        Returns a combination of two solutions.
        All common cells stay the same, different cells are changed to unknown.
        Assumes solutions have no unknown values.
        """
        solution = []
        for value1, value2 in zip(solution1, solution2):
            if value1 == value2:
                solution.append(value1)
            else:
                solution.append(0)
        return solution
| 34.652778 | 100 | 0.586774 |
1019383b057620d33ef417d50a60b8ce8e88bd00 | 20,498 | py | Python | outliers_removal/pcpnet.py | GiladGH/pointcleannet | 1c7f8f00c2062923063de2f796d96e444476d698 | [
"MIT"
] | 1 | 2019-09-30T03:33:47.000Z | 2019-09-30T03:33:47.000Z | outliers_removal/pcpnet.py | GiladGH/pointcleannet | 1c7f8f00c2062923063de2f796d96e444476d698 | [
"MIT"
] | null | null | null | outliers_removal/pcpnet.py | GiladGH/pointcleannet | 1c7f8f00c2062923063de2f796d96e444476d698 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torch.nn.functional as F
from torch.autograd import Variable
import utils
class STN(nn.Module):
    """Spatial transformer network: predicts a ``dim x dim`` transform for the
    input point set (or, with ``quaternion=True``, a quaternion that is
    converted to a 3x3 rotation matrix)."""

    def __init__(self, num_scales=1, num_points=500, dim=3, sym_op='max', quaternion=False):
        super(STN, self).__init__()
        self.quaternion = quaternion
        self.dim = dim
        self.sym_op = sym_op
        self.num_scales = num_scales
        self.num_points = num_points

        # shared per-point MLP (dim -> 64 -> 128 -> 1024)
        self.conv1 = torch.nn.Conv1d(self.dim, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        # pools over the point dimension (one scale at a time)
        self.mp1 = torch.nn.MaxPool1d(num_points)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)

        # output head: full dim*dim matrix, or 4 quaternion components
        if not quaternion:
            self.fc3 = nn.Linear(256, self.dim*self.dim)
        else:
            self.fc3 = nn.Linear(256, 4)

        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)

        if self.num_scales > 1:
            # fuse the per-scale pooled features back to a single 1024 vector
            self.fc0 = nn.Linear(1024*self.num_scales, 1024)
            self.bn0 = nn.BatchNorm1d(1024)

    def forward(self, x):
        # x: (batch, dim, num_scales*num_points) -- TODO confirm against callers
        batchsize = x.size()[0]
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))

        # symmetric operation over all points
        if self.num_scales == 1:
            x = self.mp1(x)
        else:
            # pool each scale's slice of points separately, then concatenate
            if x.is_cuda:
                x_scales = Variable(torch.cuda.FloatTensor(x.size(0), 1024*self.num_scales, 1))
            else:
                x_scales = Variable(torch.FloatTensor(x.size(0), 1024*self.num_scales, 1))
            for s in range(self.num_scales):
                x_scales[:, s*1024:(s+1)*1024, :] = self.mp1(x[:, :, s*self.num_points:(s+1)*self.num_points])
            x = x_scales

        x = x.view(-1, 1024*self.num_scales)

        if self.num_scales > 1:
            x = F.relu(self.bn0(self.fc0(x)))

        x = F.relu(self.bn4(self.fc1(x)))
        x = F.relu(self.bn5(self.fc2(x)))
        x = self.fc3(x)

        if not self.quaternion:
            # add identity so the untrained network starts near the identity transform
            iden = Variable(torch.from_numpy(np.identity(self.dim, 'float32')).clone()).view(1, self.dim*self.dim).repeat(batchsize, 1)
            if x.is_cuda:
                iden = iden.cuda()
            x = x + iden
            x = x.view(-1, self.dim, self.dim)
        else:
            # add identity quaternion (so the network can output 0 to leave the point cloud identical)
            iden = Variable(torch.FloatTensor([1, 0, 0, 0]))
            if x.is_cuda:
                iden = iden.cuda()
            x = x + iden

            # convert quaternion to rotation matrix
            if x.is_cuda:
                trans = Variable(torch.cuda.FloatTensor(batchsize, 3, 3))
            else:
                trans = Variable(torch.FloatTensor(batchsize, 3, 3))
            x = utils.batch_quat_to_rotmat(x, trans)

        return x
class PointNetfeat(nn.Module):
    """PointNet feature extractor: optional input/feature spatial transformers,
    shared per-point MLPs, and a symmetric (max/sum) pooling over points that
    yields a global feature vector per patch."""

    def __init__(self, num_scales=1, num_points=500, use_point_stn=True, use_feat_stn=True, sym_op='max', get_pointfvals=False, point_tuple=1):
        super(PointNetfeat, self).__init__()
        self.num_points = num_points
        self.num_scales = num_scales
        self.use_point_stn = use_point_stn
        self.use_feat_stn = use_feat_stn
        self.sym_op = sym_op
        # when True, forward() also returns the pre-pooling per-point features
        self.get_pointfvals = get_pointfvals
        # number of points grouped into one input tuple (input channels = 3*point_tuple)
        self.point_tuple = point_tuple

        if self.use_point_stn:
            # quaternion STN on the raw xyz coordinates
            # self.stn1 = STN(num_scales=self.num_scales, num_points=num_points, dim=3, sym_op=self.sym_op)
            self.stn1 = STN(num_scales=self.num_scales, num_points=num_points*self.point_tuple, dim=3, sym_op=self.sym_op, quaternion=True)

        if self.use_feat_stn:
            # 64x64 transform applied in feature space
            self.stn2 = STN(num_scales=self.num_scales, num_points=num_points, dim=64, sym_op=self.sym_op)

        self.conv0a = torch.nn.Conv1d(3*self.point_tuple, 64, 1)
        self.conv0b = torch.nn.Conv1d(64, 64, 1)

        # TODO remove
        # self.conv0c = torch.nn.Conv1d(64, 64, 1)
        # self.bn0c = nn.BatchNorm1d(64)
        # self.conv1b = torch.nn.Conv1d(64, 64, 1)
        # self.bn1b = nn.BatchNorm1d(64)

        self.bn0a = nn.BatchNorm1d(64)
        self.bn0b = nn.BatchNorm1d(64)
        self.conv1 = torch.nn.Conv1d(64, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)

        if self.num_scales > 1:
            self.conv4 = torch.nn.Conv1d(1024, 1024*self.num_scales, 1)
            self.bn4 = nn.BatchNorm1d(1024*self.num_scales)

        if self.sym_op == 'max':
            self.mp1 = torch.nn.MaxPool1d(num_points)
        elif self.sym_op == 'sum':
            self.mp1 = None
        else:
            raise ValueError('Unsupported symmetric operation: %s' % (self.sym_op))

    def forward(self, x):
        # input transform
        if self.use_point_stn:
            # from tuples to list of single points
            x = x.view(x.size(0), 3, -1)
            trans = self.stn1(x)
            x = x.transpose(2, 1)
            x = torch.bmm(x, trans)
            x = x.transpose(2, 1)
            x = x.contiguous().view(x.size(0), 3*self.point_tuple, -1)
        else:
            trans = None

        # mlp (64,64)
        x = F.relu(self.bn0a(self.conv0a(x)))
        x = F.relu(self.bn0b(self.conv0b(x)))
        # TODO remove
        # x = F.relu(self.bn0c(self.conv0c(x)))

        # feature transform
        if self.use_feat_stn:
            trans2 = self.stn2(x)
            x = x.transpose(2, 1)
            x = torch.bmm(x, trans2)
            x = x.transpose(2, 1)
        else:
            trans2 = None

        # mlp (64,128,1024)
        x = F.relu(self.bn1(self.conv1(x)))
        # TODO remove
        # x = F.relu(self.bn1b(self.conv1b(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.bn3(self.conv3(x))

        # mlp (1024,1024*num_scales)
        if self.num_scales > 1:
            x = self.bn4(self.conv4(F.relu(x)))

        if self.get_pointfvals:
            pointfvals = x
        else:
            pointfvals = None  # so the intermediate result can be forgotten if it is not needed

        # symmetric max operation over all points
        if self.num_scales == 1:
            if self.sym_op == 'max':
                x = self.mp1(x)
            elif self.sym_op == 'sum':
                x = torch.sum(x, 2, keepdim=True)
            else:
                raise ValueError('Unsupported symmetric operation: %s' % (self.sym_op))
        else:
            # pool each scale's slice of points separately, then concatenate
            if x.is_cuda:
                x_scales = Variable(torch.cuda.FloatTensor(x.size(0), 1024*self.num_scales**2, 1))
            else:
                x_scales = Variable(torch.FloatTensor(x.size(0), 1024*self.num_scales**2, 1))
            if self.sym_op == 'max':
                for s in range(self.num_scales):
                    x_scales[:, s*self.num_scales*1024:(s+1)*self.num_scales*1024, :] = self.mp1(x[:, :, s*self.num_points:(s+1)*self.num_points])
            elif self.sym_op == 'sum':
                for s in range(self.num_scales):
                    x_scales[:, s*self.num_scales*1024:(s+1)*self.num_scales*1024, :] = torch.sum(x[:, :, s*self.num_points:(s+1)*self.num_points], 2, keepdim=True)
            else:
                raise ValueError('Unsupported symmetric operation: %s' % (self.sym_op))
            x = x_scales

        x = x.view(-1, 1024*self.num_scales**2)

        return x, trans, trans2, pointfvals
class BasicBlock(nn.Module):
    """Two-layer residual unit: ``relu(bn2(l2(relu(bn1(l1(x))))) + shortcut(x))``.

    With ``conv=True`` the layers are 1x1 Conv1d (applied per point), otherwise
    fully connected. When the input and output widths differ, the shortcut is a
    projection (linear/conv plus batch norm); otherwise it is the identity.
    ``stride`` is accepted for API compatibility but unused.
    """

    expansion = 1

    def __init__(self, in_planes, planes, stride=1, conv = False):
        super(BasicBlock, self).__init__()
        # Single factory for both layer flavours; module creation order below
        # matches the historical one so seeded initialisation is reproducible.
        if conv:
            make_layer = lambda n_in, n_out: torch.nn.Conv1d(n_in, n_out, 1)
        else:
            make_layer = lambda n_in, n_out: nn.Linear(n_in, n_out)
        self.l1 = make_layer(in_planes, planes)
        self.l2 = make_layer(planes, planes)
        self.bn1 = nn.BatchNorm1d(planes, momentum=0.01)
        if in_planes != planes:
            # projection shortcut to match the output width
            self.l0 = make_layer(in_planes, planes)
            self.shortcut = nn.Sequential(self.l0, nn.BatchNorm1d(planes))
        else:
            self.shortcut = nn.Sequential()
        self.bn2 = nn.BatchNorm1d(planes, momentum=0.01)

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.l1(x)))
        out = self.bn2(self.l2(out))
        return F.relu(out + residual)
class ResSTN(nn.Module):
    """Spatial transformer network built from residual BasicBlocks.

    Predicts either a dim x dim transform matrix or, with quaternion=True,
    four quaternion components that are converted to a 3x3 rotation matrix.
    """

    def __init__(self, num_scales=1, num_points=500, dim=3, sym_op='max', quaternion =False):
        # dim: size of the transform (3 for raw points, 64 for features).
        # quaternion: predict a quaternion instead of a dim*dim matrix.
        super(ResSTN, self).__init__()
        self.quaternion = quaternion
        self.dim = dim
        self.sym_op = sym_op
        self.num_scales = num_scales
        self.num_points = num_points
        self.b1 = BasicBlock(self.dim, 64, conv = True)
        self.b2 = BasicBlock(64, 128, conv = True)
        self.b3 = BasicBlock(128, 1024, conv = True)
        self.mp1 = torch.nn.MaxPool1d(num_points)
        self.bfc1 = BasicBlock(1024, 512)
        self.bfc2 = BasicBlock(512, 256)
        if not quaternion:
            self.bfc3 = BasicBlock(256, self.dim*self.dim)
        else:
            self.bfc3 = BasicBlock(256, 4)
        if self.num_scales > 1:
            # fuses the concatenated per-scale features back to 1024
            self.bfc0 = BasicBlock(1024*self.num_scales, 1024)

    def forward(self, x):
        batchsize = x.size()[0]
        x = self.b1(x)
        x = self.b2(x)
        x = self.b3(x)

        # symmetric operation over all points
        if self.num_scales == 1:
            x = self.mp1(x)
        else:
            # max-pool each scale's slice of points separately, then concatenate
            if x.is_cuda:
                x_scales = Variable(torch.cuda.FloatTensor(x.size(0), 1024*self.num_scales, 1))
            else:
                x_scales = Variable(torch.FloatTensor(x.size(0), 1024*self.num_scales, 1))
            for s in range(self.num_scales):
                x_scales[:, s*1024:(s+1)*1024, :] = self.mp1(x[:, :, s*self.num_points:(s+1)*self.num_points])
            x = x_scales

        x = x.view(-1, 1024*self.num_scales)

        if self.num_scales > 1:
            x = self.bfc0(x)

        x =self.bfc1(x)
        x = self.bfc2(x)
        x = self.bfc3(x)

        if not self.quaternion:
            # add the identity so the network can output 0 for "no transform"
            iden = Variable(torch.from_numpy(np.identity(self.dim, 'float32')).clone()).view(1, self.dim*self.dim).repeat(batchsize, 1)
            if x.is_cuda:
                iden = iden.cuda()
            x = x + iden
            x = x.view(-1, self.dim, self.dim)
        else:
            # add identity quaternion (so the network can output 0 to leave the point cloud identical)
            iden = Variable(torch.FloatTensor([1, 0, 0, 0]))
            if x.is_cuda:
                iden = iden.cuda()
            x = x + iden

            # convert quaternion to rotation matrix
            if x.is_cuda:
                trans = Variable(torch.cuda.FloatTensor(batchsize, 3, 3))
            else:
                trans = Variable(torch.FloatTensor(batchsize, 3, 3))
            x = utils.batch_quat_to_rotmat(x, trans)

        return x
class ResPointNetfeat(nn.Module):
    """Residual variant of PointNetfeat: the shared per-point MLPs and the
    multi-scale expansion are built from BasicBlock residual units.

    forward() returns (global feature, input transform or None,
    feature transform or None, per-point features or None).
    """

    def __init__(self, num_scales=1, num_points=500, use_point_stn=True, use_feat_stn=True, sym_op='max', get_pointfvals=False, point_tuple=1):
        # num_scales: number of patch scales stacked along the point axis.
        # num_points: number of points per scale.
        # use_point_stn: learn a rotation (quaternion) applied to the raw input.
        # use_feat_stn: learn a 64x64 transform applied to the 64-d features.
        # sym_op: symmetric aggregation over points, 'max' or 'sum'.
        # get_pointfvals: also return per-point features before aggregation.
        # point_tuple: points grouped per tuple (3*point_tuple input channels).
        super(ResPointNetfeat, self).__init__()
        self.num_points = num_points
        self.num_scales = num_scales
        self.use_point_stn = use_point_stn
        self.use_feat_stn = use_feat_stn
        self.sym_op = sym_op
        self.get_pointfvals = get_pointfvals
        self.point_tuple = point_tuple
        if self.use_point_stn:
            self.stn1 = ResSTN(num_scales=self.num_scales, num_points=num_points*self.point_tuple, dim=3, sym_op=self.sym_op, quaternion=True)
        if self.use_feat_stn:
            self.stn2 = ResSTN(num_scales=self.num_scales, num_points=num_points, dim=64, sym_op=self.sym_op)
        self.b0a = BasicBlock(3*self.point_tuple, 64, conv = True)
        self.b0b = BasicBlock(64, 64, conv=True)
        self.b1 = BasicBlock(64, 64, conv = True)
        self.b2 = BasicBlock(64, 128, conv = True)
        self.b3 = BasicBlock(128, 1024, conv = True)
        if self.num_scales > 1:
            # BUGFIX: was `1024*self.num_scs` — `num_scs` is never assigned, so
            # any multi-scale instance raised AttributeError here. Mirrors the
            # 1024 -> 1024*num_scales expansion done by PointNetfeat.conv4.
            self.b4 = BasicBlock(1024, 1024*self.num_scales, conv = True)
        if self.sym_op == 'max':
            self.mp1 = torch.nn.MaxPool1d(num_points)
        elif self.sym_op == 'sum':
            self.mp1 = None
        else:
            raise ValueError('Unsupported symmetric operation: %s' % (self.sym_op))

    def forward(self, x):
        # input transform
        if self.use_point_stn:
            # from tuples to list of single points
            x = x.view(x.size(0), 3, -1)
            trans = self.stn1(x)
            x = x.transpose(2, 1)
            x = torch.bmm(x, trans)
            x = x.transpose(2, 1)
            x = x.contiguous().view(x.size(0), 3*self.point_tuple, -1)
        else:
            trans = None

        # mlp (64,64)
        x = self.b0a(x)
        x = self.b0b(x)

        # feature transform
        if self.use_feat_stn:
            trans2 = self.stn2(x)
            x = x.transpose(2, 1)
            x = torch.bmm(x, trans2)
            x = x.transpose(2, 1)
        else:
            trans2 = None

        # mlp (64,128,1024)
        x = self.b1(x)
        x = self.b2(x)
        x = self.b3(x)

        # mlp (1024,1024*num_scales)
        if self.num_scales > 1:
            x = self.b4(x)

        if self.get_pointfvals:
            pointfvals = x
        else:
            pointfvals = None # so the intermediate result can be forgotten if it is not needed

        # symmetric max operation over all points
        if self.num_scales == 1:
            if self.sym_op == 'max':
                x = self.mp1(x)
            elif self.sym_op == 'sum':
                x = torch.sum(x, 2, keepdim=True)
            else:
                raise ValueError('Unsupported symmetric operation: %s' % (self.sym_op))
        else:
            # aggregate each scale's slice of points separately into one
            # (batch, 1024*num_scales**2, 1) tensor
            if x.is_cuda:
                x_scales = Variable(torch.cuda.FloatTensor(x.size(0), 1024*self.num_scales**2, 1))
            else:
                x_scales = Variable(torch.FloatTensor(x.size(0), 1024*self.num_scales**2, 1))
            if self.sym_op == 'max':
                for s in range(self.num_scales):
                    x_scales[:, s*self.num_scales*1024:(s+1)*self.num_scales*1024, :] = self.mp1(x[:, :, s*self.num_points:(s+1)*self.num_points])
            elif self.sym_op == 'sum':
                for s in range(self.num_scales):
                    x_scales[:, s*self.num_scales*1024:(s+1)*self.num_scales*1024, :] = torch.sum(x[:, :, s*self.num_points:(s+1)*self.num_points], 2, keepdim=True)
            else:
                raise ValueError('Unsupported symmetric operation: %s' % (self.sym_op))
            x = x_scales

        x = x.view(-1, 1024*self.num_scales**2)

        return x, trans, trans2, pointfvals
class ResPCPNet(nn.Module):
    """Single-scale PCPNet variant whose regression head uses residual blocks.

    Runs a ResPointNetfeat backbone, then maps the 1024-d global feature to
    ``output_dim`` values through three BasicBlocks. forward() passes the
    backbone's transforms and optional per-point features through unchanged.
    """

    def __init__(self, num_points=500, output_dim=3, use_point_stn=True, use_feat_stn=True, sym_op='max', get_pointfvals=False, point_tuple=1):
        super(ResPCPNet, self).__init__()
        self.num_points = num_points
        # Backbone first, head second: creation order kept stable so seeded
        # parameter initialisation is reproducible.
        self.feat = ResPointNetfeat(
            num_points=num_points,
            num_scales=1,
            use_point_stn=use_point_stn,
            use_feat_stn=use_feat_stn,
            sym_op=sym_op,
            get_pointfvals=get_pointfvals,
            point_tuple=point_tuple)
        self.b1 = BasicBlock(1024, 512)
        self.b2 = BasicBlock(512, 256)
        self.b3 = BasicBlock(256, output_dim)

    def forward(self, x):
        out, trans, trans2, pointfvals = self.feat(x)
        for block in (self.b1, self.b2, self.b3):
            out = block(out)
        return out, trans, trans2, pointfvals
class ResMSPCPNet(nn.Module):
    """Multi-scale PCPNet variant whose regression head uses residual blocks.

    Runs a multi-scale ResPointNetfeat backbone, compresses the concatenated
    1024*num_scales**2 feature back to 1024, then regresses ``output_dim``
    values through three further BasicBlocks.
    """

    def __init__(self, num_scales=2, num_points=500, output_dim=3, use_point_stn=True, use_feat_stn=True, sym_op='max', get_pointfvals=False, point_tuple=1):
        super(ResMSPCPNet, self).__init__()
        self.num_points = num_points
        # Creation order (feat, b0..b3) kept stable for reproducible init.
        self.feat = ResPointNetfeat(
            num_points=num_points,
            num_scales=num_scales,
            use_point_stn=use_point_stn,
            use_feat_stn=use_feat_stn,
            sym_op=sym_op,
            get_pointfvals=get_pointfvals,
            point_tuple=point_tuple)
        self.b0 = BasicBlock(1024*num_scales**2, 1024)
        self.b1 = BasicBlock(1024, 512)
        self.b2 = BasicBlock(512, 256)
        self.b3 = BasicBlock(256, output_dim)

    def forward(self, x):
        out, trans, trans2, pointfvals = self.feat(x)
        for block in (self.b0, self.b1, self.b2, self.b3):
            out = block(out)
        return out, trans, trans2, pointfvals
class PCPNet(nn.Module):
    """Single-scale PCPNet: PointNet feature backbone plus a fully connected
    regression head (1024 -> 512 -> 256 -> output_dim) with batch norm and
    dropout. forward() passes the backbone's transforms and optional per-point
    features through unchanged.
    """

    def __init__(self, num_points=500, output_dim=3, use_point_stn=True, use_feat_stn=True, sym_op='max', get_pointfvals=False, point_tuple=1):
        super(PCPNet, self).__init__()
        self.num_points = num_points
        self.feat = PointNetfeat(
            num_points=num_points,
            num_scales=1,
            use_point_stn=use_point_stn,
            use_feat_stn=use_feat_stn,
            sym_op=sym_op,
            get_pointfvals=get_pointfvals,
            point_tuple=point_tuple)
        # Creation order (fc*, bn*, do*) kept stable for reproducible init.
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, output_dim)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.do1 = nn.Dropout(p=0.3)
        self.do2 = nn.Dropout(p=0.3)

    def forward(self, x):
        hidden, trans, trans2, pointfvals = self.feat(x)
        hidden = self.do1(F.relu(self.bn1(self.fc1(hidden))))
        hidden = self.do2(F.relu(self.bn2(self.fc2(hidden))))
        return self.fc3(hidden), trans, trans2, pointfvals
class MSPCPNet(nn.Module):
    """Multi-scale PCPNet: multi-scale PointNet backbone plus a fully
    connected head that first compresses the concatenated
    1024*num_scales**2 feature back to 1024, then regresses ``output_dim``
    values with batch norm and dropout.
    """

    def __init__(self, num_scales=2, num_points=500, output_dim=3, use_point_stn=True, use_feat_stn=True, sym_op='max', get_pointfvals=False, point_tuple=1):
        super(MSPCPNet, self).__init__()
        self.num_points = num_points
        self.feat = PointNetfeat(
            num_points=num_points,
            num_scales=num_scales,
            use_point_stn=use_point_stn,
            use_feat_stn=use_feat_stn,
            sym_op=sym_op,
            get_pointfvals=get_pointfvals,
            point_tuple=point_tuple)
        # Creation order (fc*, bn*, do*) kept stable for reproducible init.
        self.fc0 = nn.Linear(1024*num_scales**2, 1024)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, output_dim)
        self.bn0 = nn.BatchNorm1d(1024)
        self.bn1 = nn.BatchNorm1d(512)
        self.bn2 = nn.BatchNorm1d(256)
        self.do1 = nn.Dropout(p=0.3)
        self.do2 = nn.Dropout(p=0.3)

    def forward(self, x):
        hidden, trans, trans2, pointfvals = self.feat(x)
        hidden = F.relu(self.bn0(self.fc0(hidden)))
        hidden = self.do1(F.relu(self.bn1(self.fc1(hidden))))
        hidden = self.do2(F.relu(self.bn2(self.fc2(hidden))))
        return self.fc3(hidden), trans, trans2, pointfvals
| 36.024605 | 164 | 0.566787 |
cfdca9eeff45a0cc70269cc98e6f871e00b82ddc | 15,874 | py | Python | source/editableText.py | masakotoda/nvda | d520713211e070c66ab0debe14b229036183fa57 | [
"bzip2-1.0.6"
] | null | null | null | source/editableText.py | masakotoda/nvda | d520713211e070c66ab0debe14b229036183fa57 | [
"bzip2-1.0.6"
] | null | null | null | source/editableText.py | masakotoda/nvda | d520713211e070c66ab0debe14b229036183fa57 | [
"bzip2-1.0.6"
] | null | null | null | # A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2006-2020 NV Access Limited, Davy Kager, Julien Cochuyt
"""Common support for editable text.
@note: If you want editable text functionality for an NVDAObject,
you should use the EditableText classes in L{NVDAObjects.behaviors}.
"""
import time
import sayAllHandler
import api
import review
from baseObject import ScriptableObject
from documentBase import TextContainerObject
import braille
import speech
import config
import eventHandler
from scriptHandler import isScriptWaiting, willSayAllResume
import textInfos
import controlTypes
from logHandler import log
class EditableText(TextContainerObject,ScriptableObject):
	"""Provides scripts to report appropriately when moving the caret in editable text fields.
	This does not handle the selection change keys.
	To have selection changes reported, the object must notify of selection changes.
	If the object supports selection but does not notify of selection changes, L{EditableTextWithoutAutoSelectDetection} should be used instead.
	If the object notifies of selection changes, the following should be done:
		* When the object gains focus, L{initAutoSelectDetection} must be called.
		* When the object notifies of a possible selection change, L{detectPossibleSelectionChange} must be called.
		* Optionally, if the object notifies of changes to its content, L{hasContentChangedSinceLastSelection} should be set to C{True}.
	@ivar hasContentChangedSinceLastSelection: Whether the content has changed since the last selection occurred.
	@type hasContentChangedSinceLastSelection: bool
	"""

	#: Whether to fire caretMovementFailed events when the caret doesn't move in response to a caret movement key.
	shouldFireCaretMovementFailedEvents = False

	#: Whether or not to announce text found before the caret on a new line (e.g. auto numbering)
	announceNewLineText=True
	#: When announcing new line text: should the entire line be announced, or just text after the caret?
	announceEntireNewLine=False

	#: The minimum amount of time that should elapse before checking if the word under the caret has changed
	_hasCaretMoved_minWordTimeoutMs=30

	#: The maximum amount of time that may elapse before we no longer rely on caret events to detect movement.
	_useEvents_maxTimeoutMs = 10

	#: Scale factor applied to the configured caret move timeout in L{_hasCaretMoved}.
	_caretMovementTimeoutMultiplier = 1

	def _hasCaretMoved(self, bookmark, retryInterval=0.01, timeout=None, origWord=None):
		"""
		Waits for the caret to move, for a timeout to elapse, or for a new focus event or script to be queued.
		@param bookmark: a bookmark representing the position of the caret before it was instructed to move
		@type bookmark: bookmark
		@param retryInterval: the interval of time in seconds this method should wait before checking the caret each time.
		@type retryInterval: float
		@param timeout: the over all amount of time in seconds the method should wait before giving up completely,
			C{None} to use the value from the configuration.
		@type timeout: float
		@param origWord: The word at the caret before the movement command,
			C{None} if the word at the caret should not be used to detect movement.
			This is intended for use with the delete key.
		@return: a tuple containing a boolean denoting whether this method timed out, and a TextInfo representing the old or updated caret position or None if interupted by a script or focus event.
		@rtype: tuple
		"""
		if timeout is None:
			timeoutMs = config.conf["editableText"]["caretMoveTimeoutMs"]
		else:
			# This function's arguments are in seconds, but we want ms.
			timeoutMs = timeout * 1000
		timeoutMs *= self._caretMovementTimeoutMultiplier
		# time.sleep accepts seconds, so retryInterval is in seconds.
		# Convert to integer ms to avoid floating point precision errors when adding to elapsed.
		retryMs = int(retryInterval * 1000)
		elapsed = 0
		newInfo=None
		while True:
			if isScriptWaiting():
				return (False,None)
			api.processPendingEvents(processEventQueue=False)
			if eventHandler.isPendingEvents("gainFocus"):
				log.debug("Focus event. Elapsed: %d ms" % elapsed)
				return (True,None)
			# If the focus changes after this point, fetching the caret may fail,
			# but we still want to stay in this loop.
			try:
				newInfo = self.makeTextInfo(textInfos.POSITION_CARET)
			except (RuntimeError,NotImplementedError):
				newInfo = None
			else:
				# Caret events are unreliable in some controls.
				# Only use them if we consider them safe to rely on for a particular control,
				# and only if they arrive within C{_useEvents_maxTimeoutMs} mili seconds
				# after causing the event to occur.
				if (
					elapsed <= self._useEvents_maxTimeoutMs and
					self.caretMovementDetectionUsesEvents and
					(eventHandler.isPendingEvents("caret") or eventHandler.isPendingEvents("textChange"))
				):
					log.debug("Caret move detected using event. Elapsed: %d ms" % elapsed)
					return (True,newInfo)
			# Try to detect with bookmarks.
			newBookmark = None
			if newInfo:
				try:
					newBookmark = newInfo.bookmark
				except (RuntimeError,NotImplementedError):
					pass
			if newBookmark and newBookmark!=bookmark:
				log.debug("Caret move detected using bookmarks. Elapsed: %d ms" % elapsed)
				return (True, newInfo)
			if origWord is not None and newInfo and elapsed >= self._hasCaretMoved_minWordTimeoutMs:
				# When pressing delete, bookmarks might not be enough to detect caret movement.
				# Therefore try detecting if the word under the caret has changed, such as when pressing delete.
				# some editors such as Mozilla Gecko can have text and units that get out of sync with eachother while a character is being deleted.
				# Therefore, only check if the word has changed after a particular amount of time has elapsed, allowing the text and units to settle down.
				wordInfo = newInfo.copy()
				wordInfo.expand(textInfos.UNIT_WORD)
				word = wordInfo.text
				if word != origWord:
					log.debug("Word at caret changed. Elapsed: %d ms" % elapsed)
					return (True, newInfo)
			if elapsed >= timeoutMs:
				break
			time.sleep(retryInterval)
			elapsed += retryMs
		log.debug("Caret didn't move before timeout. Elapsed: %d ms" % elapsed)
		return (False,newInfo)

	def _caretScriptPostMovedHelper(self, speakUnit, gesture, info=None):
		# Common post-processing after a caret move: update the review cursor,
		# speak the requested unit (unless say-all is resuming) and refresh braille.
		if isScriptWaiting():
			return
		if not info:
			try:
				info = self.makeTextInfo(textInfos.POSITION_CARET)
			except:
				return
		# Forget the word currently being typed as the user has moved the caret somewhere else.
		speech.clearTypedWordBuffer()
		review.handleCaretMove(info)
		if speakUnit and not willSayAllResume(gesture):
			info.expand(speakUnit)
			speech.speakTextInfo(info, unit=speakUnit, reason=controlTypes.REASON_CARET)
		braille.handler.handleCaretMove(self)

	def _caretMovementScriptHelper(self, gesture, unit):
		# Send the gesture to the control, wait for the caret to move, then report.
		try:
			info=self.makeTextInfo(textInfos.POSITION_CARET)
		except:
			gesture.send()
			return
		bookmark=info.bookmark
		gesture.send()
		caretMoved,newInfo=self._hasCaretMoved(bookmark)
		if not caretMoved and self.shouldFireCaretMovementFailedEvents:
			eventHandler.executeEvent("caretMovementFailed", self, gesture=gesture)
		self._caretScriptPostMovedHelper(unit,gesture,newInfo)

	def _get_caretMovementDetectionUsesEvents(self) -> bool:
		"""Returns whether or not to rely on caret and textChange events when
		finding out whether the caret position has changed after pressing a caret movement gesture.
		Note that if L{_useEvents_maxTimeoutMs} is elapsed,
		relying on events is no longer reliable in most situations.
		Therefore, any event should occur before that timeout elapses.
		"""
		# This class is a mixin that usually comes before other relevant classes in the mro.
		# Therefore, try to call super first, and if that fails, return the default (C{True}.
		try:
			return super().caretMovementDetectionUsesEvents
		except AttributeError:
			return True

	def script_caret_newLine(self,gesture):
		# Report the text of the line that the caret lands on after pressing
		# enter (e.g. auto numbering inserted by the editor).
		try:
			info=self.makeTextInfo(textInfos.POSITION_CARET)
		except:
			gesture.send()
			return
		bookmark=info.bookmark
		gesture.send()
		caretMoved,newInfo=self._hasCaretMoved(bookmark)
		if not caretMoved or not newInfo:
			return
		# newInfo.copy should be good enough here, but in MS Word we get strange results.
		try:
			lineInfo=self.makeTextInfo(textInfos.POSITION_CARET)
		except (RuntimeError,NotImplementedError):
			return
		lineInfo.expand(textInfos.UNIT_LINE)
		if not self.announceEntireNewLine:
			lineInfo.setEndPoint(newInfo,"endToStart")
		if lineInfo.isCollapsed:
			lineInfo.expand(textInfos.UNIT_CHARACTER)
			onlyInitial=True
		else:
			onlyInitial=False
		speech.speakTextInfo(lineInfo,unit=textInfos.UNIT_LINE,reason=controlTypes.REASON_CARET,onlyInitialFields=onlyInitial,suppressBlanks=True)

	def _caretMoveBySentenceHelper(self, gesture, direction):
		# Sentence movement is performed by NVDA itself (not the control),
		# falling back to sending the gesture when the TextInfo does not
		# support sentence units.
		if isScriptWaiting():
			return
		try:
			info=self.makeTextInfo(textInfos.POSITION_CARET)
			info.move(textInfos.UNIT_SENTENCE, direction)
			info.updateCaret()
			self._caretScriptPostMovedHelper(textInfos.UNIT_SENTENCE,gesture,info)
		except:
			gesture.send()
			return

	def script_caret_moveByLine(self,gesture):
		self._caretMovementScriptHelper(gesture, textInfos.UNIT_LINE)
	script_caret_moveByLine.resumeSayAllMode=sayAllHandler.CURSOR_CARET

	def script_caret_moveByCharacter(self,gesture):
		self._caretMovementScriptHelper(gesture, textInfos.UNIT_CHARACTER)

	def script_caret_moveByWord(self,gesture):
		self._caretMovementScriptHelper(gesture, textInfos.UNIT_WORD)

	def script_caret_moveByParagraph(self,gesture):
		self._caretMovementScriptHelper(gesture, textInfos.UNIT_PARAGRAPH)
	script_caret_moveByParagraph.resumeSayAllMode=sayAllHandler.CURSOR_CARET

	def script_caret_previousSentence(self,gesture):
		self._caretMoveBySentenceHelper(gesture, -1)
	script_caret_previousSentence.resumeSayAllMode=sayAllHandler.CURSOR_CARET

	def script_caret_nextSentence(self,gesture):
		self._caretMoveBySentenceHelper(gesture, 1)
	script_caret_nextSentence.resumeSayAllMode=sayAllHandler.CURSOR_CARET

	def _backspaceScriptHelper(self,unit,gesture):
		# Capture the unit before the caret so it can be spoken once the
		# backspace has taken effect.
		try:
			oldInfo=self.makeTextInfo(textInfos.POSITION_CARET)
		except:
			gesture.send()
			return
		oldBookmark=oldInfo.bookmark
		testInfo=oldInfo.copy()
		res=testInfo.move(textInfos.UNIT_CHARACTER,-1)
		if res<0:
			testInfo.expand(unit)
			delChunk=testInfo.text
		else:
			delChunk=""
		gesture.send()
		caretMoved,newInfo=self._hasCaretMoved(oldBookmark)
		if not caretMoved:
			return
		if len(delChunk)>1:
			speech.speakMessage(delChunk)
		else:
			speech.speakSpelling(delChunk)
		self._caretScriptPostMovedHelper(None,gesture,newInfo)

	def script_caret_backspaceCharacter(self,gesture):
		self._backspaceScriptHelper(textInfos.UNIT_CHARACTER,gesture)

	def script_caret_backspaceWord(self,gesture):
		self._backspaceScriptHelper(textInfos.UNIT_WORD,gesture)

	def _deleteScriptHelper(self, unit, gesture):
		# Record the word at the caret so _hasCaretMoved can use a word-change
		# check as a fallback for detecting the deletion.
		try:
			info=self.makeTextInfo(textInfos.POSITION_CARET)
		except:
			gesture.send()
			return
		bookmark=info.bookmark
		info.expand(textInfos.UNIT_WORD)
		word=info.text
		gesture.send()
		# We'll try waiting for the caret to move, but we don't care if it doesn't.
		caretMoved,newInfo=self._hasCaretMoved(bookmark,origWord=word)
		self._caretScriptPostMovedHelper(unit, gesture, newInfo)
		braille.handler.handleCaretMove(self)

	def script_caret_deleteCharacter(self, gesture):
		self._deleteScriptHelper(textInfos.UNIT_CHARACTER, gesture)

	def script_caret_deleteWord(self, gesture):
		self._deleteScriptHelper(textInfos.UNIT_WORD, gesture)

	# Maps caret movement/deletion keys to the scripts above.
	__gestures = {
		"kb:upArrow": "caret_moveByLine",
		"kb:downArrow": "caret_moveByLine",
		"kb:leftArrow": "caret_moveByCharacter",
		"kb:rightArrow": "caret_moveByCharacter",
		"kb:pageUp": "caret_moveByLine",
		"kb:pageDown": "caret_moveByLine",
		"kb:control+leftArrow": "caret_moveByWord",
		"kb:control+rightArrow": "caret_moveByWord",
		"kb:control+upArrow": "caret_moveByParagraph",
		"kb:control+downArrow": "caret_moveByParagraph",
		"kb:alt+upArrow": "caret_previousSentence",
		"kb:alt+downArrow": "caret_nextSentence",
		"kb:home": "caret_moveByCharacter",
		"kb:end": "caret_moveByCharacter",
		"kb:control+home": "caret_moveByLine",
		"kb:control+end": "caret_moveByLine",
		"kb:delete": "caret_deleteCharacter",
		"kb:numpadDelete": "caret_deleteCharacter",
		"kb:control+delete": "caret_deleteWord",
		"kb:control+numpadDelete": "caret_deleteWord",
		"kb:backspace": "caret_backspaceCharacter",
		"kb:control+backspace": "caret_backspaceWord",
	}

	def initAutoSelectDetection(self):
		"""Initialise automatic detection of selection changes.
		This should be called when the object gains focus.
		"""
		try:
			self._lastSelectionPos=self.makeTextInfo(textInfos.POSITION_SELECTION)
		except:
			self._lastSelectionPos=None
		self.isTextSelectionAnchoredAtStart=True
		self.hasContentChangedSinceLastSelection=False

	def detectPossibleSelectionChange(self):
		"""Detects if the selection has been changed, and if so it speaks the change.
		"""
		try:
			newInfo=self.makeTextInfo(textInfos.POSITION_SELECTION)
		except:
			# Just leave the old selection, which is usually better than nothing.
			return
		oldInfo=getattr(self,'_lastSelectionPos',None)
		self._lastSelectionPos=newInfo.copy()
		if not oldInfo:
			# There's nothing we can do, but at least the last selection will be right next time.
			self.isTextSelectionAnchoredAtStart=True
			return
		self._updateSelectionAnchor(oldInfo,newInfo)
		hasContentChanged=getattr(self,'hasContentChangedSinceLastSelection',False)
		self.hasContentChangedSinceLastSelection=False
		speech.speakSelectionChange(oldInfo,newInfo,generalize=hasContentChanged)

	def _updateSelectionAnchor(self,oldInfo,newInfo):
		# Track which end of the selection is the anchor, inferred from which
		# end moved between the old and new selections.
		# Only update the value if the selection changed.
		if newInfo.compareEndPoints(oldInfo,"startToStart")!=0:
			self.isTextSelectionAnchoredAtStart=False
		elif newInfo.compareEndPoints(oldInfo,"endToEnd")!=0:
			self.isTextSelectionAnchoredAtStart=True
class EditableTextWithoutAutoSelectDetection(EditableText):
	"""Extends L{EditableText} with scripts that report selection changes themselves.
	Use this for objects that support selection but do not notify of selection changes:
	each selection-changing gesture is intercepted, passed on to the control, and the
	resulting selection difference is then reported.
	"""

	def reportSelectionChange(self, oldTextInfo):
		# Let pending events settle so the control reflects the new selection
		# before it is fetched and compared.
		api.processPendingEvents(processEventQueue=False)
		currentSelection = self.makeTextInfo(textInfos.POSITION_SELECTION)
		self._updateSelectionAnchor(oldTextInfo, currentSelection)
		speech.speakSelectionChange(oldTextInfo, currentSelection)
		braille.handler.handleCaretMove(self)

	def script_caret_changeSelection(self, gesture):
		try:
			selectionBefore = self.makeTextInfo(textInfos.POSITION_SELECTION)
		except:
			# The selection could not be fetched; just pass the key through.
			gesture.send()
			return
		gesture.send()
		if isScriptWaiting() or eventHandler.isPendingEvents("gainFocus"):
			return
		try:
			self.reportSelectionChange(selectionBefore)
		except:
			return

	# Gestures that change the selection and should therefore be intercepted.
	__changeSelectionGestures = (
		"kb:shift+upArrow",
		"kb:shift+downArrow",
		"kb:shift+leftArrow",
		"kb:shift+rightArrow",
		"kb:shift+pageUp",
		"kb:shift+pageDown",
		"kb:shift+control+leftArrow",
		"kb:shift+control+rightArrow",
		"kb:shift+control+upArrow",
		"kb:shift+control+downArrow",
		"kb:shift+home",
		"kb:shift+end",
		"kb:shift+control+home",
		"kb:shift+control+end",
		"kb:control+a",
	)

	def initClass(self):
		for selectionGesture in self.__changeSelectionGestures:
			self.bindGesture(selectionGesture, "caret_changeSelection")
| 39.784461 | 193 | 0.757087 |
695bac40efdd66a920e3a86c6a860e5f7a992972 | 13,445 | py | Python | lib/python2.7/site-packages/django/utils/unittest/loader.py | bop/bauhaus | 16678396ea7e3f95228e7419c6dcd127ff45a80a | [
"MIT"
] | 285 | 2019-12-23T09:50:21.000Z | 2021-12-08T09:08:49.000Z | lib/python2.7/site-packages/django/utils/unittest/loader.py | bop/bauhaus | 16678396ea7e3f95228e7419c6dcd127ff45a80a | [
"MIT"
] | 18 | 2015-01-14T07:51:48.000Z | 2021-10-14T01:19:26.000Z | lib/python2.7/site-packages/django/utils/unittest/loader.py | bop/bauhaus | 16678396ea7e3f95228e7419c6dcd127ff45a80a | [
"MIT"
] | 70 | 2015-01-01T00:33:24.000Z | 2021-12-10T03:43:07.000Z | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import unittest
from fnmatch import fnmatch
from django.utils.unittest import case, suite
try:
from os.path import relpath
except ImportError:
from django.utils.unittest.compatibility import relpath
__unittest = True
def _CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
# Matches importable Python source file names (an identifier followed by '.py').
# Only '.py' is accepted: also matching '.pyc'/'.pyo' would risk loading the
# same test module multiple times.
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
    """Build a suite whose single test re-raises the import failure for *name*."""
    parts = ['Failed to import test module: %s' % name]
    # traceback.format_exc is missing on Python 2.3; include it when available.
    # (It also contains two frames of discover.py itself.)
    if hasattr(traceback, 'format_exc'):
        parts.append(traceback.format_exc())
    return _make_failed_test('ModuleImportFailure', name,
                             ImportError('\n'.join(parts)), suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
    # A module's load_tests() hook raised: surface the error as a synthetic
    # failing test named after the module.
    return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
    """Return a one-test suite whose single test method raises *exception*."""
    def testFailure(self):
        raise exception
    # Synthesise a TestCase subclass carrying the failing method under the
    # requested name, instantiate it for that method, and wrap it in a suite.
    TestClass = type(classname, (case.TestCase,), {methodname: testFailure})
    return suiteClass((TestClass(methodname),))
class TestLoader(unittest.TestLoader):
    """
    This class is responsible for loading tests according to various criteria
    and returning them wrapped in a TestSuite
    """
    # Prefix identifying test methods on a TestCase.
    testMethodPrefix = 'test'
    # Old-style comparison function used to order test method names
    # (the Python 2 builtin cmp, adapted via _CmpToKey for key-based sorting).
    sortTestMethodsUsing = cmp
    # Factory used to wrap collected tests into a suite.
    suiteClass = suite.TestSuite
    # Top-level project directory, remembered by discover() for load_tests hooks.
    _top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite."
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception as e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
    def loadTestsFromName(self, name, module=None):
        """Return a suite of all tests cases given a string specifier.

        The name may resolve either to a module, a test case class, a
        test method within a test case class, or a callable object which
        returns a TestCase or TestSuite instance.

        The method optionally resolves the names relative to a given module.
        """
        parts = name.split('.')
        if module is None:
            # Import the longest importable prefix of the dotted name; the
            # remaining parts are then resolved as attributes below.
            parts_copy = parts[:]
            while parts_copy:
                try:
                    module = __import__('.'.join(parts_copy))
                    break
                except ImportError:
                    del parts_copy[-1]
                    if not parts_copy:
                        raise
            parts = parts[1:]
        obj = module
        for part in parts:
            parent, obj = obj, getattr(obj, part)

        if isinstance(obj, types.ModuleType):
            return self.loadTestsFromModule(obj)
        elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
            return self.loadTestsFromTestCase(obj)
        elif (isinstance(obj, types.UnboundMethodType) and
              isinstance(parent, type) and
              issubclass(parent, unittest.TestCase)):
            # Python 2 only: an unbound test method; instantiate its TestCase
            # for just that method.
            return self.suiteClass([parent(obj.__name__)])
        elif isinstance(obj, unittest.TestSuite):
            return obj
        elif hasattr(obj, '__call__'):
            # A callable is expected to produce a TestCase or TestSuite.
            test = obj()
            if isinstance(test, unittest.TestSuite):
                return test
            elif isinstance(test, unittest.TestCase):
                return self.suiteClass([test])
            else:
                raise TypeError("calling %s returned %s, not a test" %
                                (obj, test))
        else:
            raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
hasattr(getattr(testCaseClass, attrname), '__call__')
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
    def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
        """Find and return all test modules from the specified start
        directory, recursing into subdirectories to find them. Only test files
        that match the pattern will be loaded. (Using shell style pattern
        matching.)
        All test modules must be importable from the top level of the project.
        If the start directory is not the top level directory then the top
        level directory must be specified separately.
        If a test package name (directory with '__init__.py') matches the
        pattern then the package will be checked for a 'load_tests' function. If
        this exists then it will be called with loader, tests, pattern.
        If load_tests exists then discovery does *not* recurse into the package,
        load_tests is responsible for loading all tests in the package.
        The pattern is deliberately not stored as a loader attribute so that
        packages can continue discovery themselves. top_level_dir is stored so
        load_tests does not need to pass this argument in to loader.discover().
        """
        # Tracks whether top_level_dir was inferred from start_dir, so it
        # can be corrected below when start_dir is a dotted module name.
        set_implicit_top = False
        if top_level_dir is None and self._top_level_dir is not None:
            # make top_level_dir optional if called from load_tests in a package
            top_level_dir = self._top_level_dir
        elif top_level_dir is None:
            set_implicit_top = True
            top_level_dir = start_dir
        top_level_dir = os.path.abspath(top_level_dir)
        if not top_level_dir in sys.path:
            # all test modules must be importable from the top level directory
            # should we *unconditionally* put the start directory in first
            # in sys.path to minimise likelihood of conflicts between installed
            # modules and development versions?
            sys.path.insert(0, top_level_dir)
        self._top_level_dir = top_level_dir
        is_not_importable = False
        if os.path.isdir(os.path.abspath(start_dir)):
            start_dir = os.path.abspath(start_dir)
            # A subdirectory of the top level must itself be a package.
            if start_dir != top_level_dir:
                is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
        else:
            # support for discovery from dotted module names
            try:
                __import__(start_dir)
            except ImportError:
                is_not_importable = True
            else:
                the_module = sys.modules[start_dir]
                top_part = start_dir.split('.')[0]
                start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
                if set_implicit_top:
                    # Recompute the real top level from the imported
                    # package's location and drop the provisional path entry.
                    self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
                    sys.path.remove(top_level_dir)
        if is_not_importable:
            raise ImportError('Start directory is not importable: %r' % start_dir)
        tests = list(self._find_tests(start_dir, pattern))
        return self.suiteClass(tests)
def _get_name_from_path(self, path):
path = os.path.splitext(os.path.normpath(path))[0]
_relpath = relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
    def _find_tests(self, start_dir, pattern):
        """Used by discovery. Yields test suites it loads.

        Walks *start_dir* one entry at a time: matching files are
        imported and their tests yielded; packages are either delegated
        to their own ``load_tests`` hook or recursed into.
        """
        paths = os.listdir(start_dir)
        for path in paths:
            full_path = os.path.join(start_dir, path)
            if os.path.isfile(full_path):
                if not VALID_MODULE_NAME.match(path):
                    # valid Python identifiers only
                    continue
                if not self._match_path(path, full_path, pattern):
                    continue
                # if the test file matches, load it
                name = self._get_name_from_path(full_path)
                try:
                    module = self._get_module_from_name(name)
                except:
                    # NOTE(review): bare except is deliberate here — any
                    # failure to import is reported as a failing test
                    # rather than aborting discovery.
                    yield _make_failed_import_test(name, self.suiteClass)
                else:
                    # Guard against importing a same-named module from a
                    # different location (e.g. a globally installed copy
                    # shadowing the local file).
                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
                    realpath = os.path.splitext(mod_file)[0]
                    fullpath_noext = os.path.splitext(full_path)[0]
                    if realpath.lower() != fullpath_noext.lower():
                        module_dir = os.path.dirname(realpath)
                        mod_name = os.path.splitext(os.path.basename(full_path))[0]
                        expected_dir = os.path.dirname(full_path)
                        msg = ("%r module incorrectly imported from %r. Expected %r. "
                               "Is this module globally installed?")
                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
                    yield self.loadTestsFromModule(module)
            elif os.path.isdir(full_path):
                # Only packages (directories with __init__.py) are searched.
                if not os.path.isfile(os.path.join(full_path, '__init__.py')):
                    continue
                load_tests = None
                tests = None
                if fnmatch(path, pattern):
                    # only check load_tests if the package directory itself matches the filter
                    name = self._get_name_from_path(full_path)
                    package = self._get_module_from_name(name)
                    load_tests = getattr(package, 'load_tests', None)
                    tests = self.loadTestsFromModule(package, use_load_tests=False)
                if load_tests is None:
                    if tests is not None:
                        # tests loaded from package file
                        yield tests
                    # recurse into the package
                    for test in self._find_tests(full_path, pattern):
                        yield test
                else:
                    # load_tests takes full responsibility for the package;
                    # discovery does not recurse further.
                    try:
                        yield load_tests(self, tests, pattern)
                    except Exception as e:
                        yield _make_failed_load_tests(package.__name__, e,
                                                      self.suiteClass)
# Shared module-level loader instance used by the convenience functions below.
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
    """Build a ``TestLoader`` configured with the legacy-API options.

    Backs the module-level ``getTestCaseNames`` / ``makeSuite`` /
    ``findTestCases`` compatibility helpers.  ``suiteClass`` is only
    overridden when a truthy value is supplied.
    """
    configured = TestLoader()
    configured.testMethodPrefix = prefix
    configured.sortTestMethodsUsing = sortUsing
    if suiteClass:
        configured.suiteClass = suiteClass
    return configured
# NOTE(review): the ``cmp`` defaults below make these helpers
# Python-2-only (``cmp`` was removed in Python 3); confirm the target
# interpreter before reuse.
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
    """Legacy module-level form of ``TestLoader.getTestCaseNames``."""
    return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
              suiteClass=suite.TestSuite):
    """Legacy helper: build a suite from a single TestCase class."""
    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp,
                  suiteClass=suite.TestSuite):
    """Legacy helper: build a suite from every TestCase found in *module*."""
    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
| 41.625387 | 123 | 0.615917 |
0269ec229888e8d187c57f2734f0007c4243b429 | 4,851 | py | Python | caffe2/python/predictor/mobile_exporter_test.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 60,067 | 2017-01-18T17:21:31.000Z | 2022-03-31T21:37:45.000Z | caffe2/python/predictor/mobile_exporter_test.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 66,955 | 2017-01-18T17:21:38.000Z | 2022-03-31T23:56:11.000Z | caffe2/python/predictor/mobile_exporter_test.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 19,210 | 2017-01-18T17:45:04.000Z | 2022-03-31T23:51:56.000Z |
from caffe2.python.test_util import TestCase
from caffe2.python import workspace, brew
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
import numpy as np
class TestMobileExporter(TestCase):
    """End-to-end checks for ``mobile_exporter.Export``.

    Verifies that the exported (init_net, predict_net) pair reproduces
    the original net's outputs, both when the nets are run manually in a
    workspace and when loaded through the Predictor interface.
    """
    def test_mobile_exporter(self):
        """Export a small LeNet and compare outputs against the source net."""
        model = ModelHelper(name="mobile_exporter_test_model")
        # Test LeNet
        brew.conv(model, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
        brew.max_pool(model, 'conv1', 'pool1', kernel=2, stride=2)
        brew.conv(model, 'pool1', 'conv2', dim_in=20, dim_out=50, kernel=5)
        brew.max_pool(model, 'conv2', 'pool2', kernel=2, stride=2)
        brew.fc(model, 'pool2', 'fc3', dim_in=50 * 4 * 4, dim_out=500)
        brew.relu(model, 'fc3', 'fc3')
        brew.fc(model, 'fc3', 'pred', 500, 10)
        brew.softmax(model, 'pred', 'out')
        # Create our mobile exportable networks
        workspace.RunNetOnce(model.param_init_net)
        init_net, predict_net = mobile_exporter.Export(
            workspace, model.net, model.params
        )
        # Populate the workspace with data
        np_data = np.random.rand(1, 1, 28, 28).astype(np.float32)
        workspace.FeedBlob("data", np_data)
        workspace.CreateNet(model.net)
        workspace.RunNet(model.net)
        # Reference output from the original (non-exported) net.
        ref_out = workspace.FetchBlob("out")
        # Clear the workspace
        workspace.ResetWorkspace()
        # Populate the workspace with data
        workspace.RunNetOnce(init_net)
        # Fake "data" is populated by init_net, we have to replace it
        workspace.FeedBlob("data", np_data)
        # Overwrite the old net
        workspace.CreateNet(predict_net, True)
        workspace.RunNet(predict_net.name)
        manual_run_out = workspace.FetchBlob("out")
        np.testing.assert_allclose(
            ref_out, manual_run_out, atol=1e-10, rtol=1e-10
        )
        # Clear the workspace
        workspace.ResetWorkspace()
        # Predictor interface test (simulates writing to disk)
        predictor = workspace.Predictor(
            init_net.SerializeToString(), predict_net.SerializeToString()
        )
        # Output is a vector of outputs but we only care about the first and only result
        predictor_out = predictor.run([np_data])
        assert len(predictor_out) == 1
        predictor_out = predictor_out[0]
        np.testing.assert_allclose(
            ref_out, predictor_out, atol=1e-10, rtol=1e-10
        )
    def test_mobile_exporter_datatypes(self):
        """Check exported nets round-trip int32 and string (object) blobs."""
        model = ModelHelper(name="mobile_exporter_test_model")
        model.Copy("data_int", "out")
        model.params.append("data_int")
        model.Copy("data_obj", "out_obj")
        model.params.append("data_obj")
        # Create our mobile exportable networks
        workspace.RunNetOnce(model.param_init_net)
        np_data_int = np.random.randint(100, size=(1, 1, 28, 28), dtype=np.int32)
        workspace.FeedBlob("data_int", np_data_int)
        np_data_obj = np.array(['aa', 'bb']).astype(np.dtype('O'))
        workspace.FeedBlob("data_obj", np_data_obj)
        init_net, predict_net = mobile_exporter.Export(
            workspace, model.net, model.params
        )
        workspace.CreateNet(model.net)
        workspace.RunNet(model.net)
        ref_out = workspace.FetchBlob("out")
        ref_out_obj = workspace.FetchBlob("out_obj")
        # Clear the workspace
        workspace.ResetWorkspace()
        # Populate the workspace with data
        workspace.RunNetOnce(init_net)
        # Overwrite the old net
        workspace.CreateNet(predict_net, True)
        workspace.RunNet(predict_net.name)
        manual_run_out = workspace.FetchBlob("out")
        manual_run_out_obj = workspace.FetchBlob("out_obj")
        np.testing.assert_allclose(
            ref_out, manual_run_out, atol=1e-10, rtol=1e-10
        )
        np.testing.assert_equal(ref_out_obj, manual_run_out_obj)
        # Clear the workspace
        workspace.ResetWorkspace()
        # Predictor interface test (simulates writing to disk)
        predictor = workspace.Predictor(
            init_net.SerializeToString(), predict_net.SerializeToString()
        )
        # Output is a vector of outputs.
        predictor_out = predictor.run([])
        assert len(predictor_out) == 2
        predictor_out_int = predictor_out[1]
        predictor_out_obj = predictor_out[0]
        # The order in predictor_out is non-deterministic. Use type of the entry
        # to figure out what to compare it to.
        if isinstance(predictor_out[1][0], bytes):
            predictor_out_int = predictor_out[0]
            predictor_out_obj = predictor_out[1]
        np.testing.assert_allclose(
            ref_out, predictor_out_int, atol=1e-10, rtol=1e-10
        )
        np.testing.assert_equal(ref_out_obj, predictor_out_obj)
| 36.473684 | 88 | 0.647702 |
e1b6df234f6ddc8ec1db0e6a9d4a332bcd2e7294 | 248 | py | Python | ad2web/api/constants.py | billfor/alarmdecoder-webapp | 43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc | [
"BSD-3-Clause",
"MIT"
] | 46 | 2015-06-14T02:19:16.000Z | 2022-03-24T03:11:19.000Z | ad2web/api/constants.py | billfor/alarmdecoder-webapp | 43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc | [
"BSD-3-Clause",
"MIT"
] | 66 | 2015-03-14T16:30:43.000Z | 2021-08-28T22:20:01.000Z | ad2web/api/constants.py | billfor/alarmdecoder-webapp | 43c3ebb2b44c7291cd89a2a7a31bbdfdb3ec06dc | [
"BSD-3-Clause",
"MIT"
] | 44 | 2015-02-13T19:23:37.000Z | 2021-12-30T04:17:21.000Z | # -*- coding: utf-8 -*-
# Numeric error codes for the web API (71xx range).  The names describe
# the failure; presumably these are returned to API clients — confirm
# against the handlers that reference them.
ERROR_NOT_AUTHORIZED = 7100          # caller lacks permission for the request
ERROR_DEVICE_NOT_INITIALIZED = 7101  # target device has not been set up yet
ERROR_MISSING_BODY = 7102            # request body absent
ERROR_MISSING_FIELD = 7103           # required field absent from the body
ERROR_INVALID_VALUE = 7104           # a supplied field value failed validation
ERROR_RECORD_ALREADY_EXISTS = 7105   # create refused: record already present
ERROR_RECORD_DOES_NOT_EXIST = 7106   # lookup/update target not found
| 24.8 | 36 | 0.790323 |
4ceca657cf254b27abe1d86c70dd47a77bdef5c1 | 4,144 | py | Python | utils/animation.py | BaptisteLafoux/free_swim_illum_var | 9618611e0820c2d7ff2162aa603a47418db848ed | [
"MIT"
] | null | null | null | utils/animation.py | BaptisteLafoux/free_swim_illum_var | 9618611e0820c2d7ff2162aa603a47418db848ed | [
"MIT"
] | null | null | null | utils/animation.py | BaptisteLafoux/free_swim_illum_var | 9618611e0820c2d7ff2162aa603a47418db848ed | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 10 16:23:24 2022
@author: baptistelafoux
"""
from utils.loader import dataloader
from src.analysis import add_var_vs_time
from utils.graphic import set_matplotlib_config
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation, FFMpegWriter
#%%
def ini_pol_rot_timeserie(ax):
    """Set up the polarization/rotation time-series axes for animation.

    Reads the module-level dataset ``ds``.  Draws the full (invisible)
    traces to fix the axis limits, then creates the empty artists that the
    animation callback updates each frame.  Returns a dict of those artists
    keyed by short names ('p_pol', 'p_rot', 'pt_pol', 'pt_rot', 'l', 'leg').
    """
    ax.set_xlabel('Time [min]')
    ax.set_ylabel('[-]')
    ax.set_ylim([0, 1])
    plot_obj = {}
    # Invisible full traces: they set the x-range without being shown.
    ax.plot(ds.time/60, ds.pol_param, lw=0.3, color='none')
    ax.plot(ds.time/60, ds.rot_param, lw=0.3, color='none')
    # Growing lines (filled in frame by frame by the animation callback).
    plot_obj['p_pol'], = ax.plot([], lw=1.5, color='C4', mfc='k', label='$\mathcal{P}$')
    plot_obj['p_rot'], = ax.plot([], lw=1.5, color='C3', mfc='k', label='$\mathcal{M}$')
    # Markers tracking the current value.
    plot_obj['pt_pol'], = ax.plot([], '.', mew=1.5, mfc='C4', color='C4')
    plot_obj['pt_rot'], = ax.plot([], '.', mew=1.5, mfc='C3', color='C3')
    # Vertical cursor marking the current time.
    plot_obj['l'] = ax.axvline(0, ls='-', color='w', lw=0.75, zorder=0)
    plot_obj['leg'] = ax.legend(frameon=True, loc='upper left'); plot_obj['leg'].get_frame().set_linewidth(0.0)
    plot_obj['leg'].get_frame().set_facecolor('w')
    plot_obj['leg'].get_frame().set_alpha(0.3)
    return plot_obj
def ini_light_timeserie(ax):
    """Set up the illuminance time-series axes for animation.

    Reads the module-level dataset ``ds``.  Plots the full light trace as
    a thin background line, then creates the empty artists updated by the
    animation callback.  Returns a dict of artists keyed by
    'p_light', 'pt_light' and 'leg'.
    """
    ax.set_xlabel('Time [min]')
    ax.set_ylabel('[-]')
    plot_obj = {}
    # Thin background trace of the whole signal (also fixes axis limits).
    ax.plot(ds.time / 60, ds.light, lw=0.5, color='w')
    # Growing line and current-value marker, filled in per frame.
    plot_obj['p_light'], = ax.plot([], lw=1.5, color='w', label=r'Illuminance $\bar{E}$')
    plot_obj['pt_light'], = ax.plot([], '.', mew=1.5, mfc='w', color='w')
    #plot_obj['l'] = ax.axvline(0, ls='-', color='0.4', zorder=0)
    plot_obj['leg'] = ax.legend(frameon=True, loc='upper left'); plot_obj['leg'].get_frame().set_linewidth(0.0)
    plot_obj['leg'].get_frame().set_facecolor('w')
    plot_obj['leg'].get_frame().set_alpha(0.3)
    return plot_obj
def remove_time_axis(ax):
    """Hide the x-axis and every spine except the left one on *ax*.

    Returns the same axes object so calls can be chained.
    """
    # ax.set_frame_on(False)  # kept from original: full-frame removal disabled
    ax.axes.get_xaxis().set_visible(False)
    for side in ('right', 'top', 'bottom'):
        ax.spines[side].set_visible(False)
    return ax
#%%
# --- Script body: global plotting/config state used by the callbacks below.
set_matplotlib_config()
plt.style.use('dark_background')
plt.close('all')
import matplotlib.pyplot as mpl
mpl.rcParams['font.family'] = 'Avenir'
# Seconds of data averaged into each animation frame.
downsampling_period = 1
ds = dataloader('cleaned/3_VarLight/2022-01-06/1/trajectory.nc')
# Coarsen to one sample per downsampling period; np.abs folds the sign.
ds = np.abs(ds.coarsen(time=int(downsampling_period*ds.fps), boundary='trim').mean())
#ds = ds.rolling(time=120, center=True).mean()
fig, ax = plt.subplots(figsize=(4, 3))
# Background color (RGB in 0-1): a dark blue.
fig.set_facecolor([35/255, 83/255, 137/255])
ax.set_facecolor([35/255, 83/255, 137/255])
#plot_obj = ini_pol_rot_timeserie(ax)
plot_obj = ini_light_timeserie(ax)
def animation_pol_rot_timeserie(i):
    """FuncAnimation callback: draw the pol/rot curves up to frame *i*.

    Uses module-level ``ds`` and ``plot_obj``.  Returns the updated
    artists (for blitting support).
    """
    # All samples up to and including the current frame.
    dsi = ds.isel(time=slice(0, i+1))
    plot_obj['p_pol'].set_data(dsi.time / 60, dsi.pol_param)
    plot_obj['p_rot'].set_data(dsi.time / 60, dsi.rot_param)
    # Current-value markers and time cursor follow the last sample.
    plot_obj['pt_pol'].set_data(dsi.time[-1] / 60, dsi.pol_param[-1])
    plot_obj['pt_rot'].set_data(dsi.time[-1] / 60, dsi.rot_param[-1])
    plot_obj['l'].set_xdata([dsi.time[-1] / 60, dsi.time[-1] / 60])
    # Lightweight progress report every 100 frames.
    if i%100 == 0: print(f'{i / ds.time.size * 100:5.2f} %')
    return plot_obj['leg'], plot_obj['pt_pol'], plot_obj['pt_rot'], plot_obj['p_pol'], plot_obj['p_rot'], plot_obj['l'],
def animation_light_timeserie(i):
    """FuncAnimation callback: draw the illuminance curve up to frame *i*.

    Uses module-level ``ds`` and ``plot_obj``.  Returns the updated
    artists (for blitting support).
    """
    dsi = ds.isel(time=slice(0, i+1))
    plot_obj['p_light'].set_data(dsi.time / 60, dsi.light)
    plot_obj['pt_light'].set_data(dsi.time[-1] / 60, dsi.light[-1])
    #plot_obj['l'].set_xdata([dsi.time[-1] / 60, dsi.time[-1] / 60])
    # Lightweight progress report every 100 frames.
    if i%100 == 0: print(f'{i / ds.time.size * 100:5.2f} %')
    return plot_obj['leg'], plot_obj['pt_light'], plot_obj['p_light'], #plot_obj['l'],
# Render the animation: one frame per (coarsened) sample, then encode to
# MP4 at real-time speed (fps matches the downsampling period).
anim = FuncAnimation(fig, animation_light_timeserie, frames=ds.time.size, blit=False, interval=10)
anim.save('output/animated_light_vs_time_with_time_axis_blue_bg.mp4', dpi=120, writer=FFMpegWriter(fps=1/downsampling_period), savefig_kwargs=dict(facecolor=fig.get_facecolor()))
plt.show()
| 30.470588 | 178 | 0.634653 |
4d523df75776ecf740cd0753d2da7ca41897a00a | 4,549 | py | Python | experiments/steven-images/ddpg_image_classic_envs.py | Asap7772/rail-rl-franka-eval | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | [
"MIT"
] | null | null | null | experiments/steven-images/ddpg_image_classic_envs.py | Asap7772/rail-rl-franka-eval | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | [
"MIT"
] | null | null | null | experiments/steven-images/ddpg_image_classic_envs.py | Asap7772/rail-rl-franka-eval | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | [
"MIT"
] | null | null | null | import gym
import numpy as np
from railrl.torch.dqn.double_dqn import DoubleDQN
import railrl.misc.hyperparameter as hyp
import railrl.torch.pytorch_util as ptu
from railrl.launchers.launcher_util import run_experiment
from railrl.torch.dqn.dqn import DQN
from railrl.torch.networks import Mlp, CNN, CNNPolicy, MergedCNN
from torch import nn as nn
from railrl.torch.modules import HuberLoss
from railrl.envs.wrappers import ImageMujocoWithObsEnv
from railrl.torch.ddpg.ddpg import DDPG
from railrl.envs.mujoco.discrete_reacher import DiscreteReacherEnv
from railrl.exploration_strategies.ou_strategy import OUStrategy
from railrl.torch.networks import FlattenMlp, TanhMlpPolicy
from railrl.envs.wrappers import NormalizedBoxEnv
from railrl.exploration_strategies.gaussian_strategy import GaussianStrategy
from railrl.exploration_strategies.base import \
PolicyWrappedWithExplorationStrategy
from railrl.launchers.launcher_util import setup_logger
from railrl.envs.mujoco.pusher2d import Pusher2DEnv
from railrl.envs.mujoco.sawyer_gripper_env import SawyerXYZEnv
import railrl.images.camera as camera
import torch
def experiment(variant):
    """Build and train image-based DDPG on the 2D pusher task.

    *variant* is a dict of hyperparameters (see ``__main__`` below):
    'imsize', 'history', 'init_camera', 'cnn_params', 'algo_params'.
    The observation is a rendered image stacked with the env's low-dim
    state ("partial obs"), fed to CNN-based Q-function and policy.
    """
    imsize = variant['imsize']
    history = variant['history']
    #env = InvertedDoublePendulumEnv()#gym.make(variant['env_id'])
    env = Pusher2DEnv()
    partial_obs_size = env.obs_dim
    # Wrap the env so observations are rendered images (with `history`
    # stacked frames) plus the raw state vector.
    env = NormalizedBoxEnv(ImageMujocoWithObsEnv(env,
                                                 imsize=imsize,
                                                 keep_prev=history-1,
                                                 init_camera=variant['init_camera']))
    # es = GaussianStrategy(
    #     action_space=env.action_space,
    # )
    es = OUStrategy(action_space=env.action_space)
    # NOTE(review): obs_dim is computed but never used below.
    obs_dim = env.observation_space.low.size
    action_dim = env.action_space.low.size
    # Q(s, a): image goes through the CNN, action and partial obs enter
    # via the extra fully-connected input.
    qf = MergedCNN(input_width=imsize,
                   input_height=imsize,
                   output_size=1,
                   input_channels= history,
                   added_fc_input_size=action_dim + partial_obs_size,
                   **variant['cnn_params'])
    # pi(s): tanh output to keep actions in the normalized box.
    policy = CNNPolicy(input_width=imsize,
                       input_height=imsize,
                       added_fc_input_size=partial_obs_size,
                       output_size=action_dim,
                       input_channels=history,
                       **variant['cnn_params'],
                       output_activation=torch.tanh,
                       )
    exploration_policy = PolicyWrappedWithExplorationStrategy(
        exploration_strategy=es,
        policy=policy,
    )
    algorithm = DDPG(
        env,
        qf=qf,
        policy=policy,
        # qf_weight_decay=.01,
        exploration_policy=exploration_policy,
        **variant['algo_params']
    )
    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
    # noinspection PyTypeChecker
    # Hyperparameters passed to experiment() for each sweep point.
    variant = dict(
        imsize=16,
        history=1,
        env_id='DoubleInvertedPendulum-v2',
        init_camera=camera.pusher_2d_init_camera,
        algo_params=dict(
            num_epochs=1000,
            num_steps_per_epoch=1000,
            num_steps_per_eval=500,
            batch_size=64,
            max_path_length=100,
            discount=.99,
            use_soft_update=True,
            tau=1e-3,
            qf_learning_rate=1e-3,
            policy_learning_rate=1e-4,
            save_replay_buffer=False,
            replay_buffer_size=int(2E4),
        ),
        cnn_params=dict(
            kernel_sizes=[3, 3],
            n_channels=[16, 16],
            strides=[2, 2],
            pool_sizes=[1, 1],
            hidden_sizes=[400, 300],
            paddings=[0, 0],
            use_batch_norm=False,
        ),
        algo_class=DDPG,
        qf_criterion_class=HuberLoss,
    )
    # Values to sweep over; currently a single point (HuberLoss only).
    search_space = {
        # 'algo_params.use_hard_updates': [True, False],
        'qf_criterion_class': [
            HuberLoss,
        ],
    }
    # setup_logger('dqn-images-experiment', variant=variant)
    # experiment(variant)
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    # Launch one local run per hyperparameter combination.
    for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
        # for i in range(2):
        run_experiment(
            experiment,
            variant=variant,
            exp_id=exp_id,
            exp_prefix="DDPG-images-pusher",
            mode='local',
            # exp_prefix="double-vs-dqn-huber-sweep-cartpole",
            # mode='local',
            #use_gpu=True,
        )
| 32.035211 | 76 | 0.626731 |
45fda5e63f86f3e2ddc4a4308196294dbe1c5c22 | 7,303 | py | Python | tests/unit/fs/test_dvc.py | itcarroll/dvc | 55219e9089005ac15d668ecf735aeaf31a771d0b | [
"Apache-2.0"
] | null | null | null | tests/unit/fs/test_dvc.py | itcarroll/dvc | 55219e9089005ac15d668ecf735aeaf31a771d0b | [
"Apache-2.0"
] | 41 | 2021-11-16T15:38:50.000Z | 2022-03-30T10:32:14.000Z | tests/unit/fs/test_dvc.py | jhhuh/dvc | fecc81e951efeaa8130264f726c27e92876422ae | [
"Apache-2.0"
] | null | null | null | import posixpath
import shutil
import pytest
from dvc.config import NoRemoteError
from dvc.data.stage import stage
from dvc.fs.dvc import DvcFileSystem
from dvc.hash_info import HashInfo
from dvc.utils.fs import remove
@pytest.mark.parametrize(
    "path, key",
    [
        ("", ("repo",)),
        (".", ("repo",)),
        ("foo", ("repo", "foo")),
        ("dir/foo", ("repo", "dir", "foo")),
    ],
)
def test_get_key(tmp_dir, dvc, path, key):
    """Paths map to internal keys rooted at the 'repo' prefix."""
    fs = DvcFileSystem(repo=dvc)
    assert fs.fs._get_key(path) == key
def test_exists(tmp_dir, dvc):
    """A tracked file exists in DvcFileSystem even after workspace removal."""
    tmp_dir.gen("foo", "foo")
    dvc.add("foo")
    (tmp_dir / "foo").unlink()
    fs = DvcFileSystem(repo=dvc)
    assert fs.exists("foo")
def test_open(tmp_dir, dvc):
    """A tracked file can be opened from cache after workspace removal."""
    tmp_dir.gen("foo", "foo")
    dvc.add("foo")
    (tmp_dir / "foo").unlink()
    fs = DvcFileSystem(repo=dvc)
    with fs.open("foo", "r") as fobj:
        assert fobj.read() == "foo"
def test_open_dirty_hash(tmp_dir, dvc):
    """Reads return the committed content, not dirty workspace edits."""
    tmp_dir.dvc_gen("file", "file")
    (tmp_dir / "file").write_text("something")
    fs = DvcFileSystem(repo=dvc)
    with fs.open("file", "r") as fobj:
        # NOTE: Unlike RepoFileSystem, DvcFileSystem should not
        # be affected by a dirty workspace.
        assert fobj.read() == "file"
def test_open_no_remote(tmp_dir, dvc):
    """Missing cache and no remote surfaces as FileNotFoundError chained to NoRemoteError."""
    tmp_dir.dvc_gen("file", "file")
    (tmp_dir / "file").unlink()
    remove(dvc.odb.local.cache_dir)
    fs = DvcFileSystem(repo=dvc)
    with pytest.raises(FileNotFoundError) as exc_info:
        with fs.open("file", "r"):
            pass
    assert isinstance(exc_info.value.__cause__, NoRemoteError)
def test_open_dirty_no_hash(tmp_dir, dvc):
    """An output declared in a .dvc file but never committed is not readable."""
    tmp_dir.gen("file", "file")
    (tmp_dir / "file.dvc").write_text("outs:\n- path: file\n")
    fs = DvcFileSystem(repo=dvc)
    # NOTE: Unlike RepoFileSystem, DvcFileSystem should not
    # be affected by a dirty workspace.
    with pytest.raises(FileNotFoundError):
        with fs.open("file", "r"):
            pass
def test_open_in_history(tmp_dir, scm, dvc):
    """While brancher points at an old revision, reads see that revision's data."""
    tmp_dir.gen("foo", "foo")
    dvc.add("foo")
    dvc.scm.add(["foo.dvc", ".gitignore"])
    dvc.scm.commit("foo")
    tmp_dir.gen("foo", "foofoo")
    dvc.add("foo")
    dvc.scm.add(["foo.dvc", ".gitignore"])
    dvc.scm.commit("foofoo")
    for rev in dvc.brancher(revs=["HEAD~1"]):
        if rev == "workspace":
            continue
        fs = DvcFileSystem(repo=dvc)
        with fs.open("foo", "r") as fobj:
            assert fobj.read() == "foo"
def test_isdir_isfile(tmp_dir, dvc):
    """isdir/isfile reflect only tracked outputs, not untracked workspace files."""
    tmp_dir.gen({"datafile": "data", "datadir": {"foo": "foo", "bar": "bar"}})
    fs = DvcFileSystem(repo=dvc)
    # Untracked entries are invisible to DvcFileSystem.
    assert not fs.isdir("datadir")
    assert not fs.isfile("datadir")
    assert not fs.isdir("datafile")
    assert not fs.isfile("datafile")
    dvc.add(["datadir", "datafile"])
    shutil.rmtree(tmp_dir / "datadir")
    (tmp_dir / "datafile").unlink()
    # After `dvc add`, types are reported from cache even though the
    # workspace copies are gone.
    assert fs.isdir("datadir")
    assert not fs.isfile("datadir")
    assert not fs.isdir("datafile")
    assert fs.isfile("datafile")
def test_isdir_mixed(tmp_dir, dvc):
    """A directory counts as a dir when only some of its files are tracked."""
    tmp_dir.gen({"dir": {"foo": "foo", "bar": "bar"}})
    dvc.add(str(tmp_dir / "dir" / "foo"))
    fs = DvcFileSystem(repo=dvc)
    assert fs.isdir("dir")
    assert not fs.isfile("dir")
def test_walk(tmp_dir, dvc):
    """walk() visits every entry of outputs added with --recursive."""
    tmp_dir.gen(
        {
            "dir": {
                "subdir1": {"foo1": "foo1", "bar1": "bar1"},
                "subdir2": {"foo2": "foo2"},
                "foo": "foo",
                "bar": "bar",
            }
        }
    )
    dvc.add("dir", recursive=True)
    fs = DvcFileSystem(repo=dvc)
    expected = [
        "dir/subdir1",
        "dir/subdir2",
        "dir/subdir1/foo1",
        "dir/subdir1/bar1",
        "dir/subdir2/foo2",
        "dir/foo",
        "dir/bar",
    ]
    actual = []
    for root, dirs, files in fs.walk("dir"):
        for entry in dirs + files:
            actual.append(posixpath.join(root, entry))
    # Set equality plus a length check guards against duplicates.
    assert set(actual) == set(expected)
    assert len(actual) == len(expected)
def test_walk_dir(tmp_dir, dvc):
    """walk() visits every entry of a directory added as a single output."""
    tmp_dir.gen(
        {
            "dir": {
                "subdir1": {"foo1": "foo1", "bar1": "bar1"},
                "subdir2": {"foo2": "foo2"},
                "foo": "foo",
                "bar": "bar",
            }
        }
    )
    dvc.add("dir")
    fs = DvcFileSystem(repo=dvc)
    expected = [
        "dir/subdir1",
        "dir/subdir2",
        "dir/subdir1/foo1",
        "dir/subdir1/bar1",
        "dir/subdir2/foo2",
        "dir/foo",
        "dir/bar",
    ]
    actual = []
    for root, dirs, files in fs.walk("dir"):
        for entry in dirs + files:
            actual.append(posixpath.join(root, entry))
    # Set equality plus a length check guards against duplicates.
    assert set(actual) == set(expected)
    assert len(actual) == len(expected)
def test_walk_missing(tmp_dir, dvc):
    """Walking a nonexistent path yields nothing and does not raise."""
    fs = DvcFileSystem(repo=dvc)
    for _ in fs.walk("dir"):
        pass
def test_walk_not_a_dir(tmp_dir, dvc):
    """Walking a tracked file (not a dir) yields nothing and does not raise."""
    tmp_dir.dvc_gen("foo", "foo")
    fs = DvcFileSystem(repo=dvc)
    for _ in fs.walk("foo"):
        pass
def test_isdvc(tmp_dir, dvc):
    """isdvc() is true only for paths tracked by dvc."""
    tmp_dir.gen({"foo": "foo", "bar": "bar"})
    dvc.add("foo")
    fs = DvcFileSystem(repo=dvc)
    assert fs.isdvc("foo")
    assert not fs.isdvc("bar")
def test_get_hash_file(tmp_dir, dvc):
    """info() exposes the committed md5 of a tracked file."""
    tmp_dir.dvc_gen({"foo": "foo"})
    fs = DvcFileSystem(repo=dvc)
    assert fs.info("foo")["md5"] == "acbd18db4cc2f85cedef654fccc4a4d8"
def test_get_hash_dir(tmp_dir, dvc, mocker):
    """A directory's .dir md5 comes from cache without re-hashing files."""
    import dvc as dvc_module
    tmp_dir.dvc_gen(
        {"dir": {"foo": "foo", "bar": "bar", "subdir": {"data": "data"}}}
    )
    fs = DvcFileSystem(repo=dvc)
    get_file_hash_spy = mocker.spy(dvc_module.data.stage, "get_file_hash")
    assert fs.info("dir")["md5"] == "8761c4e9acad696bee718615e23e22db.dir"
    # The cached .dir entry must be used — no per-file hashing.
    assert not get_file_hash_spy.called
def test_get_hash_granular(tmp_dir, dvc):
    """Hashes of entries inside a tracked dir are resolved via staging."""
    tmp_dir.dvc_gen(
        {"dir": {"foo": "foo", "bar": "bar", "subdir": {"data": "data"}}}
    )
    fs = DvcFileSystem(repo=dvc)
    subdir = "dir/subdir"
    # Sub-directories have no precomputed md5 in info()...
    assert fs.info(subdir).get("md5") is None
    # ...but staging derives one on demand.
    _, _, obj = stage(dvc.odb.local, subdir, fs, "md5", dry_run=True)
    assert obj.hash_info == HashInfo(
        "md5", "af314506f1622d107e0ed3f14ec1a3b5.dir"
    )
    data = posixpath.join(subdir, "data")
    assert fs.info(data)["md5"] == "8d777f385d3dfec8815d20f7496026dc"
    _, _, obj = stage(dvc.odb.local, data, fs, "md5", dry_run=True)
    assert obj.hash_info == HashInfo("md5", "8d777f385d3dfec8815d20f7496026dc")
def test_get_hash_dirty_file(tmp_dir, dvc):
    """Hash reflects workspace content for a dirty tracked file."""
    tmp_dir.dvc_gen("file", "file")
    (tmp_dir / "file").write_text("something")
    fs = DvcFileSystem(repo=dvc)
    expected = "8c7dd922ad47494fc02c388e12c00eac"
    assert fs.info("file").get("md5") == expected
    _, _, obj = stage(dvc.odb.local, "file", fs, "md5", dry_run=True)
    assert obj.hash_info == HashInfo("md5", expected)
def test_get_hash_dirty_dir(tmp_dir, dvc):
    """Hash reflects workspace content for a tracked dir with new files."""
    tmp_dir.dvc_gen({"dir": {"foo": "foo", "bar": "bar"}})
    (tmp_dir / "dir" / "baz").write_text("baz")
    fs = DvcFileSystem(repo=dvc)
    expected = "5ea40360f5b4ec688df672a4db9c17d1.dir"
    assert fs.info("dir").get("md5") == expected
    _, _, obj = stage(dvc.odb.local, "dir", fs, "md5", dry_run=True)
    assert obj.hash_info == HashInfo("md5", expected)
| 26.460145 | 79 | 0.589758 |
eec0ed13f084b81833274574db176fa41da94488 | 376 | py | Python | core/migrations/0006_referral_amount.py | adesiyanoladipo/django-referral-system | 7cc4b41338289ecff78f7a50c9eee4bd47986215 | [
"MIT"
] | 6 | 2020-09-03T20:05:00.000Z | 2021-07-02T11:49:46.000Z | core/migrations/0006_referral_amount.py | adesiyan-ifedayo/django-referral-system | 7cc4b41338289ecff78f7a50c9eee4bd47986215 | [
"MIT"
] | null | null | null | core/migrations/0006_referral_amount.py | adesiyan-ifedayo/django-referral-system | 7cc4b41338289ecff78f7a50c9eee4bd47986215 | [
"MIT"
] | 4 | 2020-09-03T10:52:20.000Z | 2021-01-13T16:13:45.000Z | # Generated by Django 2.2.14 on 2020-08-22 22:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add the ``amount`` field to core.Referral."""
    dependencies = [
        ('core', '0005_referral'),
    ]
    operations = [
        migrations.AddField(
            model_name='referral',
            name='amount',
            # NOTE(review): the default is the *string* '1500' on a
            # FloatField; 1500.0 was likely intended — but confirm before
            # editing a migration that may already be applied.
            field=models.FloatField(default='1500'),
        ),
    ]
| 19.789474 | 52 | 0.582447 |
33692de4aa92ad907d21d251278fac217a6563a2 | 16,399 | py | Python | tests/integration/goldens/logging/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc_asyncio.py | major/gapic-generator-python | 68515c4c1444875f151a971b595e9dc837ddf47c | [
"Apache-2.0"
] | null | null | null | tests/integration/goldens/logging/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc_asyncio.py | major/gapic-generator-python | 68515c4c1444875f151a971b595e9dc837ddf47c | [
"Apache-2.0"
] | null | null | null | tests/integration/goldens/logging/google/cloud/logging_v2/services/metrics_service_v2/transports/grpc_asyncio.py | major/gapic-generator-python | 68515c4c1444875f151a971b595e9dc837ddf47c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.logging_v2.types import logging_metrics
from google.protobuf import empty_pb2 # type: ignore
from .base import MetricsServiceV2Transport, DEFAULT_CLIENT_INFO
from .grpc import MetricsServiceV2GrpcTransport
class MetricsServiceV2GrpcAsyncIOTransport(MetricsServiceV2Transport):
"""gRPC AsyncIO backend transport for MetricsServiceV2.
Service for configuring logs-based metrics.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(cls,
                       host: str = 'logging.googleapis.com',
                       credentials: ga_credentials.Credentials = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        # Delegate to the shared helper; default scopes and host come from
        # the base transport class so the async channel stays consistent
        # with the sync gRPC transport.
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs
        )
def __init__(self, *,
host: str = 'logging.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or applicatin default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def list_log_metrics(self) -> Callable[
[logging_metrics.ListLogMetricsRequest],
Awaitable[logging_metrics.ListLogMetricsResponse]]:
r"""Return a callable for the list log metrics method over gRPC.
Lists logs-based metrics.
Returns:
Callable[[~.ListLogMetricsRequest],
Awaitable[~.ListLogMetricsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_log_metrics' not in self._stubs:
self._stubs['list_log_metrics'] = self.grpc_channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/ListLogMetrics',
request_serializer=logging_metrics.ListLogMetricsRequest.serialize,
response_deserializer=logging_metrics.ListLogMetricsResponse.deserialize,
)
return self._stubs['list_log_metrics']
@property
def get_log_metric(self) -> Callable[
[logging_metrics.GetLogMetricRequest],
Awaitable[logging_metrics.LogMetric]]:
r"""Return a callable for the get log metric method over gRPC.
Gets a logs-based metric.
Returns:
Callable[[~.GetLogMetricRequest],
Awaitable[~.LogMetric]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_log_metric' not in self._stubs:
self._stubs['get_log_metric'] = self.grpc_channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/GetLogMetric',
request_serializer=logging_metrics.GetLogMetricRequest.serialize,
response_deserializer=logging_metrics.LogMetric.deserialize,
)
return self._stubs['get_log_metric']
@property
def create_log_metric(self) -> Callable[
[logging_metrics.CreateLogMetricRequest],
Awaitable[logging_metrics.LogMetric]]:
r"""Return a callable for the create log metric method over gRPC.
Creates a logs-based metric.
Returns:
Callable[[~.CreateLogMetricRequest],
Awaitable[~.LogMetric]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'create_log_metric' not in self._stubs:
self._stubs['create_log_metric'] = self.grpc_channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/CreateLogMetric',
request_serializer=logging_metrics.CreateLogMetricRequest.serialize,
response_deserializer=logging_metrics.LogMetric.deserialize,
)
return self._stubs['create_log_metric']
@property
def update_log_metric(self) -> Callable[
[logging_metrics.UpdateLogMetricRequest],
Awaitable[logging_metrics.LogMetric]]:
r"""Return a callable for the update log metric method over gRPC.
Creates or updates a logs-based metric.
Returns:
Callable[[~.UpdateLogMetricRequest],
Awaitable[~.LogMetric]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_log_metric' not in self._stubs:
self._stubs['update_log_metric'] = self.grpc_channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/UpdateLogMetric',
request_serializer=logging_metrics.UpdateLogMetricRequest.serialize,
response_deserializer=logging_metrics.LogMetric.deserialize,
)
return self._stubs['update_log_metric']
@property
def delete_log_metric(self) -> Callable[
[logging_metrics.DeleteLogMetricRequest],
Awaitable[empty_pb2.Empty]]:
r"""Return a callable for the delete log metric method over gRPC.
Deletes a logs-based metric.
Returns:
Callable[[~.DeleteLogMetricRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_log_metric' not in self._stubs:
self._stubs['delete_log_metric'] = self.grpc_channel.unary_unary(
'/google.logging.v2.MetricsServiceV2/DeleteLogMetric',
request_serializer=logging_metrics.DeleteLogMetricRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs['delete_log_metric']
# Explicit public surface of this generated transport module.
__all__ = (
    'MetricsServiceV2GrpcAsyncIOTransport',
)
| 45.301105 | 89 | 0.630953 |
9fc4b5b8ed21eecf607780464e855b20a864a7ff | 5,038 | py | Python | generate_pairs.py | Rasoul20sh/InsightFace_TF | 06a177a3176660787f21f184728bdf6b553b25ae | [
"MIT"
] | 4 | 2019-04-01T03:52:20.000Z | 2019-11-12T21:15:05.000Z | generate_pairs.py | Rasoul20sh/InsightFace_TF | 06a177a3176660787f21f184728bdf6b553b25ae | [
"MIT"
] | null | null | null | generate_pairs.py | Rasoul20sh/InsightFace_TF | 06a177a3176660787f21f184728bdf6b553b25ae | [
"MIT"
] | 1 | 2021-01-06T06:58:49.000Z | 2021-01-06T06:58:49.000Z | #! encoding: utf-8
import os
import random
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
class GeneratePairs:
    """
    Generate the pairs.txt file that is used for training face classifier when calling python `src/train_softmax.py`.
    Or others' python scripts that needs the file of pairs.txt.
    Doc Reference: http://vis-www.cs.umass.edu/lfw/README.txt
    """
    # Running line counter shared by both pair generators; used only for logging.
    counter = 1
    def __init__(self, data_dir, pairs_filepath, img_ext, num_random_images_per_folder):
        """
        Parameter data_dir, is your data directory.
        Parameter pairs_filepath, where is the pairs.txt that belongs to.
        Parameter img_ext, is the image data extension for all of your image data.
        """
        self.data_dir = data_dir
        self.pairs_filepath = pairs_filepath
        self.img_ext = img_ext
        self.num_random_images_per_folder = num_random_images_per_folder
        # Separator used when splitting/joining relative image paths below.
        if os.name == 'nt':
            self.separator = "\\"
        else:
            self.separator = "/"
        # One entry per identity: every sub-directory of data_dir.
        self.remaining = []
        for name in os.listdir(self.data_dir):
            if os.path.isdir(os.path.join(self.data_dir, name)):
                self.remaining.append(name)
    def update_remaining(self):
        # Rebuild the identity list from disk (restores entries removed by
        # _generate_mismatches_pairs on the previous iteration).
        self.remaining = []
        for name in os.listdir(self.data_dir):
            if os.path.isdir(os.path.join(self.data_dir, name)):
                self.remaining.append(name)
    def generate(self):
        # Append matched pairs first, then mismatched pairs, to pairs_filepath.
        self._generate_matches_pairs()
        self._generate_mismatches_pairs()
    def _generate_matches_pairs(self):
        """
        Generate all matches pairs
        """
        for name in self.remaining:
            # Collect relative paths of this identity's images.
            # NOTE(review): `in` matches the extension anywhere in the file
            # name, not just at the end — confirm this is intended.
            a = []
            for file in os.listdir(os.path.join(self.data_dir, name)):
                if self.img_ext in file:
                    a.append(os.path.join(name, file))
            if a:
                with open(self.pairs_filepath, "a") as f:
                    for i in range(self.num_random_images_per_folder):
                        temp = random.choice(a).split(self.separator) # This line may vary depending on how your images are named.
                        w = self.separator.join(temp[:-1])
                        l = random.choice(a).split(self.separator)[-1]
                        r = random.choice(a).split(self.separator)[-1]
                        print("For '" + os.path.join(self.data_dir, name) + "' and counter: ", self.counter, ', Match Pair:', w + " -> " + l
                              + ", " + r)
                        # Matched-pair line format: <folder> <image1> <image2>.
                        f.write(w + "\t" + l + "\t" + r + "\n")
                        self.counter += 1
    def _generate_mismatches_pairs(self):
        """
        Generate all mismatches pairs
        """
        # NOTE(review): looks like leftover debug identity ids — the bare
        # print() calls below only serve as breakpoint anchors; confirm
        # before removing.
        tmptmp = ['00061', '10285', '00074', '10156', '10041', '20344', '10041', '20344', '10041', '20344', '10217', '20345', '20324', '20345', '20344',
                  '10268', '20345', '20481', '20394', '00074', '20412', '10014', '20436', '20412', '30604', '10218']
        for i, name in enumerate(self.remaining):
            # Refresh the identity list, then drop the current identity so the
            # mismatch partner is always a different folder.
            self.update_remaining()
            del self.remaining[i] # deletes the file from the list, so that it is not chosen again
            other_dir = random.choice(self.remaining)
            with open(self.pairs_filepath, "a") as f:
                for _ in range(self.num_random_images_per_folder):
                    if name in tmptmp:
                        print()
                    if other_dir in tmptmp:
                        print()
                    temps_file_1 = os.listdir(os.path.join(self.data_dir, name))
                    if temps_file_1:
                        file1 = random.choice(temps_file_1)
                    temps_file_2 = os.listdir(os.path.join(self.data_dir, other_dir))
                    if temps_file_2:
                        file2 = random.choice(temps_file_2)
                    if temps_file_1 and temps_file_2:
                        if self.img_ext in file1 and self.img_ext in file2:
                            print("For '" + self.data_dir + "' and counter: ", self.counter, ', MisMatch Pair:',
                                  name + " " + file1.split(self.separator)[-1] + ' ' +
                                  other_dir + ' ' + file2.split(self.separator)[-1])
                            # Mismatched-pair line format: <folder1> <image1> <folder2> <image2>.
                            f.write(name + "\t" + file1.split(self.separator)[-1] + "\t" + other_dir + "\t" +
                                    file2.split(self.separator)[-1] + "\n")
                            self.counter += 1
if __name__ == '__main__':
    # Source folder of aligned face crops and destination pairs file.
    source_dir = r"E:\Projects & Courses\CpAE\NIR-VIS-2.0 Dataset -cbsr.ia.ac.cn\First_100_ALL VIS_112"
    output_path = os.path.join(PROJECT_PATH, 'data/First_100_ALL VIS_112_1.txt')
    extension = ".png"
    pair_generator = GeneratePairs(
        source_dir, output_path, extension, num_random_images_per_folder=1
    )
    pair_generator.generate()
| 41.295082 | 152 | 0.538706 |
5ba644e4db9f846ac9431f319b42557067365780 | 6,061 | py | Python | tests/test_caching.py | CyrilLeMat/modelkit | 2150ffe78ebb00e3302dac36ccb09e66becd5130 | [
"MIT"
] | null | null | null | tests/test_caching.py | CyrilLeMat/modelkit | 2150ffe78ebb00e3302dac36ccb09e66becd5130 | [
"MIT"
] | null | null | null | tests/test_caching.py | CyrilLeMat/modelkit | 2150ffe78ebb00e3302dac36ccb09e66becd5130 | [
"MIT"
] | null | null | null | import asyncio
import os
import subprocess
import time
from typing import List, Union
import pydantic
import pytest
import redis
from modelkit.core.library import ModelLibrary
from modelkit.core.model import AsyncModel, Model
from modelkit.utils.cache import NativeCache, RedisCache
from tests.conftest import skip_unless
@pytest.mark.parametrize("cache_implementation", ["LFU", "LRU", "RR"])
def test_native_cache(cache_implementation):
    """Cached predictions through the in-process native cache must be
    transparent for single, batch and pydantic-validated models."""

    class SomeModel(Model):
        CONFIGURATIONS = {"model": {"model_settings": {"cache_predictions": True}}}

        def _predict(self, item):
            return item

    class SomeModelMultiple(Model):
        CONFIGURATIONS = {
            "model_multiple": {"model_settings": {"cache_predictions": True}}
        }

        def _predict_batch(self, items):
            return items

    class Item(pydantic.BaseModel):
        class SubItem(pydantic.BaseModel):
            boomer: Union[int, List[int]]

        ok: SubItem

    class SomeModelValidated(Model[Item, Item]):
        CONFIGURATIONS = {
            "model_validated": {"model_settings": {"cache_predictions": True}}
        }

        def _predict_batch(self, items):
            return items

    cache_settings = {
        "cache_provider": "native",
        "implementation": cache_implementation,
        "maxsize": 16,
    }
    library = ModelLibrary(
        models=[SomeModel, SomeModelMultiple, SomeModelValidated],
        settings={"cache": cache_settings},
    )
    assert isinstance(library.cache, NativeCache)

    items = [{"ok": {"boomer": 1}}, {"ok": {"boomer": [2, 2, 3]}}]
    for model_name in ("model", "model_multiple", "model_validated"):
        _do_model_test(library.get(model_name), items)
@pytest.fixture()
def redis_service(request):
    # Provide a live redis server on localhost:6379 for the requesting test.
    if "JENKINS_CI" in os.environ:
        # CI workers run redis-server directly on the host.
        redis_proc = subprocess.Popen(["redis-server"])
        def finalize():
            redis_proc.terminate()
    else:
        # start redis as docker container
        subprocess.Popen(
            ["docker", "run", "--name", "redis-tests", "-p", "6379:6379", "redis:5"]
        )
        def finalize():
            subprocess.call(["docker", "rm", "-f", "redis-tests"])
    # Tear the server down when the requesting test finishes.
    request.addfinalizer(finalize)
    rd = redis.Redis(host="localhost", port=6379)
    # Poll for up to ~30 seconds until the server answers PING.
    for _ in range(30):
        try:
            if rd.ping():
                break
        except redis.ConnectionError:
            time.sleep(1)
    yield
def _do_model_test(model, ITEMS):
    """Assert that *model* echoes its input items for forced single
    predictions, batch predictions, and a batch extended past the cache."""
    for item in ITEMS:
        assert model(item, _force_compute=True) == item
    assert model.predict_batch(ITEMS) == ITEMS
    extended = ITEMS + [{"ok": {"boomer": [-1]}}]
    assert model.predict_batch(extended) == extended
@skip_unless("ENABLE_REDIS_TEST", "True")
def test_redis_cache(redis_service):
    """Cached predictions through the redis backend must be transparent for
    single, batch and pydantic-validated models."""

    class SomeModel(Model):
        CONFIGURATIONS = {"model": {"model_settings": {"cache_predictions": True}}}

        def _predict(self, item):
            return item

    class SomeModelMultiple(Model):
        CONFIGURATIONS = {
            "model_multiple": {"model_settings": {"cache_predictions": True}}
        }

        def _predict_batch(self, items):
            return items

    class Item(pydantic.BaseModel):
        class SubItem(pydantic.BaseModel):
            boomer: Union[int, List[int]]

        ok: SubItem

    class SomeModelValidated(Model[Item, Item]):
        CONFIGURATIONS = {
            "model_validated": {"model_settings": {"cache_predictions": True}}
        }

        def _predict_batch(self, items):
            return items

    library = ModelLibrary(
        models=[SomeModel, SomeModelMultiple, SomeModelValidated],
        settings={"cache": {"cache_provider": "redis"}},
    )
    assert isinstance(library.cache, RedisCache)

    items = [{"ok": {"boomer": 1}}, {"ok": {"boomer": [2, 2, 3]}}]
    for model_name in ("model", "model_multiple", "model_validated"):
        _do_model_test(library.get(model_name), items)
async def _do_model_test_async(model, ITEMS):
    """Async twin of ``_do_model_test``: the model must echo items for
    forced single predictions and for batches, including an extended one."""
    for item in ITEMS:
        assert await model(item, _force_compute=True) == item
    assert await model.predict_batch(ITEMS) == ITEMS
    extended = ITEMS + [{"new": "item"}]
    assert await model.predict_batch(extended) == extended
@pytest.mark.asyncio
@skip_unless("ENABLE_REDIS_TEST", "True")
async def test_redis_cache_async(redis_service, event_loop):
    """Redis-backed caching with async models: cached predictions stay
    transparent, and the validated model is still usable after aclose()."""

    class SomeModel(AsyncModel):
        CONFIGURATIONS = {"model": {"model_settings": {"cache_predictions": True}}}

        async def _predict(self, item):
            await asyncio.sleep(0)
            return item

    class SomeModelMultiple(AsyncModel):
        CONFIGURATIONS = {
            "model_multiple": {"model_settings": {"cache_predictions": True}}
        }

        async def _predict_batch(self, items):
            await asyncio.sleep(0)
            return items

    class Item(pydantic.BaseModel):
        class SubItem(pydantic.BaseModel):
            boomer: Union[int, List[int]]

        ok: SubItem

    class SomeModelValidated(AsyncModel[Item, Item]):
        CONFIGURATIONS = {
            "model_validated": {"model_settings": {"cache_predictions": True}}
        }

        async def _predict_batch(self, items):
            await asyncio.sleep(0)
            return items

    library = ModelLibrary(
        models=[SomeModel, SomeModelMultiple, SomeModelValidated],
        settings={"cache": {"cache_provider": "redis"}},
    )
    assert isinstance(library.cache, RedisCache)

    simple = library.get("model")
    batched = library.get("model_multiple")
    items = [{"ok": {"boomer": 1}}, {"ok": {"boomer": [2, 2, 3]}}]
    await _do_model_test_async(simple, items)
    await _do_model_test_async(batched, items)
    # Close the library first, then exercise the validated model, matching
    # the original scenario's ordering.
    await library.aclose()
    await _do_model_test_async(library.get("model_validated"), items)
| 26.937778 | 84 | 0.611615 |
8afc0121828113f030b2e97ae6578f563e7c54de | 4,337 | py | Python | barrel_reaktor/document_list/models.py | txtr/barrel-reaktor | 6d2dfb894bd449f91ede2c32e6a162664e59a971 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | barrel_reaktor/document_list/models.py | txtr/barrel-reaktor | 6d2dfb894bd449f91ede2c32e6a162664e59a971 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | barrel_reaktor/document_list/models.py | txtr/barrel-reaktor | 6d2dfb894bd449f91ede2c32e6a162664e59a971 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | from barrel import Store, Field, DateField, IntField
from barrel.rpc import RpcMixin
class List(Store, RpcMixin):
    """A reaktor document list, backed by the `WSListMgmt` RPC interface.

    Covers both user-created lists and the special per-user `INBOX`/`TRASH`
    lists; all remote calls are assembled via `RpcMixin.signature`.
    """
    interface = 'WSListMgmt'
    # Fields whose changes are tracked by the barrel Store machinery.
    tracked_fields = ["document_ids"]
    id = Field(target='ID')
    count = IntField(target='count')
    creation_date = DateField(target='creationTime', default='')
    description = Field(target='description')
    document_ids = Field(target='documentIDs', default=[])
    # global_id = Field(target='globalID')
    name = Field(target='name')
    offset = IntField(target='offset')
    owner = Field(target='owner')
    total_count = IntField(target='size')
    @classmethod
    def delete_by_id(cls, token, list_id, delete_documents=False):
        """Delete a list; optionally also delete the documents it contains."""
        return cls.signature(method='deleteList', args=[token, list_id, delete_documents])
    @classmethod
    def _get_by_id(cls, token, list_id, offset, number_of_results):
        """Fetch a single list with a paging window over its documents."""
        return cls.signature(method='getList', args=[token, list_id, offset, number_of_results])
    @classmethod
    def _get_constrained_by_id(cls, token, list_id, search_string, offset, number_of_results):
        """Fetch a list restricted to documents matching `search_string`.

        Plain terms (no `field:value` syntax) are wrapped in wildcards so the
        backend performs a substring match.
        """
        if ':' not in search_string:
            search_string = '*%s*' % search_string
        return cls.signature(method='getListConstrained', args=[token, list_id, search_string, offset, number_of_results])
    @classmethod
    def _change_sorting(cls, token, list_id, sort, direction):
        """Persist the sort order of a list on the reaktor side."""
        # That would be nice, but unfortunately, it's not the case.
        # sort = cls.fields[sort].target
        invert = direction == 'desc'
        return cls.signature(method='changeListSorting', args=[token, list_id, sort, invert])
    @classmethod
    def get_by_ids(cls, token, list_ids):
        """Fetch several lists at once."""
        return cls.signature(method='getLists', args=[token, list_ids])
    @classmethod
    def filter(cls, token, list_id, search_string=None, offset=0, number_of_results=-1, sort='creationDate', direction='desc'):
        """Fetch a (possibly search-constrained) list after applying sorting."""
        cls._change_sorting(token, list_id, sort, direction)
        if search_string:
            return cls._get_constrained_by_id(token, list_id, search_string, offset, number_of_results)
        else:
            return cls._get_by_id(token, list_id, offset, number_of_results)
    @classmethod
    def get_by_doc_ids(cls, token, document_ids):
        """Return the lists that contain any of the given documents."""
        return cls.signature(method='getListsWithDocumentList', args=[token, document_ids], data_converter=lambda d: d)
    @classmethod
    def _get_by_type(cls, token, type, offset, number_of_results):
        """Fetch one of the special per-user lists (`INBOX` or `TRASH`)."""
        return cls.signature(method='getSpecialList', args=[token, type, offset, number_of_results])
    @classmethod
    def get_inbox(cls, token, offset=0, number_of_results=-1):
        """Fetch the user's inbox list."""
        return cls._get_by_type(token, 'INBOX', offset, number_of_results)
    @classmethod
    def get_trash(cls, token, offset=0, number_of_results=-1):
        """Fetch the user's trash list."""
        return cls._get_by_type(token, 'TRASH', offset, number_of_results)
    @classmethod
    def get_user_list_ids(cls, token):
        """Return the raw ids of all lists owned by the user."""
        return cls.signature(method='getListList', args=[token], data_converter=lambda d: d)
    @classmethod
    def create(cls, token, name, description=''):
        """Create a new list and return it as a `List` instance."""
        def converter(data):
            # The RPC returns only the new id; wrap it together with the
            # values the caller supplied.
            return cls({'ID': data, 'name': name, 'description': description})
        return cls.signature(method='createList', args=[token, name, description], data_converter=converter)
    @classmethod
    def add_documents(cls, token, list_id, document_ids, index=0):
        """Insert documents into a list at the given position."""
        return cls.signature(method='addDocumentsToList', args=[token, list_id, document_ids, index])
    @classmethod
    def remove_documents(cls, token, list_id, document_ids):
        """Remove the given documents from a list."""
        return cls.signature(method='removeDocumentsFromList', args=[token, list_id, document_ids])
    @classmethod
    def empty(cls, token, list_id):
        """Move every document of the list to trash."""
        # `keepDocumentsInOtherLists` is always True, since reaktor does not support False (cfr. api doc).
        # Note that since moving a document to trash removes other labels, the expected result
        # is still reached.
        return cls.signature(interface='WSDocMgmt', method='removeDocumentsInList', args=[token, list_id, True])
    @property
    def is_inbox(self):
        """True when this is the special per-user inbox list."""
        return self.name.startswith('INBOX-')
    @property
    def is_trash(self):
        """True when this is the special per-user trash list."""
        return self.name.startswith('TRASH-')
    def __len__(self):
        # Number of documents in the current paging window.
        return self.count
    def __nonzero__(self):
        # Python 2 truthiness hook: non-empty when the full list has documents.
        # (Python 3 falls back to __len__ for bool().)
        return self.total_count
| 40.157407 | 127 | 0.693798 |
1af5e75ca4b82b64a63cf48a03c64e112c258004 | 1,323 | py | Python | auth0/v3/authentication/users.py | santiagoroman/auth0-python | b88b056d0c68eb26a1171f33273010faf8fefe63 | [
"MIT"
] | null | null | null | auth0/v3/authentication/users.py | santiagoroman/auth0-python | b88b056d0c68eb26a1171f33273010faf8fefe63 | [
"MIT"
] | null | null | null | auth0/v3/authentication/users.py | santiagoroman/auth0-python | b88b056d0c68eb26a1171f33273010faf8fefe63 | [
"MIT"
] | null | null | null | from .base import AuthenticationBase
import warnings
class Users(AuthenticationBase):
    """Userinfo related endpoints.

    Args:
        domain (str): Your auth0 domain (e.g: username.auth0.com)
    """

    def userinfo(self, access_token):
        """Fetch the profile of the user the access token belongs to.

        Only works when ``openid`` was granted as a scope for the token.

        Args:
            access_token (str): Auth0 access token (obtained during login).

        Returns:
            The user profile.
        """
        endpoint = 'https://{}/userinfo'.format(self.domain)
        auth_header = {'Authorization': 'Bearer {}'.format(access_token)}
        return self.get(url=endpoint, headers=auth_header)

    def tokeninfo(self, jwt):
        """Resolve a user profile from the user's JWT (deprecated endpoint).

        Validates the token (signature and expiration) and returns the
        user information associated with its user id (sub property).

        Args:
            jwt (str): User's jwt

        Returns:
            The user profile.
        """
        warnings.warn("/tokeninfo will be deprecated in future releases", DeprecationWarning)
        endpoint = 'https://{}/tokeninfo'.format(self.domain)
        return self.post(url=endpoint, data={'id_token': jwt})
| 27 | 93 | 0.602419 |
915bc1bd6a87d6184dec13eaf693f8936969b829 | 14,565 | py | Python | aleph/views/entitysets_api.py | davidknezic/aleph | e59c95b201313bbe40cd036f5d2aeb0f8b599639 | [
"MIT"
] | 1 | 2021-09-14T02:49:46.000Z | 2021-09-14T02:49:46.000Z | aleph/views/entitysets_api.py | houston1784/aleph | a73543c2d4334c9476042c49d7147f70c2be5f4e | [
"MIT"
] | 319 | 2021-04-30T01:09:57.000Z | 2022-03-30T01:15:19.000Z | aleph/views/entitysets_api.py | houston1784/aleph | a73543c2d4334c9476042c49d7147f70c2be5f4e | [
"MIT"
] | null | null | null | import logging
from banal import ensure_list
from flask_babel import gettext
from flask import Blueprint, request, redirect
from werkzeug.exceptions import NotFound, BadRequest
from aleph.core import db, url_for
from aleph.model import EntitySet, Judgement
from aleph.model.common import make_textid
from aleph.logic.entitysets import create_entityset, refresh_entityset
from aleph.logic.entitysets import save_entityset_item
from aleph.logic.diagrams import publish_diagram
from aleph.logic.entities import upsert_entity, validate_entity, check_write_entity
from aleph.queues import queue_task, OP_UPDATE_ENTITY
from aleph.search import EntitySetItemsQuery, SearchQueryParser
from aleph.search import QueryParser, DatabaseQueryResult
from aleph.views.context import tag_request
from aleph.views.entities_api import view as entity_view
from aleph.views.serializers import EntitySerializer, EntitySetSerializer
from aleph.views.serializers import EntitySetItemSerializer
from aleph.views.util import jsonify, get_flag, get_session_id
from aleph.views.util import get_nested_collection, get_index_entity, get_entityset
from aleph.views.util import parse_request, get_db_collection
# Flask blueprint exposing the /api/2/entitysets endpoints.
blueprint = Blueprint("entitysets_api", __name__)
log = logging.getLogger(__name__)
@blueprint.route("/api/2/entitysets", methods=["GET"])
def index():
    """Returns a list of entitysets for the role
    ---
    get:
      summary: List entitysets
      parameters:
      - description: The collection ID.
        in: query
        name: 'filter:collection_id'
        required: true
        schema:
          minimum: 1
          type: integer
      - description: The type of the entity set
        in: query
        name: 'filter:type'
        required: false
        schema:
          type: string
      - description: Query string for searches
        in: query
        name: 'prefix'
        required: false
        schema:
          type: string
      responses:
        '200':
          content:
            application/json:
              schema:
                type: object
                allOf:
                - $ref: '#/components/schemas/QueryResponse'
                properties:
                  results:
                    type: array
                    items:
                      $ref: '#/components/schemas/EntitySet'
          description: OK
      tags:
      - EntitySet
    """
    parser = QueryParser(request.args, request.authz)
    types = parser.filters.get("type")
    # Only sets readable by the current role, optionally narrowed by type
    # and name prefix; most recently updated first.
    q = EntitySet.by_authz(request.authz, types=types, prefix=parser.prefix)
    q = q.order_by(EntitySet.updated_at.desc())
    collection_ids = ensure_list(parser.filters.get("collection_id"))
    if collection_ids:
        q = q.filter(EntitySet.collection_id.in_(collection_ids))
    result = DatabaseQueryResult(request, q, parser=parser)
    return EntitySetSerializer.jsonify_result(result)
@blueprint.route("/api/2/entitysets", methods=["POST", "PUT"])
def create():
    """Create an entityset.
    ---
    post:
      summary: Create an entityset
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/EntitySetCreate'
      responses:
        '200':
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/EntitySet'
          description: OK
      tags:
      - EntitySet
    """
    # Validate the payload, require write access on the target collection,
    # then persist the new set in one transaction.
    data = parse_request("EntitySetCreate")
    collection = get_nested_collection(data, request.authz.WRITE)
    entityset = create_entityset(collection, data, request.authz)
    db.session.commit()
    return EntitySetSerializer.jsonify(entityset)
@blueprint.route("/api/2/entitysets/<entityset_id>", methods=["GET"])
def view(entityset_id):
    """Return the entityset with ID `entityset_id`.
    ---
    get:
      summary: Fetch an entityset
      parameters:
      - description: The entityset id.
        in: path
        name: entityset_id
        required: true
        schema:
          type: string
        example: 3a0d91ece2dce88ad3259594c7b642485235a048
      responses:
        '200':
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/EntitySet'
          description: OK
      tags:
      - EntitySet
    """
    entityset = get_entityset(entityset_id, request.authz.READ)
    # Profiles have their own API surface; hand them off to the profile view.
    if entityset.type == EntitySet.PROFILE:
        return redirect(url_for("profile_api.view", profile_id=entityset_id))
    data = entityset.to_dict()
    # Signal to the serializer that this is a full (not embedded) view.
    data["shallow"] = False
    return EntitySetSerializer.jsonify(data)
@blueprint.route("/api/2/entitysets/<entityset_id>", methods=["POST", "PUT"])
def update(entityset_id):
    """Update the entityset with ID `entityset_id`.
    ---
    post:
      summary: Update an entityset
      parameters:
      - description: The entityset ID.
        in: path
        name: entityset_id
        required: true
        schema:
          type: string
        example: 3a0d91ece2dce88ad3259594c7b642485235a048
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/EntitySetUpdate'
      responses:
        '200':
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/EntitySet'
          description: OK
      tags:
      - EntitySet
    """
    # Write access is required to modify a set.
    entityset = get_entityset(entityset_id, request.authz.WRITE)
    data = parse_request("EntitySetUpdate")
    entityset.update(data)
    db.session.commit()
    # Invalidate cached representations before returning the fresh view.
    refresh_entityset(entityset_id)
    return view(entityset_id)
@blueprint.route("/api/2/entitysets/<entityset_id>/embed", methods=["POST"])
def embed(entityset_id):
    """Return an embedded network diagram for the entityset with ID `entityset_id`.
    ---
    post:
      summary: Create an embedded network diagram
      parameters:
      - description: The entityset id.
        in: path
        name: entityset_id
        required: true
        schema:
          type: string
        example: 3a0d91ece2dce88ad3259594c7b642485235a048
      responses:
        '200':
          content:
            application/json:
              schema:
                type: object
                properties:
                  embed:
                    type: string
                    description: HTML fragment to be embedded.
                  url:
                    type: string
                    format: url
                    description: Published version of the embedded file.
          description: OK
      tags:
      - EntitySet
    """
    # Publishing an embed is a modification, so write access is required.
    entityset = get_entityset(entityset_id, request.authz.WRITE)
    if entityset.type != EntitySet.DIAGRAM:
        raise BadRequest(gettext("Only diagrams can be embedded!"))
    data = publish_diagram(entityset)
    return jsonify(data)
@blueprint.route("/api/2/entitysets/<entityset_id>", methods=["DELETE"])
def delete(entityset_id):
    """Delete an entity set.
    ---
    delete:
      summary: Delete an entity set
      parameters:
      - description: The entity set ID.
        in: path
        name: entityset_id
        required: true
        schema:
          type: string
        example: 3a0d91ece2dce88ad3259594c7b642485235a048
      responses:
        '204':
          description: No Content
      tags:
      - EntitySet
    """
    entityset = get_entityset(entityset_id, request.authz.WRITE)
    entityset.delete()
    db.session.commit()
    # Drop cached representations of the now-deleted set.
    refresh_entityset(entityset_id)
    return ("", 204)
@blueprint.route("/api/2/entitysets/<entityset_id>/entities", methods=["GET"])
def entities_index(entityset_id):
    """Search entities in the entity set with id `entityset_id`.
    ---
    get:
      summary: Search entities in the entity set with id `entityset_id`
      description: >
        Supports all query filters and arguments present in the normal
        entity search API, but all resulting entities will be members of
        the set.
      parameters:
      - description: The entityset id.
        in: path
        name: entityset_id
        required: true
        schema:
          type: string
        example: 3a0d91ece2dce88ad3259594c7b642485235a048
      responses:
        '200':
          description: Returns a list of entities in result
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/EntitiesResponse'
      tags:
      - EntitySet
    """
    entityset = get_entityset(entityset_id, request.authz.READ)
    parser = SearchQueryParser(request.args, request.authz)
    # Record the query for request analytics/telemetry.
    tag_request(query=parser.text, prefix=parser.prefix)
    # Regular entity search semantics, scoped to members of this set.
    result = EntitySetItemsQuery.handle(request, parser=parser, entityset=entityset)
    return EntitySerializer.jsonify_result(result)
@blueprint.route("/api/2/entitysets/<entityset_id>/entities", methods=["POST", "PUT"])
def entities_update(entityset_id):
    """
    ---
    post:
      summary: Update an entity and add it to the entity set.
      description: >
        Update the entity with id `entity_id`. If it does not exist it will be
        created. If the user cannot edit the given entity, it is merely added
        to the entity set. New entities are always created in the collection of
        the entity set.
        Aside from these idiosyncracies, this is the same as `/api/2/entities/<id>`,
        but handles entity set membership transparently.
      parameters:
      - description: The entityset id.
        in: path
        name: entityset_id
        required: true
        schema:
          type: string
        example: 3a0d91ece2dce88ad3259594c7b642485235a048
      - in: query
        name: sign
        description: Sign entity IDs referenced in nested properties.
        required: false
        schema:
          type: boolean
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/EntityUpdate'
      responses:
        '200':
          description: OK
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Entity'
      tags:
      - Entity
    """
    entityset = get_entityset(entityset_id, request.authz.WRITE)
    data = parse_request("EntityUpdate")
    # Reuse the caller-supplied entity ID when present, otherwise mint one.
    entity_id = data.get("id", make_textid())
    try:
        entity = get_index_entity(entity_id, request.authz.READ)
        collection = get_db_collection(entity.get("collection_id"), request.authz.READ)
    except NotFound:
        # Entity does not exist yet: it will be created in the collection
        # that owns the entity set.
        entity = None
        collection = entityset.collection
    tag_request(collection_id=entityset.collection_id)
    # Only write the entity body if it is new or the user may edit it;
    # otherwise the entity is merely added to the set below.
    if entity is None or check_write_entity(entity, request.authz):
        if get_flag("validate", default=False):
            validate_entity(data)
        entity_id = upsert_entity(
            data,
            collection,
            authz=request.authz,
            sync=get_flag("sync", default=True),
            sign=get_flag("sign", default=False),
            job_id=get_session_id(),
        )
    # Record membership of the entity in the entity set.
    save_entityset_item(
        entityset,
        collection,
        entity_id,
        added_by_id=request.authz.id,
    )
    db.session.commit()
    return entity_view(entity_id)
@blueprint.route("/api/2/entitysets/<entityset_id>/items", methods=["GET"])
def item_index(entityset_id):
    """See a list of all items that are linked to this entity set.
    This gives entities that are judged negative and unsure alongside the
    positive matches returned by the sibling `./entities` API.
    ---
    post:
      summary: Get all items in the entity set.
      parameters:
      - description: The entityset id.
        in: path
        name: entityset_id
        required: true
        schema:
          type: string
        example: 3a0d91ece2dce88ad3259594c7b642485235a048
      responses:
        '200':
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/EntitySetItemResponse'
          description: OK
      tags:
      - EntitySetItem
    """
    eset = get_entityset(entityset_id, request.authz.READ)
    result = DatabaseQueryResult(request, eset.items(request.authz))
    # The entityset is needed to check if the item is writeable in the serializer:
    result.results = [item.to_dict(entityset=eset) for item in result.results]
    return EntitySetItemSerializer.jsonify_result(result)
@blueprint.route("/api/2/entitysets/<entityset_id>/items", methods=["POST", "PUT"])
def item_update(entityset_id):
    """Add an item to the entity set with id `entityset_id`, or change
    the items judgement.
    To delete an item from the entity set, apply the judgement: `no_judgement`.
    ---
    post:
      summary: Add item to an entityset
      parameters:
      - description: The entityset id.
        in: path
        name: entityset_id
        required: true
        schema:
          type: string
        example: 3a0d91ece2dce88ad3259594c7b642485235a048
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/EntitySetItemUpdate'
      responses:
        '200':
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/EntitySetItem'
          description: OK
        '204':
          description: Item removed
      tags:
      - EntitySetItem
    """
    eset = get_entityset(entityset_id, request.authz.WRITE)
    payload = parse_request("EntitySetItemUpdate")
    # The entity can be passed inline or referenced by ID.
    entity_data = payload.pop("entity", {})
    entity_id = payload.pop("entity_id", entity_data.get("id"))
    entity = get_index_entity(entity_id, request.authz.READ)
    collection = get_db_collection(entity["collection_id"])
    payload["added_by_id"] = request.authz.id
    payload.pop("collection", None)
    item = save_entityset_item(eset, collection, entity_id, **payload)
    db.session.commit()
    # Kick off a background re-index of the affected entity.
    queue_task(collection, OP_UPDATE_ENTITY, job_id=get_session_id(), entity_id=entity_id)
    if item is not None:
        # The entityset is needed to check if the item is writeable in the serializer:
        body = item.to_dict(entityset=eset)
    else:
        # The item was deleted (no judgement); synthesize a placeholder response.
        body = {
            "id": f"{entityset_id}${entity_id}",
            "entityset_id": entityset_id,
            "entityset_collection_id": eset.collection_id,
            "entity_id": entity_id,
            "collection_id": entity["collection_id"],
            "judgement": Judgement.NO_JUDGEMENT,
        }
    return EntitySetItemSerializer.jsonify(body)
| 32.366667 | 87 | 0.630965 |
bb95c1bf03d0530f9698fdc69fa1abfe494d274e | 1,534 | py | Python | setup.py | kota7/wordleai-sql | c99ec44d4d9fc5d621764c460d4f23fffbfb919b | [
"MIT"
] | null | null | null | setup.py | kota7/wordleai-sql | c99ec44d4d9fc5d621764c460d4f23fffbfb919b | [
"MIT"
] | 2 | 2022-03-08T22:36:26.000Z | 2022-03-21T23:21:37.000Z | setup.py | kota7/wordleai-sql | c99ec44d4d9fc5d621764c460d4f23fffbfb919b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os

# Long description for PyPI comes from the README next to this script.
readmefile = os.path.join(os.path.dirname(__file__), "README.md")
# Read explicitly as UTF-8 so installation does not depend on the platform's
# default encoding (e.g. cp1252 on Windows would fail on non-ASCII text).
with open(readmefile, encoding="utf-8") as f:
    readme = f.read()

from wordleaisql import __version__

setup(
    name='wordleaisql',
    version=__version__,
    description='Wordle AI with SQL Backend',
    long_description=readme,
    long_description_content_type="text/markdown",
    url='https://github.com/kota7/wordleai-sql',
    packages=['wordleaisql'],
    install_requires=['tqdm'],
    # Data files shipped inside the package (judge source and vocabulary).
    package_data={"wordleaisql": ["wordle-judge-all.cpp", "wordle-vocab.txt"]},
    entry_points={'console_scripts': ['wordleai-sql=wordleaisql.api:main']},
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        #'Programming Language :: Python :: 2.6',
        #'Programming Language :: Python :: 2.7',
        #'Programming Language :: Python :: 3.3',
        #'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9'
    ],
    test_suite='tests'
)
| 34.088889 | 79 | 0.627119 |
3360cb8419476e9adce7a1e1c2fdf721b9739b2f | 4,011 | py | Python | crits/services/analysis_result.py | thelandy/crits | e8d72d8e3cb278d6e86215ba2bb567a874c66edd | [
"MIT"
] | 22 | 2015-01-14T19:49:32.000Z | 2022-01-26T12:18:52.000Z | crits/services/analysis_result.py | thelandy/crits | e8d72d8e3cb278d6e86215ba2bb567a874c66edd | [
"MIT"
] | null | null | null | crits/services/analysis_result.py | thelandy/crits | e8d72d8e3cb278d6e86215ba2bb567a874c66edd | [
"MIT"
] | 6 | 2015-01-22T21:25:52.000Z | 2021-04-12T23:24:14.000Z | from django.conf import settings
from mongoengine import Document, StringField, ListField, EmbeddedDocument
from mongoengine import DynamicEmbeddedDocument, DynamicField, UUIDField
from mongoengine import DictField, EmbeddedDocumentField, BooleanField
from crits.core.crits_mongoengine import CritsDocument, CritsSchemaDocument
from crits.core.crits_mongoengine import CritsDocumentFormatter
# Embedded Documents common to most classes
class AnalysisConfig(DynamicEmbeddedDocument, CritsDocumentFormatter):
    """
    Embedded Analysis Configuration dictionary.

    Dynamic embedded document: arbitrary key/value configuration options
    used for a service run are stored as ad-hoc fields on this document.
    """

    meta = {}
class EmbeddedAnalysisResultLog(EmbeddedDocument, CritsDocumentFormatter):
    """
    Log entry for a service run.
    """

    # Human-readable log message emitted by the service.
    message = StringField()
    #TODO: this should be a datetime object
    datetime = StringField()
    # Log severity level (string; set by the service runner).
    level = StringField()
class AnalysisResult(CritsDocument, CritsSchemaDocument, CritsDocumentFormatter,
                     Document):
    """
    Analysis Result from running an analytic service.

    Stored in its own MongoDB collection (``settings.COL_ANALYSIS_RESULTS``)
    and linked back to the top-level object (TLO) it was run against via
    ``object_type`` / ``object_id``.
    """

    meta = {
        "crits_type": "AnalysisResult",
        "collection": settings.COL_ANALYSIS_RESULTS,
        "latest_schema_version": 1,
        "schema_doc": {
            'analyst': 'Analyst who ran the service.',
            'analysis_id': 'Unique ID for this service execution.',
            'analysis_type': 'Type of analysis this is.',
            'config': 'Configuration options used for this execution.',
            'distributed': 'Distributed for this execution.',
            'finish_date': 'Date execution finished.',
            'log': 'Log entries for this execution.',
            'object_type': 'Type of TLO this is for.',
            'object_id': 'ObjectId of the TLO.',
            'results': 'Analysis results.',
            'service_name': 'Name of the service.',
            'source': 'Source of the service.',
            'start_date': 'Date execution started.',
            'status': 'Status of the execution.',
            'template': 'Custom template to render results.',
            'version': 'Version of the service used.',
        },
        # Options consumed by the jTable-based listing UI.
        "jtable_opts": {
            'details_url': 'crits.services.views.analysis_result',
            'details_url_key': 'id',
            'default_sort': "start_date DESC",
            'searchurl': 'crits.services.views.analysis_results_listing',
            'fields': [ "object_type", "service_name", "version",
                        "start_date", "finish_date", "results",
                        "object_id", "id"],
            'jtopts_fields': [ "details",
                               "object_type",
                               "service_name",
                               "version",
                               "start_date",
                               "finish_date",
                               "results",
                               "id"],
            'hidden_fields': ["object_id", "id"],
            'linked_fields': [ "object_type", "service_name" ],
            'details_link': 'details',
            'no_sort': ['details']
        }
    }

    #TODO: these should be datetime objects, not strings
    analyst = StringField()
    analysis_id = UUIDField(binary=False)
    # Stored under the legacy "type" field name in MongoDB.
    analysis_type = StringField(db_field="type")
    config = EmbeddedDocumentField(AnalysisConfig)
    distributed = BooleanField()
    finish_date = StringField()
    log = ListField(EmbeddedDocumentField(EmbeddedAnalysisResultLog))
    # Type and ID of the top-level object this result belongs to.
    object_type = StringField(required=True)
    object_id = StringField(required=True)
    results = ListField(DynamicField(DictField))
    service_name = StringField()
    source = StringField()
    start_date = StringField()
    status = StringField()
    template = StringField()
    version = StringField()
| 40.928571 | 86 | 0.566442 |
48cdee0e6669124e2604444a05adf9a5a7c6e61d | 2,231 | py | Python | 3d/dambreak/dissipation_p.py | AntoninoScala/air-water-vv | 06b6579cc308795dc56acfd30873747f74231589 | [
"MIT"
] | 5 | 2017-11-15T16:18:47.000Z | 2021-07-18T02:32:48.000Z | 3d/dambreak/dissipation_p.py | AntoninoScala/air-water-vv | 06b6579cc308795dc56acfd30873747f74231589 | [
"MIT"
] | 41 | 2015-06-03T17:53:17.000Z | 2017-07-26T09:15:07.000Z | 3d/dambreak/dissipation_p.py | AntoninoScala/air-water-vv | 06b6579cc308795dc56acfd30873747f74231589 | [
"MIT"
] | 21 | 2017-10-27T14:37:54.000Z | 2020-02-10T10:31:36.000Z | from proteus import *
from proteus.default_p import *
from dambreak import *
from proteus.mprans import Dissipation
LevelModelType = Dissipation.LevelModel
if useOnlyVF:
RD_model = None
LS_model = None
dissipation_model = 3
ME_model = 2
else:
RD_model = 3
LS_model = 2
ME_model = 6
kappa_model = 5
if movingDomain:
kappa_model += 1
ME_model += 1
#
dissipation_model_flag = 1
if useRANS == 2:
dissipation_model_flag=2
elif useRANS == 3:
dissipation_model_flag=3
coefficients = Dissipation.Coefficients(V_model=0,ME_model=ME_model,LS_model=LS_model,RD_model=RD_model,kappa_model=kappa_model,
dissipation_model_flag=dissipation_model_flag,#1 -- K-epsilon, 2 -- K-omega 1998, 3 -- K-omega 1988
useMetrics=useMetrics,
rho_0=rho_0,nu_0=nu_0,
rho_1=rho_1,nu_1=nu_1,
g=g,
c_mu=0.09,sigma_e=1.0,
sc_uref=dissipation_sc_uref,
sc_beta=dissipation_sc_beta)
dissipationInflow = coefficients.c_mu*kInflow**(1.5)/(0.03*L[1])
if useRANS >= 2:
dissipationInflow = dissipationInflow/(kInflow+1.0e-12)
def getDBC_dissipation(x, flag):
    """Dirichlet BC: inlet dissipation on the left, zero on the right."""
    if flag == boundaryTags['left']:
        # Inflow: impose the precomputed inlet dissipation rate.
        return lambda x, t: dissipationInflow
    elif flag == boundaryTags['right']:
        # Outflow: homogeneous Dirichlet value.
        return lambda x, t: 0.0
    # Any other boundary: no Dirichlet condition (implicitly None).

dirichletConditions = {0: getDBC_dissipation}

#fluxBoundaryConditions = {0:'outFlow'}
def getAFBC_dissipation(x, flag):
    """Advective flux BC: zero flux on walls, unset on inflow/outflow."""
    # The left (inflow) boundary is governed by the Dirichlet condition and
    # the right (outflow) is left free, so both implicitly return None.
    if flag not in (boundaryTags['left'], boundaryTags['right']):
        return lambda x, t: 0.0
def getDFBC_dissipation(x, flag):
    """Diffusive flux BC: homogeneous everywhere except the inflow (left)."""
    # Outflow and walls alike get a zero diffusive flux; the inflow returns
    # None implicitly since its value is fixed by the Dirichlet condition.
    if flag != boundaryTags['left']:
        return lambda x, t: 0.0

advectiveFluxBoundaryConditions = {0: getAFBC_dissipation}

diffusiveFluxBoundaryConditions = {0: {0: getDFBC_dissipation}}
class ConstantIC:
    """Initial condition evaluating to a fixed constant everywhere."""

    def __init__(self, cval=0.0):
        # Constant value returned for every point and time.
        self.cval = cval

    def uOfXT(self, x, t):
        """Return the constant value regardless of position ``x`` or time ``t``."""
        return self.cval
| 30.561644 | 133 | 0.626625 |
415a5cc7a1806808ea28f0beca4c61c0f0f2edb8 | 20,006 | py | Python | glib-2.64.0/gio/gdbus-2.0/codegen/dbustypes.py | BeanGreen247/surf | a56d1b0ceb43c578c6a3258e4ffb2903dc691fe2 | [
"MIT"
] | null | null | null | glib-2.64.0/gio/gdbus-2.0/codegen/dbustypes.py | BeanGreen247/surf | a56d1b0ceb43c578c6a3258e4ffb2903dc691fe2 | [
"MIT"
] | null | null | null | glib-2.64.0/gio/gdbus-2.0/codegen/dbustypes.py | BeanGreen247/surf | a56d1b0ceb43c578c6a3258e4ffb2903dc691fe2 | [
"MIT"
] | null | null | null | # -*- Mode: Python -*-
# GDBus - GLib D-Bus Library
#
# Copyright (C) 2008-2011 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this library; if not, see <http://www.gnu.org/licenses/>.
#
# Author: David Zeuthen <davidz@redhat.com>
from . import utils
from .utils import print_error
class Annotation:
    """A D-Bus annotation (key/value pair), possibly carrying nested annotations."""

    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.annotations = []
        self.since = ''

    def post_process(self, interface_prefix, cns, cns_upper, cns_lower, container):
        """Derive the C-friendly lower-case key and inherit `since` metadata."""
        overridden_key = utils.lookup_annotation(self.annotations, 'org.gtk.GDBus.C.Name')
        if utils.is_ugly_case(overridden_key):
            # Ugly_Case overrides are taken verbatim, only lower-cased.
            self.key_lower = overridden_key.lower()
        else:
            key = overridden_key if overridden_key else self.key
            self.key_lower = (
                utils.camel_case_to_uscore(key).lower().replace('-', '_').replace('.', '_')
            )
        if len(self.since) == 0:
            self.since = utils.lookup_since(self.annotations)
        if len(self.since) == 0:
            # Fall back to the enclosing element's version.
            self.since = container.since
        for child in self.annotations:
            child.post_process(interface_prefix, cns, cns_upper, cns_lower, self)
class Arg:
    """A D-Bus method/signal argument and the C type information derived
    from its D-Bus signature."""

    def __init__(self, name, signature):
        self.name = name
        self.signature = signature
        self.annotations = []
        self.doc_string = ''
        self.since = ''

    def post_process(self, interface_prefix, cns, cns_upper, cns_lower, arg_number):
        """Fill in documentation and C-type mapping fields for this argument.

        `arg_number` is used to synthesize a name for unnamed arguments.
        """
        if len(self.doc_string) == 0:
            self.doc_string = utils.lookup_docs(self.annotations)
        if len(self.since) == 0:
            self.since = utils.lookup_since(self.annotations)

        if self.name is None:
            self.name = 'unnamed_arg%d'%arg_number
        # default to GVariant
        self.ctype_in_g  = 'GVariant *'
        self.ctype_in  = 'GVariant *'
        self.ctype_in_dup  = 'GVariant *'
        self.ctype_out = 'GVariant **'
        self.gtype = 'G_TYPE_VARIANT'
        self.free_func = 'g_variant_unref'
        self.format_in = '@' + self.signature
        self.format_out = '@' + self.signature
        self.gvariant_get = 'XXX'
        self.gvalue_get = 'g_value_get_variant'
        self.array_annotation = ''

        # Map basic/known D-Bus signatures to native C types, unless the
        # ForceGVariant annotation pins the argument to a raw GVariant.
        if not utils.lookup_annotation(self.annotations, 'org.gtk.GDBus.C.ForceGVariant'):
            if self.signature == 'b':
                self.ctype_in_g  = 'gboolean '
                self.ctype_in  = 'gboolean '
                self.ctype_out = 'gboolean *'
                self.gtype = 'G_TYPE_BOOLEAN'
                self.free_func = None
                self.format_in = 'b'
                self.format_out = 'b'
                self.gvariant_get = 'g_variant_get_boolean'
                self.gvalue_get = 'g_value_get_boolean'
            elif self.signature == 'y':
                self.ctype_in_g  = 'guchar '
                self.ctype_in  = 'guchar '
                self.ctype_out = 'guchar *'
                self.gtype = 'G_TYPE_UCHAR'
                self.free_func = None
                self.format_in = 'y'
                self.format_out = 'y'
                self.gvariant_get = 'g_variant_get_byte'
                self.gvalue_get = 'g_value_get_uchar'
            elif self.signature == 'n':
                # Note: 16-bit ints are widened to plain gint/guint on the
                # GLib/GValue side (G_TYPE_INT / g_value_get_int).
                self.ctype_in_g  = 'gint '
                self.ctype_in  = 'gint16 '
                self.ctype_out = 'gint16 *'
                self.gtype = 'G_TYPE_INT'
                self.free_func = None
                self.format_in = 'n'
                self.format_out = 'n'
                self.gvariant_get = 'g_variant_get_int16'
                self.gvalue_get = 'g_value_get_int'
            elif self.signature == 'q':
                self.ctype_in_g  = 'guint '
                self.ctype_in  = 'guint16 '
                self.ctype_out = 'guint16 *'
                self.gtype = 'G_TYPE_UINT'
                self.free_func = None
                self.format_in = 'q'
                self.format_out = 'q'
                self.gvariant_get = 'g_variant_get_uint16'
                self.gvalue_get = 'g_value_get_uint'
            elif self.signature == 'i':
                self.ctype_in_g  = 'gint '
                self.ctype_in  = 'gint '
                self.ctype_out = 'gint *'
                self.gtype = 'G_TYPE_INT'
                self.free_func = None
                self.format_in = 'i'
                self.format_out = 'i'
                self.gvariant_get = 'g_variant_get_int32'
                self.gvalue_get = 'g_value_get_int'
            elif self.signature == 'u':
                self.ctype_in_g  = 'guint '
                self.ctype_in  = 'guint '
                self.ctype_out = 'guint *'
                self.gtype = 'G_TYPE_UINT'
                self.free_func = None
                self.format_in = 'u'
                self.format_out = 'u'
                self.gvariant_get = 'g_variant_get_uint32'
                self.gvalue_get = 'g_value_get_uint'
            elif self.signature == 'x':
                self.ctype_in_g  = 'gint64 '
                self.ctype_in  = 'gint64 '
                self.ctype_out = 'gint64 *'
                self.gtype = 'G_TYPE_INT64'
                self.free_func = None
                self.format_in = 'x'
                self.format_out = 'x'
                self.gvariant_get = 'g_variant_get_int64'
                self.gvalue_get = 'g_value_get_int64'
            elif self.signature == 't':
                self.ctype_in_g  = 'guint64 '
                self.ctype_in  = 'guint64 '
                self.ctype_out = 'guint64 *'
                self.gtype = 'G_TYPE_UINT64'
                self.free_func = None
                self.format_in = 't'
                self.format_out = 't'
                self.gvariant_get = 'g_variant_get_uint64'
                self.gvalue_get = 'g_value_get_uint64'
            elif self.signature == 'd':
                self.ctype_in_g  = 'gdouble '
                self.ctype_in  = 'gdouble '
                self.ctype_out = 'gdouble *'
                self.gtype = 'G_TYPE_DOUBLE'
                self.free_func = None
                self.format_in = 'd'
                self.format_out = 'd'
                self.gvariant_get = 'g_variant_get_double'
                self.gvalue_get = 'g_value_get_double'
            elif self.signature == 's':
                self.ctype_in_g  = 'const gchar *'
                self.ctype_in  = 'const gchar *'
                self.ctype_in_dup  = 'gchar *'
                self.ctype_out = 'gchar **'
                self.gtype = 'G_TYPE_STRING'
                self.free_func = 'g_free'
                self.format_in = 's'
                self.format_out = 's'
                self.gvariant_get = 'g_variant_get_string'
                self.gvalue_get = 'g_value_get_string'
            elif self.signature == 'o':
                self.ctype_in_g  = 'const gchar *'
                self.ctype_in  = 'const gchar *'
                self.ctype_in_dup  = 'gchar *'
                self.ctype_out = 'gchar **'
                self.gtype = 'G_TYPE_STRING'
                self.free_func = 'g_free'
                self.format_in = 'o'
                self.format_out = 'o'
                self.gvariant_get = 'g_variant_get_string'
                self.gvalue_get = 'g_value_get_string'
            elif self.signature == 'g':
                self.ctype_in_g  = 'const gchar *'
                self.ctype_in  = 'const gchar *'
                self.ctype_in_dup  = 'gchar *'
                self.ctype_out = 'gchar **'
                self.gtype = 'G_TYPE_STRING'
                self.free_func = 'g_free'
                self.format_in = 'g'
                self.format_out = 'g'
                self.gvariant_get = 'g_variant_get_string'
                self.gvalue_get = 'g_value_get_string'
            elif self.signature == 'ay':
                # Byte arrays are exposed as NUL-terminated C strings.
                self.ctype_in_g  = 'const gchar *'
                self.ctype_in  = 'const gchar *'
                self.ctype_in_dup  = 'gchar *'
                self.ctype_out = 'gchar **'
                self.gtype = 'G_TYPE_STRING'
                self.free_func = 'g_free'
                self.format_in = '^ay'
                self.format_out = '^ay'
                self.gvariant_get = 'g_variant_get_bytestring'
                self.gvalue_get = 'g_value_get_string'
            elif self.signature == 'as':
                self.ctype_in_g  = 'const gchar *const *'
                self.ctype_in  = 'const gchar *const *'
                self.ctype_in_dup  = 'gchar **'
                self.ctype_out = 'gchar ***'
                self.gtype = 'G_TYPE_STRV'
                self.free_func = 'g_strfreev'
                self.format_in = '^as'
                self.format_out = '^as'
                self.gvariant_get = 'g_variant_get_strv'
                self.gvalue_get = 'g_value_get_boxed'
                self.array_annotation = '(array zero-terminated=1)'
            elif self.signature == 'ao':
                self.ctype_in_g  = 'const gchar *const *'
                self.ctype_in  = 'const gchar *const *'
                self.ctype_in_dup  = 'gchar **'
                self.ctype_out = 'gchar ***'
                self.gtype = 'G_TYPE_STRV'
                self.free_func = 'g_strfreev'
                self.format_in = '^ao'
                self.format_out = '^ao'
                self.gvariant_get = 'g_variant_get_objv'
                self.gvalue_get = 'g_value_get_boxed'
                self.array_annotation = '(array zero-terminated=1)'
            elif self.signature == 'aay':
                self.ctype_in_g  = 'const gchar *const *'
                self.ctype_in  = 'const gchar *const *'
                self.ctype_in_dup  = 'gchar **'
                self.ctype_out = 'gchar ***'
                self.gtype = 'G_TYPE_STRV'
                self.free_func = 'g_strfreev'
                self.format_in = '^aay'
                self.format_out = '^aay'
                self.gvariant_get = 'g_variant_get_bytestring_array'
                self.gvalue_get = 'g_value_get_boxed'
                self.array_annotation = '(array zero-terminated=1)'

        for a in self.annotations:
            a.post_process(interface_prefix, cns, cns_upper, cns_lower, self)
class Method:
    """A D-Bus method declaration: in/out arguments plus codegen naming
    and UNIX-fd metadata."""

    def __init__(self, name, h_type_implies_unix_fd=True):
        self.name = name
        # When True, any 'h' (file-descriptor handle) in a signature marks
        # the method as passing UNIX fds.
        self.h_type_implies_unix_fd = h_type_implies_unix_fd
        self.in_args  = []
        self.out_args = []
        self.annotations = []
        self.doc_string = ''
        self.since = ''
        self.deprecated = False
        self.unix_fd = False

    def post_process(self, interface_prefix, cns, cns_upper, cns_lower, containing_iface):
        """Resolve docs, C names and fd usage; recurse into args/annotations."""
        if len(self.doc_string) == 0:
            self.doc_string = utils.lookup_docs(self.annotations)
        if len(self.since) == 0:
            self.since = utils.lookup_since(self.annotations)
        if len(self.since) == 0:
            # Fall back to the interface's version.
            self.since = containing_iface.since

        name = self.name
        overridden_name = utils.lookup_annotation(self.annotations, 'org.gtk.GDBus.C.Name')
        if utils.is_ugly_case(overridden_name):
            self.name_lower = overridden_name.lower()
        else:
            if overridden_name:
                name = overridden_name
            self.name_lower = utils.camel_case_to_uscore(name).lower().replace('-', '_')
        self.name_hyphen = self.name_lower.replace('_', '-')

        # Arguments are numbered consecutively across in- and out-args so
        # unnamed ones get unique synthesized names.
        arg_count = 0
        for a in self.in_args:
            a.post_process(interface_prefix, cns, cns_upper, cns_lower, arg_count)
            arg_count += 1
            if self.h_type_implies_unix_fd and 'h' in a.signature:
                self.unix_fd = True

        for a in self.out_args:
            a.post_process(interface_prefix, cns, cns_upper, cns_lower, arg_count)
            arg_count += 1
            if self.h_type_implies_unix_fd and 'h' in a.signature:
                self.unix_fd = True

        if utils.lookup_annotation(self.annotations, 'org.freedesktop.DBus.Deprecated') == 'true':
            self.deprecated = True

        # The UnixFD annotation forces fd passing regardless of signatures.
        if utils.lookup_annotation(self.annotations, 'org.gtk.GDBus.C.UnixFD'):
            self.unix_fd = True

        for a in self.annotations:
            a.post_process(interface_prefix, cns, cns_upper, cns_lower, self)
class Signal:
    """A D-Bus signal declaration and its C-codegen naming metadata."""

    def __init__(self, name):
        self.name = name
        self.args = []
        self.annotations = []
        self.doc_string = ''
        self.since = ''
        self.deprecated = False

    def post_process(self, interface_prefix, cns, cns_upper, cns_lower, containing_iface):
        """Resolve docs, C names and deprecation; recurse into args/annotations."""
        if len(self.doc_string) == 0:
            self.doc_string = utils.lookup_docs(self.annotations)
        if len(self.since) == 0:
            self.since = utils.lookup_since(self.annotations)
        if len(self.since) == 0:
            # Fall back to the interface's version.
            self.since = containing_iface.since

        overridden_name = utils.lookup_annotation(self.annotations, 'org.gtk.GDBus.C.Name')
        if utils.is_ugly_case(overridden_name):
            # Ugly_Case overrides are used verbatim, only lower-cased.
            self.name_lower = overridden_name.lower()
        else:
            basis = overridden_name if overridden_name else self.name
            self.name_lower = utils.camel_case_to_uscore(basis).lower().replace('-', '_')
        self.name_hyphen = self.name_lower.replace('_', '-')

        for arg_number, arg in enumerate(self.args):
            arg.post_process(interface_prefix, cns, cns_upper, cns_lower, arg_number)

        if utils.lookup_annotation(self.annotations, 'org.freedesktop.DBus.Deprecated') == 'true':
            self.deprecated = True

        for annotation in self.annotations:
            annotation.post_process(interface_prefix, cns, cns_upper, cns_lower, self)
class Property:
    """A D-Bus property declaration: access flags, the backing Arg used
    for type mapping, and codegen naming metadata."""

    def __init__(self, name, signature, access):
        self.name = name
        self.signature = signature
        self.access = access
        self.annotations = []
        # Type mapping is delegated to an Arg sharing our annotations.
        self.arg = Arg('value', self.signature)
        self.arg.annotations = self.annotations
        self.readable = False
        self.writable = False
        if self.access == 'readwrite':
            self.readable = True
            self.writable = True
        elif self.access == 'read':
            self.readable = True
        elif self.access == 'write':
            self.writable = True
        else:
            print_error('Invalid access type "{}"'.format(self.access))
        self.doc_string = ''
        self.since = ''
        self.deprecated = False
        self.emits_changed_signal = True

    def post_process(self, interface_prefix, cns, cns_upper, cns_lower, containing_iface):
        """Resolve docs, C names, change-notification and deprecation flags."""
        if len(self.doc_string) == 0:
            self.doc_string = utils.lookup_docs(self.annotations)
        if len(self.since) == 0:
            self.since = utils.lookup_since(self.annotations)
        if len(self.since) == 0:
            # Fall back to the interface's version.
            self.since = containing_iface.since

        name = self.name
        overridden_name = utils.lookup_annotation(self.annotations, 'org.gtk.GDBus.C.Name')
        if utils.is_ugly_case(overridden_name):
            self.name_lower = overridden_name.lower()
        else:
            if overridden_name:
                name = overridden_name
            self.name_lower = utils.camel_case_to_uscore(name).lower().replace('-', '_')
        self.name_hyphen = self.name_lower.replace('_', '-')
        # don't clash with the GType getter, e.g.: GType foo_bar_get_type (void); G_GNUC_CONST
        if self.name_lower == 'type':
            self.name_lower = 'type_'

        # recalculate arg
        self.arg.annotations = self.annotations
        self.arg.post_process(interface_prefix, cns, cns_upper, cns_lower, 0)

        if utils.lookup_annotation(self.annotations, 'org.freedesktop.DBus.Deprecated') == 'true':
            self.deprecated = True

        for a in self.annotations:
            a.post_process(interface_prefix, cns, cns_upper, cns_lower, self)

        # FIXME: for now we only support 'false' and 'const' on the signal itself, see #674913 and
        # http://dbus.freedesktop.org/doc/dbus-specification.html#introspection-format
        # for details
        if utils.lookup_annotation(self.annotations, 'org.freedesktop.DBus.Property.EmitsChangedSignal') in ('false', 'const'):
            self.emits_changed_signal = False
class Interface:
    """A D-Bus interface: its methods, signals, properties and the derived
    C namespace/naming metadata used by the code generator."""

    def __init__(self, name):
        self.name = name
        self.methods = []
        self.signals = []
        self.properties = []
        self.annotations = []
        self.doc_string = ''
        self.doc_string_brief = ''
        self.since = ''
        self.deprecated = False

    def post_process(self, interface_prefix, c_namespace):
        """Compute all C naming variants, then recurse into members.

        `interface_prefix` is stripped from the D-Bus name; `c_namespace`
        prefixes the generated C identifiers.
        """
        if len(self.doc_string) == 0:
            self.doc_string = utils.lookup_docs(self.annotations)
        if len(self.doc_string_brief) == 0:
            self.doc_string_brief = utils.lookup_brief_docs(self.annotations)
        if len(self.since) == 0:
            self.since = utils.lookup_since(self.annotations)

        # Derive CamelCase / UPPER_ / lower_ namespace prefixes.
        if len(c_namespace) > 0:
            if utils.is_ugly_case(c_namespace):
                cns = c_namespace.replace('_', '')
                cns_upper = c_namespace.upper() + '_'
                cns_lower = c_namespace.lower() + '_'
            else:
                cns = c_namespace
                cns_upper = utils.camel_case_to_uscore(c_namespace).upper() + '_'
                cns_lower = utils.camel_case_to_uscore(c_namespace).lower() + '_'
        else:
            cns = ''
            cns_upper = ''
            cns_lower = ''

        overridden_name = utils.lookup_annotation(self.annotations, 'org.gtk.GDBus.C.Name')
        if utils.is_ugly_case(overridden_name):
            # Ugly_Case override: underscores delimit the words directly.
            name = overridden_name.replace('_', '')
            name_with_ns = cns + name
            self.name_without_prefix = name
            self.camel_name = name_with_ns
            self.ns_upper = cns_upper
            self.name_lower = cns_lower + overridden_name.lower()
            self.name_upper = overridden_name.upper()

            #print_error('handle Ugly_Case "{}"'.format(overridden_name))
        else:
            if overridden_name:
                name = overridden_name
            else:
                # Default: D-Bus name minus the interface prefix.
                name = self.name
                if name.startswith(interface_prefix):
                    name = name[len(interface_prefix):]
            self.name_without_prefix = name
            name = utils.strip_dots(name)
            name_with_ns = utils.strip_dots(cns + '.' + name)

            self.camel_name = name_with_ns
            self.ns_upper = cns_upper
            self.name_lower = cns_lower + utils.camel_case_to_uscore(name)
            self.name_upper = utils.camel_case_to_uscore(name).upper()

        self.name_hyphen = self.name_upper.lower().replace('_', '-')

        if utils.lookup_annotation(self.annotations, 'org.freedesktop.DBus.Deprecated') == 'true':
            self.deprecated = True

        for m in self.methods:
            m.post_process(interface_prefix, cns, cns_upper, cns_lower, self)

        for s in self.signals:
            s.post_process(interface_prefix, cns, cns_upper, cns_lower, self)

        for p in self.properties:
            p.post_process(interface_prefix, cns, cns_upper, cns_lower, self)

        for a in self.annotations:
            a.post_process(interface_prefix, cns, cns_upper, cns_lower, self)
| 41.592516 | 127 | 0.561881 |
24a78472538332cf4aad227ed722b3a37f8e4f8b | 2,491 | py | Python | pptree/utils.py | srianbury/pptree | a0c00f5b14c10d2b174d2acd9542c4955832ce3a | [
"MIT"
] | 119 | 2016-12-07T12:38:08.000Z | 2022-03-31T11:40:36.000Z | pptree/utils.py | srianbury/pptree | a0c00f5b14c10d2b174d2acd9542c4955832ce3a | [
"MIT"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/pptree/utils.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 26 | 2017-03-08T22:56:01.000Z | 2022-01-10T17:35:49.000Z | # -*- coding: utf-8 -*-
"""
Ettore Forigo (c) 2020
"""
from itertools import chain, zip_longest, repeat
JOINER_WIDTH = 3

DEFAULT_JOINER = ' ' * JOINER_WIDTH
CONNECTION_JOINER = '─' * JOINER_WIDTH

L_BRANCH_CONNECTOR = '─┘ '
LR_BRANCH_CONNECTOR = '─┴─'
R_BRANCH_CONNECTOR = ' └─'

L_NODE_CONNECTOR = '─┐ '
LR_NODE_CONNECTOR = '─┬─'
R_NODE_CONNECTOR = ' ┌─'


def multijoin(blocks, joiners=()):
    """
    Take one block (list of strings) or more and join them line by line
    with the specified joiners.

    Note: this must be a plain string literal, not an f-string -- an
    f-string is an expression evaluated on every call and does NOT become
    the function's __doc__.

    :param blocks: [['a', ...], ['b', ...], ...]
    :param joiners: ['─', ...] -- one joiner per line; missing entries
        fall back to DEFAULT_JOINER.
    :return: ('a─b', ...)
    """
    # Maximum content width of each block, so all its lines can be centred
    # to a common width.
    block_content_width = tuple(max(map(len, block), default=0) for block in blocks)

    return tuple(
        joiner.join(
            (string or '')  # zip_longest pads shorter blocks with None
            .center(block_content_length)  # normalize content width across block
            for string, block_content_length in zip(row, block_content_width)
        )
        for row, joiner in zip(zip_longest(*blocks, fillvalue=None),
                               chain(joiners, repeat(DEFAULT_JOINER)))  # joiners or default
    )
def wire(block, connector):
    """Prepend a connector line above *block*, padded with the proper rails."""
    left_c = ' ' if connector == R_NODE_CONNECTOR else '─'
    right_c = ' ' if connector == L_NODE_CONNECTOR else '─'

    lines, (left, right) = block
    if not (left or right):
        # No explicit split given: centre the connector over the first line.
        content = len(lines[0]) - 1  # ignore the connector character itself
        left = content // 2
        right = content - left

    header = f'{left_c * left}{connector}{right_c * right}'
    return multijoin([[header, *lines]])
def branch(blocks):
    """Wire each block with a ┬ connector and join them side by side."""
    wired = [wire(blk, LR_NODE_CONNECTOR) for blk in blocks]
    return multijoin(wired, (CONNECTION_JOINER,))
def branch_left(blocks):
    """Join blocks where the first child hangs off a ┌ connector."""
    first, *others = blocks
    wired_first = wire(first, R_NODE_CONNECTOR)
    return multijoin([wired_first, branch(others)], (CONNECTION_JOINER,))
def branch_right(blocks):
    """Join blocks where the last child hangs off a ┐ connector."""
    *others, final = blocks
    wired_final = wire(final, L_NODE_CONNECTOR)
    return multijoin([branch(others), wired_final], (CONNECTION_JOINER,))
def connect_branches(left, right):
    """Join left/right subtrees with ┴, ┘ or └ depending on which exist."""
    if left and right:
        joiner = LR_BRANCH_CONNECTOR
    elif left:
        joiner = L_BRANCH_CONNECTOR
    else:
        joiner = R_BRANCH_CONNECTOR
    return multijoin([left, right], (joiner,))
def blocklen(block):
    """Width of a block: the length of its first line, or 0 when empty."""
    return len(block[0]) if block else 0
| 23.951923 | 99 | 0.620233 |
bf5c55267bb4400544370d6da1b17540d00a38b0 | 1,176 | py | Python | setup.py | redotics/dataplug | 47d7e54472aa84b3655ab1f29cdf6ad56672123a | [
"MIT"
] | 1 | 2021-02-25T19:52:41.000Z | 2021-02-25T19:52:41.000Z | setup.py | redotics/dataplug | 47d7e54472aa84b3655ab1f29cdf6ad56672123a | [
"MIT"
] | null | null | null | setup.py | redotics/dataplug | 47d7e54472aa84b3655ab1f29cdf6ad56672123a | [
"MIT"
] | null | null | null | """ dataplug setuptools-based setup.
Created thanks to:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup.py; used so README.md is found regardless
# of the current working directory at install time.
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    version="2.1.0",
    name="dataplug",
    description="Schemaless, NoSQL, multi-model data interactions on top ArangoDB",
    long_description=long_description,
    # README.md is markdown; without this PyPI renders it as plain text.
    long_description_content_type="text/markdown",
    url="https://github.com/redsharpbyte/dataplug",
    license="MIT",
    author="Red Boumghar",
    install_requires=["python-arango"],
    python_requires='>=3',
    extras_require={
        # `pip install dataplug[test]` pulls in the test runner.
        "test": ["pytest"]
    },
    packages=find_packages(exclude=["tests", "docs", "backends"]),
    # Fixed "databasea" typo in the keyword list.
    keywords="schemaless, no-sql, multi-model, data, graph, database",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License"
    ],
)
| 29.4 | 83 | 0.677721 |
a09e1e326cc63beb6fd10ccaffb4970ec4cf61e1 | 1,038 | py | Python | bin/bx_summary.py | fargenfo/fargen-ngs | 95eee917c5ed48a4be581978481bb3dbbb906f7f | [
"MIT"
] | null | null | null | bin/bx_summary.py | fargenfo/fargen-ngs | 95eee917c5ed48a4be581978481bb3dbbb906f7f | [
"MIT"
] | null | null | null | bin/bx_summary.py | fargenfo/fargen-ngs | 95eee917c5ed48a4be581978481bb3dbbb906f7f | [
"MIT"
] | 1 | 2020-10-06T09:46:35.000Z | 2020-10-06T09:46:35.000Z | #!/usr/bin/env python3
'''
Summarize output from `bxtools stats` (https://github.com/walaj/bxtools).
'''
import sys
import numpy as np
# Path to the tab-separated output of `bxtools stats`.
csv = sys.argv[1]

bx_counts = []
with open(csv) as fid:
    # Discard the header.
    temp = fid.readline()
    for line in fid:
        # The second tab-separated field is the per-barcode read count.
        bx_counts.append(int(line.split('\t')[1]))

bx_counts = np.array(bx_counts)

# Summary statistics over the per-barcode read counts.
n_bx = len(bx_counts)          # number of unique barcodes
n_max = bx_counts.max()
n_min = bx_counts.min()
median = np.median(bx_counts)
mean = np.mean(bx_counts)
singletons = np.count_nonzero(bx_counts == 1)  # barcodes with a single read

print_format = "Unique barcodes: %d\nPer-barcode read counts:\nMax: %d\nMin: %d\nMedian: %d\nMean: %f\nNumber of singletons: %d"
print(print_format %(n_bx, n_max, n_min, median, mean, singletons))
| 25.95 | 128 | 0.683044 |
5895edce03597f857784e27379e6f4ec9daae108 | 3,496 | py | Python | test/test_grammar.py | Bhaskers-Blu-Org1/lale | 6d3a361ccf2377027fba84a15d0ec38ba285ff4c | [
"Apache-2.0"
] | null | null | null | test/test_grammar.py | Bhaskers-Blu-Org1/lale | 6d3a361ccf2377027fba84a15d0ec38ba285ff4c | [
"Apache-2.0"
] | null | null | null | test/test_grammar.py | Bhaskers-Blu-Org1/lale | 6d3a361ccf2377027fba84a15d0ec38ba285ff4c | [
"Apache-2.0"
] | 1 | 2020-07-30T10:06:23.000Z | 2020-07-30T10:06:23.000Z | import unittest
from lale.grammar import Grammar
from lale.operators import make_choice, PlannedOperator, PlannedPipeline, TrainedOperator
from lale import wrap_imported_operators
from lale.lib.sklearn import LogisticRegression as LR
from lale.lib.sklearn import KNeighborsClassifier as KNN
from lale.lib.sklearn import PCA
from lale.lib.sklearn import StandardScaler as Scaler
from lale.lib.sklearn import AdaBoostClassifier as Boost
from lale.lib.lale import ConcatFeatures as Concat
from lale.lib.lale import NoOp
from lale.lib.lale import Hyperopt
import lale.datasets
class TestGrammar(unittest.TestCase):
    """Smoke tests for pipeline search spaces defined via lale's Grammar."""

    def setUp(self):
        # Load the iris dataset once per test method.
        (self.train_X, self.train_y), (self.test_X, self.test_y) = lale.datasets.load_iris_df()

    def _check_hyperopt(self, planned):
        """Run a tiny Hyperopt search over `planned`; best result must be trained.

        A ValueError from Hyperopt means none of the trials succeeded, which
        these smoke tests tolerate. (Factored out of both test methods, which
        previously duplicated this try/except block four times.)
        """
        try:
            trainer = Hyperopt(estimator=planned, max_evals=3, scoring='r2')
            trained = trainer.fit(self.train_X, self.train_y)
            assert isinstance(trained.get_pipeline(), TrainedOperator)
        except ValueError:
            # None of the trials succeeded
            pass

    def test_grammar_simple(self):
        g = Grammar()
        g.start = g.estimator
        g.estimator = (NoOp | g.transformer) >> g.prim_est
        g.transformer = (NoOp | g.transformer) >> g.prim_tfm
        g.prim_est = LR | KNN
        g.prim_tfm = PCA | Scaler

        generated = g.unfold(6)
        sample = g.sample(6)
        # unfold and sample return a PlannedOperator
        assert isinstance(generated, PlannedOperator)
        assert isinstance(sample, PlannedOperator)
        # test getter for methods other than Nonterminal
        if isinstance(generated, PlannedPipeline):
            assert generated._name.startswith('pipeline')

        self._check_hyperopt(generated)
        self._check_hyperopt(sample)

    def test_grammar_all_combinator(self):
        g = Grammar()
        g.start = g.estimator
        g.estimator = g.term_est | g.transformer >> g.term_est
        g.term_est = g.prim_est | g.ensemble
        g.ensemble = Boost(base_estimator=LR)
        g.transformer = g.union_tfm | g.union_tfm >> g.transformer
        g.union_tfm = g.prim_tfm | g.union_body >> Concat
        g.union_body = g.transformer | g.transformer & g.union_body
        g.prim_est = LR | KNN
        g.prim_tfm = PCA | Scaler
        g.ensembler = Boost

        generated = g.unfold(7)
        sample = g.sample(7)
        assert isinstance(generated, PlannedOperator)
        assert isinstance(sample, PlannedOperator)

        self._check_hyperopt(generated)
        self._check_hyperopt(sample)
035362791c6953f713e69255df3ad691b33f55e6 | 2,953 | py | Python | gpytorch/kernels/cosine_kernel.py | konstantinklemmer/gpytorch | f1d947b340a188c398b6c6e610b6a839c61aa298 | [
"MIT"
] | 2 | 2019-03-31T04:36:30.000Z | 2019-05-22T20:09:25.000Z | gpytorch/kernels/cosine_kernel.py | konstantinklemmer/gpytorch | f1d947b340a188c398b6c6e610b6a839c61aa298 | [
"MIT"
] | null | null | null | gpytorch/kernels/cosine_kernel.py | konstantinklemmer/gpytorch | f1d947b340a188c398b6c6e610b6a839c61aa298 | [
"MIT"
] | 1 | 2019-02-15T17:05:42.000Z | 2019-02-15T17:05:42.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import torch
from .kernel import Kernel
class CosineKernel(Kernel):
    r"""
    Computes a covariance matrix based on the cosine kernel
    between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:

    .. math::
        \begin{equation*}
            k_{\text{Cosine}}(\mathbf{x_1}, \mathbf{x_2}) = \cos \left(
            \pi \Vert \mathbf{x_1} - \mathbf{x_2} \Vert_2 / p \right)
        \end{equation*}

    where :math:`p` is the period length parameter.

    Args:
        :attr:`batch_size` (int, optional):
            Set this if you want a separate lengthscale for each
            batch of input data. It should be `b` if :attr:`x1` is a `b x n x d` tensor. Default: `1`
        :attr:`active_dims` (tuple of ints, optional):
            Set this if you want to compute the covariance of only a few input dimensions. The ints
            corresponds to the indices of the dimensions. Default: `None`.
        :attr:`log_period_length_prior` (Prior, optional):
            Set this if you want to apply a prior to the period length parameter. Default: `None`
        :attr:`eps` (float):
            The minimum value that the lengthscale/period length can take
            (prevents divide by zero errors). Default: `1e-6`.

    Attributes:
        :attr:`period_length` (Tensor):
            The period length parameter. Size = `batch_size x 1 x 1`.

    Example:
        >>> x = torch.randn(10, 5)
        >>> # Non-batch: Simple option
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.CosineKernel())
        >>>
        >>> batch_x = torch.randn(2, 10, 5)
        >>> # Batch: Simple option
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.CosineKernel())
        >>> # Batch: different lengthscale for each batch
        >>> covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.CosineKernel(batch_size=2))
        >>> covar = covar_module(x)  # Output: LazyVariable of size (2 x 10 x 10)
    """

    def __init__(self, active_dims=None, batch_size=1, log_period_length_prior=None, eps=1e-6):
        # The cosine kernel has no lengthscale of its own; the period length
        # plays the analogous role.
        super(CosineKernel, self).__init__(has_lengthscale=False, active_dims=active_dims)
        self.eps = eps
        # Period length is stored in log space so the raw parameter is
        # unconstrained during optimization; one value per batch.
        self.register_parameter(
            name="log_period_length",
            parameter=torch.nn.Parameter(torch.zeros(batch_size, 1, 1)),
            prior=log_period_length_prior,
        )

    @property
    def period_length(self):
        # Exponentiate back to positive space, clamped to [eps, 1e5] to avoid
        # divide-by-zero and numerical blow-up.
        return self.log_period_length.exp().clamp(self.eps, 1e5)

    def forward(self, x1, x2, **params):
        # Scale inputs by the period before computing pairwise distances.
        x1_ = x1.div(self.period_length)
        x2_ = x2.div(self.period_length)
        x1_, x2_ = self._create_input_grid(x1_, x2_, **params)

        # Euclidean distance over the feature dimension, then cos(pi * d / p).
        diff = torch.norm((x1_ - x2_).abs(), 2, -1)
        res = torch.cos(diff.mul(math.pi))
        return res
| 38.855263 | 101 | 0.635963 |
958272b5914d2f3cdcdea5812001aa2709a8649a | 4,803 | py | Python | hk_ros_2021/scripts/animal_location_test.py | nmaatta/hk_ros_2021 | 3da25e961b5c9bffd5d8af7a9e10b99967c9e78b | [
"MIT"
] | 2 | 2021-02-15T12:36:41.000Z | 2021-02-15T16:52:16.000Z | hk_ros_2021/scripts/animal_location_test.py | nmaatta/hk_ros_2021 | 3da25e961b5c9bffd5d8af7a9e10b99967c9e78b | [
"MIT"
] | null | null | null | hk_ros_2021/scripts/animal_location_test.py | nmaatta/hk_ros_2021 | 3da25e961b5c9bffd5d8af7a9e10b99967c9e78b | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import math
import rospy
import tf
import tf2_ros
from sensor_msgs.msg import LaserScan
from darknet_ros_msgs.msg import BoundingBox #msg that contains bounding box coordinates
from darknet_ros_msgs.msg import BoundingBoxes
from apriltag_ros.msg import animalpixels
from std_msgs.msg import String
#rostopic echo darknet_ros/bounding_boxes...
#std_msgs/Header header
#uint32 seq
#time stamp
#string frame_id
#std_msgs/Header image_header
#uint32 seq
#time stamps
#string frame_id
#darknet_ros_msgs/BoundingBox[] bounding_boxes
#float64 probability
#int64 xmin
#int64 ymin
#int64 xmax
#int64 ymax
#int16 id
#string Class
#rostopic type /scan | rosmsg show
#std_msgs/Header header
#uint32 seq
#time stamp
#string frame_id
#float32 angle_min
#float32 angle_max
#float32 angle_increment
#float32 time_increment
#float32 scan_time
#float32 range_min
#float32 range_max
#float32[] ranges
#float32[] intensities
# The ROS node must be initialised before any publishers/subscribers exist.
rospy.init_node('Animalpixelcoords',anonymous = False)

# lidar_angle = None
class lidar:
    """Fuses YOLO bounding boxes with laser scans to locate animals around the robot.

    callback1 converts a detection's pixel position into a lidar beam index,
    callback2 caches the latest scan, and get_hit publishes the animal's x/y
    position (robot frame) on 'Animal_info'.
    """

    # Pixels-per-degree: 320 px spans 31.1 deg of the camera's field of view.
    # NOTE(review): assumes a 640 px wide image with ~62.2 deg horizontal FOV
    # -- confirm against the camera driver.
    ANGLE_SCALE = 320/31.1

    def __init__(self):
        self.lidar_angle = None   # beam index (0-359) of the last detection
        self.ranges = []          # latest full laser scan (metres)
        self.new_value = 0        # legacy flag, not read anywhere
        self.animaltag = None     # class name of the last detected animal
        # Create the publisher once: re-creating it on every hit (as the old
        # get_hit did) drops messages while the connection is re-established.
        self.pub = rospy.Publisher('Animal_info', animalpixels, queue_size=10)

    def callback1(self, animalBox):
        """Translate a detection bounding box into a lidar beam angle, then publish."""
        try:
            animalType = str(animalBox.bounding_boxes[0].Class)
            if animalType in ['cat', 'dog']:  # only these classes are of interest
                print(animalType)
                self.animaltag = animalType
                x_max = animalBox.bounding_boxes[0].xmax
                x_min = animalBox.bounding_boxes[0].xmin
                # Horizontal centre of the box, then angular offset from the
                # image centre in degrees (roughly -31.1 .. +31.1).
                x_position = (x_max + x_min)/2
                x_angle = round(x_position/self.ANGLE_SCALE - 31.1)
                # LaserScan beams run 0-359 CCW with 0 straight ahead, so a
                # positive (rightward) camera angle wraps to the high indices.
                if x_angle < 0:
                    self.lidar_angle = int(-x_angle)
                else:
                    self.lidar_angle = int(359 - x_angle)
                self.new_value = 1
                print(self.lidar_angle)
                self.get_hit()
        except IndexError:
            # Message contained no bounding boxes.
            return

    def callback2(self, laserData):
        """Cache the latest laser scan for get_hit()."""
        self.ranges = laserData.ranges

    def get_hit(self):
        """Convert (beam angle, range) to x/y in the robot frame and publish."""
        animalDistance = self.ranges[self.lidar_angle]
        print(animalDistance)
        animal_coord_info = animalpixels()
        animal_coord_info.animal_x = math.cos(math.radians(self.lidar_angle))*animalDistance
        animal_coord_info.animal_y = math.sin(math.radians(self.lidar_angle))*animalDistance
        animal_coord_info.animaltag = self.animaltag
        try:
            self.pub.publish(animal_coord_info)
        except rospy.ROSInterruptException:
            # Node is shutting down; drop the message.
            pass
if __name__ == '__main__':
    # Bind the instance to a fresh name: the old `lidar = lidar()` rebinding
    # shadowed the class, making a second instantiation impossible.
    node = lidar()
    sub1 = rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, node.callback1)
    sub2 = rospy.Subscriber('/scan', LaserScan, node.callback2)
    rospy.spin()  # block until the node is shut down
| 29.832298 | 127 | 0.62919 |
62fb4373ddf9e9ccfab4f63b3f159b1526df11cf | 1,457 | py | Python | app/views.py | Cgunter1/Tuition_Hike | 96a5ef105e009112099dd7e78ad19cfb70f41f2e | [
"WTFPL"
] | 1 | 2016-11-23T00:29:40.000Z | 2016-11-23T00:29:40.000Z | app/views.py | Cgunter1/Tuition_Hike | 96a5ef105e009112099dd7e78ad19cfb70f41f2e | [
"WTFPL"
] | null | null | null | app/views.py | Cgunter1/Tuition_Hike | 96a5ef105e009112099dd7e78ad19cfb70f41f2e | [
"WTFPL"
] | null | null | null | from django.shortcuts import render, HttpResponse
from app.forms import ContactForm
from django.core.mail import EmailMessage
from django.shortcuts import redirect
from django.template import Context
from django.template.loader import get_template
def index1(request):
    """Contact page: on a valid POST, email the submission and redirect to /app/.

    GET (or any non-POST) renders the unbound contact form; an invalid POST
    re-renders the *bound* form so field validation errors are shown.
    """
    form_class = ContactForm
    if request.method == 'POST':
        form = form_class(data=request.POST)
        if form.is_valid():
            contact_name = request.POST.get('contact_name', '')
            contact_email = request.POST.get('contact_email', '')
            form_content = request.POST.get('content', '')

            # Email the site owner with the contact information.
            template = get_template('contact_template.txt')
            # NOTE(review): Django >= 1.10 expects a plain dict for
            # Template.render(); Context only works on older releases --
            # confirm against the project's Django version.
            context = Context({
                'contact_name': contact_name,
                'contact_email': contact_email,
                'form_content': form_content,
            })
            content = template.render(context)

            email = EmailMessage(
                "New contact form submission",
                content,
                "UC Tuition Hike",
                ['cgunter@ucsc.edu'],
                headers={'Reply-To': 'cgunter@ucsc.edu'},
            )
            email.send()
            return redirect('/app/')
        # Invalid submission: re-render with the bound form so errors are
        # displayed (previously an empty form silently replaced it).
        return render(request, "index1.html", {'form': form})
    return render(request, "index1.html", {'form': form_class})
| 30.354167 | 60 | 0.548387 |
ecdd49373967ba8e960300b4f867a7facc8571a6 | 238 | py | Python | accounts/admin.py | felixyin/website | 4377eb9cc48ca99b8e9f802a5b42c128f805e3f5 | [
"MIT"
] | null | null | null | accounts/admin.py | felixyin/website | 4377eb9cc48ca99b8e9f802a5b42c128f805e3f5 | [
"MIT"
] | 21 | 2020-02-11T23:08:59.000Z | 2022-03-11T23:28:24.000Z | accounts/admin.py | felixyin/website | 4377eb9cc48ca99b8e9f802a5b42c128f805e3f5 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
class BlogUserAdmin(admin.ModelAdmin):
    """Admin configuration for blog users."""
    # Columns shown on the change-list page.
    list_display = ('id', 'nickname', 'username', 'email', 'last_login', 'date_joined')
    # These columns link to the edit form.
    # NOTE(review): no admin.site.register(...) call is visible in this chunk
    # -- confirm the class is registered elsewhere.
    list_display_links = ('id', 'username')
| 21.636364 | 87 | 0.701681 |
d4bcce85876e436a65d97c2394bdf0d3c6aa68aa | 3,011 | py | Python | catalog/migrations/0001_initial.py | 3Cement/django_local_library | e9eb48e2b6b3ec835f98770fdf3100bf6477e9a7 | [
"MIT"
] | null | null | null | catalog/migrations/0001_initial.py | 3Cement/django_local_library | e9eb48e2b6b3ec835f98770fdf3100bf6477e9a7 | [
"MIT"
] | null | null | null | catalog/migrations/0001_initial.py | 3Cement/django_local_library | e9eb48e2b6b3ec835f98770fdf3100bf6477e9a7 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.6 on 2018-06-05 12:33
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial schema for the catalog app (auto-generated by Django 2.0.6).

    Creates Author, Book, BookInstance and Genre, then wires Book.genre as a
    many-to-many. Auto-generated migrations should not be hand-edited beyond
    comments.
    """

    # First migration of the app: no prior migration to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('date_of_birth', models.DateField(blank=True, null=True)),
                ('date_of_death', models.DateField(blank=True, null=True, verbose_name='Died')),
            ],
            options={
                'ordering': ['last_name', 'first_name'],
            },
        ),
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('summary', models.TextField(help_text='Enter a brief description of the book', max_length=1000)),
                ('isbn', models.CharField(help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>', max_length=13, verbose_name='ISBN')),
                # SET_NULL keeps books when their author record is deleted.
                ('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Author')),
            ],
        ),
        migrations.CreateModel(
            name='BookInstance',
            fields=[
                # UUID primary key so each physical copy is globally unique.
                ('id', models.UUIDField(default=uuid.uuid4, help_text='Unique ID for this particular book across whole library', primary_key=True, serialize=False)),
                ('imprint', models.CharField(max_length=200)),
                ('due_back', models.DateField(blank=True, null=True)),
                ('status', models.CharField(blank=True, choices=[('m', 'Maintenance'), ('o', 'On loan'), ('a', 'Available'), ('r', 'Reserved')], default='m', help_text='Book availability', max_length=1)),
                ('book', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Book')),
            ],
            options={
                'ordering': ['due_back'],
            },
        ),
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Enter a book genre (e.g. Science Fiction, French Poetry etc.)', max_length=200)),
            ],
        ),
        # Added after Genre exists so the M2M target table is available.
        migrations.AddField(
            model_name='book',
            name='genre',
            field=models.ManyToManyField(help_text='Select a genre for this book', to='catalog.Genre'),
        ),
    ]
| 46.323077 | 205 | 0.564596 |
ff1843e7a24b23bb42364e5287e7ae8546c8b6a5 | 3,201 | py | Python | sarc.py | comex/smash-stuff | 4b659d0810fa5f042d76adab0a664aa3c60b1ec3 | [
"BSD-Source-Code"
] | 7 | 2015-01-10T12:46:52.000Z | 2022-02-18T09:50:00.000Z | sarc.py | shinyquagsire23/smash-stuff | 198ae035f089ec4dcc688689e9863fe9729b6f1c | [
"BSD-Source-Code"
] | 3 | 2015-02-10T06:52:32.000Z | 2015-12-23T17:31:28.000Z | sarc.py | shinyquagsire23/smash-stuff | 198ae035f089ec4dcc688689e9863fe9729b6f1c | [
"BSD-Source-Code"
] | 7 | 2015-01-23T00:17:21.000Z | 2021-01-27T12:20:07.000Z | # {enemy,fighter,assistpokemon/item_,stage}
import sys, struct, zlib, subprocess, os
print 'Getting strings...'
# argv: <input .sarc archive> <binary to harvest strings from> <output dir>.
outdir = sys.argv[3]
if not os.path.exists(outdir):
    os.mkdir(outdir)
# Candidate filenames harvested from a second binary with the `strings` tool.
strs = subprocess.check_output(['strings', '-', sys.argv[2]]).rstrip().split('\n')
# Maps CRC-32 of a candidate path -> set of strings hashing to that value.
by_crc = {}
def try_crc(name):
    # Record `name` as a candidate for its unsigned CRC-32 in the global
    # by_crc index. (Parameter renamed: it previously shadowed builtin `str`.)
    crc = zlib.crc32(name) & 0xffffffff
    by_crc.setdefault(crc, set()).add(name)
# Seed the CRC table with known fighter names...
for fighter in ['koopa', 'zelda', 'sheik', 'marth', 'gamewatch', 'ganon', 'falco', 'wario', 'metaknight', 'pit', 'szerosuit', 'pikmin', 'diddy', 'dedede', 'ike', 'lucario', 'robot', 'toonlink', 'lizardon', 'sonic', 'purin', 'mariod', 'lucina', 'pitb', 'rosetta', 'wiifit', 'littlemac', 'murabito', 'palutena', 'reflet', 'duckhunt', 'koopajr', 'shulk', 'gekkouga', 'pacman', 'rockman', 'koopag', 'warioman', 'littlemacg', 'lucariom', 'miienemyf', 'miienemys', 'miienemyg']:
    try_crc('fighter/' + fighter)
# ...and known stage names.
for stage in '3DLand 3dland Allstar_c AuraHitF AuraHitL AuraHitM AuraHitS BalloonFight BattleField_c Battlefieldk_c Bomb_c Colloseum DxCorneria DxGarden DxZebes ElecHitF ElecHitL ElecHitM ElecHitS End_c FireHitF FireHitL FireHitM FireHitS FzeroSfc Gerudo HitFlash Homerun_c Island Magicant MainField MiniRoom00 MiniRoom01 MiniRoom02 MiniRoom03 MiniRoom05 NewMario2 NintenDogs Nobore None OnlineTraining_c PacMaze Paper Pictchat Plasma Playable_roll_c PrePlay_c Prism ProgTest PunchOut2_c PunchOut_c Pupupuland Pupupuland_VC PurpleFireHitF PurpleFireHitL PurpleFireHitM PurpleFireHitS Race00 Race01 RainbowRoad Rush_c StreetPass Title_c Tomodachi Train Uprising WeakenFlashLv1 WeakenFlashLv2 WeakenFlashLv3 Wily2 XCrayon XGreenhill XGw XMadein XMarioPast XPikmin XenoBlade_c allstar_c balloon_fight battle_field_k_c battlefield_c bomb_c colloseum dx_corneria dx_garden dx_zebes end end_c field_smash fzerosfc gerudo homerun_c island magicant main_field melee miniroom newmario2 nintendogs nobore online_training_c other pacmaze paper pictchat plasma playable_roll_c pre_play_c prism punchout_c pupupuland race rainbowroad rush_c streetpass title_c tomodachi train uprising wily2 x_crayon x_greenhill x_gw x_madein x_mariopast x_pikmin xenoblade_c'.split(' '):
    try_crc('stage/' + stage)
try_crc('menu/menu')
try_crc('minigame/minigame')
# Every harvested string is tried bare and under each known path prefix.
for str in strs:
    for prefix in {'', 'enemy/', 'fighter/', 'assistpokemon/item_', 'stage/'}:
        try_crc(prefix + str)
fp = open(sys.argv[1], 'rb')
assert fp.read(4) == 'SARC'
num_files, = struct.unpack('<I', fp.read(4))
for i in xrange(num_files):
print i
fp.seek(0x10 + 0x10 * i)
crc, off, flags, size = struct.unpack('<IIII', fp.read(16))
fp.seek(off)
compressed = fp.read(size)
print hex(flags)
data = zlib.decompress(compressed)
strings = by_crc.get(crc, set())
print strings
#continue
if len(strings) >= 2:
raise Exception('Multiple CRC possibilities for %08x' % crc)
elif len(strings) == 1:
fn = strings.pop()
assert not fn.startswith('/')
outfn = os.path.join(outdir, fn)
if not os.path.exists(os.path.dirname(outfn)):
os.makedirs(os.path.dirname(outfn))
else:
outfn = os.path.join(outdir, 'unkcrc-%08x' % crc)
open(outfn, 'wb').write(data)
| 65.326531 | 1,258 | 0.728522 |
a12bcd00fa74fdf120696d53926a1c3e75724586 | 4,598 | py | Python | copy_files_from_g_drive.py | tslima/copy-files-from-g-drive | d12cf6eca1fb4e4ac8b65404a71c3560f72d60ef | [
"MIT"
] | null | null | null | copy_files_from_g_drive.py | tslima/copy-files-from-g-drive | d12cf6eca1fb4e4ac8b65404a71c3560f72d60ef | [
"MIT"
] | null | null | null | copy_files_from_g_drive.py | tslima/copy-files-from-g-drive | d12cf6eca1fb4e4ac8b65404a71c3560f72d60ef | [
"MIT"
] | null | null | null | from __future__ import print_function
import io
import os.path
import sys
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload
import concurrent.futures
import zipfile
# If modifying these scopes, delete the file token.json.
SCOPES = ['https://www.googleapis.com/auth/drive']
done_files = set()
try:
with open('done.txt', 'r+') as file:
for line in file:
done_files.add(line.rstrip())
except:
pass
ig_folders = set()
try:
with open('folders.txt', 'r+') as file:
for line in file:
ig_folders.add(line.rstrip())
except:
pass
executor = concurrent.futures.ProcessPoolExecutor(10)
def main(argv):
    """Authenticate against the Drive API and mirror the folder argv[0].

    Returns whatever crawller returns (None) or prints the HttpError on API
    failure.
    """
    creds = None
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    if os.path.exists('token.json'):
        creds = Credentials.from_authorized_user_file('token.json', SCOPES)
    # If there are no (valid) credentials available, let the user log in.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Expired but refreshable: refresh silently.
            creds.refresh(Request())
        else:
            # No usable token: run the interactive browser OAuth flow.
            flow = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES)
            creds = flow.run_local_server(port=0)
        # Save the credentials for the next run
        with open('token.json', 'w') as token:
            token.write(creds.to_json())

    try:
        service = build('drive', 'v3', credentials=creds)
        # First CLI argument (after the script name) is the Drive folder id.
        folder_id = argv[0]
        # Call the Drive v3 API
        return crawller(service, folder_id)
    except HttpError as error:
        # TODO(developer) - Handle errors from drive API.
        print(f'An error occurred: {error}')
def crawller(service, folder_id, path=''):
    """Recursively download every file under Drive folder `folder_id`.

    Sub-folders (except those in `ig_folders`) are recursed into first; files
    not already in `done_files` are downloaded in parallel via `executor`.
    Progress is appended to done.txt / error.txt, and a folder is appended to
    folders.txt once its last result page has been processed.
    """
    results = service.files().list(
        q= f"'{folder_id}' in parents" , pageSize=1000, fields="nextPageToken, files(id, name, mimeType)").execute()
    while True:  # one iteration per result page
        items = results.get('files', [])
        nextPageToken = results.get('nextPageToken')
        if not items:
            print('No files found.')
            return
        # Recurse into sub-folders not marked as already finished.
        folders = list(filter(lambda item: item['mimeType'] == 'application/vnd.google-apps.folder' and item['name'] not in ig_folders, items))
        for folder in folders:
            new_path = path + folder['name'] + "/"
            crawller(service, folder['id'], new_path)
        # Download all not-yet-done regular files of this page in parallel.
        files = list(filter(lambda item: item['mimeType'] != 'application/vnd.google-apps.folder' and (path + item['name']) not in done_files, items))
        futures = [executor.submit(save_file, service, path, item) for item in files]
        concurrent.futures.wait(futures)
        # NOTE: `results` is re-bound to the workers' status dicts here and
        # re-fetched from the API at the bottom of the loop.
        results = [future.result() for future in futures]
        for result in results:
            if result['status'] == 'OK':
                with open('done.txt', 'a+') as file:
                    file.write(result['file'] + '\r')
            else:
                with open('error.txt', 'a+') as file:
                    file.write(result['file'] + '\r')
        if nextPageToken is None:
            # Last page processed: record this folder as fully done.
            with open('folders.txt', 'a+') as file:
                file.write(path[:-1] + '\r')
            break
        results = service.files().list(
            q= f"'{folder_id}' in parents" , pageSize=1000, pageToken=f"{nextPageToken}" ,fields="nextPageToken, files(id, name, mimeType)").execute()
def save_file(service, path, item):
    """Download one Drive file into `path` + its name; unzip .zip archives.

    Returns {'status': 'OK'|'FAIL', 'file': <relative path>} so the caller
    can record progress.
    """
    name = path + item['name']
    try:
        if '/' in name:
            os.makedirs(os.path.dirname(name), exist_ok=True)
        request = service.files().get_media(fileId=item['id'])
        # `with` guarantees the handle is closed (previously leaked).
        with io.FileIO(name, 'wb') as fh:
            downloader = MediaIoBaseDownload(fh, request)
            done = False
            while not done:
                status, done = downloader.next_chunk()
                print("Download %s: %d%%." % (name, int(status.progress() * 100)))
        if '.zip' in name:
            print(f'Unzipping file: {name}')
            with zipfile.ZipFile(name, 'r') as zip_ref:
                zip_ref.extractall(os.path.splitext(name)[0])
        return {'status': 'OK', 'file': name}
    except Exception:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; any download error marks the file
        # failed so the caller logs it to error.txt.
        return {'status': 'FAIL', 'file': name}
if __name__ == '__main__':
    # First CLI argument must be the id of the Drive folder to mirror.
    main(sys.argv[1:])
| 35.369231 | 150 | 0.609395 |
ace171563a46090d2e375e9a54aabefe7975a56b | 11,445 | py | Python | sarnet_td3/trainer/comm_trainer_reinforce.py | JingdiC/SARNet | 05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea | [
"MIT"
] | null | null | null | sarnet_td3/trainer/comm_trainer_reinforce.py | JingdiC/SARNet | 05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea | [
"MIT"
] | null | null | null | sarnet_td3/trainer/comm_trainer_reinforce.py | JingdiC/SARNet | 05d668c2d1c0d3f8009ecb98ab33cd5a496cd4ea | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
import sarnet_td3.common.buffer_util_td3 as butil
from sarnet_td3 import MAgentTrainer
from sarnet_td3.common.distributions import make_pdtype
def discount_with_dones(rewards, dones, gamma):
    """Discounted returns over a trajectory, resetting at episode boundaries.

    `dones[t]` of 1 zeroes the running return at step t, so rewards do not
    leak across episodes. Returns a list aligned with `rewards`.
    """
    returns = []
    running = 0
    for reward, done in zip(reversed(rewards), reversed(dones)):
        running = (reward + gamma * running) * (1. - done)
        returns.append(running)
    returns.reverse()
    return returns
def make_update_exp(vals, target_vals, polyak):
    """Build a U.function that Polyak-averages `vals` into `target_vals`.

    Variables are paired by sorted name; each target moves a fraction
    (1 - polyak) of the way toward its source on every call.
    """
    mix = 1.0 - polyak
    pairs = zip(sorted(vals, key=lambda v: v.name),
                sorted(target_vals, key=lambda v: v.name))
    assign_ops = [target.assign(mix * target + (1.0 - mix) * source)
                  for source, target in pairs]
    return U.function([], [], updates=[tf.group(*assign_ops)])
def create_placeholder_vpg(obs_shape_n, act_space_n, num_agents, args):
    """Build per-agent TF placeholders for observations, GRU states, memory,
    actions and returns.

    Returns (obs_ph_n, h_ph_n, c_ph_n, memory_ph_n, act_ph_n, act_space_n,
    return_ph_n), each a list with one entry per agent.
    """
    # Create placeholders
    with tf.name_scope("placeholders"):
        obs_ph_n = []
        memory_ph_n = []
        h_ph_n = []
        c_ph_n = []
        return_ph_n = []
        for i in range(num_agents):
            # MPE observations already carry their full shape tuple; other
            # environments give a flat length that must be wrapped in a tuple.
            if args.env_type == "mpe":
                obs_ph_n.append(U.BatchInput(obs_shape_n[i], name="observation" + str(i), traj=True).get())
            else:
                obs_ph_n.append(U.BatchInput((obs_shape_n[i],), name="observation" + str(i), traj=True).get())
            # Recurrent cell states and the communication memory slot.
            h_ph_n.append(U.BatchInput((args.gru_units,), name="gru_ph1" + str(i)).get())
            c_ph_n.append(U.BatchInput((args.gru_units,), name="gru_ph2" + str(i)).get())
            memory_ph_n.append(U.BatchInput((args.value_units,), name="memory_ph" + str(i)).get())
            # Per-step returns: [trajectory, batch].
            return_ph_n.append(tf.compat.v1.placeholder(tf.float32, [None, None], name="returns" + str(i)))
    act_pdtype_n = [make_pdtype(act_space, args.env_type) for act_space in act_space_n]
    # Integer action indices: [trajectory, batch].
    act_ph_n = [tf.compat.v1.placeholder(tf.int32, [None, None], name="act_one_hot" + str(i)) for i in range(len(act_space_n))]
    return obs_ph_n, h_ph_n, c_ph_n, memory_ph_n, act_ph_n, act_space_n, return_ph_n
class CommAgentTrainerVPG(MAgentTrainer):
    def __init__(self, name, p_model, obs_ph_n, h_ph_n, c_ph_n, memory_ph_n, act_ph_n,
                 action_space_n, return_in_ph, args, p_index, num_env=1, is_train=False):
        """Configure agent index ranges, build the actor network and the
        VPG train/act TF functions for agent `p_index`."""
        self.name = name
        self.args = args
        self.p_index = p_index
        self.reuse = False
        self.num_adv = self.args.num_adversaries
        self.n = len(obs_ph_n) # Total number of agents
        # Adversaries occupy indices [0, num_adv); good agents the rest.
        self.n_start = 0
        self.n_end = self.num_adv
        self.comm_type = self.args.adv_test
        # Update at these many number of steps
        self.step_update_time = 10
        if self.args.optimizer == "RMSProp":
            self.optimizer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=self.args.actor_lr, decay=0.97, epsilon=1e-6)
        else:
            self.optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=self.args.actor_lr)
        # Setup weight sharing for first initialization of adv/good policy:
        # only the first agent of each team creates fresh variables.
        if not(self.p_index == 0 or self.p_index == self.num_adv): self.reuse = True
        # Prepare indexing parameters
        if self.name == "good_agent":
            self.comm_type = self.args.good_test
            self.n_start = self.num_adv
            self.n_end = self.n
        # Batch size and number of agents/environments
        self.num_env = num_env
        # Initialize actor network for communication
        actor_net = p_model(is_train, self.args, reuse=self.reuse)
        pMA_model = self.agent_model(self.comm_type, actor_net)
        self.max_replay_buffer_len = self.args.update_lag
        # Build act/train TF callables once at construction time.
        self.act, self.p_train, self.v_train = self._pMA_VPG_train(
            scope=self.name,
            make_obs_ph_n=obs_ph_n,
            make_memory_ph_n=memory_ph_n,
            make_h_ph_n=h_ph_n,
            make_c_ph_n=c_ph_n,
            make_act_ph_n=act_ph_n,
            action_space_n=action_space_n,
            make_return_ph_n=return_in_ph,
            p_func=pMA_model,
            grad_norm_clipping=0.5,
            reuse=self.reuse,
        )
def agent_model(self, comm_type, p_model):
if comm_type == "SARNET":
return p_model.sarnet
elif comm_type == "TARMAC":
return p_model.tarmac
elif comm_type == "COMMNET":
return p_model.commnet
elif comm_type == "DDPG":
return p_model.ddpg
elif comm_type == "IC3NET":
return p_model.ic3net
def _p_setup_placeholder(self, obs_ph_n, h_ph_n, c_ph_n, memory_ph_n):
p_input = [None] * int(self.n * 4)
for i in range(self.n):
p_input[i] = obs_ph_n[i]
p_input[i + self.n] = h_ph_n[i]
p_input[i + int(2 * self.n)] = c_ph_n[i]
p_input[i + int(3 * self.n)] = memory_ph_n[i]
return p_input
def _pMA_VPG_train(self, make_obs_ph_n, make_memory_ph_n, make_h_ph_n, make_c_ph_n, make_act_ph_n, action_space_n, make_return_ph_n, p_func, grad_norm_clipping=None, scope="agent", reuse=None):
    """Build the TF graph for vanilla policy gradient with a value baseline.

    Inside one variable scope this constructs the policy/value network via
    *p_func*, an action-sampling op, and the value/policy optimizer ops,
    then wraps them into callable U.function objects.

    :param make_obs_ph_n: per-agent observation placeholders
    :param make_memory_ph_n: per-agent communication-memory placeholders
    :param make_h_ph_n: per-agent recurrent hidden-state placeholders
    :param make_c_ph_n: per-agent recurrent cell-state placeholders
    :param make_act_ph_n: per-agent action-label placeholders
    :param action_space_n: per-agent action spaces
    :param make_return_ph_n: per-agent discounted-return placeholders
    :param p_func: network constructor chosen by agent_model()
    :param grad_norm_clipping: NOTE(review): accepted but never applied —
        no gradient clipping happens below; confirm whether it was intended.
    :param scope: TF variable scope (the agent name)
    :param reuse: reuse variables for weight sharing across agents
    :return: (act, train_pi, train_v) callables
    """
    with tf.compat.v1.variable_scope(scope, reuse=reuse):
        # create one action-distribution type per agent
        act_pdtype_n = [make_pdtype(act_space, self.args.env_type) for act_space in action_space_n]
        # set up placeholders
        obs_ph_n = make_obs_ph_n
        memory_ph_n = make_memory_ph_n
        h_ph_n = make_h_ph_n
        c_ph_n = make_c_ph_n
        act_onehot_ph = make_act_ph_n[self.p_index]
        return_ph = make_return_ph_n[self.p_index]
        # Feed all inputs. Let the model decide what to choose.
        p_input = self._p_setup_placeholder(obs_ph_n, h_ph_n, c_ph_n, memory_ph_n)
        p, enc_state, memory_state, attention, value = p_func(p_input, int(act_pdtype_n[self.p_index].param_shape()[0]), self.p_index, self.n, self.n_start, self.n_end, scope="p_func", reuse=reuse)
        # wrap parameters in a distribution and draw a noiseless sample
        act_pd = act_pdtype_n[self.p_index].pdfromflat(p)
        act_soft_sample = act_pd.sample(noise=False)
        # Sample a discrete action id from the final-timestep logits.
        act_onehot = tf.multinomial(act_soft_sample[-1, :, :], 1)
        value_out = tf.squeeze(value, axis=0)  # drop the time dimension for storage in the buffer
        return_ph_expd = tf.expand_dims(return_ph, axis=-1)
        # Value network: regress V(s) onto the empirical return.
        target = return_ph_expd - value  # advantage estimate (return - baseline)
        loss_v = tf.reduce_mean(tf.math.squared_difference(value, return_ph_expd))
        optim_v = self.optimizer.minimize(loss_v, name='adam_optim_v')
        # Policy network: REINFORCE weighted by the (non-differentiated) advantage.
        target_pi = tf.squeeze(target, axis=-1)
        # stop_gradient keeps the policy loss from training the value head.
        loss_pi = tf.reduce_mean(tf.stop_gradient(target_pi) * tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=p, labels=act_onehot_ph), name='loss_pi')
        optim_pi = self.optimizer.minimize(loss_pi, name='adam_optim_pi')
        # Wrap the graph ops into callables fed via feed dictionaries.
        update_pi = optim_pi
        update_v = optim_v
        train_v = U.function(inputs=p_input + [return_ph], outputs=update_v)
        train_pi = U.function(inputs=p_input + [act_onehot_ph] + [return_ph], outputs=update_pi)
        act = U.function(inputs=p_input, outputs=[act_onehot, act_soft_sample, enc_state, memory_state, attention, value_out])
        return act, train_pi, train_v
def prep_input(self, obs, h, c, memory, is_train=True):
    """Assemble the flat network feed list [obs..., h..., c..., memory...].

    Mirrors the placeholder layout produced by _p_setup_placeholder.  The
    *is_train* flag is accepted for interface compatibility but unused.
    """
    network_feed = []
    for group in (obs, h, c, memory):
        network_feed.extend(group[agent] for agent in range(self.n))
    return network_feed
def action(self, input, is_train=False):
    """Run the compiled act function on a prepared network input list.

    *input* is the flat list produced by prep_input; *is_train* is kept
    for interface compatibility but not used.
    """
    act_fn = self.act
    return act_fn(*input)
def sample_experience(self, bufferop):
    """Draw a mini-batch of trajectories from the replay-buffer operator.

    Returns the (data, indices, importance_weights) triple produced by
    bufferop.return_exp().
    """
    batch, batch_indices, importance_weights = bufferop.return_exp()
    return batch, batch_indices, importance_weights
def update(self, agents, buffer_data, t):
    """Run one policy-gradient update for this agent from sampled trajectories.

    :param agents: list of all trainers (unused here; kept for the shared
        trainer interface)
    :param buffer_data: tuple of per-trajectory buffers; layout notes below
    :param t: current training step (step-based gating is commented out)
    :return: the string "update done"
    """
    # Check if an update is needed
    # if not (t % self.step_update_time == 0): # only update every 10 steps for policy, 5 for critic
    #     return "no_update"
    # Get mini-batch of trajectories
    # Returns the following indexing scheme
    # Shape of the trajectory is [# numtraj, [agent, trajlen, numenv, dim] or
    # [numtraj [agent, trajlen, num_env]] for rew/done
    obs_n_buffer, h_n_buffer, c_n_buffer, memory_n_buffer, action_n_buffer, action_n_logits_buffer, rew_n_buffer, \
        value_n_buffer, done_n_buffer = buffer_data
    """ Prepare Inputs for network feed """
    # Receives [batch_size, [trajlen, numenv, agent]] -> concat [trajlen, batch x numenv, agent]
    # Reshape to - [agent, trajlen, batchsize x num_env]]
    rew_n_buffer = np.transpose(np.concatenate(rew_n_buffer, axis=1), (2, 0, 1))
    # done_n_buffer = np.transpose(np.concatenate(done_n_buffer, axis=1), (2, 0, 1))
    # Receives [batch_size, [trajlen, agent, numenv]] -> concat [trajlen, agent, batch x numenv]
    # Reshape to - [agent, trajlen, batchsize x num_env]]
    # value_n_buffer = np.transpose(np.concatenate(value_n_buffer, axis=-1), (2, 0, 1))
    # Receives [batch, [traj, agent, numevn, dim]] -> [traj, agent, numenv x batch, dim]
    # Reshape to [agent, trajlen, numenv x batch, dim]
    obs_n_buffer = np.swapaxes(np.concatenate(obs_n_buffer, axis=-2), 1, 0)
    action_n_buffer = np.squeeze(np.swapaxes(np.concatenate(action_n_buffer, axis=-2), 1, 0))
    # For hidden states we only feed the start (i.e. no trajlen)
    h_n_buffer = np.swapaxes(np.concatenate(h_n_buffer, axis=-2), 1, 0)
    h_n_buffer = h_n_buffer[:, 0, :, :]
    c_n_buffer = np.swapaxes(np.concatenate(c_n_buffer, axis=-2), 1, 0)
    c_n_buffer = c_n_buffer[:, 0, :, :]
    memory_n_buffer = np.swapaxes(np.concatenate(memory_n_buffer, axis=-2), 1, 0)
    memory_n_buffer = memory_n_buffer[:, 0, :, :]
    returns = []
    advantages = []  # NOTE(review): never filled or used below
    # Calculate returns
    return_so_far = np.zeros(np.shape(rew_n_buffer[self.p_index, 0, :]))
    # Get trajectory length to compute the returns in reverse
    traj_len, _ = rew_n_buffer[self.p_index].shape
    # Discounted-return recursion for this agent, walking the trajectory backwards.
    for traj_idx in reversed(range(traj_len)):
        return_so_far = self.args.gamma * return_so_far + rew_n_buffer[self.p_index, traj_idx, :]
        returns.append(return_so_far)
    # NOTE(review): `returns` is built while iterating in reverse and is never
    # flipped back or stacked (the np.stack below is commented out), so it is
    # fed to the train functions as a plain list in reverse time order.
    # Confirm this matches what the return placeholder expects; otherwise the
    # returns are time-misaligned with the observations.
    # returns = np.stack(returns, axis=0)
    train_input = self.prep_input(obs_n_buffer, h_n_buffer, c_n_buffer, memory_n_buffer)
    _ = self.v_train(*(train_input + [returns]))
    _ = self.p_train(*(train_input + [action_n_buffer[self.p_index]] + [returns]))
    return "update done"
5970225fbb4ec9b01744c2c869bd1ea6f4500d6c | 16,208 | py | Python | boto/cloudformation/connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 1 | 2019-07-29T02:53:51.000Z | 2019-07-29T02:53:51.000Z | boto/cloudformation/connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 1 | 2021-09-11T14:30:32.000Z | 2021-09-11T14:30:32.000Z | boto/cloudformation/connection.py | adastreamer/boto | ce472cbbcffd06298fdd0c980d5bfcdcee875498 | [
"MIT"
] | 2 | 2016-12-19T02:27:46.000Z | 2019-07-29T02:53:54.000Z | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from boto.cloudformation.stack import Stack, StackSummary, StackEvent
from boto.cloudformation.stack import StackResource, StackResourceSummary
from boto.cloudformation.template import Template
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.compat import json
class CloudFormationConnection(AWSQueryConnection):
    """
    A Connection to the CloudFormation Service.
    """
    # All three service constants can be overridden via boto's config file.
    APIVersion = boto.config.get('Boto', 'cfn_version', '2010-05-15')
    DefaultRegionName = boto.config.get('Boto', 'cfn_region_name', 'us-east-1')
    DefaultRegionEndpoint = boto.config.get('Boto', 'cfn_region_endpoint',
                                            'cloudformation.us-east-1.amazonaws.com')

    # Stack lifecycle states accepted by list_stacks' StackStatusFilter.
    valid_states = (
        'CREATE_IN_PROGRESS', 'CREATE_FAILED', 'CREATE_COMPLETE',
        'ROLLBACK_IN_PROGRESS', 'ROLLBACK_FAILED', 'ROLLBACK_COMPLETE',
        'DELETE_IN_PROGRESS', 'DELETE_FAILED', 'DELETE_COMPLETE',
        'UPDATE_IN_PROGRESS', 'UPDATE_COMPLETE_CLEANUP_IN_PROGRESS',
        'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_IN_PROGRESS',
        'UPDATE_ROLLBACK_FAILED',
        'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
        'UPDATE_ROLLBACK_COMPLETE')

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/',
                 converter=None, security_token=None, validate_certs=True):
        """Create a connection, defaulting to us-east-1 when *region* is None.

        NOTE(review): *converter* is accepted but never used by this class.
        """
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint, CloudFormationConnection)
        self.region = region
        AWSQueryConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass,
                                    self.region.endpoint, debug,
                                    https_connection_factory, path,
                                    security_token,
                                    validate_certs=validate_certs)

    def _required_auth_capability(self):
        """Requests to CloudFormation are signed with AWS Signature V4."""
        return ['hmac-v4']

    def encode_bool(self, v):
        """Encode a Python truth value as the lowercase string the API expects."""
        v = bool(v)
        return {True: "true", False: "false"}[v]

    def _build_create_or_update_params(self, stack_name, template_body,
                                       template_url, parameters,
                                       notification_arns, disable_rollback,
                                       timeout_in_minutes, capabilities, tags):
        """
        Helper that creates JSON parameters needed by a Stack Create or
        Stack Update call.

        :type stack_name: string
        :param stack_name: The name of the Stack, must be unique among running
                           Stacks

        :type template_body: string
        :param template_body: The template body (JSON string)

        :type template_url: string
        :param template_url: An S3 URL of a stored template JSON document. If
                             both the template_body and template_url are
                             specified, the template_body takes precedence

        :type parameters: list of tuples
        :param parameters: A list of (key, value) pairs for template input
                           parameters.

        :type notification_arns: list of strings
        :param notification_arns: A list of SNS topics to send Stack event
                                  notifications to.

        :type disable_rollback: bool
        :param disable_rollback: Indicates whether or not to rollback on
                                 failure.

        :type timeout_in_minutes: int
        :param timeout_in_minutes: Maximum amount of time to let the Stack
            spend creating itself. If this timeout is exceeded,
            the Stack will enter the CREATE_FAILED state.

        :type capabilities: list
        :param capabilities: The list of capabilities you want to allow in
            the stack. Currently, the only valid capability is
            'CAPABILITY_IAM'.

        :type tags: dict
        :param tags: A dictionary of (key, value) pairs of tags to
            associate with this stack.

        :rtype: dict
        :return: JSON parameters represented as a Python dict.
        """
        params = {'ContentType': "JSON", 'StackName': stack_name,
                  'DisableRollback': self.encode_bool(disable_rollback)}
        if template_body:
            params['TemplateBody'] = template_body
        if template_url:
            params['TemplateURL'] = template_url
        if template_body and template_url:
            boto.log.warning("If both TemplateBody and TemplateURL are"
                             " specified, only TemplateBody will be honored by the API")
        # The Query API encodes lists as numbered ".member.N" parameters.
        if len(parameters) > 0:
            for i, (key, value) in enumerate(parameters):
                params['Parameters.member.%d.ParameterKey' % (i + 1)] = key
                params['Parameters.member.%d.ParameterValue' % (i + 1)] = value
        if capabilities:
            for i, value in enumerate(capabilities):
                params['Capabilities.member.%d' % (i + 1)] = value
        if tags:
            for i, (key, value) in enumerate(tags.items()):
                params['Tags.member.%d.Key' % (i + 1)] = key
                params['Tags.member.%d.Value' % (i + 1)] = value
        if len(notification_arns) > 0:
            self.build_list_params(params, notification_arns,
                                   "NotificationARNs.member")
        if timeout_in_minutes:
            params['TimeoutInMinutes'] = int(timeout_in_minutes)
        return params

    def create_stack(self, stack_name, template_body=None, template_url=None,
                     parameters=[], notification_arns=[], disable_rollback=False,
                     timeout_in_minutes=None, capabilities=None, tags=None):
        """
        Creates a CloudFormation Stack as specified by the template.

        :type stack_name: string
        :param stack_name: The name of the Stack, must be unique among running
                           Stacks

        :type template_body: string
        :param template_body: The template body (JSON string)

        :type template_url: string
        :param template_url: An S3 URL of a stored template JSON document. If
                             both the template_body and template_url are
                             specified, the template_body takes precedence

        :type parameters: list of tuples
        :param parameters: A list of (key, value) pairs for template input
                           parameters.

        :type notification_arns: list of strings
        :param notification_arns: A list of SNS topics to send Stack event
                                  notifications to.

        :type disable_rollback: bool
        :param disable_rollback: Indicates whether or not to rollback on
                                 failure.

        :type timeout_in_minutes: int
        :param timeout_in_minutes: Maximum amount of time to let the Stack
            spend creating itself. If this timeout is exceeded,
            the Stack will enter the CREATE_FAILED state.

        :type capabilities: list
        :param capabilities: The list of capabilities you want to allow in
            the stack. Currently, the only valid capability is
            'CAPABILITY_IAM'.

        :type tags: dict
        :param tags: A dictionary of (key, value) pairs of tags to
            associate with this stack.

        :rtype: string
        :return: The unique Stack ID.
        """
        params = self._build_create_or_update_params(stack_name,
            template_body, template_url, parameters, notification_arns,
            disable_rollback, timeout_in_minutes, capabilities, tags)
        response = self.make_request('CreateStack', params, '/', 'POST')
        body = response.read()
        if response.status == 200:
            body = json.loads(body)
            return body['CreateStackResponse']['CreateStackResult']['StackId']
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)

    def update_stack(self, stack_name, template_body=None, template_url=None,
                     parameters=[], notification_arns=[], disable_rollback=False,
                     timeout_in_minutes=None, capabilities=None, tags=None):
        """
        Updates a CloudFormation Stack as specified by the template.

        :type stack_name: string
        :param stack_name: The name of the Stack, must be unique among running
                           Stacks.

        :type template_body: string
        :param template_body: The template body (JSON string)

        :type template_url: string
        :param template_url: An S3 URL of a stored template JSON document. If
                             both the template_body and template_url are
                             specified, the template_body takes precedence.

        :type parameters: list of tuples
        :param parameters: A list of (key, value) pairs for template input
                           parameters.

        :type notification_arns: list of strings
        :param notification_arns: A list of SNS topics to send Stack event
                                  notifications to.

        :type disable_rollback: bool
        :param disable_rollback: Indicates whether or not to rollback on
                                 failure.

        :type timeout_in_minutes: int
        :param timeout_in_minutes: Maximum amount of time to let the Stack
            spend creating itself. If this timeout is exceeded,
            the Stack will enter the CREATE_FAILED state

        :type capabilities: list
        :param capabilities: The list of capabilities you want to allow in
            the stack. Currently, the only valid capability is
            'CAPABILITY_IAM'.

        :type tags: dict
        :param tags: A dictionary of (key, value) pairs of tags to
            associate with this stack.

        :rtype: string
        :return: The unique Stack ID.
        """
        params = self._build_create_or_update_params(stack_name,
            template_body, template_url, parameters, notification_arns,
            disable_rollback, timeout_in_minutes, capabilities, tags)
        response = self.make_request('UpdateStack', params, '/', 'POST')
        body = response.read()
        if response.status == 200:
            body = json.loads(body)
            return body['UpdateStackResponse']['UpdateStackResult']['StackId']
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)

    def delete_stack(self, stack_name_or_id):
        """Delete the given stack.

        :rtype: dict
        :return: The decoded JSON response; raises ResponseError otherwise.
        """
        params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
        # TODO: change this to get_status ?
        response = self.make_request('DeleteStack', params, '/', 'GET')
        body = response.read()
        if response.status == 200:
            return json.loads(body)
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)

    def describe_stack_events(self, stack_name_or_id=None, next_token=None):
        """List events for one stack (or all stacks), as StackEvent objects.

        *next_token* continues a previous, paginated listing.
        """
        params = {}
        if stack_name_or_id:
            params['StackName'] = stack_name_or_id
        if next_token:
            params['NextToken'] = next_token
        return self.get_list('DescribeStackEvents', params, [('member',
            StackEvent)])

    def describe_stack_resource(self, stack_name_or_id, logical_resource_id):
        """Return the JSON description of a single resource within a stack."""
        params = {'ContentType': "JSON", 'StackName': stack_name_or_id,
                  'LogicalResourceId': logical_resource_id}
        response = self.make_request('DescribeStackResource', params,
                                     '/', 'GET')
        body = response.read()
        if response.status == 200:
            return json.loads(body)
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)

    def describe_stack_resources(self, stack_name_or_id=None,
                                 logical_resource_id=None,
                                 physical_resource_id=None):
        """List StackResource objects, filtered by any of the three ids."""
        params = {}
        if stack_name_or_id:
            params['StackName'] = stack_name_or_id
        if logical_resource_id:
            params['LogicalResourceId'] = logical_resource_id
        if physical_resource_id:
            params['PhysicalResourceId'] = physical_resource_id
        return self.get_list('DescribeStackResources', params,
                             [('member', StackResource)])

    def describe_stacks(self, stack_name_or_id=None):
        """List Stack objects, optionally restricted to one stack."""
        params = {}
        if stack_name_or_id:
            params['StackName'] = stack_name_or_id
        return self.get_list('DescribeStacks', params, [('member', Stack)])

    def get_template(self, stack_name_or_id):
        """Return the decoded JSON template of the given stack."""
        params = {'ContentType': "JSON", 'StackName': stack_name_or_id}
        response = self.make_request('GetTemplate', params, '/', 'GET')
        body = response.read()
        if response.status == 200:
            return json.loads(body)
        else:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)

    def list_stack_resources(self, stack_name_or_id, next_token=None):
        """List StackResourceSummary objects for a stack (paginated)."""
        params = {'StackName': stack_name_or_id}
        if next_token:
            params['NextToken'] = next_token
        return self.get_list('ListStackResources', params,
                             [('member', StackResourceSummary)])

    def list_stacks(self, stack_status_filters=[], next_token=None):
        """List StackSummary objects, optionally filtered by status.

        *stack_status_filters* is a list of states from ``valid_states``.
        """
        params = {}
        if next_token:
            params['NextToken'] = next_token
        if len(stack_status_filters) > 0:
            self.build_list_params(params, stack_status_filters,
                                   "StackStatusFilter.member")
        return self.get_list('ListStacks', params,
                             [('member', StackSummary)])

    def validate_template(self, template_body=None, template_url=None):
        """Validate a template body or S3 template URL; returns a Template."""
        params = {}
        if template_body:
            params['TemplateBody'] = template_body
        if template_url:
            params['TemplateURL'] = template_url
        if template_body and template_url:
            boto.log.warning("If both TemplateBody and TemplateURL are"
                             " specified, only TemplateBody will be honored by the API")
        return self.get_object('ValidateTemplate', params, Template,
                               verb="POST")

    def cancel_update_stack(self, stack_name_or_id=None):
        """Cancel an in-progress stack update (triggers a rollback)."""
        params = {}
        if stack_name_or_id:
            params['StackName'] = stack_name_or_id
        return self.get_status('CancelUpdateStack', params)
| 43.687332 | 85 | 0.629257 |
b260c02957e08ea4decfa8ceef3682216d516cf7 | 1,433 | py | Python | creme/optim/vanilla_sgd.py | sroecker/creme | 9ae8d994e1ce74f760bb95c0e5569774bf19839a | [
"BSD-3-Clause"
] | null | null | null | creme/optim/vanilla_sgd.py | sroecker/creme | 9ae8d994e1ce74f760bb95c0e5569774bf19839a | [
"BSD-3-Clause"
] | null | null | null | creme/optim/vanilla_sgd.py | sroecker/creme | 9ae8d994e1ce74f760bb95c0e5569774bf19839a | [
"BSD-3-Clause"
] | 2 | 2021-06-20T09:29:38.000Z | 2021-06-23T07:47:21.000Z | from . import base
__all__ = ['VanillaSGD']


class VanillaSGD(base.Optimizer):
    """Vanilla stochastic gradient descent.

    Every weight is moved against its gradient, scaled by a constant
    learning rate — the plainest possible first-order update rule.

    Example:

        ::

            >>> from creme import compose
            >>> from creme import linear_model
            >>> from creme import metrics
            >>> from creme import model_selection
            >>> from creme import optim
            >>> from creme import preprocessing
            >>> from creme import stream
            >>> from sklearn import datasets

            >>> X_y = stream.iter_sklearn_dataset(
            ...     dataset=datasets.load_breast_cancer(),
            ...     shuffle=True,
            ...     random_state=42
            ... )
            >>> optimiser = optim.VanillaSGD()
            >>> model = compose.Pipeline([
            ...     ('scale', preprocessing.StandardScaler()),
            ...     ('learn', linear_model.LogisticRegression(optimiser))
            ... ])
            >>> metric = metrics.F1()

            >>> model_selection.online_score(X_y, model, metric)
            F1: 0.966102

    References:
        1. `A Stochastic Approximation Method <https://pdfs.semanticscholar.org/34dd/d8865569c2c32dec9bf7ffc817ff42faaa01.pdf>`_

    """

    def __init__(self, lr=0.1):
        # The base Optimizer stores and manages the learning rate.
        super().__init__(lr)

    def _update_after_pred(self, w, g):
        # In-place gradient step on every weight that received a gradient.
        for feature, gradient in g.items():
            w[feature] -= self.learning_rate * gradient
        return w
| 27.557692 | 128 | 0.542917 |
9d2435a2d8349e6648af9e995082f53bf67a525f | 831 | py | Python | 01.py | hezhao/adventofcode2021 | f197efcf492019512b3844984981cc28d8881aae | [
"MIT"
] | null | null | null | 01.py | hezhao/adventofcode2021 | f197efcf492019512b3844984981cc28d8881aae | [
"MIT"
] | null | null | null | 01.py | hezhao/adventofcode2021 | f197efcf492019512b3844984981cc28d8881aae | [
"MIT"
] | null | null | null | #
# https://adventofcode.com/2021/day/1
# Advent of Code 2021
#
# Created on 2021-12-04.
#
import heapq
import math
from collections import defaultdict, deque
from itertools import combinations, permutations
from typing import List, Set, Dict, Tuple, Optional
class Solution:
    """Advent of Code 2021, day 1: count increases in a depth report."""

    def part1(self, nums: List[int]) -> int:
        """Return how many measurements are larger than the previous one."""
        # zip pairs each element with its successor; True counts as 1 in sum().
        return sum(b > a for a, b in zip(nums, nums[1:]))

    def part2(self, nums: List[int]) -> int:
        """Return how many three-measurement sliding-window sums increase.

        Consecutive windows share their middle two terms, so comparing
        window sums reduces to comparing nums[i] with nums[i - 3].
        """
        return sum(b > a for a, b in zip(nums, nums[3:]))
if __name__ == '__main__':
    s = Solution()
    # Read one integer depth measurement per line from the puzzle input.
    with open('01.txt') as f:
        nums = [int(line.rstrip()) for line in f]
    # Prints the part-2 answer; call s.part1(nums) for part 1.
    print(s.part2(nums))
| 21.307692 | 51 | 0.54633 |
c9c4a02d169b932474f89802353308e9399bcd4f | 5,504 | py | Python | garminworkouts/__main__.py | darkzbaron/garmin-workouts | f5b40af5491912b63baddfe515a3393e7c1ea452 | [
"Apache-2.0"
] | null | null | null | garminworkouts/__main__.py | darkzbaron/garmin-workouts | f5b40af5491912b63baddfe515a3393e7c1ea452 | [
"Apache-2.0"
] | null | null | null | garminworkouts/__main__.py | darkzbaron/garmin-workouts | f5b40af5491912b63baddfe515a3393e7c1ea452 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import argparse
import glob
import logging
import os
from garminworkouts.config import configreader
from garminworkouts.garmin.garminclient import GarminClient
from garminworkouts.models.workout import Workout
from garminworkouts.utils.validators import writeable_dir
def command_import(args):
    """Create or update workouts in Garmin Connect from local config file(s)."""
    matched_files = glob.glob(args.workout)
    parsed_configs = [configreader.read_config(path) for path in matched_files]
    planned = [Workout(cfg, args.ftp, args.target_power_diff) for cfg in parsed_configs]
    with _garmin_client(args) as connection:
        # Index the server-side workouts by display name for the diff below.
        remote_by_name = {Workout.extract_workout_name(w): w for w in connection.list_workouts()}
        for workout in planned:
            name = workout.get_workout_name()
            remote = remote_by_name.get(name)
            if remote:
                remote_id = Workout.extract_workout_id(remote)
                remote_owner = Workout.extract_workout_owner_id(remote)
                payload = workout.create_workout(remote_id, remote_owner)
                logging.info("Updating workout '%s'", name)
                connection.update_workout(remote_id, payload)
            else:
                payload = workout.create_workout()
                logging.info("Creating workout '%s'", name)
                connection.save_workout(payload)
def command_export(args):
    """Download every Garmin Connect workout as a .fit file into a directory."""
    with _garmin_client(args) as connection:
        for remote in connection.list_workouts():
            remote_id = Workout.extract_workout_id(remote)
            remote_name = Workout.extract_workout_name(remote)
            target = os.path.join(args.directory, str(remote_id)) + ".fit"
            logging.info("Exporting workout '%s' into '%s'", remote_name, target)
            connection.download_workout(remote_id, target)
def command_list(args):
    """Print a summary line for each workout stored in Garmin Connect."""
    with _garmin_client(args) as connection:
        for summary in connection.list_workouts():
            Workout.print_workout_summary(summary)
def command_schedule(args):
    """Schedule an existing workout on the requested calendar date."""
    with _garmin_client(args) as connection:
        connection.schedule_workout(args.workout_id, args.date)
def command_get(args):
    """Fetch a single workout by id and print it as JSON."""
    with _garmin_client(args) as connection:
        Workout.print_workout_json(connection.get_workout(args.id))
def command_delete(args):
    """Delete a single workout by id."""
    with _garmin_client(args) as connection:
        logging.info("Deleting workout '%s'", args.id)
        connection.delete_workout(args.id)
def _garmin_client(args):
    """Build a GarminClient from the parsed CLI credentials and cookie jar."""
    return GarminClient(
        username=args.username,
        password=args.password,
        cookie_jar=args.cookie_jar,
    )
def main():
    """Build the CLI parser, configure logging, and run the chosen command."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Manage Garmin Connect workout(s)")

    # Global options shared by every sub-command.
    parser.add_argument("--username", "-u", required=True, help="Garmin Connect account username")
    parser.add_argument("--password", "-p", required=True, help="Garmin Connect account password")
    parser.add_argument("--cookie-jar", default=".garmin-cookies.txt", help="Filename with authentication cookies")
    parser.add_argument("--debug", action='store_true', help="Enables more detailed messages")

    subparsers = parser.add_subparsers(title="Commands")

    parser_import = subparsers.add_parser("import", description="Import workout(s) from file(s) into Garmin Connect")
    parser_import.add_argument("workout", help="File(s) with workout(s) to import, wildcards are supported e.g: sample_workouts/*.yaml")
    parser_import.add_argument("--ftp", required=True, type=int, help="FTP to calculate absolute target power from relative value")
    parser_import.add_argument("--target-power-diff", default=0.05, type=float, help="Percent of target power to calculate final target power range")
    parser_import.set_defaults(func=command_import)

    parser_export = subparsers.add_parser("export", description="Export all workouts from Garmin Connect and save into directory")
    parser_export.add_argument("directory", type=writeable_dir, help="Destination directory where workout(s) will be exported")
    parser_export.set_defaults(func=command_export)

    parser_list = subparsers.add_parser("list", description="List all workouts")
    parser_list.set_defaults(func=command_list)

    parser_schedule = subparsers.add_parser("schedule", description="Schedule a workouts")
    parser_schedule.add_argument("--workout_id", "-w", required=True, help="Workout id to schedule")
    parser_schedule.add_argument("--date", "-d", required=True, help="Date to which schedule the workout")
    parser_schedule.set_defaults(func=command_schedule)

    parser_get = subparsers.add_parser("get", description="Get workout")
    parser_get.add_argument("--id", required=True, help="Workout id, use list command to get workouts identifiers")
    parser_get.set_defaults(func=command_get)

    parser_delete = subparsers.add_parser("delete", description="Delete workout")
    parser_delete.add_argument("--id", required=True, help="Workout id, use list command to get workouts identifiers")
    parser_delete.set_defaults(func=command_delete)

    args = parser.parse_args()

    logging_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=logging_level)

    # On Python 3, argparse does not force a sub-command to be chosen, so
    # `args` may lack a `func` attribute entirely; without this guard the
    # tool crashes with AttributeError instead of showing usage.
    func = getattr(args, "func", None)
    if func is None:
        parser.print_help()
        parser.exit(2)
    func(args)
# Entry point when run as `python -m garminworkouts` or as a script.
if __name__ == "__main__":
    main()
| 44.387097 | 149 | 0.733648 |
3090aef5252858641961d0d181f8fce682f7d357 | 2,164 | py | Python | main.py | superatomic/spider | 8c171103da0c4f3e37dce85516dea135a7222700 | [
"MIT"
] | null | null | null | main.py | superatomic/spider | 8c171103da0c4f3e37dce85516dea135a7222700 | [
"MIT"
] | null | null | null | main.py | superatomic/spider | 8c171103da0c4f3e37dce85516dea135a7222700 | [
"MIT"
] | null | null | null | """
Draws a spider using tkinter (via turtle).
"""
__author__ = 'Ethan Kinnear (https://github.com/superatomic)'
import turtle
from math import tau # 0.5 * tau = pi (https://tauday.com/tau-manifesto)
# The main scaling constant. If you want to make the spider bigger or smaller, change this.
SCALE = 20
# Spider properties
BODY_SIZE = 4 * SCALE
LEG_THICKNESS = SCALE // 2 # 5
LEG_LENGTH = 7 * SCALE
# WINDOW_SIZE is the actual size of the window, and SCREEN_SIZE is the area that is scrollable.
WINDOW_SIZE = 20 * SCALE
SCREEN_SIZE = 2 * LEG_LENGTH + LEG_THICKNESS
assert WINDOW_SIZE > SCREEN_SIZE
# Define the layout of the spider.
# `L` is leg, `E` is eye, and ` ` is nothing.
# noinspection SpellCheckingInspection
TURTLE_BODY_PARTS = 'LLLL LLLL' 'EE'
def draw_spider(pen: turtle.RawPen) -> None:
    """Draw the whole spider: the body dot, then legs and eyes around it."""
    # Body: one filled dot centred at the origin.
    pen.dot(BODY_SIZE * 2)

    # Legs and eyes: one evenly-spaced heading per entry of TURTLE_BODY_PARTS.
    pen.pensize(LEG_THICKNESS)
    part_count = len(TURTLE_BODY_PARTS)
    for slot, part in enumerate(TURTLE_BODY_PARTS):
        pen.up()
        pen.goto(0, 0)
        # Headings are in radians (pen.radians() was called in main()).
        pen.setheading(tau * slot / part_count)
        draw_body_part(part, pen)
def draw_body_part(part, pen: turtle.RawPen) -> None:
    """
    Draws a single body part.

    :param part: Either 'L', 'E', or ' '.
    :param pen: The turtle.
    :raises ValueError: for any other character.
    """
    if part == ' ':
        # A gap: nothing to draw at this heading.
        return
    if part == 'L':
        # Leg: a straight stroke outward from the body.
        pen.down()
        pen.forward(LEG_LENGTH)
        return
    if part == 'E':
        # Eye: a white disc with a black pupil, offset onto the body.
        pen.forward(BODY_SIZE * 2 / 3)
        pen.down()
        pen.dot(3 * LEG_THICKNESS - 1, 'white')
        pen.dot(LEG_THICKNESS, 'black')
        return
    raise ValueError("TURTLE_BODY_PARTS should only contain the characters 'L', 'E', and ' '")
def main():
    """Set up the turtle window, draw the spider, and wait for a click."""
    # Screen Setup
    screen = turtle.Screen()
    screen.setup(width=WINDOW_SIZE, height=WINDOW_SIZE)
    screen.screensize(SCREEN_SIZE, SCREEN_SIZE)
    screen.title('Spider')
    # Pen Setup
    pen = turtle.RawPen(screen, visible=False)
    pen.pencolor('black')
    pen.speed('fastest')
    pen.radians()  # headings are given as fractions of tau in draw_spider()
    # Draw the Spider
    draw_spider(pen)
    # Mainloop until the window is clicked
    screen.exitonclick()
# Run the demo only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 25.162791 | 98 | 0.649723 |
a10a9df4571f7d37edccb3fe30645bee020839b6 | 511 | py | Python | handlers/raw_idle.py | atas98/telegram-reddit-bot | e021533cb8acaa439ed57dc7e20e1b5a04970af8 | [
"MIT"
] | 4 | 2021-03-25T09:10:04.000Z | 2021-09-25T07:04:30.000Z | handlers/raw_idle.py | atas98/telegram-reddit-bot | e021533cb8acaa439ed57dc7e20e1b5a04970af8 | [
"MIT"
] | 2 | 2022-01-10T14:12:31.000Z | 2022-01-12T22:56:12.000Z | handlers/raw_idle.py | atas98/telegram-reddit-bot | e021533cb8acaa439ed57dc7e20e1b5a04970af8 | [
"MIT"
] | 1 | 2021-12-18T08:28:34.000Z | 2021-12-18T08:28:34.000Z | import re
import logging
from aiogram import types
from aiogram.dispatcher import FSMContext
from misc import reddit
from .type_handlers import type_handlers
async def raw_idle(message: types.Message, state: FSMContext):
    """Extract Reddit links from a message and dispatch each post by type."""
    try:
        found_links = re.findall(r"(https?://[^\s]+)", message.text)
        for link in found_links:
            post = await reddit.get_post_by_url(link)
            handler = type_handlers[post.type]
            await handler(message, post, state)
    except ValueError as err:
        # Unsupported / malformed URLs are logged and otherwise ignored.
        logging.warning(err)
        return
| 28.388889 | 64 | 0.686888 |
ca24c70d715b2a76ce1e57e32671965a138e2f4e | 154 | py | Python | virtual/bin/django-admin.py | Julia-Agasaro/award | cd25d62e646847dfc8429c2f3abfaa93927be1b5 | [
"MIT"
] | null | null | null | virtual/bin/django-admin.py | Julia-Agasaro/award | cd25d62e646847dfc8429c2f3abfaa93927be1b5 | [
"MIT"
] | 3 | 2020-06-05T23:54:06.000Z | 2021-06-10T22:09:40.000Z | virtual/bin/django-admin.py | Julia-Agasaro/award | cd25d62e646847dfc8429c2f3abfaa93927be1b5 | [
"MIT"
] | null | null | null | #!/home/wecode/Desktop/award/virtual/bin/python
from django.core import management

# Thin virtualenv wrapper: delegate straight to Django's CLI utility.
if __name__ == "__main__":
    management.execute_from_command_line()
| 25.666667 | 47 | 0.785714 |
a2f3a9c9013dee8a504929f8f15674c4e19ee0a5 | 2,321 | py | Python | hw2/pymoo/util/misc.py | yyb1995/software_technology_project | 4b5d9bf04a744b220e0b931917372f32d5bbfcfb | [
"Apache-2.0"
] | null | null | null | hw2/pymoo/util/misc.py | yyb1995/software_technology_project | 4b5d9bf04a744b220e0b931917372f32d5bbfcfb | [
"Apache-2.0"
] | null | null | null | hw2/pymoo/util/misc.py | yyb1995/software_technology_project | 4b5d9bf04a744b220e0b931917372f32d5bbfcfb | [
"Apache-2.0"
] | null | null | null | import numpy as np
import scipy
import scipy.spatial.distance
def swap(M, a, b):
tmp = M[a]
M[a] = M[b]
M[b] = tmp
def repair(X, xl, xu):
    """Clamp the first row of *X* into [xl, xu] in place and return *X*.

    NOTE(review): only row 0 is repaired — presumably X holds a single
    individual of shape (1, n_var); confirm against callers.
    """
    X[0, :] = np.clip(X[0, :], xl, xu)
    return X
def unique_rows(a):
    """Return the unique rows of the 2-D array *a*."""
    contig = np.ascontiguousarray(a)
    # View each row as one structured record so np.unique deduplicates
    # whole rows at a time.
    record_dtype = [('', contig.dtype)] * contig.shape[1]
    deduped = np.unique(contig.view(record_dtype))
    return deduped.view(contig.dtype).reshape((deduped.shape[0], contig.shape[1]))
def parameter_less_constraints(F, CV, F_max=None):
    """Fold constraint violations *CV* into the objectives *F* (in place):
    infeasible entries become F_max + CV so they rank behind all feasible
    values. *F_max* defaults to the current maximum of *F*."""
    if F_max is None:
        F_max = np.max(F)
    infeasible = CV > 0
    F[infeasible] = F_max + CV[infeasible]
    return F
def random_permuations(n, l):
    """Concatenate *n* independent random permutations of range(*l*) into
    one flat array (note: name typo kept for API compatibility)."""
    from pymoo.rand import random
    chunks = [random.perm(l) for _ in range(n)]
    return np.concatenate(chunks)
def get_duplicates(M):
    """Group the row indices of *M* whose rows are exact duplicates.

    Returns a list of lists; each inner list holds the original indices of
    one group of identical rows.
    """
    res = []
    # Sort rows lexicographically so duplicates become adjacent.
    I = np.lexsort([M[:, i] for i in reversed(range(0, M.shape[1]))])
    S = M[I, :]
    n = S.shape[0]
    i = 0
    while i < n - 1:
        l = []
        # Bug fix: also guard i < n - 1 inside the scan. Without it, a
        # duplicate group ending on the last sorted row indexed S[n], an
        # IndexError.
        while i < n - 1 and np.all(S[i, :] == S[i + 1, :]):
            l.append(I[i])
            i += 1
        if len(l) > 0:
            l.append(I[i])
            res.append(l)
        i += 1
    return res
def cdist(A, B, **kwargs):
    """Pairwise distance matrix between the rows of *A* and *B*.

    Numeric arrays are delegated to scipy; object arrays fall back to the
    elements' own ``distance_to`` method.
    """
    if A.dtype != object:  # np.object was removed in numpy >= 1.24
        # ``import scipy`` alone does not load submodules, so import the
        # distance module explicitly.
        from scipy.spatial.distance import cdist as scipy_cdist
        return scipy_cdist(A, B, **kwargs)
    else:
        # Bug fix: the original referenced an undefined name ``M`` and
        # sized D by B.shape[1]; compute a full (len(A), len(B)) matrix.
        D = np.full((A.shape[0], B.shape[0]), np.inf, dtype=float)
        for i in range(A.shape[0]):
            for j in range(B.shape[0]):
                D[i, j] = A[i].distance_to(B[j])
        return D
def vectorized_cdist(A, B, func_dist):
    """Evaluate *func_dist* on every (row of A, row of B) pair in a single
    vectorized call and reshape to a (len(A), len(B)) matrix."""
    left = np.repeat(A, B.shape[0], axis=0)
    right = np.tile(B, (A.shape[0], 1))
    flat = func_dist(left, right)
    return np.reshape(flat, (A.shape[0], B.shape[0]))
def covert_to_type(problem, X):
    """Cast population matrix *X* to the problem's variable type.

    Bug fix: ``np.int`` and ``np.bool`` were plain aliases of the builtin
    ``int``/``bool`` and were removed in numpy >= 1.24, which made the old
    comparisons raise AttributeError. Using the builtins preserves the
    original behavior on every numpy version.
    """
    if problem.type_var == np.double:
        return X.astype(np.double)
    elif problem.type_var == int:
        return np.round(X).astype(int)
    elif problem.type_var == bool:
        # Booleans are encoded as "above/below the midpoint of the bounds".
        return X < (problem.xu - problem.xl) / 2
if __name__ == '__main__':
    # Smoke test: plant two extra copies of row 55 and print the groups of
    # duplicate indices that get_duplicates finds.
    M = np.random.random((100, 3))
    M[3, :] = M[55, :]
    M[10, :] = M[55, :]
    print(get_duplicates(M))
| 22.754902 | 74 | 0.547178 |
38a1941720be0100057d14a6a4a9b038f08cc2e4 | 306 | py | Python | src/jt/jpype/JClassUtil.py | karpierz/jtypes.jpype | 225b214757ef7f7e03c4569028906d8194e3345d | [
"Apache-2.0"
] | 5 | 2019-01-08T02:33:55.000Z | 2019-11-08T21:00:45.000Z | src/jt/jpype/JClassUtil.py | karpierz/jtypes.jpype | 225b214757ef7f7e03c4569028906d8194e3345d | [
"Apache-2.0"
] | 2 | 2018-06-29T15:46:34.000Z | 2020-11-15T01:01:10.000Z | src/jt/jpype/JClassUtil.py | karpierz/jtypes.jpype | 225b214757ef7f7e03c4569028906d8194e3345d | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2018 Adam Karpierz
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
def isInterface(t):
    """Return True when *t* is a jtypes Java class wrapper whose underlying
    Java class is an interface."""
    from ..jtypes._jclass import JavaObject
    return (isinstance(t, type) and issubclass(t, JavaObject) and
            t.__javaclass__.isInterface())
| 27.818182 | 65 | 0.722222 |
7287359755f68cce410c6cc4bba3da41c42f8003 | 6,311 | py | Python | solutions/2020/kws/day_12.py | kws/AdventOfCode | 8da337ece8a46d070185e1d81592745dae7f6744 | [
"MIT"
] | 1 | 2020-12-04T20:15:47.000Z | 2020-12-04T20:15:47.000Z | solutions/2020/kws/day_12.py | kws/AdventOfCode | 8da337ece8a46d070185e1d81592745dae7f6744 | [
"MIT"
] | 1 | 2020-12-02T08:31:35.000Z | 2020-12-02T20:24:34.000Z | solutions/2020/kws/day_12.py | kws/AdventOfCode | 8da337ece8a46d070185e1d81592745dae7f6744 | [
"MIT"
] | 3 | 2018-11-30T18:14:15.000Z | 2018-12-10T20:18:15.000Z | #!/usr/bin/env python
import argparse
import math
from collections import namedtuple
from time import sleep
Coordinates = namedtuple('Coordinates', 'x y')
class Ship:
    """Mutable position + heading record for the AoC day-12 puzzle.

    ``repr`` renders the position with E/W and S/N letters plus the
    heading in degrees.
    """

    def __init__(self, x, y, heading: int):
        self.x = x
        self.y = y
        self.heading = heading

    def __repr__(self):
        ew = "E" if self.x >= 0 else "W"
        ns = "S" if self.y >= 0 else "N"
        return f"{abs(self.x)}{ew} {abs(self.y)}{ns} {self.heading}°"
class Navigator:
    """Part-1 movement rules: N/S/E/W translate the ship, L/R rotate its
    heading, F advances along the current heading."""

    @staticmethod
    def move(ship, instruction):
        # The first character names the handler, the rest is its argument.
        action = instruction[0]
        amount = int(instruction[1:])
        getattr(Navigator, action)(ship, amount)

    @staticmethod
    def N(ship: Ship, speed):
        ship.y -= speed

    @staticmethod
    def S(ship: Ship, speed):
        ship.y += speed

    @staticmethod
    def E(ship: Ship, speed):
        ship.x += speed

    @staticmethod
    def W(ship: Ship, speed):
        ship.x -= speed

    @staticmethod
    def L(ship: Ship, degrees):
        ship.heading = (ship.heading + degrees) % 360

    @staticmethod
    def R(ship: Ship, degrees):
        ship.heading = (ship.heading - degrees) % 360

    @staticmethod
    def F(ship: Ship, speed):
        # sin/cos turn the heading into x/y unit offsets; results are
        # rounded back to the integer grid.
        rad = math.radians(ship.heading)
        ship.x = round(ship.x + speed * math.sin(rad))
        ship.y = round(ship.y + speed * math.cos(rad))
class WayPointNavigator:
    """Part-2 movement rules: letters move the *waypoint*; F moves the
    ship toward the waypoint ``speed`` times."""

    @staticmethod
    def move(ship, waypoint, instruction):
        action = instruction[0]
        amount = int(instruction[1:])
        getattr(WayPointNavigator, action)(ship, waypoint, amount)

    @staticmethod
    def rotate(px, py, angle):
        # Standard 2-D rotation by *angle* radians, rounded back to the
        # integer grid.
        cos_a = math.cos(angle)
        sin_a = math.sin(angle)
        return round(cos_a * px - sin_a * py), round(sin_a * px + cos_a * py)

    @staticmethod
    def N(ship: Ship, waypoint: Ship, value):
        waypoint.y -= value

    @staticmethod
    def S(ship: Ship, waypoint: Ship, value):
        waypoint.y += value

    @staticmethod
    def E(ship: Ship, waypoint: Ship, value):
        waypoint.x += value

    @staticmethod
    def W(ship: Ship, waypoint: Ship, value):
        waypoint.x -= value

    @staticmethod
    def L(ship: Ship, waypoint: Ship, degrees):
        waypoint.x, waypoint.y = WayPointNavigator.rotate(waypoint.x, waypoint.y, math.radians(-degrees))

    @staticmethod
    def R(ship: Ship, waypoint: Ship, degrees):
        waypoint.x, waypoint.y = WayPointNavigator.rotate(waypoint.x, waypoint.y, math.radians(degrees))

    @staticmethod
    def F(ship: Ship, waypoint: Ship, speed):
        ship.x += waypoint.x * speed
        ship.y += waypoint.y * speed
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Day 12 of Advent of Code 2020')
    parser.add_argument('file', metavar='filename', type=argparse.FileType('rt'),
                        help='filename to your personal inputs')
    parser.add_argument('--debug', '-d', action='store_true', help='Print debug output of maps')
    parser.add_argument('--turtle', action='store_true', help='Add a turtle')
    args = parser.parse_args()
    with args.file as FILE:
        input_lines = FILE.readlines()
    # Drop blank lines and surrounding whitespace from the puzzle input.
    input_lines = [i.strip() for i in input_lines if len(i.strip()) > 0]
    # Immutable snapshot of the ship after each instruction, for replay.
    ShipShape = namedtuple("ShipShape", "instruction x y heading text")
    # Part 1: replay instructions with the heading-based Navigator.
    ship = Ship(0, 0, 90)
    states = []
    for i in input_lines:
        Navigator.move(ship, i)
        states.append(ShipShape(i, ship.x, ship.y, ship.heading, repr(ship)))
    # Bounding box of the whole journey, used to scale the turtle canvas.
    max_x = max([0] + [ship.x for ship in states])
    min_x = min([0] + [ship.x for ship in states])
    max_y = max([0] + [ship.y for ship in states])
    min_y = min([0] + [ship.y for ship in states])
    x_scale = max_x - min_x
    y_scale = max_y - min_y
    if args.turtle:
        import turtle
        screen = turtle.Screen()
        turtle.tracer(3)
        screen.setworldcoordinates(min_x - x_scale/10, min_y - y_scale/10, max_x + x_scale/10, max_y + y_scale/10)
        turtle_ship = turtle.Turtle()
    for ship in states:
        if args.turtle:
            turtle_ship.setheading(90-ship.heading)
            turtle_ship.goto(ship.x, ship.y)
            turtle.update()
        if args.debug:
            print(ship.instruction, ship.text)
    # After the loop ``ship`` is the final snapshot.
    print(f"At the end of part 1 the ship is at {ship} with a manhattan distance of {abs(ship.x) + abs(ship.y)}")
    if args.turtle:
        sleep(2)
    # Part 2: replay with the waypoint-based rules.
    states = []
    waypoints = []
    ship = Ship(0, 0, 0)
    waypoint = Ship(10, -1, 0)
    for i in input_lines:
        WayPointNavigator.move(ship, waypoint, i)
        states.append(ShipShape(i, ship.x, ship.y, ship.heading, repr(ship)))
        waypoints.append(ShipShape(i, waypoint.x, waypoint.y, waypoint.heading, repr(waypoint)))
    # Bounding box must also cover the waypoint track this time.
    max_x = max([0] + [ship.x for ship in states] + [ship.x for ship in waypoints])
    min_x = min([0] + [ship.x for ship in states] + [ship.x for ship in waypoints])
    max_y = max([0] + [ship.y for ship in states] + [ship.y for ship in waypoints])
    min_y = min([0] + [ship.y for ship in states] + [ship.y for ship in waypoints])
    x_scale = max_x - min_x
    y_scale = max_y - min_y
    if args.turtle:
        screen.reset()
        screen.setworldcoordinates(min_x - x_scale/10, min_y - y_scale/10, max_x + x_scale/10, max_y + y_scale/10)
        turtle_ship.shape("circle")
        turtle_ship.turtlesize(.1, .1)
        turtle_waypoint = turtle.Turtle()
        turtle_waypoint.shape("square")
        turtle_waypoint.turtlesize(.1, .1)
        turtle_waypoint.color("red")
    for ix, ship in enumerate(states):
        waypoint = waypoints[ix]
        if args.turtle:
            turtle_ship.setheading(90 - ship.heading)
            # The waypoint is stored relative to the ship, so offset it.
            turtle_waypoint.goto(ship.x + waypoint.x, ship.y + waypoint.y)
            turtle_ship.goto(ship.x, ship.y)
            turtle.update()
        if args.debug:
            print(ship.instruction, ship.text, waypoint.text)
    print(f"At the end of part 2 the ship is at {ship} with a manhattan distance of {abs(ship.x) + abs(ship.y)}")
    if args.turtle:
        input("Press any key to continue")
| 31.08867 | 114 | 0.61274 |
f1bb8bbe33cd1497496bf1ef8ce9f235e99a0e6c | 187 | py | Python | HackerRank Solutions/Python/Strings/Designer Door Mat.py | DevashishPathrabe/Competetive-Coding | 91049459359854b7834cbfb31415682600dc9c57 | [
"MIT"
] | null | null | null | HackerRank Solutions/Python/Strings/Designer Door Mat.py | DevashishPathrabe/Competetive-Coding | 91049459359854b7834cbfb31415682600dc9c57 | [
"MIT"
] | null | null | null | HackerRank Solutions/Python/Strings/Designer Door Mat.py | DevashishPathrabe/Competetive-Coding | 91049459359854b7834cbfb31415682600dc9c57 | [
"MIT"
] | null | null | null | N,M = map(int,str(input()).split())
for i in range(1,N,2):
print((i*".|.").center(M,'-'))
print("WELCOME".center(M,'-'))
for j in range(N-2,-1,-2):
print((j*".|.").center(M,'-'))
| 26.714286 | 35 | 0.508021 |
87983cada0ea3814a22544a2e8132cfcc6fa1cf5 | 3,682 | py | Python | configs/textdet/textsnake/textsnake_r50_fpn_unet_1200e_ctw1500.py | constanreedjohn/mmocr | 8f638f11fff56c5f4969a350c88ab013849b7fd4 | [
"Apache-2.0"
] | null | null | null | configs/textdet/textsnake/textsnake_r50_fpn_unet_1200e_ctw1500.py | constanreedjohn/mmocr | 8f638f11fff56c5f4969a350c88ab013849b7fd4 | [
"Apache-2.0"
] | null | null | null | configs/textdet/textsnake/textsnake_r50_fpn_unet_1200e_ctw1500.py | constanreedjohn/mmocr | 8f638f11fff56c5f4969a350c88ab013849b7fd4 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../../_base_/schedules/schedule_1200e.py',
'../../_base_/default_runtime.py'
]
model = dict(
type='TextSnake',
backbone=dict(
type='mmdet.ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
norm_cfg=dict(type='BN', requires_grad=True),
init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
norm_eval=True,
style='caffe'),
neck=dict(
type='FPN_UNet', in_channels=[256, 512, 1024, 2048], out_channels=32),
bbox_head=dict(
type='TextSnakeHead',
in_channels=32,
text_repr_type='poly',
loss=dict(type='TextSnakeLoss')),
train_cfg=None,
test_cfg=None)
dataset_type = 'IcdarDataset'
data_root = 'data/ctw1500/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
dict(
type='LoadTextAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(
type='RandomCropPolyInstances',
instance_key='gt_masks',
crop_ratio=0.65,
min_side_ratio=0.3),
dict(
type='RandomRotatePolyInstances',
rotate_ratio=0.5,
max_angle=20,
pad_with_fixed_color=False),
dict(
type='ScaleAspectJitter',
img_scale=[(3000, 736)], # unused
ratio_range=(0.7, 1.3),
aspect_ratio_range=(0.9, 1.1),
multiscale_mode='value',
long_size_bound=800,
short_size_bound=480,
resize_type='long_short_bound',
keep_ratio=False),
dict(type='SquareResizePad', target_size=800, pad_ratio=0.6),
dict(type='RandomFlip', flip_ratio=0.5, direction='horizontal'),
dict(type='TextSnakeTargets'),
dict(type='Pad', size_divisor=32),
dict(
type='CustomFormatBundle',
keys=[
'gt_text_mask', 'gt_center_region_mask', 'gt_mask',
'gt_radius_map', 'gt_sin_map', 'gt_cos_map'
],
visualize=dict(flag=False, boundary_key='gt_text_mask')),
dict(
type='Collect',
keys=[
'img', 'gt_text_mask', 'gt_center_region_mask', 'gt_mask',
'gt_radius_map', 'gt_sin_map', 'gt_cos_map'
])
]
test_pipeline = [
dict(type='LoadImageFromFile', color_type='color_ignore_orientation'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 736),
flip=False,
transforms=[
dict(type='Resize', img_scale=(1333, 736), keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=f'{data_root}/instances_training.json',
img_prefix=f'{data_root}/imgs',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=f'{data_root}/instances_test.json',
img_prefix=f'{data_root}/imgs',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=f'{data_root}/instances_test.json',
img_prefix=f'{data_root}/imgs',
pipeline=test_pipeline))
evaluation = dict(interval=10, metric='hmean-iou')
| 31.741379 | 78 | 0.609723 |
1c2de10847f0ffbd5d32cfa1851e433c0275227d | 1,654 | py | Python | catalyst/utils/__init__.py | olgaiv39/catalyst | 005a123482b0340c599a58856f396355a76a7db5 | [
"Apache-2.0"
] | null | null | null | catalyst/utils/__init__.py | olgaiv39/catalyst | 005a123482b0340c599a58856f396355a76a7db5 | [
"Apache-2.0"
] | null | null | null | catalyst/utils/__init__.py | olgaiv39/catalyst | 005a123482b0340c599a58856f396355a76a7db5 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
from .argparse import args_are_not_none, boolean_flag
from .checkpoint import pack_checkpoint, unpack_checkpoint, \
save_checkpoint, load_checkpoint
from .compression import pack, pack_if_needed, unpack, unpack_if_needed
from .config import load_ordered_yaml, get_environment_vars, dump_environment, \
parse_config_args, parse_args_uargs
# from .dataset import *
from .ddp import is_wrapped_with_ddp, get_real_module
# from .frozen import *
from .hash import get_hash, get_short_hash
from .image import imread, imwrite, mimwrite_with_meta, \
tensor_from_rgb_image, tensor_to_ndimage, \
binary_mask_to_overlay_image
from .initialization import create_optimal_inner_init, outer_init, \
constant_init, uniform_init, normal_init, xavier_init, kaiming_init, \
bias_init_with_prob
from .misc import pairwise, make_tuple, merge_dicts, append_dict, is_exception
from .numpy import np_softmax, geometric_cumsum, structed2dict, dict2structed
# from .pandas import *
from .parallel import Pool, DumbPool, get_pool, \
parallel_imap, tqdm_parallel_imap
from .plotly import plot_tensorboard_log
# from .registry import *
from .seed import set_global_seed, Seeder
from .serialization import serialize, deserialize
# from .tensorboard import *
from .torch import ce_with_logits, log1p_exp, normal_sample, normal_logprob, \
soft_update, get_optimizable_params, \
get_optimizer_momentum, set_optimizer_momentum, assert_fp16_available, \
get_device, get_activation_fn, any2device, get_available_gpus, \
prepare_cudnn, process_model_params
from .visualization import plot_confusion_matrix, render_figure_to_tensor
| 47.257143 | 80 | 0.819831 |
beb288e165bbc55181808beb613005d0dcde2006 | 34,315 | py | Python | biostar/recipes/auth.py | tangibleai/biostar-central | 8c1cce4d3d95d49071db8ec225aa88a66b0b3910 | [
"MIT"
] | 477 | 2015-01-01T00:18:54.000Z | 2022-03-21T10:29:29.000Z | biostar/recipes/auth.py | coreydipsy/biostar-central | 2b2f09199a6332877885ef54d9ac588ed0765770 | [
"MIT"
] | 247 | 2015-01-02T08:12:03.000Z | 2022-02-24T15:20:58.000Z | biostar/recipes/auth.py | coreydipsy/biostar-central | 2b2f09199a6332877885ef54d9ac588ed0765770 | [
"MIT"
] | 251 | 2015-01-01T16:05:57.000Z | 2022-03-25T21:32:44.000Z | import difflib
import logging
import uuid, copy, base64
import json
import base64
import io
import subprocess
import random
from mimetypes import guess_type
import mistune
import toml as hjson
from django.conf import settings
from django.contrib import messages
from django.contrib.messages.storage import fallback
from django.db.models import Q
from django.template import Template, Context
from django.template import loader
from django.shortcuts import reverse
from django.utils.safestring import mark_safe
from django.utils.timezone import now
from biostar.recipes import models
from biostar.recipes import util
from biostar.recipes.const import *
from biostar.recipes.models import Data, Analysis, Job, Project, Access
logger = logging.getLogger("engine")
# Maps each job state to the CSS class name used when rendering its status.
JOB_COLORS = {Job.SPOOLED: "spooled",
              Job.ERROR: "errored", Job.QUEUED: "queued",
              Job.RUNNING: "running", Job.COMPLETED: "completed"
              }
def get_uuid(limit=32):
    """Return the first *limit* characters of a random UUID4 string."""
    token = str(uuid.uuid4())
    return token[:limit]
def generate_uuid(prefix, suffix):
    """Join *prefix* and *suffix* with a dash into one identifier."""
    return f"{prefix}-{suffix}"
def join(*args):
    """os.path.join the arguments and normalize to an absolute path."""
    combined = os.path.join(*args)
    return os.path.abspath(combined)
def access_denied_message(user, needed_access):
    """
    Generates the access denied message
    """
    tmpl = loader.get_template('widgets/access_denied_message.html')
    # Get the string format of the access.
    needed_access = dict(Access.ACCESS_CHOICES).get(needed_access)
    context = dict(user=user, needed_access=needed_access)
    return tmpl.render(context=context)
def recent_clipboard(request):
    """
    Return most recent item copied in the clipboard.

    Returns a ``(key, values)`` pair, or ``("", [])`` when empty.
    """
    board = request.session.get(settings.CLIPBOARD_NAME, {})
    # Get the first item in the clipboard
    if len(board):
        board = list(board.items())[0]
        key, vals = board
        return key, vals
    # Empty clipboard.
    return "", []
def copy_file(request, fullpath):
    """Add *fullpath* to the session clipboard under COPIED_FILES and
    return the resulting list of copied paths ([] on error)."""
    if not os.path.exists(fullpath):
        messages.error(request, "Path does not exist.")
        return []
    if request.user.is_anonymous:
        messages.error(request, "You need to be logged in.")
        return []
    clipboard = request.session.get(settings.CLIPBOARD_NAME, {})
    board_items = clipboard.get(COPIED_FILES) or []
    board_items.append(fullpath)
    # Set new values in clipboard.
    clipboard = {COPIED_FILES: list(set(board_items))}
    # Clear the clipboard before copying files.
    clear(request=request)
    request.session.update({settings.CLIPBOARD_NAME: clipboard})
    return board_items
def copy_uid(request, uid, board):
    """
    Used to append instance.uid into request.session[board]
    """
    if request.user.is_anonymous:
        messages.error(request, "You need to be logged in.")
        return []
    # Get the clipboard item
    clipboard = request.session.get(settings.CLIPBOARD_NAME, {})
    board_items = clipboard.get(board) or []
    board_items.append(uid)
    # No duplicates in clipboard
    clipboard = {board: list(set(board_items))}
    # Clear the clipboard before copying items.
    clear(request=request)
    request.session.update({settings.CLIPBOARD_NAME: clipboard})
    return board_items
def get_token(request):
    """
    Fetch user token from request.
    """
    # Try and retrieve from a file
    token = request.FILES.get("token")
    if token:
        token = token.readline()
    # If none found in file, search in GET and POST requests.
    token = token or request.GET.get("token") or request.POST.get("token")
    return token
def validate_file(source, maxsize=50):
    """Check an uploaded *source* against a *maxsize* limit in megabytes.

    Returns a ``(valid, error_message)`` tuple; a missing source passes.
    """
    limit = maxsize * 1024 * 1024.0
    try:
        if source and source.size > limit:
            curr_size = source.size / 1024 / 1024.0
            error_msg = f"File too large, {curr_size:0.1f}MB should be < {maxsize:0.1f}MB"
            return False, error_msg
    except Exception as exc:
        return False, f"File size validation error: {exc}"
    return True, ""
def authorize_run(user, recipe):
    """
    Returns runnable.
    """
    # An anonymous user cannot run recipes.
    if user.is_anonymous:
        return False
    # Only users with access can run recipes
    readable = is_readable(user=user, obj=recipe.project, strict=True)
    if not readable:
        return False
    # A trusted user can run recipes that they have access to.
    if user.profile.trusted and recipe.runnable():
        return True
    # A superuser can run all recipes.
    if user.is_superuser:
        return True
    return False
def generate_script(job):
    """
    Generates a script from a job.

    Returns ``(json_data, script)`` where json_data carries an extra
    ``runtime`` context dictionary.
    """
    work_dir = job.path
    json_data = hjson.loads(job.json_text)
    # The base url to the site.
    url_base = f'{settings.PROTOCOL}://{settings.SITE_DOMAIN}{settings.HTTP_PORT}'
    # Extra context added to the script.
    runtime = dict(
        media_root=settings.MEDIA_ROOT,
        media_url=settings.MEDIA_URL,
        work_dir=work_dir, local_root=settings.LOCAL_ROOT,
        user_id=job.owner.id, user_email=job.owner.email,
        job_id=job.id, job_name=job.name,
        job_url=f'{url_base}{settings.MEDIA_URL}{job.get_url()}'.rstrip("/"),
        project_id=job.project.id, project_name=job.project.name,
        # NOTE(review): "analyis_name" is misspelled but is a runtime key
        # that templates may rely on -- do not rename without auditing.
        analyis_name=job.analysis.name,
        analysis_id=job.analysis.id,
        domain=settings.SITE_DOMAIN, protocol=settings.PROTOCOL,
    )
    # Add the runtime context to the data.
    json_data['runtime'] = runtime
    try:
        # Generate the script.
        template = Template(job.template)
    except Exception as exc:
        template = Template(f"Error loading script template : \n{exc}.")
    context = Context(json_data)
    script = template.render(context)
    return json_data, script
def detect_cores(request):
    """Return the request's Origin header when whitelisted, else ''."""
    # Check if the Origin in the request is allowed
    origin = request.headers.get('Origin', '')
    if origin in settings.CORS_ORIGIN_WHITELIST:
        return origin
    return ''
def link_file(source, target_dir):
    """Symlink *source* into *target_dir* (created on demand) and return
    the link path; an existing target is left untouched."""
    fname = os.path.basename(source)
    target = os.path.join(target_dir, fname)
    if os.path.exists(target):
        return target
    os.makedirs(target_dir, exist_ok=True)
    os.symlink(source, target)
    return target
def add_file(target_dir, source):
    """
    Deposit file stream into a target directory.

    *source* may be an existing path (symlinked in) or a readable stream
    (written out); anything else returns None.
    """
    # Link an existing file
    if isinstance(source, str) and os.path.exists(source):
        return link_file(source=source, target_dir=target_dir)
    # Write a stream to a new file
    if hasattr(source, 'read'):
        # Get the absolute path
        dest = os.path.abspath(target_dir)
        # Create the directory
        os.makedirs(dest, exist_ok=True)
        # Get the name
        fname = source.name
        path = os.path.abspath(os.path.join(dest, fname))
        # Write the stream into file.
        util.write_stream(stream=source, dest=path)
        return path
    return
def get_project_list(user, include_public=True, include_deleted=False):
    """
    Return projects visible to a user.
    """
    privacy = None
    if include_public:
        privacy = Project.PUBLIC
    if user is None or user.is_anonymous:
        # Unauthenticated users see public projects.
        cond = Q(privacy=Project.PUBLIC)
    else:
        # Authenticated users see public projects and private projects with access rights.
        cond = Q(owner=user, privacy=Project.PRIVATE) | \
               Q(privacy=privacy) | \
               Q(access__user=user, access__access__in=[Access.READ_ACCESS,
                                                       Access.WRITE_ACCESS,
                                                       Access.SHARE_ACCESS])
    # Generate the query.
    if include_deleted:
        query = Project.objects.filter(cond).distinct()
    else:
        query = Project.objects.filter(cond, deleted=False).distinct()
    return query
def compute_rank(source, top=None, bottom=None, maxrank=5000, klass=None):
    """Compute a floating rank that slots *source* between *top* and *bottom*.

    All three are objects with a ``rank`` attribute; *maxrank* is the
    ceiling aimed for when moving an object to the very top. *klass* is the
    model class used to persist a bumped bottom rank in the corner case.
    """
    # Nothing above: bump past the current maximum rank.
    if not top:
        return maxrank + source.rank / 2

    # Nothing below: halve the top rank to land underneath it.
    if not bottom:
        return top.rank - top.rank / 2

    trank = top.rank
    brank = bottom.rank

    # Corner case: identical ranks would collapse the midpoint, so push
    # the bottom object down one step and persist the change.
    if brank == trank:
        brank = bottom.rank = brank - 1
        klass.objects.filter(pk=bottom.pk).update(rank=bottom.rank)

    # Midpoint between the two neighbors.
    return (trank + brank) / 2
def get_thumbnail():
    """Return the path of the placeholder thumbnail image."""
    return os.path.join(settings.STATIC_ROOT, "images", "placeholder.png")
def render_script(recipe, tmpl=None):
    """Render the recipe template (or *tmpl*) against its filled-in JSON
    data; on any error, fall back to the raw template text."""
    try:
        # Fill in the script with json data.
        json_data = fill_data_by_name(project=recipe.project, json_data=recipe.json_data)
        context = Context(json_data)
        tmpl = tmpl or recipe.template
        script_template = Template(tmpl)
        script = script_template.render(context)
    except Exception as exc:
        logger.error(exc)
        script = recipe.template
    return script
def overwrite_image(obj, strimg):
    """Decode base64 string *strimg* and save it as *obj*'s image file."""
    strimg = strimg.encode()
    strimg = base64.decodebytes(strimg)
    stream = io.BytesIO(initial_bytes=strimg)
    # Over write the image
    name = obj.image.name or f"{obj.uid}"
    obj.image.save(name, stream, save=True)
    return
def update_recipe(obj, user, stream=None, data={}, uid="", project=None, create=False, save=True):
    """
    Update an existing recipe using data found in data dict.

    NOTE(review): the mutable default ``data={}`` is safe here only because
    ``data`` is rebound, never mutated.
    """
    if not obj and create:
        obj = create_analysis(project=project, user=user, uid=uid)
    elif not obj:
        return
    try:
        data = data or json.load(stream)
    except Exception as exc:
        return {'error': f"Error loading json: {exc}"}
    obj.json_text = data.get('json', obj.json_text)
    obj.template = data.get('template', obj.template)
    obj.name = data.get('name', obj.name)
    obj.text = data.get('text', obj.text)
    # Fetch the base64 image string and write to file.
    strimg = data.get('image')
    if strimg:
        overwrite_image(obj=obj, strimg=strimg)
    # Swap the binary image
    if save:
        obj.save()
    result = obj.api_data
    return result
def update_project(obj, user, data={}, stream=None, uid="", create=False, save=True):
    """
    Update an existing project using data found in data dict.
    """
    # Create a project when one does not exist.
    if not obj and create:
        obj = create_project(user=user, uid=uid)
    elif not obj:
        return
    try:
        data = data or json.load(stream)
    except Exception as exc:
        return {'error': f"Error loading json: {exc}"}
    # Set the project text and name.
    obj.text = data.get('text', obj.text)
    obj.name = data.get('name', obj.name)
    # Fetch the base64 image string and write to file.
    strimg = data.get('image')
    # Get the list of recipes
    recipes = data.get('recipes', [])
    if strimg:
        overwrite_image(obj=obj, strimg=strimg)
    if save:
        obj.save()
    # Iterate over and update recipes.
    for rec in recipes:
        recipe = Analysis.objects.filter(uid=rec['uid'], project=obj).first()
        update_recipe(obj=recipe, data=rec, save=True, project=obj, create=create, user=user)
    # Re-fetch updated data from the database.
    result = obj.api_data
    return result
def create_project(user, name="", uid=None, summary='', text='', stream=None, label=None,
                   privacy=Project.PRIVATE, update=False):
    """Select, update, or create a Project for *user*; returns the project.

    An existing project with *uid* is returned unchanged unless *update*
    is set. *stream* optionally replaces the project image.
    """
    name = name or "My New Project"
    text = text or "Project information goes here."
    # Attempts to select the project.
    project = Project.objects.filter(uid=uid).first()
    # If it is not an update request return the project unchanged.
    if project and not update:
        return project
    if project:
        # Update existing project.
        text = text or project.text
        name = name or project.name
        Project.objects.filter(id=project.pk).update(text=text, name=name)
        project = Project.objects.filter(pk=project.pk).first()
        logger.info(f"Updated project: {project.name} uid: {project.uid}")
    # Create a new project.
    else:
        # Set uid here as well so it has a value when save()
        # hasn't been called inside of create() ( i.e in tests ).
        pid = uid or get_uuid(4)
        project = Project.objects.create(name=name, text=text, uid=pid,
                                         owner=user, privacy=privacy)
        logger.info(f"Created project: {project.name} uid: {project.uid}")
    # Update the uid when passed
    if uid:
        Project.objects.filter(id=project.pk).update(uid=uid)
        project = Project.objects.filter(pk=project.pk).first()
        logger.info(f"Changed the uid: {uid}")
    # Update the image for the project.
    if stream:
        project.image.save(stream.name, stream, save=True)
    return project
def create_analysis(project, json_text='', template='# code', uid=None, user=None, summary='', rank=100,
                    name='', text='', stream=None, security=Analysis.NOT_AUTHORIZED, update=False,
                    root=None):
    """Update or create an Analysis (recipe) in *project*; returns it."""
    owner = user or project.owner
    analysis = Analysis.objects.filter(uid=uid)
    # Only update when there is a flag
    if analysis and update:
        # Update analysis
        current = analysis.first()
        text = text or current.text
        name = name or current.name
        template = template or current.template
        json_text = json_text or current.json_text
        analysis.update(text=text, name=name, template=template, json_text=json_text, rank=rank)
        analysis = analysis.first()
        logger.info(f"Updated analysis: uid={analysis.uid} name={analysis.name}")
    else:
        # Create a new analysis
        uid = None if analysis else uid
        analysis = Analysis.objects.create(project=project, uid=uid, json_text=json_text, rank=rank,
                                           owner=owner, name=name, text=text, security=security,
                                           template=template, root=root)
        # Update the projects last edit user when a recipe is created
        Project.objects.filter(uid=analysis.project.uid).update(lastedit_user=user,
                                                                lastedit_date=now())
        logger.info(f"Created analysis: uid={analysis.uid} name={analysis.name}")
    if stream:
        analysis.image.save(stream.name, stream, save=True)
    return analysis
def make_job_title(recipe, data):
    """
    Creates informative job title that shows job parameters.
    """
    def display_value(param):
        # Data parameters show their name, uploads their file name,
        # hidden fields nothing, everything else its raw value.
        if param.get("source"):
            return param.get("name")
        if param.get("display") == UPLOAD:
            value = param.get("value")
            return os.path.basename(value) if value else None
        if not param.get("display"):
            return None
        return param.get("value")

    shown = [display_value(p) for p in data.values()]
    shown = [str(v) for v in shown if v]
    summary = ", ".join(shown)

    if summary:
        return f"Results for {recipe.name}: {summary}"
    return f"Results for {recipe.name}"
def validate_recipe_run(user, recipe):
    """
    Validate that a user can run a given recipe.

    Returns ``(valid, message)``.
    """
    if user.is_anonymous:
        msg = "You must be logged in."
        return False, msg
    if not authorize_run(user=user, recipe=recipe):
        msg = "Insufficient permission to execute recipe."
        return False, msg
    if recipe.deleted:
        msg = "Can not run a deleted recipe."
        return False, msg
    # Not trusted users have job limits.
    running_jobs = Job.objects.filter(owner=user, state=Job.RUNNING)
    if not user.profile.trusted and running_jobs.count() >= settings.MAX_RUNNING_JOBS:
        msg = "Exceeded maximum amount of running jobs allowed. Please wait until some finish."
        return False, msg
    return True, ""
def recipe_paste(instance, user, project, clone=False):
    """Copy recipe *instance* into *project*; with *clone* the copy keeps
    a link to the original via ``root``."""
    root = None
    if clone:
        root = instance.root if instance.is_cloned else instance
    try:
        stream = instance.image
    except Exception as exc:
        logger.error(exc)
        stream = None
    recipe = create_analysis(project=project, user=user, root=root,
                             json_text=instance.json_text, security=instance.security,
                             template=instance.template,
                             name=instance.name, text=instance.text, stream=stream)
    return recipe
def data_paste(user, project, instance=None, path=""):
    """Copy a Data/Job *instance* (or link raw *path*) into *project*."""
    dtype = instance.type if isinstance(instance, Data) else None
    # Copy an existing instance
    if instance:
        return create_data(project=project, path=instance.get_data_dir(),
                           user=user, name=instance.name,
                           type=dtype, text=instance.text)
    # Link an existing file.
    elif path and os.path.exists(path):
        return create_data(project=project, path=path, user=user)
def clear(request):
    """Empty the session clipboard."""
    request.session.update({settings.CLIPBOARD_NAME: {}})
    return
def resolve_paste_url(key, project):
    """
    Resolve redirect url after pasting or moving.
    """
    url = project.url()
    if key == COPIED_RECIPES:
        url = reverse("recipe_list", kwargs=dict(uid=project.uid))
    elif key in [COPIED_DATA, COPIED_FILES]:
        url = reverse("data_list", kwargs=dict(uid=project.uid))
    return url
def move(uids, project, user, otype="data"):
    """Move data or recipes identified by *uids* into *project*, keeping
    the source projects' counts consistent."""
    type_map = {'data': Data, 'recipes': Analysis}
    klass = type_map.get(otype)
    if not klass:
        logger.error("Invalid class type given.")
        return
    items = [klass.objects.filter(uid=uid).first() for uid in uids]
    for item in items:
        # Get previous project to reset counts after swapping.
        previous = item.project
        # Check for write access before moving object from project.
        if not is_writable(user=user, project=previous):
            continue
        item.project = project
        # Swap projects
        item.save()
        # Reset counts for the previous project.
        previous.set_counts()
def paste(project, user, board, clone=False):
    """
    Paste items into project from clipboard.

    *board* is a ``(key, uids_or_paths)`` pair as returned by
    recent_clipboard(); returns the list of newly created objects.
    """
    obj_map = {COPIED_RESULTS: Job, COPIED_DATA: Data, COPIED_RECIPES: Analysis}
    key, vals = board
    def copier(instance):
        if key == COPIED_RECIPES:
            # Paste objects in clipboard as recipes
            return recipe_paste(user=user, project=project, clone=clone, instance=instance)
        else:
            # Paste objects in clipboard as data
            return data_paste(user=user, project=project, instance=instance)
    # Special case to paste files.
    if key == COPIED_FILES:
        # Add each path in clipboard as a data object.
        new = [data_paste(project=project, user=user, path=p) for p in vals]
        return new
    # Map the objects in the clipboard to a database class.
    klass = obj_map.get(key)
    if not klass:
        return []
    # Select existing object by uid.
    objs = [klass.objects.filter(uid=uid).first() for uid in vals]
    objs = filter(None, objs)
    # Apply copier to each object.
    new = list(map(copier, objs))
    return new
def fill_in(item, value):
    """
    Populate `item` in place with placeholder file metadata derived from
    `value` (used when real data lookup fails). Returns the same dict.
    """
    text = str(value)
    item.update(
        files=[],
        toc=text,
        file_list=text,
        id=0,
        name=os.path.basename(text),
        uid=None,
        data_dir=text,
        project_dir=text,
        data_url="/",
    )
    return item
def fill_json_data(project, job=None, source_data={}, fill_with={}):
    """
    Produces a filled in JSON data based on user input.

    `fill_with` is a flat field -> value mapping; when empty, the 'value'
    entries already present in `source_data` are used. Returns a deep copy
    of `source_data` with per-field metadata filled in; `source_data` itself
    is not mutated.
    """
    # Creates a data.id to data mapping.
    store = dict((data.id, data) for data in project.data_set.all())
    # Make a copy of the original json data used to render the form.
    json_data = copy.deepcopy(source_data)
    # Get default dictionary to fill with from json data 'value'
    default = {field: item.get('value', '') for field, item in json_data.items()}
    fill_with = fill_with or default
    # Alter the json data and fill in the extra information.
    for field, item in json_data.items():
        # If the field is a data field then fill in more information.
        if item.get("source") == "PROJECT" and fill_with.get(field, '').isalnum():
            try:
                data_id = int(fill_with.get(field))
                data = store.get(data_id)
                # This mutates the `item` dictionary!
                data.fill_dict(item)
            except Exception as exc:
                # Unknown/invalid data id: fall back to placeholder metadata.
                logger.error(exc)
                # This mutates the `item` dictionary!
                value = fill_with.get(field, "MISSING")
                fill_in(item=item, value=value)
            continue
        # The JSON value will be overwritten with the selected field value.
        if field in fill_with:
            item["value"] = fill_with[field]
            # Clean the textbox value
            if item.get('display') == TEXTBOX:
                item["value"] = util.clean_text(fill_with[field])
            if item.get('display') == UPLOAD:
                # Add uploaded file to job directory.
                upload_value = fill_with.get(field)
                if not upload_value:
                    item['value'] = ''
                    continue
                # Link or write the stream located in the fill_with
                path = add_file(target_dir=job.get_data_dir(), source=upload_value)
                item['value'] = path
    return json_data
def create_job(analysis, user=None, json_text='', json_data=None, name=None, state=Job.QUEUED, uid=None, save=True,
               fill_with=None):
    """
    Create (and optionally save) a Job for the recipe `analysis`.

    Note: Parameter 'fill_with' needs to be a flat key:value dictionary.
    The mutable default arguments (json_data={}, fill_with={}) were replaced
    with None to avoid cross-call shared state; falsy values behave the same.
    """
    state = state or Job.QUEUED
    owner = user or analysis.project.owner
    project = analysis.project
    if json_data:
        json_text = hjson.dumps(json_data)
    else:
        json_text = json_text or analysis.json_text
    # Needs the json_data to set the summary.
    json_data = hjson.loads(json_text)
    # Generate a meaningful job title for the initial create.
    name = make_job_title(recipe=analysis, data=json_data)
    uid = uid or util.get_uuid(8)
    # Create the job instance.
    job = Job.objects.create(name=name, state=state, json_text=json_text,
                             security=Job.AUTHORIZED, project=project, analysis=analysis, owner=owner,
                             template=analysis.template, uid=uid)
    # Fill the json data (needs the job for upload destinations).
    json_data = fill_json_data(job=job, source_data=json_data, project=project, fill_with=fill_with or {})
    # Regenerate the title now that parameters are filled in.
    name = make_job_title(recipe=analysis, data=json_data)
    # Update the json_text and name
    job.json_text = hjson.dumps(json_data)
    job.name = name
    # Append parameter summary to job on creation.
    job.text = f"{job.text}\n{job.parameter_summary}"
    job.html = mistune.markdown(text=job.text, escape=False)
    if save:
        # Save the updated json_text and name.
        job.save()
    logger.info(f"Created job id={job.id} name={job.name}")
    return job
def delete_object(obj, request):
    """
    Toggle the deleted flag on `obj` when the requesting user has write
    access to its project. Returns the (possibly updated) deleted flag.
    """
    if is_writable(user=request.user, project=obj.project):
        obj.deleted = not obj.deleted
        obj.save()
    return obj.deleted
def delete_recipe(recipe, user):
    """
    Toggle the delete state on a recipe and it's clones.

    Deleting the root also deletes every clone; restoring the root does NOT
    restore the clones (they must be restored individually).
    """
    access = is_writable(user=user, project=recipe.project)
    # Bail out when user has no write access
    if not access:
        return
    # New recipe delete state.
    state = not recipe.deleted
    # Toggle the root recipe
    recipe.deleted = state
    recipe.save()
    # Do not restore all cloned recipes.
    if not recipe.deleted:
        return
    clones = Analysis.objects.filter(root=recipe)
    # Update clones to the same state as the parent.
    clones.update(deleted=state, lastedit_date=recipe.lastedit_date)
    # Set the correct count for projects with cloned recipes.
    for clone in clones:
        clone.project.set_counts()
def transform(root, node, path):
    """
    Collect filesystem metadata for `path` (interpreted relative to `node`).

    Returns the tuple:
        (real, relative, dirs, last, tstamp, size, is_image, parent, is_dir)
    where `relative` is the path relative to `root`, `dirs` the directory
    components, `last` the final path element, and tstamp/size are 0 for
    non-existent paths.
    """
    # Image extension types (compared case-insensitively).
    IMAGE_EXT = {"png", "jpg", "gif", "jpeg"}
    # Get the absolute path /root/node/path.
    path = os.path.abspath(os.path.join(node, path))
    # Find the relative path of the current node/path to the root.
    relative = os.path.relpath(path, root)
    # Follow symlinks and get the real path.
    real = os.path.realpath(path)
    # Get the parent directory
    parent = os.path.dirname(path)
    tstamp, size = 0, 0
    if os.path.exists(path):
        # Single stat() call instead of two separate ones.
        st = os.stat(path)
        tstamp, size = st.st_mtime, st.st_size
    # Get the elements. i.e. foo/bar.txt -> ['foo', 'bar.txt']
    elems = os.path.split(relative)
    is_dir = os.path.isdir(path)
    # Get all directories.
    dirs = elems[:-1]
    dirs = [] if dirs[0] == '' else dirs
    # Get the last node.
    last = elems[-1]
    # Lowercase the extension so "IMG.PNG" is also recognized as an image
    # (the previous comparison was case-sensitive).
    is_image = last.split(".")[-1].lower() in IMAGE_EXT
    return real, relative, dirs, last, tstamp, size, is_image, parent, is_dir
def listing(root, node=None, show_all=True):
    """
    Collect metadata tuples (see transform) for every file under `root`,
    or only for the direct entries of `node` when show_all is False.
    Returns an empty list on any filesystem error.
    """
    node = node or root
    collected = []
    try:
        if show_all:
            # Walk the whole tree rooted at `root`, following symlinks.
            for fpath, fdirs, fnames in os.walk(root, followlinks=True):
                for fname in fnames:
                    collected.append(join(fpath, fname))
        else:
            # Only the entries of the current directory node.
            collected = os.listdir(node)
        # Attach metadata to each path and sort by real path.
        collected = [transform(root=root, node=node, path=p) for p in collected]
        collected.sort(key=lambda entry: entry[0])
    except Exception as exc:
        collected = []
        logger.error(exc)
    return collected
def job_color(job):
    """
    Return the display color for a job's state, or '' when the input is
    not a Job or the lookup fails.
    """
    try:
        if isinstance(job, Job):
            return JOB_COLORS.get(job.state, "")
    except Exception as exc:
        logger.error(exc)
        return ''
    # Non-Job inputs previously fell through to a bare `return` (None);
    # return '' here so callers always get a string.
    return ''
def guess_mimetype(fname):
    """Return mimetype for a known text filename."""
    mimetype, _encoding = guess_type(fname)
    extension = os.path.splitext(fname)[1].lower()
    # Bioinformatics extensions (.fasta, .fastq, ...) count as plain text.
    return 'text/plain' if extension in KNOWN_TEXT_EXTENSIONS else mimetype
def create_path(fname, data):
    """
    Returns a proposed path based on fname to the storage folder of the data.
    Creates the storage folder when it does not yet exist.
    """
    # Only the base name of the incoming file is kept.
    base = os.path.basename(fname)
    # The data storage directory, created on demand.
    data_dir = data.get_data_dir()
    os.makedirs(data_dir, exist_ok=True)
    # Absolute path of the file inside the storage directory.
    return os.path.abspath(os.path.join(data_dir, base))
def new_uid(obj, objtype, default=None, prefix=""):
    """
    Ensure an objects uid is unique.

    Starts from `default` (or a generated uuid) and regenerates until no
    other `objtype` row uses the uid. The object's own current uid is
    excluded from the collision check.
    """
    uid = default or generate_uuid(prefix=prefix, suffix=obj.id)
    while objtype.objects.filter(uid=uid).exclude(uid=obj.uid).exists():
        uid = generate_uuid(prefix=prefix, suffix=f"{get_uuid(3)}")
    return uid
def data_link(path, data):
    """Symlink `path` into the storage folder of `data`; return the link path."""
    target = create_path(fname=path, data=data)
    if os.path.exists(target):
        # Already linked (or a real file exists there); leave it alone.
        return target
    os.symlink(path, target)
    return target
def create_data_link(path, data):
    """
    Link a single file, or every entry of a directory, into the storage
    folder of `data`. Paths that are neither are silently ignored.
    """
    if os.path.isfile(path):
        data_link(path=path, data=data)
        logger.info(f"Linked file: {path}")
    if os.path.isdir(path):
        for entry in os.scandir(path):
            data_link(path=entry.path, data=data)
        logger.info(f"Linked dir: {path}")
def is_readable(user, obj, strict=False):
    """
    Return True when `user` may read `obj` (judged by its project).

    strict=True policy ensures public projects still get their access checked.
    Anonymous users can only read public projects (and only when not strict).
    """
    project = obj.project
    if project.is_public and not strict:
        return True
    if user.is_anonymous:
        return False
    # Any of READ, WRITE or SHARE access grants readability.
    query = Q(access=Access.READ_ACCESS) | Q(access=Access.WRITE_ACCESS) | Q(access=Access.SHARE_ACCESS)
    access = Access.objects.filter(query, project=project, user=user)
    return access.exists()
def is_writable(user, project, owner=None):
    """
    Returns a truthy value if a user has write access to a project:
    staff/superuser status, an explicit WRITE_ACCESS grant, or ownership.
    """
    # Anonymous user may not have write access.
    if not user or user.is_anonymous:
        return False
    # Staff and superusers may always write.
    elevated = user.is_staff or user.is_superuser
    # Explicit write access grant (Access instance or None).
    granted = models.Access.objects.filter(user=user, project=project,
                                           access=models.Access.WRITE_ACCESS).first()
    # Ownership of the project (or of the supplied owner override).
    owner = owner or project.owner
    is_owner = user == owner
    return elevated or granted or is_owner
def writeable_recipe(user, source, project=None):
    """
    Check if a user can write to a 'source' recipe. For cloned recipes the
    check is performed against the root recipe's owner and project.
    """
    if user.is_anonymous:
        return False
    if source.is_cloned:
        # Clones defer to the root recipe for ownership and project.
        owner_of_target = source.root.owner
        project = source.root.project
    else:
        owner_of_target = source.owner
        project = project or source.project
    return is_writable(user=user, project=project, owner=owner_of_target)
def fill_data_by_name(project, json_data):
    """
    Fills json information by name.
    Used when filling in demonstration data and not user selection.
    Returns a deep copy; the input dict is not mutated.
    """
    json_data = copy.deepcopy(json_data)
    for field, item in json_data.items():
        current = item.get("value", '')
        # Data fields get placeholder metadata plus the project data dir.
        if item.get("source") == "PROJECT":
            item.update(
                value=item.get("value") or 'FILENAME',
                toc="FILE-LIST",
                file_list="FILE-LIST",
                data_dir="DATA_DIR",
                id="DATA_ID",
                name="DATA_NAME",
                uid="DATA_UID",
                project_dir=project.get_data_dir(),
                data_url="/",
            )
            continue
        # Give a placeholder so templates do not have **MISSING**.
        if current is None or not str(current):
            item['value'] = str(field).upper()
    return json_data
def create_data(project, user=None, stream=None, path='', name='', text='', type='', uid=None):
    """
    Create a Data object in `project` from a filesystem path and/or an
    uploaded stream. When `stream` is given it is written into the data
    storage directory first. Returns the new Data instance (state ERROR
    when a requested path does not exist, READY otherwise).
    """
    # We need absolute paths with no trailing slashes.
    path = os.path.abspath(path).rstrip("/") if path else ""
    # Create the data.
    dtype = type or "DATA"
    # The owner of the data will be the first admin user if not set otherwise.
    owner = user or models.User.objects.filter(is_staff=True).first()
    # Create the data object.
    data = Data.objects.create(name=name, owner=owner, state=Data.PENDING,
                               project=project, type=dtype, text=text, uid=uid)
    # Ensure the uid is unique.
    uid = new_uid(obj=data, objtype=Data, default=uid, prefix="data")
    data.uid = uid
    # Write this stream into a path then link that into the data.
    if stream:
        name = name or stream.name
        fname = '_'.join(name.split())
        # Create path for the stream
        path = create_path(data=data, fname=fname)
        # Write stream into newly created path.
        util.write_stream(stream=stream, dest=path)
        # Mark incoming file as uploaded
        data.method = Data.UPLOAD
    # Link path to this data object.
    create_data_link(path=path, data=data)
    # A path was requested but nothing exists at that location.
    # (The previous check `not (path or stream or ...)` was always False
    # for a non-empty path, so invalid paths were silently marked READY.)
    missing = path and not (os.path.isdir(path) or os.path.isfile(path))
    if missing:
        state = Data.ERROR
        logger.error(f"Invalid data path: {path}")
    else:
        state = Data.READY
    # Set updated attributes
    data.state = state
    data.name = name or os.path.basename(path) or 'Data'
    # Trigger another save to remake the toc file.
    data.save()
    # Set log for data creation.
    logger.info(f"Added data type={data.type} name={data.name} pk={data.pk}")
    return data
def get_or_create(**kwargs):
    """
    Get or create a data object associated with a file.

    Expected kwargs: file (path), project, and optionally uid, user,
    name, text, type. An existing, non-deleted data with the given uid is
    relinked and updated; otherwise a new data object is created.
    """
    fname = kwargs["file"]
    project = kwargs['project']
    uid = kwargs.get('uid')
    # Get the data if it exists.
    data = Data.objects.filter(uid=uid).first()
    if data and not data.deleted:
        create_data_link(path=fname, data=data)
        logger.info("Updated data file, name, and text.")
    else:
        # Create new data.
        data = create_data(project=project, path=fname, uid=uid, user=kwargs.get('user'))
    # Update the name, text, and type.
    data.name = kwargs.get('name') or data.name
    data.text = kwargs.get("text") or data.text
    data.type = kwargs.get("type", '').upper() or data.type or "DATA"
    # Trigger save to update the toc file, last edit date, etc.
    data.save()
    return data
| 29.354149 | 113 | 0.633659 |
3806b52a404327bd92f5250a2f976dc9be8c28a7 | 1,369 | py | Python | diabetes_csv_parser.py | odia101/Class4 | d1d2309d229eb7548210948a3c69f9d670483cff | [
"MIT"
] | null | null | null | diabetes_csv_parser.py | odia101/Class4 | d1d2309d229eb7548210948a3c69f9d670483cff | [
"MIT"
] | null | null | null | diabetes_csv_parser.py | odia101/Class4 | d1d2309d229eb7548210948a3c69f9d670483cff | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import numpy as np
import pandas as pd
import csv
myfilename = "diabetes.tab.txt"

# This is Class4 Homework
with open(myfilename, 'r') as file_handle:
    diabeteslist = []
    next(file_handle)  # Skips header row
    for line in file_handle:
        # Tab-separated values; strip stray whitespace from each field.
        values = [v.strip() for v in line.strip().split('\t')]
        # Fields containing a '.' are floats, everything else is an int.
        row = [float(v) if '.' in v else int(v) for v in values]
        diabeteslist.append(row)

print("Display first 2 rows ONLY")
print(diabeteslist[0:2])  # Just printing the first 2 rows
print("End of list!!!. Now Converting Columns to Rows")
# Transpose rows -> columns with zip instead of the quadratic double loop.
rotated_list = [list(column) for column in zip(*diabeteslist)]
print("Diplay first column ONLY")
print(rotated_list[0])  # Just showing the first column rotated to row
# This is the REACH
print("End of rotated_list")
diabetes_features = np.asarray(rotated_list)  # Converting list to numpy arrays
print(diabetes_features)
print("The mean of each feature")
mean_features = np.mean(diabetes_features, axis=1)  # Computes mean
print(mean_features)
print("The standard deviation of each feature")
std_features = np.std(diabetes_features, axis=1)  # Computes standard deviation
print(std_features)
8ca7e2b084c1990273daf6a60252e2a75b7265a6 | 15,584 | py | Python | Experimental/BipedalWalker_v4.py | ProGamerCode/FitML | 3b44160bbf6c0587b8df198d3ceef10a42e2bfca | [
"MIT"
] | 171 | 2017-11-07T09:59:20.000Z | 2022-03-29T13:59:18.000Z | Experimental/BipedalWalker_v4.py | ProGamerCode/FitML | 3b44160bbf6c0587b8df198d3ceef10a42e2bfca | [
"MIT"
] | 1 | 2017-12-24T20:08:18.000Z | 2018-01-31T22:26:49.000Z | Experimental/BipedalWalker_v4.py | ProGamerCode/FitML | 3b44160bbf6c0587b8df198d3ceef10a42e2bfca | [
"MIT"
] | 44 | 2017-11-07T12:08:05.000Z | 2022-01-04T15:53:12.000Z | '''
BipedalWalker solution by Michel Aka
https://github.com/FitMachineLearning/FitML/
https://www.youtube.com/channel/UCi7_WxajoowBl4_9P0DhzzA/featured
Using Actor Critic
Note that I prefer the terms Action Predictor Network and Q/Reward Predictor network better
Starts to hope/run at 1000 episodes
Also navigates relief
Update
Significantly improve Selective Memory equation / selection criteria
Reduced Selective Memory Size
Cleaned up variables and more readable memory
Improved hyper parameters for better performance
Added memory array loading and saving
Initial Observiations 60 -> 150
Larger memory 1M -> 2M
Larger Selective memoryA x/10 -> x/3
More weights 2048 -> 4096
training eporchs 4 -> 2
'''
import numpy as np
import keras
import gym
import os
import h5py
import matplotlib.pyplot as plt
import math
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Embedding
from keras.layers import LSTM
from keras import optimizers
num_env_variables = 24
num_env_actions = 4
num_initial_observation = 40
learning_rate = 0.005
apLearning_rate = 0.001
version_name = "Waker-SM-v6"
weigths_filename = version_name+"-weights.h5"
apWeights_filename = version_name+"-weights-ap.h5"
#range within wich the SmartCrossEntropy action parameters will deviate from
#remembered optimal policy
sce_range = 0.2
b_discount = 0.98
max_memory_len = 2000000
experience_replay_size = 10000
random_every_n = 10
starting_explore_prob = 0.15
training_epochs = 3
mini_batch = 256
load_previous_weights = False
observe_and_train = True
save_weights = True
save_memory_arrays = True
load_memory_arrays = False
num_games_to_play = 6000
#One hot encoding array
possible_actions = np.arange(0,num_env_actions)
actions_1_hot = np.zeros((num_env_actions,num_env_actions))
actions_1_hot[np.arange(num_env_actions),possible_actions] = 1
#Create testing enviroment
env = gym.make('BipedalWalker-v2')
env.reset()
#initialize training matrix with random states and actions
dataX = np.random.random(( 5,num_env_variables+num_env_actions ))
#Only one output for the total score / reward
dataY = np.random.random((5,1))
#initialize training matrix with random states and actions
apdataX = np.random.random(( 5,num_env_variables ))
apdataY = np.random.random((5,num_env_actions))
def custom_error(y_true, y_pred, Qsa):
    """Scaled error term: 0.001 * (y_true - y_pred) * Qsa."""
    residual = y_true - y_pred
    return 0.001 * residual * Qsa
#nitialize the Reward predictor model
Qmodel = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
Qmodel.add(Dense(4096, activation='relu', input_dim=dataX.shape[1]))
Qmodel.add(Dropout(0.2))
#Qmodel.add(Dense(64, activation='relu'))
#Qmodel.add(Dropout(0.2))
#Qmodel.add(Dense(8, activation='relu'))
#Qmodel.add(Dropout(0.1))
Qmodel.add(Dense(dataY.shape[1]))
opt = optimizers.adam(lr=learning_rate)
Qmodel.compile(loss='mse', optimizer=opt, metrics=['accuracy'])
#initialize the action predictor model
action_predictor_model = Sequential()
#model.add(Dense(num_env_variables+num_env_actions, activation='tanh', input_dim=dataX.shape[1]))
action_predictor_model.add(Dense(4096, activation='relu', input_dim=apdataX.shape[1]))
action_predictor_model.add(Dropout(0.2))
#action_predictor_model.add(Dense(62, activation='relu'))
#action_predictor_model.add(Dropout(0.2))
action_predictor_model.add(Dense(apdataY.shape[1]))
opt2 = optimizers.adam(lr=apLearning_rate)
action_predictor_model.compile(loss='mse', optimizer=opt2, metrics=['accuracy'])
#load previous model weights if they exist
if load_previous_weights:
dir_path = os.path.realpath(".")
fn = dir_path + "/"+weigths_filename
print("filepath ", fn)
if os.path.isfile(fn):
print("loading weights")
Qmodel.load_weights(weigths_filename)
else:
print("File ",weigths_filename," does not exis. Retraining... ")
#load previous action predictor model weights if they exist
if load_previous_weights:
dir_path = os.path.realpath(".")
fn = dir_path + "/"+ apWeights_filename
print("filepath ", fn)
if os.path.isfile(fn):
print("loading weights")
action_predictor_model.load_weights(apWeights_filename)
else:
print("File ",apWeights_filename," does not exis. Retraining... ")
memorySA = np.zeros(shape=(1,num_env_variables+num_env_actions))
memoryS = np.zeros(shape=(1,num_env_variables))
memoryA = np.zeros(shape=(1,1))
memoryR = np.zeros(shape=(1,1))
memoryRR = np.zeros(shape=(1,1))
if load_memory_arrays:
if os.path.isfile(version_name+'memorySA.npy'):
print("Memory Files exist. Loading...")
memorySA = np.load(version_name+'memorySA.npy')
memoryRR = np.load(version_name+'memoryRR.npy')
memoryS = np.load(version_name+'memoryS.npy')
memoryA = np.load(version_name+'memoryA.npy')
memoryR = np.load(version_name+'memoryR.npy')
else:
print("No memory Files. Recreating")
mstats = []
def predictTotalRewards(qstate, action):
    """Predict the discounted total reward Q(s, a) with the critic network (Qmodel)."""
    # Concatenate state and action into a single input vector.
    qs_a = np.concatenate((qstate,action), axis=0)
    predX = np.zeros(shape=(1,num_env_variables+num_env_actions))
    predX[0] = qs_a
    #print("trying to predict reward at qs_a", predX[0])
    pred = Qmodel.predict(predX[0].reshape(1,predX.shape[1]))
    remembered_total_reward = pred[0][0]
    return remembered_total_reward
def GetRememberedOptimalPolicy(qstate):
    """Predict the action vector for `qstate` with the actor network."""
    predX = np.zeros(shape=(1,num_env_variables))
    predX[0] = qstate
    #print("trying to predict reward at qs_a", predX[0])
    pred = action_predictor_model.predict(predX[0].reshape(1,predX.shape[1]))
    r_remembered_optimal_policy = pred[0]
    return r_remembered_optimal_policy
def addToMemory(reward, averegeReward, memMax):
    """
    Stochastically decide whether an experience with this reward is kept
    in selective memory. Above-average rewards are kept with sharply
    rising probability; below-average ones only rarely.
    (memMax is currently unused but kept for interface compatibility.)
    """
    gap = reward - averegeReward
    keep_prob = 0.05
    if reward > averegeReward:
        keep_prob = keep_prob + 0.95 * (gap / 50)
    else:
        keep_prob = keep_prob + 0.05 / 100 * (gap / (40 + math.fabs(gap)))
    if np.random.rand(1) <= keep_prob:
        return True
    return False
if observe_and_train:
#Play the game 500 times
for game in range(num_games_to_play):
gameSA = np.zeros(shape=(1,num_env_variables+num_env_actions))
gameS = np.zeros(shape=(1,num_env_variables))
gameA = np.zeros(shape=(1,num_env_actions))
gameR = np.zeros(shape=(1,1))
#Get the Q state
qs = env.reset()
#print("qs ", qs)
'''
if game < num_initial_observation:
print("Observing game ", game)
else:
print("Learning & playing game ", game)
'''
for step in range (5000):
if game < num_initial_observation:
#take a radmon action
a = env.action_space.sample()
else:
prob = np.random.rand(1)
explore_prob = starting_explore_prob-(starting_explore_prob/num_games_to_play)*game
#Chose between prediction and chance
if prob < explore_prob or game%random_every_n==0:
#take a random action
a = env.action_space.sample()
else:
#Get Remembered optiomal policy
remembered_optimal_policy = GetRememberedOptimalPolicy(qs)
stock = np.zeros(9)
stockAction = np.zeros(shape=(9,num_env_actions))
for i in range(9):
stockAction[i] = env.action_space.sample()
stock[i] = predictTotalRewards(qs,stockAction[i])
best_index = np.argmax(stock)
randaction = stockAction[best_index]
#Compare R for SmartCrossEntropy action with remembered_optimal_policy and select the best
#if predictTotalRewards(qs,remembered_optimal_policy) > utility_possible_actions[best_sce_i]:
if predictTotalRewards(qs,remembered_optimal_policy) > predictTotalRewards(qs,randaction):
a = remembered_optimal_policy
#print(" | selecting remembered_optimal_policy ",a)
else:
a = randaction
#print(" - selecting generated optimal policy ",a)
env.render()
qs_a = np.concatenate((qs,a), axis=0)
#get the target state and reward
s,r,done,info = env.step(a)
#record only the first x number of states
if step ==0:
gameSA[0] = qs_a
gameS[0] = qs
gameR[0] = np.array([r])
gameA[0] = np.array([r])
else:
gameSA= np.vstack((gameSA, qs_a))
gameS= np.vstack((gameS, qs))
gameR = np.vstack((gameR, np.array([r])))
gameA = np.vstack((gameA, np.array([a])))
if step > 800:
done = True
if done :
tempGameSA = np.zeros(shape=(1,num_env_variables+num_env_actions))
tempGameS = np.zeros(shape=(1,num_env_variables))
tempGameA = np.zeros(shape=(1,num_env_actions))
tempGameR = np.zeros(shape=(1,1))
tempGameRR = np.zeros(shape=(1,1))
#Calculate Q values from end to start of game
#mstats.append(step)
for i in range(0,gameR.shape[0]):
#print("Updating total_reward at game epoch ",(gameY.shape[0]-1) - i)
if i==0:
#print("reward at the last step ",gameY[(gameY.shape[0]-1)-i][0])
gameR[(gameR.shape[0]-1)-i][0] = gameR[(gameR.shape[0]-1)-i][0]
else:
#print("local error before Bellman", gameY[(gameY.shape[0]-1)-i][0],"Next error ", gameY[(gameY.shape[0]-1)-i+1][0])
gameR[(gameR.shape[0]-1)-i][0] = gameR[(gameR.shape[0]-1)-i][0]+b_discount*gameR[(gameR.shape[0]-1)-i+1][0]
#print("reward at step",i,"away from the end is",gameY[(gameY.shape[0]-1)-i][0])
if i==gameR.shape[0]-1 and game%5==0:
print("Training Game #",game,"last everage",memoryR[:-1000].mean(),"game mean",gameR.mean(),"memoryR",memoryR.shape[0], "SelectiveMem Size ",memoryRR.shape[0],"Selective Mem mean",memoryRR.mean(axis=0)[0], " steps = ", step ,"last reward", r," finished with headscore ", gameR[(gameR.shape[0]-1)-i][0])
if memoryR.shape[0] ==1:
memorySA = gameSA
memoryR = gameR
memoryA = gameA
memoryS = gameS
memoryRR = gameR
tempGameA = tempGameA[1:]
tempGameS = tempGameS[1:]
tempGameRR = tempGameRR[1:]
tempGameR = tempGameR[1:]
tempGameSA = tempGameSA[1:]
for i in range(gameR.shape[0]):
tempGameSA = np.vstack((tempGameSA,gameSA[i]))
tempGameR = np.vstack((tempGameR,gameR[i]))
#Add experience to memory
#memorySA = np.concatenate((memorySA,gameSA),axis=0)
#memoryR = np.concatenate((memoryR,gameR),axis=0)
#print("memoryR average", memoryR.mean(axis=0)[0])
for i in range(0,gameR.shape[0]):
if game > 3 and addToMemory(gameR[i][0],memoryRR.mean(axis=0)[0],memoryRR.max()):
tempGameA = np.vstack((tempGameA,gameA[i]))
tempGameS = np.vstack((tempGameS,gameS[i]))
tempGameRR = np.vstack((tempGameRR,gameR[i]))
if memoryR.shape[0] ==1:
memoryA = tempGameA
memoryS = tempGameS
memoryRR = tempGameRR
memoryR = tempGameR
memorySA = tempGameSA
else:
#Add experience to memory
memoryS = np.concatenate((memoryS,tempGameS),axis=0)
memoryRR = np.concatenate((memoryRR,tempGameRR),axis=0)
memoryA = np.concatenate((memoryA,tempGameA),axis=0)
memorySA = np.concatenate((memorySA,tempGameSA),axis=0)
memoryR = np.concatenate((memoryR,tempGameR),axis=0)
#if memory is full remove first element
if np.alen(memoryR) >= max_memory_len:
memorySA = memorySA[gameR.shape[0]:]
memoryR = memoryR[gameR.shape[0]:]
if np.alen(memoryA) >= max_memory_len/100:
memoryA = memoryA[gameR.shape[0]:]
memoryS = memoryS[gameR.shape[0]:]
memoryRR = memoryRR[gameR.shape[0]:]
#Update the states
qs=s
#Retrain every X failures after num_initial_observation
if done and game >= num_initial_observation and game >= 15:
if game%2 == 0:
if game%25 == 0:
print("Training game# ", game,"momory size", memorySA.shape[0])
tSA = (memorySA)
tR = (memoryR)
tX = (memoryS)
tY = (memoryA)
#sw = (memoryAdv)
train_Q = np.random.randint(tR.shape[0],size=experience_replay_size)
train_A = np.random.randint(tY.shape[0],size=int(experience_replay_size/3))
tX = tX[train_A,:]
tY = tY[train_A,:]
#sw = sw[train_idx,:]
tR = tR[train_Q,:]
tSA = tSA[train_Q,:]
#training Reward predictor model
Qmodel.fit(tSA,tR, batch_size=mini_batch,epochs=training_epochs,verbose=0)
#training action predictor model
action_predictor_model.fit(tX,tY, batch_size=mini_batch, epochs=training_epochs,verbose=0)
if done and game >= num_initial_observation:
if save_weights and game%20 == 0 and game >35:
#Save model
#print("Saving weights")
Qmodel.save_weights(weigths_filename)
action_predictor_model.save_weights(apWeights_filename)
if save_memory_arrays and game%20 == 0 and game >35:
np.save(version_name+'memorySA.npy',memorySA)
np.save(version_name+'memoryRR.npy',memoryRR)
np.save(version_name+'memoryS.npy',memoryS)
np.save(version_name+'memoryA.npy',memoryA)
np.save(version_name+'memoryR.npy',memoryR)
if done:
'''
#Game won conditions
if step > 197:
print("Game ", game," WON *** " )
else:
print("Game ",game," ended with positive reward ")
#Game ended - Break
'''
break
plt.plot(mstats)
plt.show()
if save_weights:
#Save model
print("Saving weights")
Qmodel.save_weights(weigths_filename)
action_predictor_model.save_weights(apWeights_filename)
| 36.32634 | 326 | 0.598948 |
b2eaeaecffd90a681013c334caaad60d04ad04ba | 200 | py | Python | October/Stone Game IV.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | October/Stone Game IV.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | October/Stone Game IV.py | parikshitgupta1/leetcode | eba6c11740dc7597204af127c0f4c2163376294f | [
"MIT"
] | null | null | null | class Solution:
def winnerSquareGame(self, n: int) -> bool:
dp = [0] * (n+1)
for i in range(1, n+1):
j = 1
while j * j <= i and not dp[i]:
dp[i] = dp[i-j*j]^1
j += 1
return dp[n]
| 20 | 44 | 0.51 |
7c30bf67640b3920cad947b610a7646956f307cb | 436 | py | Python | bucketlist/users/urls.py | junngo/bucket-list | e8bfc17b6ea6aa957711813dd54a1804cdbaa914 | [
"MIT"
] | null | null | null | bucketlist/users/urls.py | junngo/bucket-list | e8bfc17b6ea6aa957711813dd54a1804cdbaa914 | [
"MIT"
] | 2 | 2020-06-05T20:04:49.000Z | 2021-03-19T22:33:31.000Z | bucketlist/users/urls.py | junngo/bucket-list | e8bfc17b6ea6aa957711813dd54a1804cdbaa914 | [
"MIT"
] | null | null | null | from django.urls import path
from bucketlist.users.views import (
user_list_view,
user_redirect_view,
user_update_view,
user_detail_view,
)
# Namespace for reversing these routes, e.g. reverse("users:detail", ...).
app_name = "users"
urlpatterns = [
    # Order matters: the catch-all <str:username> detail route must come last.
    path("", view=user_list_view, name="list"),
    path("~redirect/", view=user_redirect_view, name="redirect"),
    path("~update/", view=user_update_view, name="update"),
    path("<str:username>/", view=user_detail_view, name="detail"),
]
abd6491abe99147c3490885c77d2a564220dfc73 | 25,621 | py | Python | src/commercetools/types/_shipping_method.py | mbarga/commercetools-python-sdk | 464b2ea2518bafe4e2694a723550e0041db1f4c9 | [
"MIT"
] | null | null | null | src/commercetools/types/_shipping_method.py | mbarga/commercetools-python-sdk | 464b2ea2518bafe4e2694a723550e0041db1f4c9 | [
"MIT"
] | 1 | 2019-07-15T07:27:06.000Z | 2019-07-15T07:27:06.000Z | src/commercetools/types/_shipping_method.py | mbarga/commercetools-python-sdk | 464b2ea2518bafe4e2694a723550e0041db1f4c9 | [
"MIT"
] | null | null | null | # DO NOT EDIT! This file is automatically generated
import datetime
import enum
import typing
from commercetools.types._abstract import _BaseType
from commercetools.types._common import (
BaseResource,
Reference,
ReferenceTypeId,
ResourceIdentifier,
)
if typing.TYPE_CHECKING:
from ._common import Money, TypedMoney
from ._tax_category import TaxCategoryReference, TaxCategoryResourceIdentifier
from ._zone import ZoneReference, ZoneResourceIdentifier
__all__ = [
"CartClassificationTier",
"CartScoreTier",
"CartValueTier",
"PriceFunction",
"ShippingMethod",
"ShippingMethodAddShippingRateAction",
"ShippingMethodAddZoneAction",
"ShippingMethodChangeIsDefaultAction",
"ShippingMethodChangeNameAction",
"ShippingMethodChangeTaxCategoryAction",
"ShippingMethodDraft",
"ShippingMethodPagedQueryResponse",
"ShippingMethodReference",
"ShippingMethodRemoveShippingRateAction",
"ShippingMethodRemoveZoneAction",
"ShippingMethodResourceIdentifier",
"ShippingMethodSetDescriptionAction",
"ShippingMethodSetKeyAction",
"ShippingMethodSetPredicateAction",
"ShippingMethodUpdate",
"ShippingMethodUpdateAction",
"ShippingRate",
"ShippingRateDraft",
"ShippingRatePriceTier",
"ShippingRateTierType",
"ZoneRate",
"ZoneRateDraft",
]
class PriceFunction(_BaseType):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.PriceFunctionSchema`."
    #: :class:`str` `(Named` ``currencyCode`` `in Commercetools)`
    currency_code: typing.Optional["str"]
    #: :class:`str`
    function: typing.Optional[str]

    def __init__(
        self,
        *,
        currency_code: typing.Optional["str"] = None,
        function: typing.Optional[str] = None
    ) -> None:
        # Store both optional attributes, then initialize the base type.
        self.currency_code = currency_code
        self.function = function
        super().__init__()

    def __repr__(self) -> str:
        # Same textual form as the generated %-style repr.
        return (
            f"PriceFunction(currency_code={self.currency_code!r}, "
            f"function={self.function!r})"
        )
class ShippingMethod(BaseResource):
    "Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodSchema`."
    #: Optional :class:`str`
    key: typing.Optional[str]
    #: :class:`str`
    name: typing.Optional[str]
    #: Optional :class:`str`
    description: typing.Optional[str]
    #: :class:`commercetools.types.TaxCategoryReference` `(Named` ``taxCategory`` `in Commercetools)`
    tax_category: typing.Optional["TaxCategoryReference"]
    #: List of :class:`commercetools.types.ZoneRate` `(Named` ``zoneRates`` `in Commercetools)`
    zone_rates: typing.Optional[typing.List["ZoneRate"]]
    #: :class:`bool` `(Named` ``isDefault`` `in Commercetools)`
    is_default: typing.Optional[bool]
    #: Optional :class:`str`
    predicate: typing.Optional[str]

    def __init__(
        self,
        *,
        id: typing.Optional[str] = None,
        version: typing.Optional[int] = None,
        created_at: typing.Optional[datetime.datetime] = None,
        last_modified_at: typing.Optional[datetime.datetime] = None,
        key: typing.Optional[str] = None,
        name: typing.Optional[str] = None,
        description: typing.Optional[str] = None,
        tax_category: typing.Optional["TaxCategoryReference"] = None,
        zone_rates: typing.Optional[typing.List["ZoneRate"]] = None,
        is_default: typing.Optional[bool] = None,
        predicate: typing.Optional[str] = None
    ) -> None:
        # Attributes specific to ShippingMethod.
        self.key = key
        self.name = name
        self.description = description
        self.tax_category = tax_category
        self.zone_rates = zone_rates
        self.is_default = is_default
        self.predicate = predicate
        # Common resource attributes are handled by BaseResource.
        super().__init__(
            id=id,
            version=version,
            created_at=created_at,
            last_modified_at=last_modified_at,
        )

    def __repr__(self) -> str:
        # Same textual form as the generated %-style repr.
        return (
            "ShippingMethod("
            f"id={self.id!r}, version={self.version!r}, "
            f"created_at={self.created_at!r}, last_modified_at={self.last_modified_at!r}, "
            f"key={self.key!r}, name={self.name!r}, description={self.description!r}, "
            f"tax_category={self.tax_category!r}, zone_rates={self.zone_rates!r}, "
            f"is_default={self.is_default!r}, predicate={self.predicate!r})"
        )
class ShippingMethodDraft(_BaseType):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodDraftSchema`."""

    #: Optional :class:`str`
    key: typing.Optional[str]
    #: :class:`str`
    name: typing.Optional[str]
    #: Optional :class:`str`
    description: typing.Optional[str]
    #: :class:`commercetools.types.TaxCategoryResourceIdentifier` `(Named` ``taxCategory`` `in Commercetools)`
    tax_category: typing.Optional["TaxCategoryResourceIdentifier"]
    #: List of :class:`commercetools.types.ZoneRateDraft` `(Named` ``zoneRates`` `in Commercetools)`
    zone_rates: typing.Optional[typing.List["ZoneRateDraft"]]
    #: :class:`bool` `(Named` ``isDefault`` `in Commercetools)`
    is_default: typing.Optional[bool]
    #: Optional :class:`str`
    predicate: typing.Optional[str]

    def __init__(
        self,
        *,
        key: typing.Optional[str] = None,
        name: typing.Optional[str] = None,
        description: typing.Optional[str] = None,
        tax_category: typing.Optional["TaxCategoryResourceIdentifier"] = None,
        zone_rates: typing.Optional[typing.List["ZoneRateDraft"]] = None,
        is_default: typing.Optional[bool] = None,
        predicate: typing.Optional[str] = None
    ) -> None:
        self.key = key
        self.name = name
        self.description = description
        self.tax_category = tax_category
        self.zone_rates = zone_rates
        self.is_default = is_default
        self.predicate = predicate
        super().__init__()

    def __repr__(self) -> str:
        return (
            f"ShippingMethodDraft(key={self.key!r}, name={self.name!r}, "
            f"description={self.description!r}, tax_category={self.tax_category!r}, "
            f"zone_rates={self.zone_rates!r}, is_default={self.is_default!r}, "
            f"predicate={self.predicate!r})"
        )
class ShippingMethodPagedQueryResponse(_BaseType):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodPagedQueryResponseSchema`."""

    #: :class:`int`
    count: typing.Optional[int]
    #: Optional :class:`int`
    total: typing.Optional[int]
    #: :class:`int`
    offset: typing.Optional[int]
    #: List of :class:`commercetools.types.ShippingMethod`
    results: typing.Optional[typing.Sequence["ShippingMethod"]]

    def __init__(
        self,
        *,
        count: typing.Optional[int] = None,
        total: typing.Optional[int] = None,
        offset: typing.Optional[int] = None,
        results: typing.Optional[typing.Sequence["ShippingMethod"]] = None
    ) -> None:
        self.count = count
        self.total = total
        self.offset = offset
        self.results = results
        super().__init__()

    def __repr__(self) -> str:
        return (
            f"ShippingMethodPagedQueryResponse(count={self.count!r}, total={self.total!r}, "
            f"offset={self.offset!r}, results={self.results!r})"
        )
class ShippingMethodReference(Reference):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodReferenceSchema`."""

    #: Optional :class:`commercetools.types.ShippingMethod`
    obj: typing.Optional["ShippingMethod"]

    def __init__(
        self,
        *,
        type_id: typing.Optional["ReferenceTypeId"] = None,
        id: typing.Optional[str] = None,
        obj: typing.Optional["ShippingMethod"] = None
    ) -> None:
        # ``type_id`` is accepted for schema symmetry but always forced to
        # SHIPPING_METHOD for this reference type.
        self.obj = obj
        super().__init__(type_id=ReferenceTypeId.SHIPPING_METHOD, id=id)

    def __repr__(self) -> str:
        return f"ShippingMethodReference(type_id={self.type_id!r}, id={self.id!r}, obj={self.obj!r})"
class ShippingMethodResourceIdentifier(ResourceIdentifier):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodResourceIdentifierSchema`."""

    def __init__(
        self,
        *,
        type_id: typing.Optional["ReferenceTypeId"] = None,
        id: typing.Optional[str] = None,
        key: typing.Optional[str] = None
    ) -> None:
        # ``type_id`` is accepted for schema symmetry but always forced to
        # SHIPPING_METHOD for this resource type.
        super().__init__(type_id=ReferenceTypeId.SHIPPING_METHOD, id=id, key=key)

    def __repr__(self) -> str:
        return f"ShippingMethodResourceIdentifier(type_id={self.type_id!r}, id={self.id!r}, key={self.key!r})"
class ShippingMethodUpdate(_BaseType):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodUpdateSchema`."""

    #: :class:`int`
    version: typing.Optional[int]
    #: :class:`list`
    actions: typing.Optional[list]

    def __init__(
        self,
        *,
        version: typing.Optional[int] = None,
        actions: typing.Optional[list] = None
    ) -> None:
        self.version = version
        self.actions = actions
        super().__init__()

    def __repr__(self) -> str:
        return f"ShippingMethodUpdate(version={self.version!r}, actions={self.actions!r})"
class ShippingMethodUpdateAction(_BaseType):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodUpdateActionSchema`."""

    #: :class:`str`
    action: typing.Optional[str]

    def __init__(self, *, action: typing.Optional[str] = None) -> None:
        self.action = action
        super().__init__()

    def __repr__(self) -> str:
        return f"ShippingMethodUpdateAction(action={self.action!r})"
class ShippingRate(_BaseType):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingRateSchema`."""

    #: :class:`commercetools.types.TypedMoney`
    price: typing.Optional["TypedMoney"]
    #: Optional :class:`commercetools.types.TypedMoney` `(Named` ``freeAbove`` `in Commercetools)`
    free_above: typing.Optional["TypedMoney"]
    #: Optional :class:`bool` `(Named` ``isMatching`` `in Commercetools)`
    is_matching: typing.Optional[bool]
    #: List of :class:`commercetools.types.ShippingRatePriceTier`
    tiers: typing.Optional[typing.List["ShippingRatePriceTier"]]

    def __init__(
        self,
        *,
        price: typing.Optional["TypedMoney"] = None,
        free_above: typing.Optional["TypedMoney"] = None,
        is_matching: typing.Optional[bool] = None,
        tiers: typing.Optional[typing.List["ShippingRatePriceTier"]] = None
    ) -> None:
        self.price = price
        self.free_above = free_above
        self.is_matching = is_matching
        self.tiers = tiers
        super().__init__()

    def __repr__(self) -> str:
        return (
            f"ShippingRate(price={self.price!r}, free_above={self.free_above!r}, "
            f"is_matching={self.is_matching!r}, tiers={self.tiers!r})"
        )
class ShippingRateDraft(_BaseType):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingRateDraftSchema`."""

    #: :class:`commercetools.types.Money`
    price: typing.Optional["Money"]
    #: Optional :class:`commercetools.types.Money` `(Named` ``freeAbove`` `in Commercetools)`
    free_above: typing.Optional["Money"]
    #: Optional list of :class:`commercetools.types.ShippingRatePriceTier`
    tiers: typing.Optional[typing.List["ShippingRatePriceTier"]]

    def __init__(
        self,
        *,
        price: typing.Optional["Money"] = None,
        free_above: typing.Optional["Money"] = None,
        tiers: typing.Optional[typing.List["ShippingRatePriceTier"]] = None
    ) -> None:
        self.price = price
        self.free_above = free_above
        self.tiers = tiers
        super().__init__()

    def __repr__(self) -> str:
        return f"ShippingRateDraft(price={self.price!r}, free_above={self.free_above!r}, tiers={self.tiers!r})"
class ShippingRatePriceTier(_BaseType):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingRatePriceTierSchema`."""

    #: :class:`commercetools.types.ShippingRateTierType`
    type: typing.Optional["ShippingRateTierType"]

    def __init__(self, *, type: typing.Optional["ShippingRateTierType"] = None) -> None:
        self.type = type
        super().__init__()

    def __repr__(self) -> str:
        return f"ShippingRatePriceTier(type={self.type!r})"
class ShippingRateTierType(enum.Enum):
    """Discriminator for shipping-rate price tier variants; each value matches
    the ``type`` field forced by the corresponding Cart*Tier subclass of
    :class:`ShippingRatePriceTier`."""
    CART_VALUE = "CartValue"
    CART_CLASSIFICATION = "CartClassification"
    CART_SCORE = "CartScore"
class ZoneRate(_BaseType):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ZoneRateSchema`."""

    #: :class:`commercetools.types.ZoneReference`
    zone: typing.Optional["ZoneReference"]
    #: List of :class:`commercetools.types.ShippingRate` `(Named` ``shippingRates`` `in Commercetools)`
    shipping_rates: typing.Optional[typing.List["ShippingRate"]]

    def __init__(
        self,
        *,
        zone: typing.Optional["ZoneReference"] = None,
        shipping_rates: typing.Optional[typing.List["ShippingRate"]] = None
    ) -> None:
        self.zone = zone
        self.shipping_rates = shipping_rates
        super().__init__()

    def __repr__(self) -> str:
        return f"ZoneRate(zone={self.zone!r}, shipping_rates={self.shipping_rates!r})"
class ZoneRateDraft(_BaseType):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ZoneRateDraftSchema`."""

    #: :class:`commercetools.types.ZoneResourceIdentifier`
    zone: typing.Optional["ZoneResourceIdentifier"]
    #: List of :class:`commercetools.types.ShippingRateDraft` `(Named` ``shippingRates`` `in Commercetools)`
    shipping_rates: typing.Optional[typing.List["ShippingRateDraft"]]

    def __init__(
        self,
        *,
        zone: typing.Optional["ZoneResourceIdentifier"] = None,
        shipping_rates: typing.Optional[typing.List["ShippingRateDraft"]] = None
    ) -> None:
        self.zone = zone
        self.shipping_rates = shipping_rates
        super().__init__()

    def __repr__(self) -> str:
        return f"ZoneRateDraft(zone={self.zone!r}, shipping_rates={self.shipping_rates!r})"
class CartClassificationTier(ShippingRatePriceTier):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.CartClassificationTierSchema`."""

    #: :class:`str`
    value: typing.Optional[str]
    #: :class:`commercetools.types.Money`
    price: typing.Optional["Money"]
    #: Optional :class:`bool` `(Named` ``isMatching`` `in Commercetools)`
    is_matching: typing.Optional[bool]

    def __init__(
        self,
        *,
        type: typing.Optional["ShippingRateTierType"] = None,
        value: typing.Optional[str] = None,
        price: typing.Optional["Money"] = None,
        is_matching: typing.Optional[bool] = None
    ) -> None:
        # ``type`` is accepted for schema symmetry but always forced to
        # CART_CLASSIFICATION for this tier variant.
        self.value = value
        self.price = price
        self.is_matching = is_matching
        super().__init__(type=ShippingRateTierType.CART_CLASSIFICATION)

    def __repr__(self) -> str:
        return (
            f"CartClassificationTier(type={self.type!r}, value={self.value!r}, "
            f"price={self.price!r}, is_matching={self.is_matching!r})"
        )
class CartScoreTier(ShippingRatePriceTier):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.CartScoreTierSchema`."""

    #: :class:`int`
    score: typing.Optional[int]
    #: Optional :class:`commercetools.types.Money`
    price: typing.Optional["Money"]
    #: Optional :class:`commercetools.types.PriceFunction` `(Named` ``priceFunction`` `in Commercetools)`
    price_function: typing.Optional["PriceFunction"]
    #: Optional :class:`bool` `(Named` ``isMatching`` `in Commercetools)`
    is_matching: typing.Optional[bool]

    def __init__(
        self,
        *,
        type: typing.Optional["ShippingRateTierType"] = None,
        score: typing.Optional[int] = None,
        price: typing.Optional["Money"] = None,
        price_function: typing.Optional["PriceFunction"] = None,
        is_matching: typing.Optional[bool] = None
    ) -> None:
        # ``type`` is accepted for schema symmetry but always forced to
        # CART_SCORE for this tier variant.
        self.score = score
        self.price = price
        self.price_function = price_function
        self.is_matching = is_matching
        super().__init__(type=ShippingRateTierType.CART_SCORE)

    def __repr__(self) -> str:
        return (
            f"CartScoreTier(type={self.type!r}, score={self.score!r}, price={self.price!r}, "
            f"price_function={self.price_function!r}, is_matching={self.is_matching!r})"
        )
class CartValueTier(ShippingRatePriceTier):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.CartValueTierSchema`."""

    #: :class:`int` `(Named` ``minimumCentAmount`` `in Commercetools)`
    minimum_cent_amount: typing.Optional[int]
    #: :class:`commercetools.types.Money`
    price: typing.Optional["Money"]
    #: Optional :class:`bool` `(Named` ``isMatching`` `in Commercetools)`
    is_matching: typing.Optional[bool]

    def __init__(
        self,
        *,
        type: typing.Optional["ShippingRateTierType"] = None,
        minimum_cent_amount: typing.Optional[int] = None,
        price: typing.Optional["Money"] = None,
        is_matching: typing.Optional[bool] = None
    ) -> None:
        # ``type`` is accepted for schema symmetry but always forced to
        # CART_VALUE for this tier variant.
        self.minimum_cent_amount = minimum_cent_amount
        self.price = price
        self.is_matching = is_matching
        super().__init__(type=ShippingRateTierType.CART_VALUE)

    def __repr__(self) -> str:
        return (
            f"CartValueTier(type={self.type!r}, minimum_cent_amount={self.minimum_cent_amount!r}, "
            f"price={self.price!r}, is_matching={self.is_matching!r})"
        )
class ShippingMethodAddShippingRateAction(ShippingMethodUpdateAction):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodAddShippingRateActionSchema`."""

    #: :class:`commercetools.types.ZoneResourceIdentifier`
    zone: typing.Optional["ZoneResourceIdentifier"]
    #: :class:`commercetools.types.ShippingRateDraft` `(Named` ``shippingRate`` `in Commercetools)`
    shipping_rate: typing.Optional["ShippingRateDraft"]

    def __init__(
        self,
        *,
        action: typing.Optional[str] = None,
        zone: typing.Optional["ZoneResourceIdentifier"] = None,
        shipping_rate: typing.Optional["ShippingRateDraft"] = None
    ) -> None:
        # ``action`` is accepted for schema symmetry but always forced to
        # "addShippingRate".
        self.zone = zone
        self.shipping_rate = shipping_rate
        super().__init__(action="addShippingRate")

    def __repr__(self) -> str:
        return (
            f"ShippingMethodAddShippingRateAction(action={self.action!r}, "
            f"zone={self.zone!r}, shipping_rate={self.shipping_rate!r})"
        )
class ShippingMethodAddZoneAction(ShippingMethodUpdateAction):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodAddZoneActionSchema`."""

    #: :class:`commercetools.types.ZoneResourceIdentifier`
    zone: typing.Optional["ZoneResourceIdentifier"]

    def __init__(
        self,
        *,
        action: typing.Optional[str] = None,
        zone: typing.Optional["ZoneResourceIdentifier"] = None
    ) -> None:
        # ``action`` is accepted for schema symmetry but always forced to "addZone".
        self.zone = zone
        super().__init__(action="addZone")

    def __repr__(self) -> str:
        return f"ShippingMethodAddZoneAction(action={self.action!r}, zone={self.zone!r})"
class ShippingMethodChangeIsDefaultAction(ShippingMethodUpdateAction):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodChangeIsDefaultActionSchema`."""

    #: :class:`bool` `(Named` ``isDefault`` `in Commercetools)`
    is_default: typing.Optional[bool]

    def __init__(
        self,
        *,
        action: typing.Optional[str] = None,
        is_default: typing.Optional[bool] = None
    ) -> None:
        # ``action`` is accepted for schema symmetry but always forced to
        # "changeIsDefault".
        self.is_default = is_default
        super().__init__(action="changeIsDefault")

    def __repr__(self) -> str:
        return f"ShippingMethodChangeIsDefaultAction(action={self.action!r}, is_default={self.is_default!r})"
class ShippingMethodChangeNameAction(ShippingMethodUpdateAction):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodChangeNameActionSchema`."""

    #: :class:`str`
    name: typing.Optional[str]

    def __init__(
        self, *, action: typing.Optional[str] = None, name: typing.Optional[str] = None
    ) -> None:
        # ``action`` is accepted for schema symmetry but always forced to "changeName".
        self.name = name
        super().__init__(action="changeName")

    def __repr__(self) -> str:
        return f"ShippingMethodChangeNameAction(action={self.action!r}, name={self.name!r})"
class ShippingMethodChangeTaxCategoryAction(ShippingMethodUpdateAction):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodChangeTaxCategoryActionSchema`."""

    #: :class:`commercetools.types.TaxCategoryResourceIdentifier` `(Named` ``taxCategory`` `in Commercetools)`
    tax_category: typing.Optional["TaxCategoryResourceIdentifier"]

    def __init__(
        self,
        *,
        action: typing.Optional[str] = None,
        tax_category: typing.Optional["TaxCategoryResourceIdentifier"] = None
    ) -> None:
        # ``action`` is accepted for schema symmetry but always forced to
        # "changeTaxCategory".
        self.tax_category = tax_category
        super().__init__(action="changeTaxCategory")

    def __repr__(self) -> str:
        return f"ShippingMethodChangeTaxCategoryAction(action={self.action!r}, tax_category={self.tax_category!r})"
class ShippingMethodRemoveShippingRateAction(ShippingMethodUpdateAction):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodRemoveShippingRateActionSchema`."""

    #: :class:`commercetools.types.ZoneResourceIdentifier`
    zone: typing.Optional["ZoneResourceIdentifier"]
    #: :class:`commercetools.types.ShippingRateDraft` `(Named` ``shippingRate`` `in Commercetools)`
    shipping_rate: typing.Optional["ShippingRateDraft"]

    def __init__(
        self,
        *,
        action: typing.Optional[str] = None,
        zone: typing.Optional["ZoneResourceIdentifier"] = None,
        shipping_rate: typing.Optional["ShippingRateDraft"] = None
    ) -> None:
        # ``action`` is accepted for schema symmetry but always forced to
        # "removeShippingRate".
        self.zone = zone
        self.shipping_rate = shipping_rate
        super().__init__(action="removeShippingRate")

    def __repr__(self) -> str:
        return (
            f"ShippingMethodRemoveShippingRateAction(action={self.action!r}, "
            f"zone={self.zone!r}, shipping_rate={self.shipping_rate!r})"
        )
class ShippingMethodRemoveZoneAction(ShippingMethodUpdateAction):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodRemoveZoneActionSchema`."""

    #: :class:`commercetools.types.ZoneResourceIdentifier`
    zone: typing.Optional["ZoneResourceIdentifier"]

    def __init__(
        self,
        *,
        action: typing.Optional[str] = None,
        zone: typing.Optional["ZoneResourceIdentifier"] = None
    ) -> None:
        # ``action`` is accepted for schema symmetry but always forced to "removeZone".
        self.zone = zone
        super().__init__(action="removeZone")

    def __repr__(self) -> str:
        return f"ShippingMethodRemoveZoneAction(action={self.action!r}, zone={self.zone!r})"
class ShippingMethodSetDescriptionAction(ShippingMethodUpdateAction):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodSetDescriptionActionSchema`."""

    #: Optional :class:`str`
    description: typing.Optional[str]

    def __init__(
        self,
        *,
        action: typing.Optional[str] = None,
        description: typing.Optional[str] = None
    ) -> None:
        # ``action`` is accepted for schema symmetry but always forced to
        # "setDescription".
        self.description = description
        super().__init__(action="setDescription")

    def __repr__(self) -> str:
        return f"ShippingMethodSetDescriptionAction(action={self.action!r}, description={self.description!r})"
class ShippingMethodSetKeyAction(ShippingMethodUpdateAction):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodSetKeyActionSchema`."""

    #: Optional :class:`str`
    key: typing.Optional[str]

    def __init__(
        self, *, action: typing.Optional[str] = None, key: typing.Optional[str] = None
    ) -> None:
        # ``action`` is accepted for schema symmetry but always forced to "setKey".
        self.key = key
        super().__init__(action="setKey")

    def __repr__(self) -> str:
        return f"ShippingMethodSetKeyAction(action={self.action!r}, key={self.key!r})"
class ShippingMethodSetPredicateAction(ShippingMethodUpdateAction):
    """Corresponding marshmallow schema is :class:`commercetools.schemas.ShippingMethodSetPredicateActionSchema`."""

    #: Optional :class:`str`
    predicate: typing.Optional[str]

    def __init__(
        self,
        *,
        action: typing.Optional[str] = None,
        predicate: typing.Optional[str] = None
    ) -> None:
        # ``action`` is accepted for schema symmetry but always forced to
        # "setPredicate".
        self.predicate = predicate
        super().__init__(action="setPredicate")

    def __repr__(self) -> str:
        return f"ShippingMethodSetPredicateAction(action={self.action!r}, predicate={self.predicate!r})"
| 35.09726 | 177 | 0.652082 |
f7b3ecac38a79cedfd8060a3dcba5e8e124a0110 | 16,532 | py | Python | python/plugin/figa_circ.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | [
"MIT"
] | 1 | 2021-10-21T17:15:26.000Z | 2021-10-21T17:15:26.000Z | python/plugin/figa_circ.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | [
"MIT"
] | null | null | null | python/plugin/figa_circ.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
This module is intend to solve the matrix equation
sum_i=1^r Ax_i X Ay_i = F
Where Ay_i are circulant matrices
Ax_i is in general are band matrices
"""
import numpy as np
from scipy.linalg import circulant, inv
from scipy.sparse import csr_matrix, diags
from scipy.sparse.linalg import gmres, splu
from scipy.sparse import kron
from scipy.io import mmwrite, mmread
from scipy.optimize import minimize
from scipy.sparse.linalg import LinearOperator
# -----------------------
# Short aliases for the trigonometric helpers used throughout this module.
pi = np.pi
cos = np.cos
sin = np.sin
# -----------------------
# ...
def CMPLX(x,y):
    """Return the complex number x + i*y (Fortran-style helper name)."""
    return x + y * 1j
# ...
# ...
def genTestMatrices(r, nx, ny, p, EXPORT=False, IMPORT=False):
    """Build ``r`` test matrix pairs: diagonal (x-direction) and banded
    circulant (y-direction) matrices, optionally round-tripped through
    MatrixMarket files under ``figa/``.

    Returns (list_Ax, list_Ay) of csr matrices.
    """
    list_Ax = []
    list_Ay = []
    shift = 0
    # x-direction: non-singular diagonal matrices (unit diagonal).
    for i in range(r):
        if IMPORT:
            Ax = mmread("figa/Ax" + str(i) + ".mtx")
        else:
            Ax = diags(np.ones(nx), shift)
        Ax = csr_matrix(Ax)
        if EXPORT:
            mmwrite("figa/Ax" + str(i) + ".mtx", Ax)
        list_Ax.append(Ax)
    # y-direction: circulant matrices with a random stencil of width 2p+1.
    for i in range(r):
        if IMPORT:
            Ay = mmread("figa/Ay" + str(i) + ".mtx")
        else:
            stencil = np.zeros(ny)
            stencil[: 2 * p + 1] = np.random.random(2 * p + 1)
            Ay = circulant(stencil)
        Ay = csr_matrix(Ay)
        if EXPORT:
            mmwrite("figa/Ay" + str(i) + ".mtx", Ay)
        list_Ay.append(Ay)
    return list_Ax, list_Ay
# ...
# ...
def computeEigenValues(list_Ay, cmplx=True):
    """Return the eigenvalues of each circulant matrix in ``list_Ay``.

    For a circulant matrix whose first row is ``a`` (length n), the k-th
    eigenvalue is sum_j a[j] * exp(+i * 2*pi*k*j / n).  With ``cmplx=False``
    only the cosine (real) part is accumulated.

    Fixes:
    - ``np.complex`` / ``np.double`` are deprecated aliases that were removed
      in NumPy 1.24; use the builtins ``complex`` / ``float``.
    - Use ``np.pi`` / ``np.cos`` / ``np.sin`` and the builtin ``complex``
      directly instead of relying on module-level aliases, so the function is
      self-contained.
    """
    # ...
    def computeEigenVal(A):
        dtype = complex if cmplx else float
        n, m = A.shape
        # Extract the first row; works for both dense and sparse matrices.
        a = np.zeros(n)
        for i in range(0, n):
            a[i] = A[0, i]
        eigenA = np.zeros(n, dtype=dtype)
        for k in range(0, n):
            ck = 2 * np.pi * k / n
            for j in range(0, n):
                if cmplx:
                    eigenA[k] += a[j] * complex(np.cos(ck * j), np.sin(ck * j))
                else:
                    eigenA[k] += a[j] * np.cos(ck * j)
        return eigenA
    # ...
    list_eigenAy = []
    for Ay in list_Ay:
        list_eigenAy.append(computeEigenVal(Ay))
    return list_eigenAy
# ...
# ...
def AssembleColumnMatrix(j, nx, ny, list_Ax, list_eigenAy):
    """Assemble the nx-by-nx system for Fourier mode ``j``:
    S_j = sum_i eigenAy_i[j] * Ax_i.

    ``j`` must be in range(0, ny); ``ny`` is kept in the signature for
    interface compatibility even though only ``j`` and ``nx`` are used.
    """
    accum = np.zeros((nx, nx))
    for Ax, eigenAy in zip(list_Ax, list_eigenAy):
        accum = accum + eigenAy[j] * Ax.todense()
    return csr_matrix(accum)
# ...
# ...
def solveSp(Sp, b):
    """Solve Sp x = b with unpreconditioned GMRES, discarding the info flag."""
    solution, _info = gmres(Sp, b)
    return solution
# ...
# ...
def rsolve(list_Ax, list_eigenAy, F):
    """Solve sum_i Ax_i X Ay_i^T = F using the *real* FFT along the
    y-direction.  Requires ny (= F.shape[1]) to be even.

    Each Fourier mode decouples into an nx-by-nx system assembled by
    :func:`AssembleColumnMatrix` and solved by :func:`solveSp`.

    Fixes:
    - Python-3 true division (``m/2``) produced float shapes/indices and
      raised TypeError; replaced with integer division ``m // 2``.
    - ``dtype=np.complex`` (alias removed in NumPy 1.24) -> ``complex``.
    """
    fft = np.fft.rfft
    ifft = np.fft.irfft
    # ...
    nx, ny = F.shape
    n = nx
    m = ny
    mmax = m // 2 - 1
    x = F.transpose()
    _F = np.zeros((m, n))
    U = np.zeros_like(_F)
    # ... forward real FFT of every column of F^T
    y = np.zeros((m // 2 + 1, n), dtype=complex)
    for j in range(0, n):
        y[:, j] = fft(x[:, j])
    # ... unpack the complex half-spectrum into real storage (ny even):
    # row 0 = DC mode, rows 2i-1/2i = real/imag of mode i, row m-1 = Nyquist.
    for j in range(0, n):
        _F[0, j] = y[0, j].real
        for i in range(1, mmax + 1):
            z = y[i, j]
            _F[2 * i - 1, j] = z.real
            _F[2 * i, j] = z.imag
        _F[m - 1, j] = y[m // 2, j].real
    # ...
    # ... treatment of the 0-mode
    f1d = _F[0, :]
    Sp = AssembleColumnMatrix(0, nx, ny, list_Ax, list_eigenAy)
    U[0, :] = solveSp(Sp, f1d)
    for j in range(1, mmax + 1):
        # One assembly serves both the real (2j-1) and imaginary (2j) parts.
        Sp = AssembleColumnMatrix(j, nx, ny, list_Ax, list_eigenAy)
        U[2 * j - 1, :] = solveSp(Sp, _F[2 * j - 1, :])
        U[2 * j, :] = solveSp(Sp, _F[2 * j, :])
    # ... treatment of the last (Nyquist) mode
    f1d = _F[m - 1, :]
    Sp = AssembleColumnMatrix(mmax + 1, nx, ny, list_Ax, list_eigenAy)
    U[m - 1, :] = solveSp(Sp, f1d)
    # ... repack the real storage into a complex half-spectrum (ny even)
    y = np.zeros_like(y)
    for j in range(0, n):
        y[0, j] = CMPLX(U[0, j], 0.0)
        for i in range(1, mmax + 1):
            y[i, j] = CMPLX(U[2 * i - 1, j], U[2 * i, j])
        y[m // 2, j] = CMPLX(U[m - 1, j], 0.0)
    # ... inverse real FFT back to physical space
    x = np.zeros_like(x)
    for j in range(0, n):
        x[:, j] = ifft(y[:, j])
    # ...
    X = x.transpose()
    # ...
    return X
# ...
# ...
def csolve(list_Ax, list_eigenAy, F, EXPORT=False, list_opSj=None):
    """Solve sum_i Ax_i X Ay_i^T = F using the complex FFT along y.

    If ``list_opSj`` is given, it must contain one pre-factorized solver
    (e.g. ``splu`` objects) per Fourier mode; otherwise the per-mode systems
    are assembled from ``list_Ax``/``list_eigenAy`` and solved with GMRES.
    With ``EXPORT=True`` each assembled mode matrix is written to figa/.

    Fixes:
    - ``nx``/``ny`` were read from module globals (only defined when the file
      runs as a script, NameError otherwise); derive them from ``F.shape``.
    - ``dtype=np.complex`` (alias removed in NumPy 1.24) -> ``complex``.
    """
    fft = np.fft.fft
    ifft = np.fft.ifft
    nx, ny = F.shape
    X = np.zeros_like(F)
    Yp = np.zeros_like(F, dtype=complex)
    Xp = np.zeros_like(F, dtype=complex)
    # ... forward FFT of every row of F (move to the common eigenbasis)
    for i in range(0, nx):
        Yp[i, :] = fft(F[i, :])
    # ... one nx-by-nx solve per Fourier mode j
    for j in range(0, ny):
        if list_opSj is None:
            Sj = AssembleColumnMatrix(j, nx, ny, list_Ax, list_eigenAy)
            if EXPORT:
                mmwrite("figa/S" + str(j) + ".mtx", Sj)
        yp = Yp[:, j]
        if list_opSj is None:
            xp = gmres(Sj, yp)[0]
        else:
            xp = list_opSj[j].solve(yp)
        Xp[:, j] = xp
    # ... inverse FFT; the imaginary part is numerically zero for real data
    for i in range(0, nx):
        X[i, :] = ifft(Xp[i, :]).real
    return X
# ...
# ...
def verification(list_Ax, list_Ay, X, F):
    """Print whether sum_i Ax_i X Ay_i^T reproduces the right-hand side F."""
    residual = np.zeros_like(X)
    for Ax, Ay in zip(list_Ax, list_Ay):
        residual += Ax * X * Ay.transpose()
    print(np.allclose(F, residual))
# ...
# ...
def constructGlobalSystem(list_Ax, list_Ay):
    """Assemble the full Kronecker system S = sum_i kron(Ay_i, Ax_i).

    Fix: the original computed ``computeEigenValues(list_Ay)`` (an O(n^2)
    double loop per matrix) and never used the result; the dead call is
    removed.
    """
    S = kron(list_Ay[0], list_Ax[0])
    for Ax, Ay in zip(list_Ax[1:], list_Ay[1:]):
        S = S + kron(Ay, Ax)
    return S
# ...
# ...
# ...
class nearestCirculant(object):
    """
    Construct, for every matrix A in ``list_A``, a circulant matrix that
    approaches A by minimizing a Frobenius-norm cost.

    ``cost`` selects the cost functional used by the optimizer path of
    :meth:`construct` (0: ||A-C||_F, 1: ||I - C^-1 A||_F,
    2: ||(A - diag(A)) - C||_F).

    Fixes:
    - ``__init__`` assigned ``self.method = method`` although ``method`` is
      not a parameter (NameError unless a module global happened to exist);
      the optimizer method belongs to :meth:`construct`, which takes it.
    - :meth:`construct` read a module-global ``verbose``; it is now an
      explicit keyword argument (default False, backward compatible).
    """
    def __init__(self, list_A, cost=0):
        self.list_A = list_A
        norm = lambda M: np.linalg.norm(M, 'fro')
        # ... cost0: plain Frobenius distance ||A - C||_F
        def cost0(M, c):
            C = circulant(c)
            nr = norm(M - C)
            return nr
        # ... cost1: preconditioner quality ||I - C^-1 A||_F
        # NOTE(review): ``invC * M`` mixes a dense array with (possibly)
        # a sparse matrix; for csr input this is a matrix product — confirm
        # that elementwise multiplication was not intended.
        def cost1(M, c):
            n, m = M.shape
            C = circulant(c)
            invC = inv(C)
            I = np.eye(n)
            nr = norm(I - invC * M)
            return nr
        # ... cost2: Frobenius distance of the off-diagonal part
        def cost2(M, c):
            diag = M.diagonal()
            shift = 0
            D = diags(diag, shift)
            Z = M - D
            C = circulant(c)
            nr = norm(Z - C)
            return nr
        # ...
        self.cost0 = cost0
        self.cost1 = cost1
        self.cost2 = cost2
        self.cost = getattr(self, 'cost%d' % cost)

    def construct(self, method='BFGS', tol=1.e-7, verbose=False):
        """Return the list of circulant approximations as csr matrices.

        ``method=None`` uses the closed-form Frobenius minimizer (average of
        each wrapped diagonal of A); any other value is forwarded to
        ``scipy.optimize.minimize`` together with ``tol`` (gtol) and
        ``verbose`` (disp).
        """
        list_C = []
        for A in self.list_A:
            # ...
            if method is None:
                n, m = A.shape
                MD = A.todense()
                c = np.zeros(n)
                for k in range(0, n):
                    # Average the k-th diagonal with periodic wrap-around.
                    c1 = 0.
                    c2 = 0.
                    for i in range(0, n - k):
                        c1 += MD[i, k + i]
                    for i in range(n - k, n):
                        c2 += MD[i, k + i - n]
                    c[k] = (c1 + c2) / n
            else:
                cost = lambda c: self.cost(A, c)
                n, m = A.shape
                x0 = np.zeros(n)
                x0[0] = 1.
                res = minimize(cost, x0,
                               method=method,
                               options={'gtol': tol, 'disp': verbose})
                c = res.x
            # ...
            C = circulant(c)
            C = csr_matrix(C)
            list_C.append(C)
        return list_C
# ...
# ...
class circulantPrecond(object):
    """Preconditioner P = sum_i kron(C_i, Ax_i), where each C_i is the
    nearest-circulant approximation of Ay_i.  Applying P^-1 is cheap because
    every C_i is diagonalized by the FFT: :func:`csolve` reduces the apply to
    one pre-factorized nx-by-nx solve per Fourier mode."""
    def __init__(self, list_Ax, list_Ay \
            , cost=0, method='BFGS' \
            , tol = 1.e-7, verbose=False):
        # NOTE(review): ``verbose`` is accepted but never forwarded to
        # nearestCirculant.construct — confirm whether it should be.
        # ... construct the nearest circulant matrices for list_Ay
        nearCirc = nearestCirculant(list_Ay, cost=cost)
        list_C = nearCirc.construct(method=method, tol=tol)
        # ...
        self.list_C = list_C
        # ... eigenvalues of each C_i define the per-mode 1D systems below
        self.list_eigenC = computeEigenValues(list_C)
        # ...
        # ... problem dimensions taken from the first matrix of each list
        n,m = list_Ax[0].shape ; nx = n
        n,m = list_Ay[0].shape ; ny = n
        self.n = [nx,ny]
        # ...
        # ... assemble the explicit preconditioner P = sum_i kron(C_i, Ax_i)
        r = len(list_Ax)
        Ax0 = list_Ax[0]
        C0 = list_C[0]
        P = kron(C0, Ax0)
        for i in range(1, r):
            Ax = list_Ax[i]
            C = list_C[i]
            P = P + kron(C, Ax)
        self.P = P
        # ...
        # ... pre-factorize (sparse LU) the nx-by-nx system of every mode
        list_opSj = []
        for j in range(0, ny):
            # ... assemble the 1D matrix
            Sj = AssembleColumnMatrix(j, nx, ny, list_Ax, self.list_eigenC)
            opSj = splu(Sj.tocsc())
            list_opSj.append(opSj)
        self.list_opSj = list_opSj
        # ...
    def aspreconditioner(self):
        """Create a preconditioner
        Returns
        -------
        precond : LinearOperator
            Preconditioner suitable for the iterative solvers in defined in
            the scipy.sparse.linalg module (e.g. cg, gmres) and any other
            solver that uses the LinearOperator interface. Refer to the
            LinearOperator documentation in scipy.sparse.linalg
        See Also
        --------
        scipy.sparse.linalg.LinearOperator
        """
        shape = self.P.shape
        dtype = self.P.dtype
        nx, ny = self.n
        # ``self.i`` counts preconditioner applications (debug aid).
        self.i = 0
        def matvec(b):
            # Reshape the flat vector into the (nx, ny) grid layout used by
            # csolve; the pre-factorized solvers make the apply direct.
            F = b.reshape((ny,nx))
            F = F.transpose()
            X = csolve(self.list_C, self.list_eigenC, F, list_opSj=self.list_opSj)
            x = X.transpose().reshape(nx*ny)
            # print ">> iteration ", self.i
            self.i += 1
            return x
        return LinearOperator(shape, matvec, dtype=dtype)
# ...
# ...
def testcase(r, nx, ny, p, EXPORT=False, IMPORT=False):
    """Build the right-hand side F plus the synthetic test matrices.

    With IMPORT=True, F is read from figa/F.txt (a 1-D file is promoted to
    shape (1, ny)); otherwise a random F is generated and saved there.  The
    EXPORT/IMPORT flags are also forwarded to :func:`genTestMatrices`.
    """
    if IMPORT:
        F = np.genfromtxt("figa/F.txt")
        try:
            nx, ny = F.shape
        except:
            # 1-D file: promote it to a single-row matrix.
            nx = 1
            ny, = F.shape
            row = F
            F = np.zeros((nx, ny))
            F[0, :] = row
    else:
        F = np.random.random((nx, ny))
        np.savetxt("figa/F.txt", F)
    list_Ax, list_Ay = genTestMatrices(r, nx, ny, p,
                                       EXPORT=EXPORT,
                                       IMPORT=IMPORT)
    return list_Ax, list_Ay, F
# ...
# ...
def testcase_poisson(scale=False):
    """Load the 1D mass/stiffness/advection matrices from figa/*.mtx and
    return (list_Ax, list_Ay, F) for a separable Poisson-type system
    Mx X Sy^T + Sx X My^T = F (F is all ones).

    With ``scale=True`` each y-matrix is right-multiplied by the inverse of
    its own diagonal (diagonal scaling).
    """
    Mx = mmread("figa/Mx.mtx") ; Mx = Mx.tocsr()
    Sx = mmread("figa/Sx.mtx") ; Sx = Sx.tocsr()
    Kx = mmread("figa/Kx.mtx") ; Kx = Kx.tocsr()
    KTx = Kx.transpose().tocsr()
    My = mmread("figa/My.mtx") ; My = My.tocsr()
    Sy = mmread("figa/Sy.mtx") ; Sy = Sy.tocsr()
    Ky = mmread("figa/Ky.mtx") ; Ky = Ky.tocsr()
    KTy = Ky.transpose().tocsr()
    # Earlier experiments with advection terms, kept for reference:
    # # ...
    # list_Ax = [Mx, Sx, Kx, KTx]
    # list_A = [Sy, My, KTy, Ky]
    # # ...
    # # ...
    # Kmx = np.sqrt(2) * (Kx+KTx)
    # Kjx = - np.sqrt(2) * (Kx-KTx)
    #
    # Kmy = np.sqrt(2) * (Ky+KTy)
    # Kjy = np.sqrt(2) * (Ky-KTy)
    #
    # list_Ax = [Mx, Sx, Kmx, Kjx]
    # list_A = [Sy, My, Kmy, Kjy]
    # # ...
    # # ...
    # list_Ax = [ Kx, KTx, Sx]
    # list_A = [KTy, Ky, My]
    # # ...
    # ... current configuration: mass/stiffness pair only
    list_Ax = [Mx, Sx]
    list_A = [Sy, My]
    # ...
    if scale:
        print("MUST IMPROVED: WE HAVE TO MULTIPLY BY ONE MATRIX FOR ALL MATRICES")
        shift = 0
        list_Ay = []
        for A in list_A:
            diag = 1./A.diagonal()
            D = diags(diag, shift).tocsr()
            Ay = A * D
            # NOTE(review): ``Ay.tocsr()`` discards its result — probably
            # meant ``Ay = Ay.tocsr()``; confirm before relying on format.
            Ay.tocsr()
            list_Ay.append(Ay)
    else:
        list_Ay = list_A
    n,m = Mx.shape ; nx = n
    n,m = My.shape ; ny = n
    # F = np.random.random((nx,ny))
    F = np.ones((nx,ny))
    return list_Ax, list_Ay, F
# ...
# ---------------------------------------------------------------
if __name__=="__main__":
    from time import time
    # -------------------------
    # Problem size; larger presets kept for benchmarking.
    # nx = 512 ; ny = 512
    # nx = 256 ; ny = 256
    # nx = 128 ; ny = 128
    # nx = 64 ; ny = 64
    nx = 32 ; ny = 32
    # nx = 16 ; ny = 16
    r = 4
    p = 3
    # EXPORT = True
    EXPORT = False
    IMPORT = False
    # IMPORT = True
    # method=None selects the closed-form nearest-circulant construction.
    method = None
    cost = 0
    # method = 'BFGS'
    tol = 1.e-7
    # verbose = True
    verbose = False
    # scale = True
    scale = False
    # CIRCULANT=True: synthetic circulant test; False: Poisson matrices.
    # CIRCULANT = True
    CIRCULANT = False
    # -------------------------
    # ... build the test problem
    if CIRCULANT:
        list_Ax, list_Ay, F = testcase(r, nx, ny, p, EXPORT=False, IMPORT=False)
    else:
        list_Ax, list_Ay, F = testcase_poisson(scale=scale)
    # Earlier experiment replacing Ax by diagonal matrices, kept for reference:
    # n,m = list_Ax[0].shape
    # r = len(list_Ax)
    # list_Ax = []
    # for i in range(0,r):
    ## diag = np.random.random(n)
    # diag = np.ones(n)
    # shift = 0
    # A = diags(diag, shift)
    # list_Ax.append(A)
    # _list_Ax = list_Ax[:3]
    # _list_Ay = list_Ay[:3]
    # Build the circulant preconditioner from the first two matrix pairs.
    _list_Ax = list_Ax[:2]
    _list_Ay = list_Ay[:2]
    PrecConstruct = circulantPrecond(_list_Ax, _list_Ay \
            , cost=cost, method=method \
            , tol=tol, verbose=verbose)
    mmwrite('figa/P.mtx', PrecConstruct.P)
    # mmwrite('figa/C_Sy.mtx', PrecConstruct.list_C[0])
    # mmwrite('figa/C_My.mtx', PrecConstruct.list_C[1])
    # mmwrite('figa/C_Kmy.mtx', PrecConstruct.list_C[2])
    # mmwrite('figa/Kmy.mtx', list_Ay[2])
    # mmwrite('figa/C_KTy.mtx', PrecConstruct.list_C[2])
    # mmwrite('figa/C_Ky.mtx' , PrecConstruct.list_C[3])
    # mmwrite('figa/C_KTy.mtx', PrecConstruct.list_C[0])
    # mmwrite('figa/C_Ky.mtx' , PrecConstruct.list_C[1])
    # mmwrite('figa/C_My.mtx' , PrecConstruct.list_C[2])
    mmwrite('figa/C_Sy.mtx', PrecConstruct.list_C[0])
    mmwrite('figa/C_My.mtx', PrecConstruct.list_C[1])
    Precond = PrecConstruct.aspreconditioner()
    # ...
    # ... problem dimensions from the matrices themselves
    n,m = list_Ax[0].shape ; nx = n
    n,m = list_Ay[0].shape ; ny = n
    # ...
    # ... assemble and export the full Kronecker system
    S = constructGlobalSystem(list_Ax, list_Ay)
    mmwrite('figa/S.mtx', S)
    # ...
    # ...
    print("=============================")
    print(" nx, ny ", nx, ny)
    print(" size ", S.shape)
    print(" nnz ", S.nnz)
    print("=============================")
    # ...
    # import sys ; sys.exit(0)
    # ... reference solve: plain GMRES on the global system
    print("=============================")
    print(">>> using the global system")
    y = F.transpose().reshape(nx*ny)
    tb = time()
    Xg,it = gmres(S, y)
    Xg = Xg.reshape((ny,nx))
    Xg = Xg.transpose()
    te = time()
    print("Elapsed time ", te-tb)
    # ...
    # ... fast solve: direct circulant solver or preconditioned GMRES
    if CIRCULANT:
        print("=============================")
        print(">>> using circulant fast solver")
        list_eigenAy = computeEigenValues(list_Ay)
        tb = time()
        X = csolve(list_Ax, list_eigenAy, F)
        te = time()
        print("Elapsed time ", te-tb)
        print("Internal verification ")
        verification(list_Ax, list_Ay, X, F)
    else:
        print("=============================")
        print(">>> using circulant preconditioner solver")
        tb = time()
        y = F.transpose().reshape(nx*ny)
        x,it = gmres(S, y, M=Precond)
        X = x.reshape((ny,nx))
        X = X.transpose()
        te = time()
        print("Elapsed time ", te-tb)
    # ...
    # ... compare the two solutions
    print("=============================")
    print("Is everything OK?")
    print(np.allclose(Xg,X, rtol=1e-07) \
            , " with error ", np.linalg.norm(Xg-X)/np.linalg.norm(X))
| 24.71151 | 82 | 0.461166 |
304c96b8ed50204a559f4564b73d74920c348703 | 80 | py | Python | tests/test_app.py | moroclash/python-github-actions | 1ce20b0c96df1bfb869cdbc95f39ab6e076befa8 | [
"MIT"
] | null | null | null | tests/test_app.py | moroclash/python-github-actions | 1ce20b0c96df1bfb869cdbc95f39ab6e076befa8 | [
"MIT"
] | null | null | null | tests/test_app.py | moroclash/python-github-actions | 1ce20b0c96df1bfb869cdbc95f39ab6e076befa8 | [
"MIT"
] | 1 | 2021-02-04T00:25:21.000Z | 2021-02-04T00:25:21.000Z | from app import index
def test_index():
    """index() must return the canonical hello-world greeting."""
    expected = "Hello, World!"
    assert index() == expected
| 13.333333 | 37 | 0.65 |
9ce3e018a549d9d8b3ccb160651a535c93916cab | 4,065 | py | Python | app/account/admin.py | rogeriopaulos/gep | e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2 | [
"MIT"
] | null | null | null | app/account/admin.py | rogeriopaulos/gep | e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2 | [
"MIT"
] | 2 | 2021-09-02T04:22:45.000Z | 2021-09-02T04:52:26.000Z | app/account/admin.py | rogeriopaulos/gep | e56fd0450bdb8f572e2e35cc59a74ab0f0b372e2 | [
"MIT"
] | 1 | 2021-09-15T02:16:38.000Z | 2021-09-15T02:16:38.000Z | # -*- coding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Permission, User
from .forms import UserAdminForm, UserAdmminCreationForm
from .models import Cargo, Orgao, Profile
class UserAdmin(BaseUserAdmin):
    """Admin configuration for Django's built-in ``User`` model.

    Extends the stock ``BaseUserAdmin`` with extra changelist columns that
    pull cargo/orgao information from the related ``Profile``.
    """

    # Creation form shown on the "add user" page (collects names/email too).
    add_form = UserAdmminCreationForm
    add_fieldsets = (
        (None, {
            'fields': ('username', 'first_name', 'last_name', 'email', 'password1', 'password2')
        }),
    )
    # Change form used when editing an existing user.
    form = UserAdminForm
    fieldsets = (
        (None, {
            'fields': ('username', 'email', 'password')
        }),
        ('Informações Básicas', {
            'fields': ('first_name', 'last_name', 'last_login')
        }),
        ('Permissões', {
            'fields': (
                'is_active', 'is_staff', 'is_superuser', 'groups',
                'user_permissions'
            )
        }),
    )
    date_hierarchy = 'date_joined'
    search_fields = ('username', 'first_name', 'last_name', 'email')
    list_filter = ('is_active', 'groups', 'is_staff')
    list_display = ('__str__', 'full_name', 'email', 'profile__cargo', 'get_orgao', 'date_joined', 'is_active')
    ordering = ('-date_joined',)

    def full_name(self, obj):
        """Changelist column: user's full name, upper-cased."""
        return obj.get_full_name().upper()
    full_name.short_description = 'Nome'

    def profile__cargo(self, obj):
        """Changelist column: cargo taken from the related Profile."""
        return obj.profile.cargo_link
    profile__cargo.short_description = 'Cargo'

    def get_orgao(self, instance):
        """Changelist column: orgao acronym from the Profile (None if unset)."""
        return instance.profile.orgao_link.sigla if instance.profile.orgao_link else None
    get_orgao.short_description = 'Órgão'
# Replace Django's default User admin with the profile-aware UserAdmin above.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
class ProfileAdmin(admin.ModelAdmin):
    """Admin configuration for ``Profile`` rows.

    Most changelist columns are proxied from the related ``User`` so they
    can be sorted via ``admin_order_field``.
    """

    date_hierarchy = 'user__date_joined'
    search_fields = ('user__first_name', 'user__last_name', 'funcao')
    list_display = ('get_username', 'full_name', 'matricula', 'get_orgao', 'cargo_link', 'get_cadastro', 'get_active')
    list_filter = ('user__is_active', 'cargo_link__cargo', 'subscritor')
    # Permission that marks a "view-only" staff user (see get_readonly_fields).
    permission = 'account.view_profile'
    ordering = ('-user__date_joined',)

    def get_readonly_fields(self, request, obj=None):
        """Make every field read-only for non-superusers that only hold the
        view permission; everyone else gets the default behavior."""
        if not request.user.is_superuser and request.user.has_perm(self.permission):
            return [f.name for f in self.model._meta.fields]
        return super(ProfileAdmin, self).get_readonly_fields(request, obj=obj)

    def get_username(self, obj):
        """Changelist column: login of the related user."""
        return obj.user.username
    get_username.admin_order_field = 'user__username'
    get_username.short_description = 'Usuário'

    def full_name(self, obj):
        """Changelist column: related user's full name, upper-cased."""
        return obj.user.get_full_name().upper()
    full_name.admin_order_field = 'user__first_name'
    full_name.short_description = 'Nome'

    def get_cadastro(self, obj):
        """Changelist column: account creation timestamp."""
        return obj.user.date_joined
    get_cadastro.admin_order_field = 'user__date_joined'
    get_cadastro.short_description = 'Cadastro'

    def get_orgao(self, obj):
        """Changelist column: orgao acronym (None when no orgao is linked)."""
        return obj.orgao_link.sigla if obj.orgao_link else None
    get_orgao.short_description = 'Órgão'

    def get_active(self, obj):
        """Changelist column: pt-BR yes/no rendering of ``user.is_active``."""
        if obj.user.is_active:
            return 'Sim'
        else:
            return 'Não'
    get_active.short_description = 'Ativo'
# Expose Profile in the admin with the configuration above.
admin.site.register(Profile, ProfileAdmin)
class CargoAdmin(admin.ModelAdmin):
    """Admin configuration for ``Cargo``: each row shown with its orgao."""

    list_display = ('str_name', 'orgao', 'cargo')
    list_filter = ('orgao',)

    def str_name(self, obj):
        """Changelist column rendered as ``cargo (orgao)``."""
        return '{} ({})'.format(obj.cargo, obj.orgao)
    str_name.admin_order_field = 'cargo'
    str_name.short_description = 'Cargos'
# Expose Cargo in the admin with the configuration above.
admin.site.register(Cargo, CargoAdmin)
class OrgaoAdmin(admin.ModelAdmin):
    """Admin configuration for ``Orgao``.

    The change form's permission picker is narrowed to this app's
    ``access_*`` permissions on the ``orgao`` content type.
    """

    list_display = ('orgao', 'sigla')

    def render_change_form(self, request, context, *args, **kwargs):
        """Restrict the selectable permissions before rendering the form."""
        allowed = Permission.objects.filter(
            content_type__app_label='account',
            content_type__model='orgao',
            codename__istartswith='access_')
        context['adminform'].form.fields['permissions'].queryset = allowed
        return super().render_change_form(request, context, *args, **kwargs)
# Expose Orgao in the admin with the configuration above.
admin.site.register(Orgao, OrgaoAdmin)
| 30.111111 | 118 | 0.668635 |
65a8ac4a0e50d74ad8af4a972cc47d2e03a314a8 | 9,705 | py | Python | pypureclient/flasharray/FA_2_8/api/directory_quotas_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_8/api/directory_quotas_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_8/api/directory_quotas_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class DirectoryQuotasApi(object):
    """Swagger-generated client for the FlashArray 2.8 ``/directory-quotas``
    endpoint.

    NOTE(review): this module is code-generated (see the file header);
    prefer regenerating over hand-editing the request plumbing below.
    """

    def __init__(self, api_client):
        # api_client: shared transport object; handles auth headers,
        # (de)serialization and the actual HTTP request in call_api().
        self.api_client = api_client

    def api28_directory_quotas_get_with_http_info(
        self,
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        continuation_token=None,  # type: str
        directory_ids=None,  # type: List[str]
        directory_names=None,  # type: List[str]
        filter=None,  # type: str
        limit=None,  # type: int
        offset=None,  # type: int
        policy_ids=None,  # type: List[str]
        policy_names=None,  # type: List[str]
        sort=None,  # type: List[str]
        total_item_count=None,  # type: bool
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.DirectoryQuotasGetResponse
        """List directories with attached quota policies
        Displays a list of directories and the quota policies attached to them. Directories with multiple policies are listed repeatedly (once per policy). The directories without a policy attached are not listed.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api28_directory_quotas_get_with_http_info(async_req=True)
        >>> result = thread.get()
        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
        :param list[str] directory_ids: Performs the operation on the unique managed directory IDs specified. Enter multiple managed directory IDs in comma-separated format. The `directory_ids` and `directory_names` parameters cannot be provided together.
        :param list[str] directory_names: Performs the operation on the managed directory names specified. Enter multiple full managed directory names in comma-separated format. For example, `fs:dir01,fs:dir02`.
        :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
        :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
        :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
        :param list[str] policy_ids: Performs the operation on the unique policy IDs specified. Enter multiple policy IDs in comma-separated format. The `policy_ids` and `policy_names` parameters cannot be provided together.
        :param list[str] policy_names: Performs the operation on the policy names specified. Enter multiple policy names in comma-separated format. For example, `name01,name02`.
        :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
        :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                                     It can also be a tuple of (connection time, read time) timeouts.
        :return: DirectoryQuotasGetResponse
        If the method is called asynchronously,
        returns the request thread.
        """
        # Coerce scalar values to single-element lists for the csv-style
        # collection parameters handled below.
        if directory_ids is not None:
            if not isinstance(directory_ids, list):
                directory_ids = [directory_ids]
        if directory_names is not None:
            if not isinstance(directory_names, list):
                directory_names = [directory_names]
        if policy_ids is not None:
            if not isinstance(policy_ids, list):
                policy_ids = [policy_ids]
        if policy_names is not None:
            if not isinstance(policy_names, list):
                policy_names = [policy_names]
        if sort is not None:
            if not isinstance(sort, list):
                sort = [sort]
        # Snapshot every non-None local (i.e. the supplied arguments) by name.
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}
        # Convert the filter into a string
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]
        # Server-side constraints: limit >= 1, offset >= 0.
        if 'limit' in params and params['limit'] < 1:
            raise ValueError("Invalid value for parameter `limit` when calling `api28_directory_quotas_get`, must be a value greater than or equal to `1`")
        if 'offset' in params and params['offset'] < 0:
            raise ValueError("Invalid value for parameter `offset` when calling `api28_directory_quotas_get`, must be a value greater than or equal to `0`")
        collection_formats = {}
        path_params = {}
        # Build the query string; list-valued parameters are serialized csv.
        query_params = []
        if 'continuation_token' in params:
            query_params.append(('continuation_token', params['continuation_token']))
        if 'directory_ids' in params:
            query_params.append(('directory_ids', params['directory_ids']))
            collection_formats['directory_ids'] = 'csv'
        if 'directory_names' in params:
            query_params.append(('directory_names', params['directory_names']))
            collection_formats['directory_names'] = 'csv'
        if 'filter' in params:
            query_params.append(('filter', params['filter']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'offset' in params:
            query_params.append(('offset', params['offset']))
        if 'policy_ids' in params:
            query_params.append(('policy_ids', params['policy_ids']))
            collection_formats['policy_ids'] = 'csv'
        if 'policy_names' in params:
            query_params.append(('policy_names', params['policy_names']))
            collection_formats['policy_names'] = 'csv'
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
            collection_formats['sort'] = 'csv'
        if 'total_item_count' in params:
            query_params.append(('total_item_count', params['total_item_count']))
        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])
        # Authentication setting
        auth_settings = []
        # Delegate the actual HTTP round-trip to the shared ApiClient.
        return self.api_client.call_api(
            '/api/2.8/directory-quotas', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='DirectoryQuotasGetResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
| 56.424419 | 671 | 0.676146 |
42ae7aedf1965014385cd5a863721cee9a6ba23f | 3,243 | py | Python | tests/unit/test_sumgram.py | oduwsdl/sumgram | c584e7eba1fab7deac1489aefa3f48f1b0d3ab64 | [
"MIT"
] | 46 | 2019-09-09T16:05:24.000Z | 2022-03-17T08:45:28.000Z | tests/unit/test_sumgram.py | oduwsdl/sumgram | c584e7eba1fab7deac1489aefa3f48f1b0d3ab64 | [
"MIT"
] | 16 | 2019-08-28T23:48:35.000Z | 2022-02-27T22:48:31.000Z | tests/unit/test_sumgram.py | oduwsdl/sumgram | c584e7eba1fab7deac1489aefa3f48f1b0d3ab64 | [
"MIT"
] | 12 | 2019-08-28T21:54:12.000Z | 2022-02-08T20:44:14.000Z | import json
import unittest
from sumgram.sumgram import get_top_sumgrams
class TestSumgram(unittest.TestCase):
ngram = 2
def test_basic_script(self):
doc_lst = [
{'id': 0, 'text': 'The eye of Category 4 Hurricane Harvey is now over Aransas Bay. A station at Aransas Pass run by the Texas Coastal Observing Network recently reported a sustained wind of 102 mph with a gust to 132 mph. A station at Aransas Wildlife Refuge run by the Texas Coastal Observing Network recently reported a sustained wind of 75 mph with a gust to 99 mph. A station at Rockport reported a pressure of 945 mb on the western side of the eye.'},
{'id': 1, 'text': 'Eye of Category 4 Hurricane Harvey is almost onshore. A station at Aransas Pass run by the Texas Coastal Observing Network recently reported a sustained wind of 102 mph with a gust to 120 mph.'},
{'id': 2, 'text': 'Hurricane Harvey has become a Category 4 storm with maximum sustained winds of 130 mph. Sustained hurricane-force winds are spreading onto the middle Texas coast.'}
]
params = {'top_sumgram_count': 10}
sumgrams = get_top_sumgrams(doc_lst, TestSumgram.ngram, params=params)
self.assertTrue( sumgrams['top_sumgrams'][0]['ngram'] != '', "Error statement: sumgrams['top_sumgrams'][0]['ngram']" )
self.assertGreater( sumgrams['ranked_docs'][0]['score'], 0, "sumgrams['ranked_docs'][0]['score']" )
self.assertGreater( sumgrams['ranked_sentences'][0]['avg_overlap'], 0, "sumgrams['ranked_sentences'][0]['avg_overlap']" )
def test_multiple_opts(self):
doc_lst = [
{'id': 0, 'text': 'The eye of Category 4 Hurricane Harvey is now over Aransas Bay. A station at Aransas Pass run by the Texas Coastal Observing Network recently reported a sustained wind of 102 mph with a gust to 132 mph. A station at Aransas Wildlife Refuge run by the Texas Coastal Observing Network recently reported a sustained wind of 75 mph with a gust to 99 mph. A station at Rockport reported a pressure of 945 mb on the western side of the eye.'},
{'id': 1, 'text': 'Eye of Category 4 Hurricane Harvey is almost onshore. A station at Aransas Pass run by the Texas Coastal Observing Network recently reported a sustained wind of 102 mph with a gust to 120 mph.'},
{'id': 2, 'text': 'Hurricane Harvey has become a Category 4 storm with maximum sustained winds of 130 mph. Sustained hurricane-force winds are spreading onto the middle Texas coast.'}
]
params = {
'top_sumgram_count': 10,
'add_stopwords': 'image',
'no_rank_docs': True,
'no_rank_sentences': True,
'title': 'Top sumgrams for Hurricane Harvey text collection'
}
sumgrams = get_top_sumgrams(doc_lst, TestSumgram.ngram, params=params)
self.assertTrue( sumgrams['top_sumgrams'][0]['ngram'] != '', "Error statement: sumgrams['top_sumgrams'][0]['ngram']" )
self.assertTrue( 'ranked_docs' not in sumgrams , "'ranked_docs' not in sumgrams" )
self.assertTrue( 'ranked_sentences' not in sumgrams , "'ranked_sentences' not in sumgrams" )
# Allow running this test module directly (e.g. `python test_sumgram.py`).
if __name__ == '__main__':
    unittest.main()
f05af08aff988cc7a33bed210526f1bafcdbf6a2 | 15,710 | py | Python | scripts/summarize_results.py | PMMAraujo/snappy | e9eedbb9d181e18de1d25aee60747ac4002e455b | [
"MIT"
] | null | null | null | scripts/summarize_results.py | PMMAraujo/snappy | e9eedbb9d181e18de1d25aee60747ac4002e455b | [
"MIT"
] | null | null | null | scripts/summarize_results.py | PMMAraujo/snappy | e9eedbb9d181e18de1d25aee60747ac4002e455b | [
"MIT"
] | 3 | 2019-06-19T12:32:13.000Z | 2022-02-22T12:19:19.000Z | import pandas as pd
import numpy as np
from collections import Counter
#from Bio import Phylo
from ete3 import Tree
def process_blast_recomb(name):
    """Extract results from sliding window BLAST.

    Parses a sliding-window BLAST result file
    ('blast/recblast_{id_of_the_fasta_sequence}.txt'), keeps only strong
    hits (e-value below 1e-106), projects each window's unambiguous
    best-subtype call onto 50-nucleotide bins, and reports the
    majority-rule subtype for each bin. Subtypes supported by a bin count
    greater than 4 make it into the final call; multiple subtypes are
    joined with '/'.

    Fixes over the previous revision: the two bare ``except:`` clauses are
    narrowed to ``except Exception`` (a bare except also swallows
    KeyboardInterrupt/SystemExit), the removed-in-NumPy-2.0 ``np.NaN``
    alias is replaced by ``np.nan``, and the loop-invariant bin count is
    hoisted out of the per-window loop.

    Args:
        name (str): Path to the sliding-window BLAST result file.

    Returns:
        list: ``[sequence_id, [result]]`` where result is a '/'-joined
        subtype string, '' when no bin reaches the support threshold, or
        NaN when the BLAST run failed / produced no parsable output.
    """
    name_out = name[:-4].replace('blast/recblast_', '')
    with open(name, 'r') as check_file:
        file_txt = check_file.read()
    if file_txt == 'failed recombination BLAST\n':
        return [name_out, [np.nan]]
    try:
        df = pd.read_csv(name, header=None)
        # Keep only hits with a very strong e-value (< 1e-106).
        df = df[df[2] < 0.1e-105].copy()
    except Exception:  # unparsable or empty result file
        return [name_out, [np.nan]]
    dif_splits = df[0].unique()
    if len(dif_splits) == 0:
        # Every hit was filtered out: no window information at all.
        return [name_out, ['']]
    # Loop-invariant: total number of 50-nt bins spanned by the last window
    # (window start + 400 nt of window length).
    bins_number = (int(dif_splits[-1].split('_')[-1]) + 400) / 50
    as_array = []
    for split in dif_splits:
        # Window name encodes its start position after the last underscore.
        position = int(split.split('_')[-1])
        # Bins before this window's start are unknown ('-').
        result = list(int(position / 50) * '-')
        split_df = df[df[0] == split]
        split_df = split_df.sort_values(by=[3], ascending=False)
        try:
            best_score = split_df[3].values[0]
            refs_best_score = split_df[split_df[3] == best_score][1].values
            # Subtype is the reference-name prefix before the first '-'.
            subs_best = {x.split('-')[0] for x in refs_best_score}
            # Call is unambiguous only when every top-scoring hit agrees.
            subtype = subs_best.pop() if len(subs_best) == 1 else '-'
            # A 400-nt window covers 8 consecutive 50-nt bins.
            result += [subtype] * 8
        except Exception:  # window with no passing hits: leave bins unknown
            pass
        # Pad the tail with unknowns so every row has the same length.
        result += list('-' * int(bins_number - len(result)))
        as_array.append(np.array(result))
    # Per bin (column), take the most common non-unknown call.
    most_common_in_array = [Counter([i for i in x if i != '-']).most_common(1)
                            for x in np.array(as_array).T]
    pass_res = [x[0] for x in most_common_in_array if len(x) == 1]
    # Require support from more than 4 bins before reporting a subtype.
    final = sorted(list(set([x[0] for x in pass_res if x[1] > 4])))
    return [name_out, ["/".join(final)]]
def process_trees(tree):
    """Extract a subtype call from one phylogenetic tree.

    Loads the ``.treefile`` (nwk), roots it on the 'CONSENSUS_CPZ'
    outgroup, and walks up from the target leaf looking for the first
    ancestor whose other leaves all belong to a single subtype/CRF
    (reference leaf names are '<subtype>-...'). If such a monophyletic
    clade exists, that subtype and the node's support are returned;
    otherwise (or when the file only says the sequence lacked genomic
    information) NaN with support 0 is returned.

    Args:
        tree (str): Path to the phylogenetic tree in nwk format.

    Returns:
        list: [sequence_id, subtype_or_NaN, node_support].
    """
    stem = (tree[:-9]
            .replace('trees/all_', '')
            .replace('trees/pure_', '')
            .replace('trees/recomb_', ''))
    with open(tree, 'r') as handle:
        contents = handle.read()
    if contents == 'not enough genomic information\n':
        return [stem, np.NaN, 0]
    phylo = Tree(tree)
    phylo.set_outgroup('CONSENSUS_CPZ')
    phylo.ladderize()
    target = phylo.search_nodes(name=stem)[0]
    for ancestor in target.get_ancestors():
        subtypes = {leaf.split('-')[0]
                    for leaf in ancestor.get_leaf_names()
                    if leaf != stem}
        if len(subtypes) == 1:
            # First (smallest) clade where every reference shares a subtype.
            return [stem, subtypes.pop(), ancestor.support]
    return [stem, np.NaN, 0]
def get_clossest_blast(blast_res):
    """Extract the closest reference subtype from a BLAST result file.

    Reads the csv-style BLAST output, ranks hits by score (descending)
    and, when every top-scoring hit shares one subtype prefix (the part
    of the reference name before the first '-'), reports that subtype;
    otherwise NaN. Files that only say the sequence lacked genomic
    information also yield NaN.

    Args:
        blast_res (str): Path to the BLAST result file (csv).

    Returns:
        list: [sequence_id, subtype_or_NaN].
    """
    label = blast_res.replace('blast/blast_', '').replace('.txt', '')
    with open(blast_res, 'r') as handle:
        raw = handle.read()
    if raw == 'not enough genomic information\n':
        return [label, np.NaN]
    # NOTE(review): read_csv's default header=0 consumes the first line as a
    # header row, unlike process_blast_recomb (header=None); presumably the
    # blast_*.txt files start with a header line -- confirm against the
    # pipeline step that writes them.
    hits = pd.read_csv(blast_res)
    hits.columns = [0, 1, 2, 3]
    ranked = hits.sort_values(by=[3], ascending=False).copy()
    top_score = ranked[3].values[0]
    top_refs = ranked[ranked[3] == top_score][1].values
    candidates = {ref.split('-')[0] for ref in top_refs}
    if len(candidates) == 1:
        return [label, candidates.pop()]
    return [label, np.NaN]
def make_decision(idx, df):
    """Create the final SNAPPy result for one sequence from all analyses.

    A series of if/elif statements; each can be seen as a 'rule' whose
    requirements must be met for the final SNAPPy result to be produced.
    Rule order matters: the first matching rule wins.

    ``to_process`` layout (column order set when this module runs as a
    script): [0] recomb_result, [1] node_all_refs, [2] s_node_all_refs,
    [3] node_pure_refs, [4] s_node_pure_refs, [5] node_recomb_refs,
    [6] s_node_recomb_refs, [7] closser_ref, [8] has_recomb.

    Bug fix: rule_f1 previously used ``~(str(x) == 'nan')``. Bitwise NOT
    on a Python bool yields -1/-2, which are both truthy, so that clause
    always passed and rules f2-f4 were unreachable; it now uses real
    inequality comparisons, matching the rule's documented intent.

    Args:
        idx (str): Internal SNAPPy id.
        df (dataframe): Table with the outputs from all analyses performed.

    Returns:
        List with two items: rule used and final SNAPPy output.
    """
    to_process = list(df.loc[idx])
    # all methods agree
    ## rule_p1: no recomb, tree all equal tree pure, recomb equal tree all, tree all equal closser
    if ((to_process[8] == 0) & (str(to_process[1]) != 'nan') &
        (to_process[1] == to_process[3]) & (to_process[0] == to_process[1]) &
        (to_process[1] == to_process[7])):
        return ['rule_p1', to_process[1]]
    ## rule_c1: all trees and recomb trees and closser ref agree plus recomb is simple
    elif ((str(to_process[1]) != 'nan') & (to_process[1] == to_process[5]) &
          (to_process[2] >= 90.0 ) & (to_process[6] >= 90.0 ) &
          (to_process[8] == 1) & (to_process[1] == to_process[7]) &
          (str(to_process[1]) != 'nan')):
        return ['rule_c1', to_process[1]]
    # both trees plus 1 method agree
    ## rule_p2: tree pure agrees with tree all and recomb
    elif ((str(to_process[3]) != 'nan') & (to_process[3] == to_process[1]) &
          (to_process[4] >=90.0 ) & (to_process[2] >=90.0 ) &
          (to_process[3] == to_process[0])):
        return ['rule_p2', to_process[3]]
    ## rule_p3: tree pure agrees with tree all and closser
    elif ((str(to_process[3]) != 'nan') & (to_process[3] == to_process[1]) &
          (to_process[4] >=90.0 ) & (to_process[2] >=90.0 ) &
          (to_process[3] == to_process[7])):
        return ['rule_p3', to_process[3]]
    ## rule_c2: tree recomb agrees with tree all and closser and there is recomb
    elif ((str(to_process[5]) != 'nan') & (to_process[5] == to_process[1]) &
          (to_process[6] >=90.0 ) & (to_process[2] >=90.0 ) &
          (to_process[5] == to_process[7])):
        return ['rule_c2', to_process[5]]
    # one tree plus recomb and closser
    ## rule_p4: tree pure agrees with recomb and closser
    elif ((str(to_process[3]) != 'nan') & (to_process[4] >=95.0) &
          (to_process[3] == to_process[0]) & (to_process[3] == to_process[7])):
        return ['rule_p4', to_process[3]]
    ## rule_c3: tree recomb agrees with closser and recomb is simple
    elif ((str(to_process[5]) != 'nan') & (to_process[6] >=95.0) &
          (to_process[8] == 1) & (to_process[5] == to_process[7])):
        return ['rule_c3', to_process[5]]
    ## rule_b1: tree all agrees with recomb and closser
    elif ((str(to_process[1]) != 'nan') & (to_process[2] >=95.0) &
          (to_process[1] == to_process[0]) & (to_process[1] == to_process[7])):
        return ['rule_b1', to_process[1]]
    # recomb gives complex
    ## rules_c4: tree all agrees tree recomb, and their result is a crf
    elif ((to_process[8] == 2)):
        if ((to_process[1] == to_process[5]) & (to_process[2] >= 90.0 ) &
            (to_process[6] >= 90.0 ) & ('_' in str(to_process[1])) &
            (str(to_process[1]) != 'nan')):
            return ['rule_c4', to_process[1]]
        ## rules_p5: tree all agrees tree pure, and closser, great support for 1 tree
        elif ((to_process[1] == to_process[3]) & (to_process[1] == to_process[7]) &
            ((to_process[2] >= 95.0) | (to_process[4] >=95.0)) &
            (str(to_process[1]) != 'nan')):
            return ['rule_p5', to_process[1]]
        ## rules_c5: tree all agrees tree recomb, and closser, and trees give crf
        elif ((to_process[1] == to_process[5]) & ('_' in str(to_process[1])) &
            (to_process[1] == to_process[7]) & (str(to_process[1]) != 'nan')):
            return ['rule_c5', to_process[1]]
        ## rules_p6: tree all agrees tree pure, and closser
        elif ((to_process[1] == to_process[3]) &
            (to_process[1] == to_process[7]) & (str(to_process[1]) != 'nan')):
            return ['rule_p6', to_process[1]]
        ## rules_u1: remaining cases are a complex URF
        else:
            return ['rule_u1', 'URF_CPX']
    # recomb gives simple
    ## rules_c6: tree all agrees tree recomb, and their result is a crf
    elif ((to_process[8] == 1)):
        if ((to_process[1] == to_process[5]) & (to_process[2] >= 90.0 ) &
            (to_process[6] >= 90.0 ) & ('_' in str(to_process[1])) &
            (str(to_process[1]) != 'nan')):
            return ['rule_c6', to_process[1]]
        ## rules_p7: tree all agrees tree pure, and closser, great support for 1 tree
        elif ((to_process[1] == to_process[3]) & (to_process[1] == to_process[7]) &
            ((to_process[2] >= 95.0) | (to_process[4] >=95.0)) &
            (str(to_process[1]) != 'nan')):
            return ['rule_p7', to_process[1]]
        ## rules_c7: tree all agrees tree recomb, and closser, and trees give crf
        elif ((to_process[1] == to_process[5]) & ('_' in str(to_process[1])) &
            (to_process[1] == to_process[7]) &
            (str(to_process[1]) != 'nan')):
            return ['rule_c7', to_process[1]]
        ## rules_p8: tree all agrees tree pure, and closser
        elif ((to_process[1] == to_process[3]) &
            (to_process[1] == to_process[7]) & (str(to_process[1]) != 'nan')):
            return ['rule_p8', to_process[1]]
        ## rules_u2: remaining cases are an URF
        else:
            return ['rule_u2', f'URF_{"".join([str(x)[:2] for x in sorted(str(to_process[0]).split("/"))])}']
    # no evidence of recomb
    ## rule_p9: pure and all trees agree
    elif ((to_process[1] == to_process[3]) &
          (to_process[4] >=90.0 ) & (to_process[2] >=90.0 ) &
          (str(to_process[1]) != 'nan')):
        return ['rule_p9', to_process[1]]
    # final, deal with problems of missing data
    ## rule_f1: if recomb res missing and closser res not missing give closser result
    elif (((str(to_process[0]) == '') | (str(to_process[0]) == 'nan')) &
          (str(to_process[7]) != 'nan') & (str(to_process[7]) != '')):
        return ['rule_f1', to_process[7]]
    ## rule_f2: if recomb res and closser outputs missing and trees agree give trees result
    elif (((str(to_process[0]) == '') | (str(to_process[0]) == 'nan')) &
          ((str(to_process[7]) == 'nan') | (str(to_process[7]) == ''))):
        if ((to_process[1] == to_process[3]) & (str(to_process[1]) != 'nan')):
            return ['rule_f2', to_process[1]]
        ## rule_f3: if recomb res and closser outputs missing and trees agree give trees result
        elif ((to_process[1] == to_process[5]) & (str(to_process[1]) != 'nan')):
            return ['rule_f3', to_process[1]]
        ## rule_f4: else return impossible to determine
        else:
            return ['rule_f4', 'impossible_to_determine']
    ## rule_f5: give what is output by the recomb test, there is no recomb
    else:
        return ['rule_f5', to_process[0]]
def has_recomb(str_recomb):
    """Classify a recombination-result string by how many subtypes it lists.

    Args:
        str_recomb: Slash-separated subtype string (e.g. 'B' or 'A1/B');
            non-string values (e.g. NaN) are stringified first.

    Returns:
        int: 0 for a single subtype (no recombination evidence), 1 for a
        simple recombinant (two subtypes), 2 for a complex recombinant
        (three or more subtypes).

    Raises:
        ValueError: defensively, if the count is somehow non-positive.
    """
    n_recombs = len(str(str_recomb).split('/'))
    if n_recombs == 1:
        return 0
    if n_recombs == 2:
        return 1
    if n_recombs > 2:
        return 2
    # str.split always yields at least one element, so this is unreachable;
    # previously this branch printed 'ERRRRROOR' and implicitly returned
    # None -- fail loudly instead.
    raise ValueError(f'unexpected recombination result: {str_recomb!r}')
if __name__ == '__main__':
    # NOTE(review): `snakemake` is injected by the Snakemake runtime when this
    # file is used as a workflow `script:`; it is not importable here.
    input_list = list(snakemake.input)
    keys = list(snakemake.params.k)
    ids = list(snakemake.params.i)
    # Partition the workflow inputs by file-name convention.
    all_trees_inputs = [x for x in input_list if (x[-9:] == '.treefile') & (x[:10] == 'trees/all_')]
    pure_trees_inputs = [x for x in input_list if (x[-9:] == '.treefile') & (x[:11] == 'trees/pure_')]
    recomb_trees_inputs = [x for x in input_list if (x[-9:] == '.treefile') & (x[:13] == 'trees/recomb_')]
    blast_c = [x for x in input_list if (x[-4:] == '.txt') & (x[:12] == 'blast/blast_')]
    blast_inputs = [x for x in input_list if (x[-4:] == '.txt') & (x[:15] == 'blast/recblast_')]
    # results: sequence key -> [id, recomb_result, tree/blast outputs ...]
    results = {}
    for pos, key in enumerate(keys):
        results[key] = [ids[pos]]
    # Sliding-window BLAST recombination calls (one value per sequence).
    for blast_res in blast_inputs:
        output = process_blast_recomb(blast_res)
        results[output[0]] += output[1]
    # Phylogenetic calls: each contributes [subtype, support].
    for tree in all_trees_inputs:
        output = process_trees(tree)
        results[output[0]] += output[1:]
    for tree in pure_trees_inputs:
        output = process_trees(tree)
        results[output[0]] += output[1:]
    for tree in recomb_trees_inputs:
        output = process_trees(tree)
        results[output[0]] += output[1:]
    # Closest-reference BLAST call (one value per sequence).
    for blast in blast_c:
        output = get_clossest_blast(blast)
        results[output[0]] += [output[1]]
    df_report = pd.DataFrame.from_dict(results, orient='index')
    df_report.columns = ['id', 'recomb_result', 'node_all_refs', 's_node_all_refs',
                         'node_pure_refs', 's_node_pure_refs', 'node_recomb_refs',
                         's_node_recomb_refs', 'closser_ref']
    # make_decision() reads rows positionally from this indexed view.
    to_make_decision = df_report.set_index(['id']).copy()
    to_make_decision['has_recomb'] = to_make_decision['recomb_result'].apply(lambda x: has_recomb(x))
    result_dict = {}
    for idx in list(to_make_decision.index):
        result_dict[idx] = make_decision(idx, to_make_decision)
    my_res = pd.DataFrame.from_dict(result_dict, orient='index')
    my_res.columns = ['rule', 'my_result']
    # Normalize the final calls: upper-case plus a few fixed renames.
    my_res['my_result'] = my_res['my_result'].str.upper()
    my_res['my_result'] = my_res['my_result'].str.replace('32_06A6', '32_06A1')
    my_res['my_result'] = my_res['my_result'].str.replace('URF_A1A2', 'A')
    my_res['my_result'] = my_res['my_result'].str.replace('URF_F1F2', 'F')
    my_res.reset_index(inplace=True)
    df_report.reset_index(inplace=True)
    # Rows of my_res and df_report are aligned by construction (same order).
    df_report['rule'] = my_res['rule']
    df_report['result'] = my_res['my_result']
    # After reset_index the former index (sequence key) becomes 'id' and the
    # original 'id' column is renamed to 'name'.
    df_report.columns = ['id', 'name', 'recomb_result', 'node_all_refs',
                         's_node_all_refs', 'node_pure_refs', 's_node_pure_refs',
                         'node_recomb_refs', 's_node_recomb_refs', 'closser_ref',
                         'rule', 'result']
    # Full per-analysis report...
    df_report[['id', 'name', 'result', 'recomb_result', 'node_all_refs',
               's_node_all_refs', 'node_pure_refs', 's_node_pure_refs',
               'node_recomb_refs', 's_node_recomb_refs', 'closser_ref',
               'rule']].to_csv('report_subtype_results.csv', index=None)
    my_res['id'] = df_report['id']
    my_res.columns = ['name', 'rule', 'result', 'id']
    # ...and the compact final-call summary.
    my_res[['id', 'name', 'result']].to_csv('subtype_results.csv', index=None)
| 40.385604 | 112 | 0.585169 |
570f3238510f64a7011fae167140ab2c0b26b64e | 22 | py | Python | message/__init__.py | khallaghi/sheltie | eacbc6304a46bfc6fce336193a0727e41434a9ee | [
"MIT"
] | null | null | null | message/__init__.py | khallaghi/sheltie | eacbc6304a46bfc6fce336193a0727e41434a9ee | [
"MIT"
] | null | null | null | message/__init__.py | khallaghi/sheltie | eacbc6304a46bfc6fce336193a0727e41434a9ee | [
"MIT"
] | null | null | null | from message import * | 22 | 22 | 0.772727 |
b6ff0b8a4bbed4a9cfd88e93a54551247d525553 | 8,485 | py | Python | Chemical Equation Balancer/equation.py | Potato-Developing/Reast | f055752c8cd9535d229f084a57031ff885e862b6 | [
"MIT"
] | 37 | 2017-10-29T23:50:08.000Z | 2022-02-27T06:08:41.000Z | Chemical Equation Balancer/equation.py | Potato-Developing/Reast | f055752c8cd9535d229f084a57031ff885e862b6 | [
"MIT"
] | null | null | null | Chemical Equation Balancer/equation.py | Potato-Developing/Reast | f055752c8cd9535d229f084a57031ff885e862b6 | [
"MIT"
] | 81 | 2017-11-08T19:24:11.000Z | 2022-02-20T01:29:50.000Z | from random import randint
from math import gcd
from functools import reduce
class Equation:
    """A chemical equation parsed from a string such as
    ``'(H)2 + (O)1 = (H)2(O)1'``.

    Each compound is a sequence of ``(Element)count`` groups, compounds on
    one side are joined with ``' + '`` and the two sides with ``' = '``.

    === Attributes ===
    @type left: list[dict]
    @type right: list[dict]
    """

    def __init__(self, equation):
        """Initializes an Equation object.

        @type self: Equation
        @type equation: str
        @rtype: None
        @raise ValueError: if the two sides do not contain the same set of
            elements (no integer coefficients could ever balance them).
        """
        left_str, right_str = equation.split(' = ')
        # Per-compound element counts, in the order the compounds appear.
        self.left = [self._parse_compound(c) for c in left_str.split(' + ')]
        self.right = [self._parse_compound(c) for c in right_str.split(' + ')]
        total_left = self._scaled_totals(self.left, [1] * len(self.left))
        total_right = self._scaled_totals(self.right, [1] * len(self.right))
        # Bug fix: the original compared only the keys present on the left,
        # so an element appearing solely on the right was silently ignored
        # (equation wrongly reported balanced) and one solely on the left
        # crashed with KeyError.  Mismatched element sets can never be
        # balanced, so reject them up front.
        if set(total_left) != set(total_right):
            raise ValueError(
                'both sides must contain the same elements: '
                '{} vs {}'.format(sorted(total_left), sorted(total_right)))
        self.balanced = total_left == total_right

    @staticmethod
    def _parse_compound(component):
        """Parse one ``(Element)count(Element)count...`` compound into a
        dict mapping element symbol to its total count in the compound.

        Generalizes the original scanner: symbols may be any length (was
        limited to 2 characters) and counts any number of digits (was
        limited to 3) -- backward compatible for all previously valid input.
        """
        counts = {}
        ind = 0
        while ind < len(component):
            if component[ind] != '(':
                ind += 1
                continue
            close = component.index(')', ind)
            element = component[ind + 1:close]
            # Consume the run of digits following the closing parenthesis.
            num_end = close + 1
            while num_end < len(component) and component[num_end].isdigit():
                num_end += 1
            number = int(component[close + 1:num_end])
            counts[element] = counts.get(element, 0) + number
            ind = num_end
        return counts

    @staticmethod
    def _scaled_totals(side, coefficients):
        """Total element counts of *side* with each compound's counts
        multiplied by its coefficient.
        """
        totals = {}
        for coeff, counts in zip(coefficients, side):
            for element, number in counts.items():
                totals[element] = totals.get(element, 0) + coeff * number
        return totals

    @staticmethod
    def _serialize_full(counts):
        """Compound string with every subscript printed, including 1s
        (the format used for equations that were already balanced).
        """
        return ''.join(el + str(n) for el, n in counts.items())

    @staticmethod
    def _serialize_reduced(counts, coefficient):
        """Compound string with coefficients and subscripts equal to 1
        omitted (the format used for newly balanced equations).
        """
        prefix = '' if coefficient == 1 else str(coefficient)
        body = ''.join(el + ('' if n == 1 else str(n))
                       for el, n in counts.items())
        return prefix + body

    def balance(self):
        """Balance the equation and return it as a string.

        An already-balanced equation is re-serialized with every subscript
        printed; otherwise coefficients in 1..10 are searched by random
        trial, reduced by the GCD of all coefficients, and printed with
        coefficients/subscripts of 1 omitted (both formats match the
        original implementation).  Bug fix: the original sliced only 2 of
        the 3 characters of the trailing ' + ' separator, leaving a
        trailing space on every result.

        @type self: Equation
        @rtype: str
        """
        if self.balanced:
            left = ' + '.join(self._serialize_full(c) for c in self.left)
            right = ' + '.join(self._serialize_full(c) for c in self.right)
            return left + ' = ' + right
        # Random search: keep drawing coefficient vectors until every
        # element's total matches on both sides.  As in the original,
        # termination is only guaranteed when a solution with coefficients
        # in 1..10 exists.
        while True:
            lc = [randint(1, 10) for _ in self.left]
            rc = [randint(1, 10) for _ in self.right]
            if (self._scaled_totals(self.left, lc)
                    == self._scaled_totals(self.right, rc)):
                break
        # Reduce all coefficients by their collective GCD so the result is
        # in lowest terms.
        divisor = reduce(gcd, tuple(lc + rc))
        lc = [c // divisor for c in lc]
        rc = [c // divisor for c in rc]
        self.balanced = True  # matches original: success is recorded permanently
        left = ' + '.join(self._serialize_reduced(c, k)
                          for c, k in zip(self.left, lc))
        right = ' + '.join(self._serialize_reduced(c, k)
                           for c, k in zip(self.right, rc))
        return left + ' = ' + right
| 40.023585 | 104 | 0.416618 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.